Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 928070 Details for
Bug 1131271
Lock replies use wrong source IP if client access server via 2 different virtual IPs [patch attached]
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
Patch to maintain a list of rpc_clnt structures for each caller using multiple IPs to access server
glusterfs-3.5.2-sourceip.patch (text/plain), 7.17 KB, created by
Philip Spencer
on 2014-08-18 20:36:17 UTC
(
hide
)
Description:
Patch to maintain a list of rpc_clnt structures for each caller using multiple IPs to access server
Filename:
MIME Type:
Creator:
Philip Spencer
Created:
2014-08-18 20:36:17 UTC
Size:
7.17 KB
patch
obsolete
>--- glusterfs-3.5.2/xlators/nfs/server/src/nlm4.h.orig 2014-07-31 07:05:35.000000000 -0400 >+++ glusterfs-3.5.2/xlators/nfs/server/src/nlm4.h 2014-08-18 15:55:55.535697320 -0400 >@@ -54,11 +54,24 @@ > struct list_head nlm_clients; > struct list_head fdes; > struct list_head shares; >- struct rpc_clnt *rpc_clnt; >+ /* A client may attempt to access a server by different virtual IPs >+ the server is using. We need to track these connections separately, >+ so that replies to lock requests can come from the source IP that >+ the client is expecting, rather than from the source IP reused from >+ an rpc_clnt entry that was what the client used when talking to the >+ server at a different virtual IP address. So, we maintain a list of >+ rpc_clnt structures instead of a single one. */ >+ struct list_head rpc_clnt_list; > char *caller_name; > int nsm_monitor; > } nlm_client_t; > >+/* This is the structure for an entry in the list of rpc_clnt structures */ >+typedef struct nlm_rpc_clnt { >+ struct list_head rpc_clnts; >+ struct rpc_clnt *rpc_clnt; >+} nlm_rpc_clnt_t; >+ > typedef struct nlm_share { > struct list_head client_list; > struct list_head inode_list; >--- glusterfs-3.5.2/xlators/nfs/server/src/nlm4.c.orig 2014-08-18 13:31:38.385477391 -0400 >+++ glusterfs-3.5.2/xlators/nfs/server/src/nlm4.c 2014-08-18 16:15:59.455035868 -0400 >@@ -301,8 +301,57 @@ > return monitor; > } > >+rpc_clnt_t *nlm_rpc_clnt_from_nlm_client (nlm_client_t *nlmclnt, >+ union gf_sock_union *my_addr) >+{ >+ /* Internal helper routine, called while nlm_client_list_lk is locked, >+ to examine the list of rpc_clnt structures in an nlm_client structure, >+ and return the one appropriate for the server address in my_addr. 
*/ >+ union gf_sock_union su; >+ nlm_rpc_clnt_t *nlm_rpc_clnt = NULL; >+ char my_name[INET6_ADDRSTRLEN+1] = { 0, }; >+ char test_name[INET6_ADDRSTRLEN+1] = { 0, }; >+ switch (my_addr->sa.sa_family) { >+ case AF_INET6: >+ inet_ntop(AF_INET6, &my_addr->sin6.sin6_addr, >+ my_name, INET6_ADDRSTRLEN+1); >+ break; >+ case AF_INET: >+ inet_ntop(AF_INET, &my_addr->sin.sin_addr, >+ my_name, INET6_ADDRSTRLEN+1); >+ break; >+ } >+ >+ >+ list_for_each_entry(nlm_rpc_clnt, &nlmclnt->rpc_clnt_list, rpc_clnts) { >+ rpc_transport_get_myaddr(nlm_rpc_clnt->rpc_clnt->conn.trans, >+ NULL, 0, &su.storage, >+ sizeof(su.storage)); >+ if (su.sa.sa_family != my_addr->sa.sa_family) continue; >+ switch (su.sa.sa_family) { >+ case AF_INET6: >+ inet_ntop(AF_INET6, &su.sin6.sin6_addr, >+ test_name, INET6_ADDRSTRLEN+1); >+ >+ break; >+ case AF_INET: >+ inet_ntop(AF_INET, &su.sin.sin_addr, >+ test_name, INET6_ADDRSTRLEN+1); >+ break; >+ default: >+ test_name[0] = 0; >+ } >+ gf_log(GF_NLM, GF_LOG_DEBUG, "Looking for source name %s in %s", >+ my_name, test_name); >+ >+ if (0 == strncmp(my_name, test_name, INET6_ADDRSTRLEN)) >+ return nlm_rpc_clnt->rpc_clnt; >+ } >+ return NULL; >+} >+ > rpc_clnt_t * >-nlm_get_rpc_clnt (char *caller_name) >+nlm_get_rpc_clnt (char *caller_name, union gf_sock_union *my_addr) > { > nlm_client_t *nlmclnt = NULL; > int nlmclnt_found = 0; >@@ -317,17 +366,22 @@ > } > if (!nlmclnt_found) > goto ret; >- if (nlmclnt->rpc_clnt) >- rpc_clnt = rpc_clnt_ref (nlmclnt->rpc_clnt); >+ >+ rpc_clnt = nlm_rpc_clnt_from_nlm_client(nlmclnt, my_addr); >+ if (rpc_clnt) >+ rpc_clnt = rpc_clnt_ref (rpc_clnt); > ret: > UNLOCK (&nlm_client_list_lk); > return rpc_clnt; > } > > int >-nlm_set_rpc_clnt (rpc_clnt_t *rpc_clnt, char *caller_name) >+nlm_set_rpc_clnt (rpc_clnt_t *rpc_clnt, char *caller_name, >+ union gf_sock_union *my_addr) > { > nlm_client_t *nlmclnt = NULL; >+ nlm_rpc_clnt_t *nlm_rpc_clnt = NULL; >+ > int nlmclnt_found = 0; > int ret = -1; > >@@ -348,14 +402,22 @@ > 
INIT_LIST_HEAD(&nlmclnt->fdes); > INIT_LIST_HEAD(&nlmclnt->nlm_clients); > INIT_LIST_HEAD(&nlmclnt->shares); >+ INIT_LIST_HEAD(&nlmclnt->rpc_clnt_list); > > list_add (&nlmclnt->nlm_clients, &nlm_client_list); > nlmclnt->caller_name = gf_strdup (caller_name); > } > >- if (nlmclnt->rpc_clnt == NULL) { >- nlmclnt->rpc_clnt = rpc_clnt_ref (rpc_clnt); >- } >+ if (! nlm_rpc_clnt_from_nlm_client (nlmclnt, my_addr)) { >+ nlm_rpc_clnt = GF_CALLOC(1, sizeof(*nlm_rpc_clnt), >+ gf_nfs_mt_nlm4_nlmclnt); >+ nlm_rpc_clnt->rpc_clnt = rpc_clnt_ref(rpc_clnt); >+ if (nlm_rpc_clnt == NULL) >+ goto ret; >+ INIT_LIST_HEAD(&nlm_rpc_clnt->rpc_clnts); >+ list_add (&nlm_rpc_clnt->rpc_clnts, &nlmclnt->rpc_clnt_list); >+ } >+ > ret = 0; > ret: > UNLOCK (&nlm_client_list_lk); >@@ -366,14 +428,19 @@ > nlm_unset_rpc_clnt (rpc_clnt_t *rpc) > { > nlm_client_t *nlmclnt = NULL; >+ nlm_rpc_clnt_t *nlm_rpc_clnt = NULL; > rpc_clnt_t *rpc_clnt = NULL; > > LOCK (&nlm_client_list_lk); > list_for_each_entry (nlmclnt, &nlm_client_list, nlm_clients) { >- if (rpc == nlmclnt->rpc_clnt) { >- rpc_clnt = nlmclnt->rpc_clnt; >- nlmclnt->rpc_clnt = NULL; >- break; >+ list_for_each_entry(nlm_rpc_clnt, &nlmclnt->rpc_clnt_list, >+ rpc_clnts) { >+ if (rpc == nlm_rpc_clnt->rpc_clnt) { >+ rpc_clnt = nlm_rpc_clnt->rpc_clnt; >+ list_del(&nlm_rpc_clnt->rpc_clnts); >+ GF_FREE(nlm_rpc_clnt); >+ break; >+ } > } > } > UNLOCK (&nlm_client_list_lk); >@@ -414,6 +481,7 @@ > INIT_LIST_HEAD(&nlmclnt->fdes); > INIT_LIST_HEAD(&nlmclnt->nlm_clients); > INIT_LIST_HEAD(&nlmclnt->shares); >+ INIT_LIST_HEAD(&nlmclnt->rpc_clnt_list); > > list_add (&nlmclnt->nlm_clients, &nlm_client_list); > nlmclnt->caller_name = gf_strdup (caller_name); >@@ -896,13 +964,17 @@ > int ret = 0; > char *caller_name = NULL; > nfs3_call_state_t *cs = NULL; >+ union gf_sock_union sock_union; > > cs = mydata; > caller_name = cs->args.nlm4_lockargs.alock.caller_name; > > switch (fn) { > case RPC_CLNT_CONNECT: >- ret = nlm_set_rpc_clnt (rpc_clnt, caller_name); >+ 
rpc_transport_get_myaddr(rpc_clnt->conn.trans, >+ NULL, 0, &sock_union.storage, >+ sizeof(sock_union.storage)); >+ ret = nlm_set_rpc_clnt (rpc_clnt, caller_name, &sock_union); > if (ret == -1) { > gf_log (GF_NLM, GF_LOG_ERROR, "Failed to set rpc clnt"); > goto err; >@@ -1055,7 +1127,12 @@ > char peerip[INET6_ADDRSTRLEN+1]; > union gf_sock_union sock_union; > >- rpc_clnt = nlm_get_rpc_clnt (cs->args.nlm4_lockargs.alock.caller_name); >+ rpc_transport_get_myaddr (cs->trans, NULL, 0, &sock_union.storage, >+ sizeof (sock_union.storage)); >+ >+ >+ rpc_clnt = nlm_get_rpc_clnt (cs->args.nlm4_lockargs.alock.caller_name, >+ &sock_union); > if (rpc_clnt == NULL) { > nlm4_establish_callback ((void*)cs); > return;
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 1131271
: 928070