Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 152373 Details for
Bug 214477
Multiple "exclusive" services are started on the same node
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
diff-rhel5-rgmanager-2.0.23-1
diff-rhel5-rgmanager-2.0.23-1 (text/plain), 5.42 KB, created by
Andrey Mirkin
on 2007-04-12 08:41:03 UTC
(
hide
)
Description:
diff-rhel5-rgmanager-2.0.23-1
Filename:
MIME Type:
Creator:
Andrey Mirkin
Created:
2007-04-12 08:41:03 UTC
Size:
5.42 KB
patch
obsolete
>--- ./src/daemons/rg_state.c.rg 2007-04-05 12:37:36.000000000 +0400 >+++ ./src/daemons/rg_state.c 2007-04-12 12:21:01.000000000 +0400 >@@ -48,6 +48,8 @@ int get_rg_state(char *servicename, rg_s > void get_recovery_policy(char *rg_name, char *buf, size_t buflen); > int check_depend_safe(char *servicename); > int group_migratory(char *servicename); >+int have_exclusive_resources(); >+int check_exclusive_resources(cluster_member_list_t *membership, char *svcName); > > > int >@@ -691,6 +693,10 @@ svc_advise_start(rg_state_t *svcStatus, > ret = 1; > break; > } >+ if (req == RG_START_RECOVER) { >+ ret = 1; >+ break; >+ } > > clulog(LOG_DEBUG, "Not starting disabled RG %s\n", > svcName); >@@ -1449,6 +1455,7 @@ exhausted: > } > > >+pthread_mutex_t exclusive_mutex = PTHREAD_MUTEX_INITIALIZER; > /** > * handle_start_req - Handle a generic start request from a user or during > * service manager boot. >@@ -1464,6 +1471,7 @@ handle_start_req(char *svcName, int req, > { > int ret, tolerance = FOD_BEST; > cluster_member_list_t *membership = member_list(); >+ int need_check = have_exclusive_resources(); > > /* > * When a service request is from a user application (eg, clusvcadm), >@@ -1479,6 +1487,18 @@ handle_start_req(char *svcName, int req, > free_member_list(membership); > return RG_EFAIL; > } >+ if (need_check) { >+ pthread_mutex_lock(&exclusive_mutex); >+ ret = check_exclusive_resources(membership, svcName); >+ if (ret != 0) { >+ free_member_list(membership); >+ pthread_mutex_unlock(&exclusive_mutex); >+ if (ret > 0) >+ goto relocate; >+ else >+ return RG_EFAIL; >+ } >+ } > free_member_list(membership); > > /* Check for dependency. We cannot start unless our >@@ -1491,6 +1511,8 @@ handle_start_req(char *svcName, int req, > * mask here - so that we can try all nodes if necessary. 
> */ > ret = svc_start(svcName, req); >+ if (need_check) >+ pthread_mutex_unlock(&exclusive_mutex); > > /* > If services are locked, return the error >@@ -1530,6 +1552,7 @@ handle_start_req(char *svcName, int req, > return RG_EABORT; > } > >+relocate: > /* > * OK, it failed to start - but succeeded to stop. Now, > * we should relocate the service. >@@ -1567,6 +1590,7 @@ handle_start_remote_req(char *svcName, i > int x; > uint32_t me = my_id(); > cluster_member_list_t *membership = member_list(); >+ int need_check = have_exclusive_resources(); > > /* XXX ok, so we need to say "should I start this if I was the > only cluster member online */ >@@ -1587,10 +1611,23 @@ handle_start_remote_req(char *svcName, i > free_member_list(membership); > return RG_EFAIL; > } >+ if (need_check) { >+ pthread_mutex_lock(&exclusive_mutex); >+ if (check_exclusive_resources(membership, svcName) != 0) { >+ free_member_list(membership); >+ pthread_mutex_unlock(&exclusive_mutex); >+ return RG_EFAIL; >+ } >+ } > free_member_list(membership); > >- if (svc_start(svcName, req) == 0) >+ if (svc_start(svcName, req) == 0) { >+ if (need_check) >+ pthread_mutex_unlock(&exclusive_mutex); > return 0; >+ } >+ if (need_check) >+ pthread_mutex_unlock(&exclusive_mutex); > > if (svc_stop(svcName, RG_STOP_RECOVER) == 0) > return RG_EFAIL; >--- ./src/daemons/groups.c.rg 2007-01-17 19:31:23.000000000 +0300 >+++ ./src/daemons/groups.c 2007-04-12 12:23:58.000000000 +0400 >@@ -20,6 +20,7 @@ > //#define DEBUG > #include <platform.h> > #include <resgroup.h> >+#include <reslist.h> > #include <vf.h> > #include <message.h> > #include <ccs.h> >@@ -138,6 +139,105 @@ count_resource_groups(cluster_member_lis > } > > >+int >+count_resource_groups_local(cman_node_t *mp) >+{ >+ resource_t *res; >+ resource_node_t *node; >+ char rgname[64], *val; >+ rg_state_t st; >+ >+ mp->cn_svccount = 0; >+ mp->cn_svcexcl = 0; >+ >+ pthread_rwlock_rdlock(&resource_lock); >+ >+ list_do(&_tree, node) { >+ >+ res = node->rn_resource; >+ >+ 
res_build_name(rgname, sizeof(rgname), res); >+ >+ if (get_rg_state_local(rgname, &st) < 0) { >+ continue; >+ } >+ >+ if (st.rs_state != RG_STATE_STARTED && >+ st.rs_state != RG_STATE_STARTING) >+ continue; >+ >+ if (mp->cn_nodeid != st.rs_owner) >+ continue; >+ >+ ++mp->cn_svccount; >+ >+ val = res_attr_value(res, "exclusive"); >+ if (val && ((!strcmp(val, "yes") || >+ (atoi(val)>0))) ) { >+ ++mp->cn_svcexcl; >+ } >+ >+ } while (!list_done(&_tree, node)); >+ >+ pthread_rwlock_unlock(&resource_lock); >+ return 0; >+} >+ >+ >+int >+have_exclusive_resources(void) >+{ >+ resource_t *res; >+ char *val; >+ >+ pthread_rwlock_rdlock(&resource_lock); >+ >+ list_do(&_resources, res) { >+ val = res_attr_value(res, "exclusive"); >+ if (val && ((!strcmp(val, "yes") || >+ (atoi(val)>0))) ) { >+ pthread_rwlock_unlock(&resource_lock); >+ return 1; >+ } >+ >+ } while (!list_done(&_resources, res)); >+ >+ pthread_rwlock_unlock(&resource_lock); >+ >+ return 0; >+} >+ >+ >+int >+check_exclusive_resources(cluster_member_list_t *membership, char *svcName) >+{ >+ cman_node_t *mp; >+ int exclusive, count; >+ resource_t *res; >+ char *val; >+ >+ mp = memb_id_to_p(membership, my_id()); >+ assert(mp); >+ count_resource_groups_local(mp); >+ exclusive = mp->cn_svcexcl; >+ count = mp->cn_svccount; >+ pthread_rwlock_rdlock(&resource_lock); >+ res = find_root_by_ref(&_resources, svcName); >+ if (!res) { >+ pthread_rwlock_unlock(&resource_lock); >+ return RG_EFAIL; >+ } >+ val = res_attr_value(res, "exclusive"); >+ pthread_rwlock_unlock(&resource_lock); >+ if (exclusive || (count && val && >+ (!strcmp(val, "yes") || (atoi(val)>0)))) { >+ return RG_YES; >+ } >+ >+ return 0; >+} >+ >+ > /** > Find the best target node for a service *besides* the current service > owner. Takes into account:
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 214477
:
151735
|
152372
| 152373