Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 152372 Details for
Bug 214477
Multiple "exclusive" services are started on the same node
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
diff-rhel4-rgmanager-1.9.54-1
diff-rhel4-rgmanager-1.9.54-1 (text/plain), 5.63 KB, created by
Andrey Mirkin
on 2007-04-12 08:38:21 UTC
(
hide
)
Description:
diff-rhel4-rgmanager-1.9.54-1
Filename:
MIME Type:
Creator:
Andrey Mirkin
Created:
2007-04-12 08:38:21 UTC
Size:
5.63 KB
patch
obsolete
>--- ./src/daemons/rg_state.c.rg 2006-10-06 20:30:09.000000000 +0400 >+++ ./src/daemons/rg_state.c 2007-04-12 12:07:36.000000000 +0400 >@@ -41,6 +41,8 @@ static int _svc_stop_finish(char *svcNam > int set_rg_state(char *servicename, rg_state_t *svcblk); > int get_rg_state(char *servicename, rg_state_t *svcblk); > void get_recovery_policy(char *rg_name, char *buf, size_t buflen); >+int have_exclusive_resources(); >+int check_exclusive_resources(cluster_member_list_t *membership, char *svcName); > > > uint64_t >@@ -714,6 +716,10 @@ svc_advise_start(rg_state_t *svcStatus, > ret = 1; > break; > } >+ if (req == RG_START_RECOVER) { >+ ret = 1; >+ break; >+ } > > clulog(LOG_DEBUG, "Not starting disabled RG %s\n", > svcName); >@@ -1352,6 +1358,7 @@ exhausted: > } > > >+pthread_mutex_t exclusive_mutex = PTHREAD_MUTEX_INITIALIZER; > /** > * handle_start_req - Handle a generic start request from a user or during > * service manager boot. >@@ -1367,6 +1374,7 @@ handle_start_req(char *svcName, int req, > { > int ret, tolerance = FOD_BEST; > cluster_member_list_t *membership = member_list(); >+ int need_check = have_exclusive_resources(); > > /* > * When a service request is from a user application (eg, clusvcadm), >@@ -1382,6 +1390,18 @@ handle_start_req(char *svcName, int req, > cml_free(membership); > return FAIL; > } >+ if (need_check) { >+ pthread_mutex_lock(&exclusive_mutex); >+ ret = check_exclusive_resources(membership, svcName); >+ if (ret != 0) { >+ cml_free(membership); >+ pthread_mutex_unlock(&exclusive_mutex); >+ if (ret > 0) >+ goto relocate; >+ else >+ return FAIL; >+ } >+ } > cml_free(membership); > > /* >@@ -1389,6 +1409,8 @@ handle_start_req(char *svcName, int req, > * mask here - so that we can try all nodes if necessary. 
> */ > ret = svc_start(svcName, req); >+ if (need_check) >+ pthread_mutex_unlock(&exclusive_mutex); > > /* > If services are locked, return the error >@@ -1428,6 +1450,7 @@ handle_start_req(char *svcName, int req, > return RG_EABORT; > } > >+relocate: > /* > * OK, it failed to start - but succeeded to stop. Now, > * we should relocate the service. >@@ -1465,6 +1488,7 @@ handle_start_remote_req(char *svcName, i > int x; > uint64_t me = my_id(); > cluster_member_list_t *membership = member_list(); >+ int need_check = have_exclusive_resources(); > > /* XXX ok, so we need to say "should I start this if I was the > only cluster member online */ >@@ -1485,10 +1509,23 @@ handle_start_remote_req(char *svcName, i > cml_free(membership); > return FAIL; > } >+ if (need_check) { >+ pthread_mutex_lock(&exclusive_mutex); >+ if (check_exclusive_resources(membership, svcName) != 0) { >+ pthread_mutex_unlock(&exclusive_mutex); >+ cml_free(membership); >+ return FAIL; >+ } >+ } > cml_free(membership); > >- if (svc_start(svcName, req) == 0) >+ if (svc_start(svcName, req) == 0) { >+ if (need_check) >+ pthread_mutex_unlock(&exclusive_mutex); > return 0; >+ } >+ if (need_check) >+ pthread_mutex_unlock(&exclusive_mutex); > > if (svc_stop(svcName, RG_STOP_RECOVER) == 0) > return FAIL; >--- ./src/daemons/groups.c.rg 2006-10-06 20:30:09.000000000 +0400 >+++ ./src/daemons/groups.c 2007-04-12 11:47:44.000000000 +0400 >@@ -22,6 +22,7 @@ > #include <magma.h> > #include <magmamsg.h> > #include <resgroup.h> >+#include <reslist.h> > #include <vf.h> > #include <magma.h> > #include <ccs.h> >@@ -132,6 +133,106 @@ count_resource_groups(cluster_member_lis > } > > >+int get_rg_state_local(char *, rg_state_t *); >+int >+count_resource_groups_local(cluster_member_t *mp) >+{ >+ resource_t *res; >+ char *rgname, *val; >+ rg_state_t st; >+ >+ mp->cm_svccount = 0; >+ mp->cm_svcexcl = 0; >+ >+ pthread_rwlock_rdlock(&resource_lock); >+ >+ list_do(&_resources, res) { >+ if (res->r_rule->rr_root == 0) >+ 
continue; >+ >+ rgname = res->r_attrs[0].ra_value; >+ >+ if (get_rg_state_local(rgname, &st) < 0) { >+ continue; >+ } >+ >+ if (st.rs_state != RG_STATE_STARTED && >+ st.rs_state != RG_STATE_STARTING) >+ continue; >+ >+ if (mp->cm_id != st.rs_owner) >+ continue; >+ >+ ++mp->cm_svccount; >+ >+ val = res_attr_value(res, "exclusive"); >+ if (val && ((!strcmp(val, "yes") || >+ (atoi(val)>0))) ) { >+ ++mp->cm_svcexcl; >+ } >+ >+ } while (!list_done(&_resources, res)); >+ >+ pthread_rwlock_unlock(&resource_lock); >+ >+ return 0; >+} >+ >+ >+int >+have_exclusive_resources() >+{ >+ resource_t *res; >+ char *val; >+ >+ pthread_rwlock_rdlock(&resource_lock); >+ >+ list_do(&_resources, res) { >+ val = res_attr_value(res, "exclusive"); >+ if (val && ((!strcmp(val, "yes") || >+ (atoi(val)>0))) ) { >+ pthread_rwlock_unlock(&resource_lock); >+ return 1; >+ } >+ >+ } while (!list_done(&_resources, res)); >+ >+ pthread_rwlock_unlock(&resource_lock); >+ >+ return 0; >+} >+ >+ >+int >+check_exclusive_resources(cluster_member_list_t *membership, char *svcName) >+{ >+ cluster_member_t *mp; >+ int exclusive, count; >+ resource_t *res; >+ char *val; >+ >+ mp = memb_id_to_p(membership, my_id()); >+ assert(mp); >+ count_resource_groups_local(mp); >+ exclusive = mp->cm_svcexcl; >+ count = mp->cm_svccount; >+ pthread_rwlock_rdlock(&resource_lock); >+ res = find_root_by_ref(&_resources, svcName); >+ if (!res) { >+ pthread_rwlock_unlock(&resource_lock); >+ return FAIL; >+ } >+ val = res_attr_value(res, "exclusive"); >+ pthread_rwlock_unlock(&resource_lock); >+ if (exclusive || (count && val && >+ (!strcmp(val, "yes") || (atoi(val)>0)))) { >+ return 1; >+ } >+ >+ return 0; >+} >+ >+ > /** > Find the best target node for a service *besides* the current service > owner. Takes into account: >@@ -576,7 +677,6 @@ group_property(char *groupname, char *pr > @param rgname Resource group name whose state we want to send. 
> @see send_rg_states > */ >-int get_rg_state_local(char *, rg_state_t *); > void > send_rg_state(int fd, char *rgname, int fast) > {
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 214477
:
151735
| 152372 |
152373