Red Hat Bugzilla – Attachment 156908 Details for Bug 211469
RFE: flag for clusvcadm to respect failover domain priorities (rhcs5)
Description: Respect failover patch against rgmanager-2.0.24
Filename:    bug-243740.diff
MIME Type:   text/plain
Creator:     Marek Grac
Created:     2007-06-13 19:36:34 UTC
Size:        7.75 KB
Flags:       patch, obsolete
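
The patch gives clusvcadm(8) a way to enable a service according to its failover domain's priority ordering instead of starting it on the local node. A minimal usage sketch (the service name "webfs" is invented for illustration; the -F form comes from the usage text the patch itself adds):

    clusvcadm -e webfs -F    # enable "webfs" according to failover domain rules

As the option parsing in the patch enforces, -F cannot be combined with -m/-n: naming an explicit target member would contradict letting the domain ordering pick one. The full patch follows.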
diff -urNp rgmanager-2.0.24.orig/include/resgroup.h rgmanager-2.0.24/include/resgroup.h
--- rgmanager-2.0.24.orig/include/resgroup.h	2007-04-09 18:01:55.000000000 +0200
+++ rgmanager-2.0.24/include/resgroup.h	2007-06-12 22:33:17.000000000 +0200
@@ -86,6 +86,7 @@ const char *rg_req_str(int req);
 int handle_relocate_req(char *svcName, int request, int preferred_target,
 			int *new_owner);
 int handle_start_req(char *svcName, int req, int *new_owner);
+int handle_fd_start_req(char *svcName, int req, int *new_owner);
 int handle_recover_req(char *svcName, int *new_owner);
 int handle_start_remote_req(char *svcName, int req);
 
diff -urNp rgmanager-2.0.24.orig/src/daemons/main.c rgmanager-2.0.24/src/daemons/main.c
--- rgmanager-2.0.24.orig/src/daemons/main.c	2007-04-09 18:01:55.000000000 +0200
+++ rgmanager-2.0.24/src/daemons/main.c	2007-06-12 22:33:22.000000000 +0200
@@ -491,7 +491,9 @@ dispatch_msg(msgctx_t *ctx, int nodeid, 
 		/* Queue request */
 		rt_enqueue_request(msg_sm->sm_data.d_svcName,
 				   msg_sm->sm_data.d_action,
-				   ctx, 0, msg_sm->sm_data.d_svcOwner, 0, 0);
+				   ctx, 0, msg_sm->sm_data.d_svcOwner,
+				   msg_sm->sm_hdr.gh_arg1,
+				   msg_sm->sm_hdr.gh_arg2);
 		return 0;
 
 	case RG_EVENT:
diff -urNp rgmanager-2.0.24.orig/src/daemons/rg_state.c rgmanager-2.0.24/src/daemons/rg_state.c
--- rgmanager-2.0.24.orig/src/daemons/rg_state.c	2007-04-09 18:01:55.000000000 +0200
+++ rgmanager-2.0.24/src/daemons/rg_state.c	2007-06-13 20:13:01.000000000 +0200
@@ -1198,7 +1198,7 @@ relocate_service(char *svcName, int requ
 	strncpy(msg_relo.sm_data.d_svcName, svcName,
 		sizeof(msg_relo.sm_data.d_svcName));
 	msg_relo.sm_data.d_ret = 0;
-
+	msg_relo.sm_data.d_svcOwner = target;
 	/* Open a connection to the other node */
 
 	if (msg_open(MSG_CLUSTER, target, RG_PORT, &ctx, 2) < 0) {
@@ -1478,7 +1478,7 @@ handle_start_req(char *svcName, int req,
 	 */
 	if (req == RG_ENABLE)
 		tolerance = FOD_GOOD;
-
+/*
 	if (req != RG_RESTART &&
 	    req != RG_START_RECOVER &&
 	    (node_should_start_safe(my_id(), membership, svcName) <
@@ -1487,7 +1487,7 @@ handle_start_req(char *svcName, int req,
 		return RG_EFAIL;
 	}
 	free_member_list(membership);
-
+*/
 	/* Check for dependency.  We cannot start unless our
 	   dependency is met */
 	if (check_depend_safe(svcName) == 0)
@@ -1596,8 +1596,13 @@ handle_start_remote_req(char *svcName, i
 	}
 	free_member_list(membership);
 
-	if (svc_start(svcName, req) == 0)
-		return 0;
+	x = svc_start(svcName, req);
+
+	if (x == 0)
+		return 0;
+
+	if (x == RG_ERUN)
+		return RG_ERUN;
 
 	if (svc_stop(svcName, RG_STOP_RECOVER) == 0)
 		return RG_EFAIL;
@@ -1626,3 +1631,44 @@ handle_recover_req(char *svcName, int *n
 
 	return handle_start_req(svcName, RG_START_RECOVER, new_owner);
 }
+
+int
+handle_fd_start_req(char *svcName, int request, int *new_owner)
+{
+	cluster_member_list_t *allowed_nodes;
+	int target, me = my_id();
+	int ret;
+
+	allowed_nodes = member_list();
+
+	while (memb_count(allowed_nodes)) {
+		target = best_target_node(allowed_nodes, -1,
+					  svcName, 1);
+		if (target == me) {
+			ret = handle_start_remote_req(svcName, request);
+		} else {
+			ret = relocate_service(svcName, request, target);
+		}
+
+		switch(ret) {
+		case RG_ESUCCESS:
+			return RG_ESUCCESS;
+		case RG_ERUN:
+			return RG_ERUN;
+		case RG_EFAIL:
+			memb_mark_down(allowed_nodes, target);
+			continue;
+		case RG_EABORT:
+			svc_report_failure(svcName);
+			free_member_list(allowed_nodes);
+			return RG_EFAIL;
+		default:
+			clulog(LOG_ERR,
+			       "#6X: Invalid reply [%d] from member %d during"
+			       " relocate operation!\n", ret, target);
+		}
+	}
+
+	free_member_list(allowed_nodes);
+	return RG_EFAIL;
+}
diff -urNp rgmanager-2.0.24.orig/src/daemons/rg_thread.c rgmanager-2.0.24/src/daemons/rg_thread.c
--- rgmanager-2.0.24.orig/src/daemons/rg_thread.c	2007-04-09 18:01:55.000000000 +0200
+++ rgmanager-2.0.24/src/daemons/rg_thread.c	2007-06-13 20:13:49.000000000 +0200
@@ -244,8 +244,15 @@ resgroup_thread_main(void *arg)
 			break;
 		}
 	case RG_START:
-		error = handle_start_req(myname, req->rr_request,
-					 &newowner);
+		if (req->rr_arg0) {
+			error = handle_fd_start_req(myname,
+						    req->rr_request,
+						    &newowner);
+		} else {
+			error = handle_start_req(myname,
+						 req->rr_request,
+						 &newowner);
+		}
 		break;
 
 	case RG_RELOCATE:
diff -urNp rgmanager-2.0.24.orig/src/utils/clusvcadm.c rgmanager-2.0.24/src/utils/clusvcadm.c
--- rgmanager-2.0.24.orig/src/utils/clusvcadm.c	2007-04-09 18:01:55.000000000 +0200
+++ rgmanager-2.0.24/src/utils/clusvcadm.c	2007-06-13 20:15:09.000000000 +0200
@@ -39,11 +39,14 @@
 
 
 void
-build_message(SmMessageSt *msgp, int action, char *svcName, int target)
+build_message(SmMessageSt *msgp, int action, char *svcName, int target,
+	      int arg1, int arg2)
 {
 	msgp->sm_hdr.gh_magic = GENERIC_HDR_MAGIC;
 	msgp->sm_hdr.gh_command = RG_ACTION_REQUEST;
 	msgp->sm_hdr.gh_length = sizeof(*msgp);
+	msgp->sm_hdr.gh_arg1 = arg1;
+	msgp->sm_hdr.gh_arg2 = arg2;
 	msgp->sm_data.d_action = action;
 	strncpy(msgp->sm_data.d_svcName, svcName,
 		sizeof(msgp->sm_data.d_svcName));
@@ -155,6 +158,8 @@ printf("       %s -v
 printf("       %s -d <group>             Disable <group>\n", name);
 printf("       %s -e <group>             Enable <group>\n",
        name);
+printf("       %s -e <group> -F          Enable <group> according to failover\n"
+       "                                 domain rules\n", name);
 printf("       %s -e <group> -m <member> Enable <group>"
        " on <member>\n", name);
 printf("       %s -r <group> -m <member> Relocate <group> [to <member>]\n",
@@ -230,6 +235,7 @@ main(int argc, char **argv)
 	SmMessageSt msg;
 	generic_msg_hdr *h = (generic_msg_hdr *)&msg;
 	int action = RG_STATUS;
+	int fod = 0;
 	int node_specified = 0;
 	int me, svctarget = 0;
 	char *actionstr = NULL;
@@ -240,7 +246,7 @@ main(int argc, char **argv)
 		return 1;
 	}
 
-	while ((opt = getopt(argc, argv, "lSue:M:d:r:n:m:vR:s:qh?")) != EOF) {
+	while ((opt = getopt(argc, argv, "lSue:M:d:r:n:m:FvR:s:qh?")) != EOF) {
 		switch (opt) {
 		case 'l':
 			return do_lock();
@@ -257,6 +263,14 @@ main(int argc, char **argv)
 			action = RG_ENABLE;
 			svcname = optarg;
 			break;
+		case 'F':
+			if (node_specified) {
+				fprintf(stderr,
+					"Cannot use '-F' with '-n' or '-m'\n");
+				return 1;
+			}
+			fod = 1;
+			break;
 		case 'd':
 			/* DISABLE */
 			actionstr = "disabling";
@@ -288,6 +302,11 @@ main(int argc, char **argv)
 			break;
 		case 'm':	/* member ... */
 		case 'n':	/* node .. same thing */
+			if (fod) {
+				fprintf(stderr,
+					"Cannot use '-F' with '-n' or '-m'\n");
+				return 1;
+			}
 			strncpy(nodename,optarg,sizeof(nodename));
 			node_specified = 1;
 			break;
@@ -351,8 +370,8 @@ main(int argc, char **argv)
 	 */
 	//strcpy(nodename,"me");
 	}
-
-	build_message(&msg, action, svcname, svctarget);
+
+	build_message(&msg, action, svcname, svctarget, fod, 0);
 
 	if (action != RG_RELOCATE && action != RG_MIGRATE) {
 		if (!node_specified)
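
End to end, the hunks above thread the new flag through four layers: clusvcadm stores it in the message header (gh_arg1), dispatch_msg() in main.c forwards it into the request queue, resgroup_thread_main() routes flagged RG_START requests to the new handle_fd_start_req(), and that function walks candidate members in best_target_node() priority order. A self-contained model of that retry loop, for illustration only (this is not rgmanager code; the enum, best_target(), try_start_on(), and the member table are invented stand-ins):

/*
 * Illustrative model of the retry loop in handle_fd_start_req():
 * pick the best remaining member, try to start the service there,
 * drop the member on failure, stop on success or "already running".
 */
#include <stdio.h>

enum { RG_ESUCCESS, RG_ERUN, RG_EFAIL };	/* stand-in return codes */

#define NMEMB 4

static int up[NMEMB] = { 1, 1, 1, 1 };	/* members still eligible, best first */

/* Stand-in for best_target_node(): lowest index = highest priority. */
static int best_target(void)
{
	int i;

	for (i = 0; i < NMEMB; i++)
		if (up[i])
			return i;
	return -1;
}

/* Stand-in for handle_start_remote_req()/relocate_service():
 * pretend only member 2 can actually run the service. */
static int try_start_on(int node)
{
	return (node == 2) ? RG_ESUCCESS : RG_EFAIL;
}

int main(void)
{
	int target;

	while ((target = best_target()) >= 0) {
		switch (try_start_on(target)) {
		case RG_ESUCCESS:
			printf("started on member %d\n", target);
			return 0;
		case RG_ERUN:
			printf("already running somewhere\n");
			return 0;
		case RG_EFAIL:
			up[target] = 0;	/* like memb_mark_down(): try next-best */
			continue;
		}
	}
	printf("no member in the domain could start the service\n");
	return 1;
}

Compiled and run on its own, the model marks members 0 and 1 down and starts on member 2, mirroring how handle_fd_start_req() falls through the domain's priority list until a member succeeds or none remain.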