Red Hat Bugzilla – Attachment 935688 Details for Bug 1072415: pcs GUI should support creating dual-ring clusters (RRP)
Description: proposed fix 1/2
Filename:    0001-Add-RRP-support-in-adding-and-removing-nodes.patch
MIME Type:   text/plain
Creator:     Tomas Jelinek
Created:     2014-09-09 13:10:03 UTC
Size:        12.90 KB
Flags:       patch, obsolete
From 8a67fd4e56bb5b9e40930294b8f80d717f037b5b Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Mon, 8 Sep 2014 16:11:45 +0200
Subject: [PATCH 1/2] Add RRP support in adding and removing nodes

---
 pcs/cluster.py |  54 ++++++++++++++++++++++---------
 pcs/pcs.8      |   4 +--
 pcs/usage.py   |   6 ++--
 pcs/utils.py   | 100 +++++++++++++++++++++++++++++++++++++++------------------
 pcsd/remote.rb |   6 +++-
 5 files changed, 119 insertions(+), 51 deletions(-)

diff --git a/pcs/cluster.py b/pcs/cluster.py
index 9c3366b..d174da6 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -721,43 +721,67 @@ def cluster_node(argv):
         sys.exit(1)
 
     node = argv[1]
-    status,output = utils.checkAuthorization(node)
+    if "," in node:
+        node0 = node.split(",")[0]
+        node1 = node.split(",")[1]
+    else:
+        node0 = node
+        node1 = None
+
+    status,output = utils.checkAuthorization(node0)
     if status == 2:
-        utils.err("pcsd is not running on %s" % node)
+        utils.err("pcsd is not running on %s" % node0)
     elif status == 3:
-        utils.err("%s is not yet authenticated (try pcs cluster auth %s)" % (node, node))
+        utils.err(
+            "%s is not yet authenticated (try pcs cluster auth %s)"
+            % (node0, node0)
+        )
 
     if add_node == True:
+        if node1 is None and utils.need_ring1_address(utils.getCorosyncConf()):
+            utils.err(
+                "cluster is configured for RRP, "
+                "you have to specify ring 1 address for the node"
+            )
+        elif (
+            node1 is not None
+            and
+            not utils.need_ring1_address(utils.getCorosyncConf())
+        ):
+            utils.err(
+                "cluster is not configured for RRP, "
+                "you must not specify ring 1 address for the node"
+            )
         corosync_conf = None
-        (canAdd, error) = utils.canAddNodeToCluster(node)
+        (canAdd, error) = utils.canAddNodeToCluster(node0)
         if not canAdd:
-            utils.err("Unable to add '%s' to cluster: %s" % (node,error))
+            utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
 
         for my_node in utils.getNodesFromCorosyncConf():
-            retval, output = utils.addLocalNode(my_node,node)
+            retval, output = utils.addLocalNode(my_node, node0, node1)
             if retval != 0:
-                print >> sys.stderr, "Error: unable to add %s on %s - %s" % (node,my_node,output.strip())
+                print >> sys.stderr, "Error: unable to add %s on %s - %s" % (node0, my_node, output.strip())
             else:
                 print "%s: Corosync updated" % my_node
                 corosync_conf = output
         if corosync_conf != None:
-            utils.setCorosyncConfig(node, corosync_conf)
+            utils.setCorosyncConfig(node0, corosync_conf)
             if "--enable" in utils.pcs_options:
-                utils.enableCluster(node)
+                utils.enableCluster(node0)
             if "--start" in utils.pcs_options:
-                utils.startCluster(node)
+                utils.startCluster(node0)
         else:
             utils.err("Unable to update any nodes")
     else:
         nodesRemoved = False
         c_nodes = utils.getNodesFromCorosyncConf()
-        destroy_cluster([node])
+        destroy_cluster([node0])
         for my_node in c_nodes:
-            if my_node == node:
+            if my_node == node0:
                 continue
-            retval, output = utils.removeLocalNode(my_node,node)
+            retval, output = utils.removeLocalNode(my_node, node0)
             if retval != 0:
-                print >> sys.stderr, "Error: unable to remove %s on %s - %s" % (node,my_node,output.strip())
+                print >> sys.stderr, "Error: unable to remove %s on %s - %s" % (node0,my_node,output.strip())
             else:
                 if output[0] == 0:
                     print "%s: Corosync updated" % my_node
@@ -767,7 +791,7 @@
         if nodesRemoved == False:
             utils.err("Unable to update any nodes")
 
-        output, retval = utils.run(["crm_node", "--force","-R", node])
+        output, retval = utils.run(["crm_node", "--force", "-R", node0])
 
 def cluster_localnode(argv):
     if len(argv) != 2:
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index ac09816..1991cf5 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -235,8 +235,8 @@ Rollback the CIB to specified revision. Use the cib-revisions command to get a
 edit
 Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving
 .TP
-node add <node> [\fB\-\-start\fR] [\fB\-\-enable\fR]
-Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node. If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-enable\fR is specified enable corosync/pacemaker on new node
+node add <node[,node\-altaddr]> [\fB\-\-start\fR] [\fB\-\-enable\fR]
+Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node. If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-enable\fR is specified enable corosync/pacemaker on new node. When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address.
 .TP
 node remove <node>
 Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster
diff --git a/pcs/usage.py b/pcs/usage.py
index 2e8e62a..e3ccace 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -582,11 +582,13 @@ Commands:
         Edit the cib in the editor specified by the $EDITOR environment
         variable and push out any changes upon saving
 
-    node add <node> [--start] [--enable]
+    node add <node[,node-altaddr]> [--start] [--enable]
         Add the node to corosync.conf and corosync on all nodes in the cluster
         and sync the new corosync.conf to the new node. If --start is specified
         also start corosync/pacemaker on the new node, if --enable is specified
-        enable corosync/pacemaker on new node
+        enable corosync/pacemaker on new node.
+        When using Redundant Ring Protocol (RRP) with udpu transport, specify
+        the ring 0 address first followed by a ',' and then the ring 1 address.
 
     node remove <node>
         Shutdown specified node and remove it from pacemaker and corosync on
diff --git a/pcs/utils.py b/pcs/utils.py
index d85aba1..7df43f8 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -234,8 +234,11 @@ def canAddNodeToCluster(node):
 
     return (False,"error checking node availability")
 
-def addLocalNode(node,node_to_add):
-    data = urllib.urlencode({'new_nodename':node_to_add})
+def addLocalNode(node, node_to_add, ring1_addr=None):
+    options = {'new_nodename': node_to_add}
+    if ring1_addr:
+        options['new_ring1addr'] = ring1_addr
+    data = urllib.urlencode(options)
     retval, output = sendHTTPRequest(node, 'remote/add_node', data, False, False)
     if retval == 0:
         try:
@@ -418,15 +421,21 @@ def getCorosyncActiveNodes():
 # Add node specified to corosync.conf and reload corosync.conf (if running)
 def addNodeToCorosync(node):
     # Before adding, make sure node isn't already in corosync.conf
+    if "," in node:
+        node0 = node.split(",")[0]
+        node1 = node.split(",")[1]
+    else:
+        node0 = node
+        node1 = None
     used_node_ids = []
     num_nodes_in_conf = 0
     for c_node in getNodesFromCorosyncConf():
-        if c_node == node:
+        if (c_node == node0) or (c_node == node1):
            err("node already exists in corosync.conf")
        num_nodes_in_conf = num_nodes_in_conf + 1
    if "--corosync_conf" not in pcs_options:
        for c_node in getCorosyncActiveNodes():
-            if c_node == node:
+            if (c_node == node0) or (c_node == node1):
                err("Node already exists in running corosync")
    corosync_conf = getCorosyncConf()
    new_nodeid = getNextNodeID(corosync_conf)
@@ -446,11 +455,11 @@
             count += 1
     new_corosync_conf = corosync_conf[:count]
     new_corosync_conf += "  node {\n"
-    if "," in node:
-        new_corosync_conf += "        ring0_addr: %s\n" % (node.split(",")[0])
-        new_corosync_conf += "        ring1_addr: %s\n" % (node.split(",")[1])
+    if node1 is not None:
+        new_corosync_conf += "        ring0_addr: %s\n" % (node0)
+        new_corosync_conf += "        ring1_addr: %s\n" % (node1)
     else:
-        new_corosync_conf += "        ring0_addr: %s\n" % (node)
+        new_corosync_conf += "        ring0_addr: %s\n" % (node0)
     new_corosync_conf += "        nodeid: %d\n" % (new_nodeid)
     new_corosync_conf += "       }\n"
     new_corosync_conf += corosync_conf[count:]
@@ -490,19 +499,16 @@ def addNodeToClusterConf(node):
 
 # TODO: Need to make this smarter about parsing files not generated by pcs
 def removeNodeFromCorosync(node):
-    rrp = False
-    if "," in node:
-        rrp = True
-
     removed_node = False
     node_found = False
     num_nodes_in_conf = 0
 
-    if rrp:
+    if "," in node:
         node0 = node.split(",")[0]
         node1 = node.split(",")[1]
     else:
         node0 = node
+        node1 = None
 
     for c_node in getNodesFromCorosyncConf():
         if c_node == node0:
@@ -512,25 +518,37 @@
         num_nodes_in_conf = num_nodes_in_conf + 1
 
     if not node_found:
         return False
 
-    corosync_conf = getCorosyncConf().split("\n")
-    for x in range(len(corosync_conf)):
-        if corosync_conf[x].find("node {") != -1:
-
-            match = False
-            if (rrp and corosync_conf[x+1].find("ring0_addr: "+node0 ) != -1 and corosync_conf[x+2].find("ring1_addr: "+node1 ) != -1) or (not rrp and corosync_conf[x+1].find("ring0_addr: "+node0) != -1):
-                match = True
-
-            if match:
-                if rrp:
-                    new_corosync_conf = "\n".join(corosync_conf[0:x] + corosync_conf[x+5:])
-                else:
-                    new_corosync_conf = "\n".join(corosync_conf[0:x] + corosync_conf[x+4:])
-
-                if num_nodes_in_conf == 3:
-                    new_corosync_conf = addQuorumOption(new_corosync_conf,("two_node","1"))
-                setCorosyncConf(new_corosync_conf)
-                reloadCorosync()
+    new_corosync_conf_lines = []
+    in_node = False
+    node_match = False
+    node_buffer = []
+    for line in getCorosyncConf().split("\n"):
+        if in_node:
+            node_buffer.append(line)
+            if (
+                ("ring0_addr: " + node0 in line)
+                or
+                (node1 is not None and "ring0_addr: " + node1 in line)
+            ):
+                node_match = True
                 removed_node = True
+            if "}" in line:
+                if not node_match:
+                    new_corosync_conf_lines.extend(node_buffer)
+                node_buffer = []
+                node_match = False
+        elif "node {" in line:
+            node_buffer.append(line)
+            in_node = True
+        else:
+            new_corosync_conf_lines.append(line)
+    new_corosync_conf = "\n".join(new_corosync_conf_lines) + "\n"
+
+    if removed_node:
+        if num_nodes_in_conf == 3:
+            new_corosync_conf = addQuorumOption(new_corosync_conf,("two_node","1"))
+        setCorosyncConf(new_corosync_conf)
+        reloadCorosync()
 
     return removed_node
 
@@ -624,6 +642,26 @@ def getNextNodeID(corosync_conf):
 
     return highest + 1
 
+def need_ring1_address(corosync_conf):
+    line_list = corosync_conf.split("\n")
+    in_totem = False
+    udpu_transport = False
+    rrp = False
+    for line in line_list:
+        line = line.strip()
+        if in_totem:
+            if ":" in line:
+                name, value = map(lambda x: x.strip(), line.split(":"))
+                if name == "transport" and value == "udpu":
+                    udpu_transport = True
+                if name == "rrp_mode" and value in ["active", "passive"]:
+                    rrp = True
+            if "}" in line:
+                in_totem = False
+        if line.startswith("totem {"):
+            in_totem = True
+    return udpu_transport and rrp
+
 # Restore default behavior before starting subprocesses
 def subprocess_setup():
     signal.signal(signal.SIGPIPE, signal.SIG_DFL)
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 8fcf981..3e7ef01 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -333,7 +333,11 @@ def remote_add_node(params,all = false)
   end
 
   if params[:new_nodename] != nil
-    retval, output = add_node(params[:new_nodename],all,auto_start)
+    node = params[:new_nodename]
+    if params[:new_ring1addr] != nil
+      node += ',' + params[:new_ring1addr]
+    end
+    retval, output = add_node(node, all, auto_start)
   end
 
   if retval == 0
-- 
1.9.1
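
The gate for the new comma syntax is the need_ring1_address() helper the patch adds to pcs/utils.py: a second address is required exactly when corosync uses udpu transport with rrp_mode set to active or passive. Below is a minimal, self-contained sketch (Python, mirroring the patch's logic) showing the check in action; the sample corosync.conf snippets and node names are illustrative, not taken from the bug report.

# Sketch: when does pcs demand the "node0,node1" form?
# Mirrors the patch's need_ring1_address(); runnable under Python 2 or 3.
def need_ring1_address(corosync_conf):
    in_totem = False
    udpu_transport = False
    rrp = False
    for line in corosync_conf.split("\n"):
        line = line.strip()
        if in_totem:
            if ":" in line:
                name, value = [x.strip() for x in line.split(":")]
                if name == "transport" and value == "udpu":
                    udpu_transport = True
                if name == "rrp_mode" and value in ["active", "passive"]:
                    rrp = True
            if "}" in line:
                in_totem = False
        if line.startswith("totem {"):
            in_totem = True
    # Ring 1 addresses are mandatory only for udpu transport with RRP enabled
    return udpu_transport and rrp

rrp_conf = "totem {\n    transport: udpu\n    rrp_mode: passive\n}\n"
plain_conf = "totem {\n    transport: udpu\n}\n"

print(need_ring1_address(rrp_conf))    # True  -> e.g. pcs cluster node add node3,node3-alt
print(need_ring1_address(plain_conf))  # False -> plain pcs cluster node add node3

Per the updated man page and usage text, the ring 0 address comes first, followed by a ',' and the ring 1 address; the hostnames in the comments above are placeholders.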
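The other notable change is the rewrite of removeNodeFromCorosync(): instead of assuming each node stanza spans a fixed number of lines (which breaks once stanzas may or may not carry a ring1_addr line), the patch buffers each "node { ... }" block and writes it back only if it does not belong to the node being removed. A hedged, self-contained sketch of that buffering strategy follows; the helper name drop_node_stanza and the sample nodelist are illustrative, and unlike the attached patch this sketch also resets the in-node flag explicitly at the closing brace.

# Sketch of the stanza-buffering approach used by the patch's
# removeNodeFromCorosync(). Either ring's address may identify the node.
def drop_node_stanza(corosync_conf, node0, node1=None):
    kept = []            # lines of the rewritten corosync.conf
    node_buffer = []     # lines of the stanza currently being scanned
    in_node = False
    node_match = False
    removed = False
    for line in corosync_conf.split("\n"):
        if in_node:
            node_buffer.append(line)
            if ("ring0_addr: " + node0 in line) or (
                    node1 is not None and "ring0_addr: " + node1 in line):
                node_match = True
                removed = True
            if "}" in line:          # stanza closed
                if not node_match:   # keep stanzas of all other nodes
                    kept.extend(node_buffer)
                node_buffer = []
                node_match = False
                in_node = False      # explicit reset (see note above)
        elif "node {" in line:
            node_buffer.append(line)
            in_node = True
        else:
            kept.append(line)
    return "\n".join(kept), removed

conf = (
    "nodelist {\n"
    "  node {\n"
    "        ring0_addr: rh70-node1\n"
    "        ring1_addr: rh70-node1-alt\n"
    "        nodeid: 1\n"
    "       }\n"
    "  node {\n"
    "        ring0_addr: rh70-node2\n"
    "        ring1_addr: rh70-node2-alt\n"
    "        nodeid: 2\n"
    "       }\n"
    "}\n"
)

new_conf, removed = drop_node_stanza(conf, "rh70-node2", "rh70-node2-alt")
print(removed)   # True; new_conf now lists only rh70-node1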
Attachments on bug 1072415: 935688 | 935689 | 940740