Red Hat Bugzilla – Attachment 934080 Details for Bug 1024492: pcs should handle full cluster config backup/restore
Description: proposed fix
Filename:    0001-Add-ability-to-backup-and-restore-cluster-configurat.patch
MIME Type:   text/plain
Creator:     Tomas Jelinek
Created:     2014-09-03 12:46:04 UTC
Size:        23.81 KB
Flags:       patch, obsolete
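The patch below adds pcs config backup and pcs config restore subcommands. As a rough illustration of the archive format the backup code produces (a bzip2-compressed tarball holding a version.txt marker plus the configuration files selected by config_backup_path_list), here is a minimal standalone sketch in Python 3. The helper name build_backup, the output filename and the example source paths are hypothetical, not part of the patch.

# Minimal sketch (Python 3, not from the patch): build a backup archive with the
# same layout the patch produces: version.txt plus the configuration files.
import io
import tarfile

def build_backup(out_path, files, version=b"1"):
    # files maps archive names to source paths, e.g. {"cib.xml": "/path/to/cib.xml"}
    with tarfile.open(out_path, mode="w:bz2") as tarball:
        info = tarfile.TarInfo("version.txt")  # version marker checked on restore
        info.size = len(version)
        tarball.addfile(info, io.BytesIO(version))
        for tar_name, src_path in files.items():
            tarball.add(src_path, arcname=tar_name)

# Hypothetical usage; the paths are typical defaults, adjust to the actual cluster:
# build_backup("pcs_backup.tar.bz2",
#              {"cib.xml": "/var/lib/pacemaker/cib/cib.xml",
#               "corosync.conf": "/etc/corosync/corosync.conf"})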
From 6e534c525ab9f922ffa262ff777eb54a4b40659a Mon Sep 17 00:00:00 2001
From: Tomas Jelinek <tojeline@redhat.com>
Date: Wed, 3 Sep 2014 14:05:26 +0200
Subject: [PATCH] Add ability to backup and restore cluster configuration

---
 pcs/cluster.py   |  24 -----
 pcs/config.py    | 300 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 pcs/pcs.8        |  12 ++-
 pcs/pcs.py       |   6 +-
 pcs/status.py    |  22 ++++
 pcs/usage.py     |  28 +++++-
 pcs/utils.py     |  47 ++++++++-
 pcsd/pcs.rb      |   9 ++
 pcsd/pcsd.rb     |   1 +
 pcsd/remote.rb   |  71 +++++++++++++
 pcsd/resource.rb |   2 +-
 11 files changed, 486 insertions(+), 36 deletions(-)
 create mode 100644 pcs/config.py

diff --git a/pcs/cluster.py b/pcs/cluster.py
index 054d5d2..9918fc8 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -935,29 +935,6 @@ def cluster_reload(argv):
         utils.err(output.rstrip())
     print "Corosync reloaded"

-def print_config():
-    print "Cluster Name: %s" % utils.getClusterName()
-    status.nodes_status(["config"])
-    print ""
-    print ""
-    print "Resources: "
-    utils.pcs_options["--all"] = 1
-    utils.pcs_options["--full"] = 1
-    resource.resource_show([])
-    print ""
-    print "Stonith Devices: "
-    resource.resource_show([], True)
-    print "Fencing Levels: "
-    print ""
-    stonith.stonith_level_show()
-    constraint.location_show([])
-    constraint.order_show([])
-    constraint.colocation_show([])
-    print ""
-    del utils.pcs_options["--all"]
-    prop.list_property([])
-    cluster_uidgid([], True)
-
 # Completely tear down the cluster & remove config files
 # Code taken from cluster-clean script in pacemaker
 def cluster_destroy(argv):
@@ -1092,4 +1069,3 @@ def cluster_remote_node(argv):
         usage.cluster(["remote-node"])
         sys.exit(1)

-
diff --git a/pcs/config.py b/pcs/config.py
new file mode 100644
index 0000000..2f3316c
--- /dev/null
+++ b/pcs/config.py
@@ -0,0 +1,300 @@
+import sys
+import os
+import cStringIO
+import tarfile
+import json
+
+import settings
+import utils
+import cluster
+import constraint
+import prop
+import resource
+import status
+import stonith
+import usage
+
+def config_cmd(argv):
+    if len(argv) == 0:
+        config_show(argv)
+        return
+
+    sub_cmd = argv.pop(0)
+    if sub_cmd == "help":
+        usage.config(argv)
+    elif sub_cmd == "show":
+        config_show(argv)
+    elif sub_cmd == "backup":
+        config_backup(argv)
+    elif sub_cmd == "restore":
+        config_restore(argv)
+    else:
+        usage.config()
+        sys.exit(1)
+
+def config_show(argv):
+    print "Cluster Name: %s" % utils.getClusterName()
+    status.nodes_status(["config"])
+    print ""
+    print ""
+    print "Resources: "
+    utils.pcs_options["--all"] = 1
+    utils.pcs_options["--full"] = 1
+    resource.resource_show([])
+    print ""
+    print "Stonith Devices: "
+    resource.resource_show([], True)
+    print "Fencing Levels: "
+    print ""
+    stonith.stonith_level_show()
+    constraint.location_show([])
+    constraint.order_show([])
+    constraint.colocation_show([])
+    print ""
+    del utils.pcs_options["--all"]
+    prop.list_property([])
+    cluster.cluster_uidgid([], True)
+
+def config_backup(argv):
+    if len(argv) > 1:
+        usage.config(["backup"])
+        sys.exit(1)
+
+    outfile_name = None
+    if argv:
+        outfile_name = argv[0]
+        if not outfile_name.endswith(".tar.bz2"):
+            outfile_name += ".tar.bz2"
+
+    tar_data = config_backup_local()
+    if outfile_name:
+        ok, message = utils.write_file(outfile_name, tar_data)
+        if not ok:
+            utils.err(message)
+    else:
+        sys.stdout.write(tar_data)
+
+def config_backup_local():
+    file_list = config_backup_path_list()
+    tar_data = cStringIO.StringIO()
+
+    try:
+        tarball = tarfile.open(fileobj=tar_data, mode="w|bz2")
+        config_backup_add_version_to_tarball(tarball)
+        for tar_path, path_info in file_list.items():
+            if (
+                not os.path.exists(path_info["path"])
+                and
+                not path_info["required"]
+            ):
+                continue
+            tarball.add(path_info["path"], tar_path)
+        tarball.close()
+    except (tarfile.TarError, EnvironmentError) as e:
+        utils.err("unable to create tarball: %s" % e)
+
+    tar = tar_data.getvalue()
+    tar_data.close()
+    return tar
+
+def config_restore(argv):
+    if len(argv) > 1:
+        usage.config(["restore"])
+        sys.exit(1)
+
+    infile_name = infile_obj = None
+    if argv:
+        infile_name = argv[0]
+    if not infile_name:
+        infile_obj = cStringIO.StringIO(sys.stdin.read())
+
+    if "--local" in utils.pcs_options:
+        config_restore_local(infile_name, infile_obj)
+    else:
+        config_restore_remote(infile_name, infile_obj)
+
+def config_restore_remote(infile_name, infile_obj):
+    extracted = {
+        "version.txt": "",
+        "corosync.conf": "",
+        "cluster.conf": "",
+    }
+    try:
+        tarball = tarfile.open(infile_name, "r|*", infile_obj)
+        while True:
+            tar_member_info = tarball.next()
+            if tar_member_info is None:
+                break
+            if tar_member_info.name in extracted:
+                tar_member = tarball.extractfile(tar_member_info)
+                extracted[tar_member_info.name] = tar_member.read()
+                tar_member.close()
+        tarball.close()
+    except (tarfile.TarError, EnvironmentError) as e:
+        utils.err("unable to read the tarball: %s" % e)
+
+    config_backup_check_version(extracted["version.txt"])
+
+    node_list = utils.getNodesFromCorosyncConf(
+        extracted["cluster.conf" if utils.is_rhel6() else "corosync.conf"]
+    )
+    if not node_list:
+        utils.err("no nodes found in the tarball")
+
+    for node in node_list:
+        try:
+            retval, output = utils.checkStatus(node)
+            if retval != 0:
+                utils.err("unable to determine status of the node %s" % node)
+            status = json.loads(output)
+            if status["corosync"] or status["pacemaker"] or status["cman"]:
+                utils.err(
+                    "Cluster is currently running on node %s. You need to stop "
+                    "the cluster in order to restore the configuration."
+                    % node
+                )
+        except (ValueError, NameError):
+            utils.err("unable to determine status of the node %s" % node)
+
+    if infile_obj:
+        infile_obj.seek(0)
+        tarball_data = infile_obj.read()
+    else:
+        with open(infile_name, "r") as tarball:
+            tarball_data = tarball.read()
+
+    error_list = []
+    for node in node_list:
+        retval, error = utils.restoreConfig(node, tarball_data)
+        if retval != 0:
+            error_list.append(error)
+    if error_list:
+        utils.err("unable to restore all nodes\n" + "\n".join(error_list))
+
+def config_restore_local(infile_name, infile_obj):
+    if (
+        status.is_cman_running()
+        or
+        status.is_corosyc_running()
+        or
+        status.is_pacemaker_running()
+    ):
+        utils.err(
+            "Cluster is currently running on this node. You need to stop "
+            "the cluster in order to restore the configuration."
+        )
+
+    file_list = config_backup_path_list()
+    tarball_file_list = []
+    version = None
+    try:
+        tarball = tarfile.open(infile_name, "r|*", infile_obj)
+        while True:
+            tar_member_info = tarball.next()
+            if tar_member_info is None:
+                break
+            if tar_member_info.name == "version.txt":
+                version_data = tarball.extractfile(tar_member_info)
+                version = version_data.read()
+                version_data.close()
+                continue
+            tarball_file_list.append(tar_member_info.name)
+        tarball.close()
+
+        required_file_list = [
+            tar_path
+            for tar_path, path_info in file_list.items()
+            if path_info["required"]
+        ]
+        missing = set(required_file_list) - set(tarball_file_list)
+        if missing:
+            utils.err(
+                "unable to restore the cluster, missing files in backup: %s"
+                % ", ".join(missing)
+            )
+
+        config_backup_check_version(version)
+
+        if infile_obj:
+            infile_obj.seek(0)
+        tarball = tarfile.open(infile_name, "r|*", infile_obj)
+        while True:
+            tar_member_info = tarball.next()
+            if tar_member_info is None:
+                break
+            extract_info = None
+            path = tar_member_info.name
+            while path:
+                if path in file_list:
+                    extract_info = file_list[path]
+                    break
+                path = os.path.dirname(path)
+            if not extract_info:
+                continue
+            tarball.extractall(
+                os.path.dirname(extract_info["path"]),
+                [tar_member_info]
+            )
+        tarball.close()
+    except (tarfile.TarError, EnvironmentError) as e:
+        utils.err("unable to restore the cluster: %s" % e)
+
+    try:
+        sig_path = os.path.join(settings.cib_dir, "cib.xml.sig")
+        if os.path.exists(sig_path):
+            os.remove(sig_path)
+    except EnvironmentError as e:
+        utils.err("unable to remove %s: %s" % (sig_path, e))
+
+def config_backup_path_list():
+    file_list = {
+        "cib.xml": {
+            "path": os.path.join(settings.cib_dir, "cib.xml"),
+            "required": True,
+        },
+    }
+    if utils.is_rhel6():
+        file_list["cluster.conf"] = {
+            "path": settings.cluster_conf_file,
+            "required": True,
+        }
+    else:
+        file_list["corosync.conf"] = {
+            "path": settings.corosync_conf_file,
+            "required": True,
+        }
+        file_list["uidgid.d"] = {
+            "path": settings.corosync_uidgid_dir.rstrip("/"),
+            "required": False,
+        }
+    return file_list
+
+def config_backup_check_version(version):
+    try:
+        version_number = int(version)
+        supported_version = config_backup_version()
+        if version_number > supported_version:
+            utils.err(
+                "Unsupported version of the backup, "
+                "supported version is %d, backup version is %d"
+                % (supported_version, version_number)
+            )
+        if version_number < supported_version:
+            print(
+                "Warning: restoring from the backup version %d, "
+                "current supported version is %s"
+                % (version_number, supported_version)
+            )
+    except TypeError:
+        utils.err("Cannot determine version of the backup")
+
+def config_backup_add_version_to_tarball(tarball, version=None):
+    version = version if version is not None else str(config_backup_version())
+    version_info = tarfile.TarInfo("version.txt")
+    version_info.size = len(version)
+    version_info.type = tarfile.REGTYPE
+    tarball.addfile(version_info, cStringIO.StringIO(version))
+
+def config_backup_version():
+    return 1
+
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 50c21d8..c3d030c 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -44,7 +44,7 @@ status
 View cluster status
 .TP
 config
-Print full cluster configuration
+View and manage cluster configuration
 .SS "resource"
 .TP
 show [resource id] [\fB\-\-full\fR] [\fB\-\-groups\fR]
@@ -473,6 +473,16 @@ Show the current status of pcsd on the specified nodes
 .TP
 xml
 View xml version of status (output from crm_mon \fB\-r\fR \fB\-1\fR \fB\-X\fR)
+.SS "config"
+.TP
+[show]
+View full cluster configuration
+.TP
+backup [filename]
+Creates the tarball containing the cluster configuration files. If filename is not specified the standard output will be used.
+.TP
+restore [\fB\-\-local\fR] [filename]
+Restores the cluster configuration files on all nodes from the backup. If filename is not specified the standard input will be used. If \fB\-\-local\fR is specified only the files on the current node will be restored.
 .SH EXAMPLES
 .TP
 Show all resources
diff --git a/pcs/pcs.py b/pcs/pcs.py
index 229c050..505b023 100755
--- a/pcs/pcs.py
+++ b/pcs/pcs.py
@@ -11,6 +11,7 @@ import acl
 import utils
 import status
 import settings
+import config

 usefile = False
 filename = ""
@@ -126,10 +127,7 @@ def main(argv):
     elif (command == "status"):
         status.status_cmd(argv)
     elif (command == "config"):
-        if "--help" in utils.pcs_options or "-h" in utils.pcs_options or (len(argv) > 0 and argv[0] == "help"):
-            usage.main()
-        else:
-            cluster.print_config()
+        config.config_cmd(argv)
     else:
         usage.main()
         sys.exit(1)
diff --git a/pcs/status.py b/pcs/status.py
index 994cb3c..af1a527 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -186,3 +186,25 @@ def xml_status():
     if (retval != 0):
         utils.err("running crm_mon, is pacemaker running?")
     print output
+
+def is_cman_running():
+    if utils.is_systemctl():
+        output, retval = utils.run(["systemctl", "status", "cman.service"])
+    else:
+        output, retval = utils.run(["service", "cman", "status"])
+    return retval == 0
+
+def is_corosyc_running():
+    if utils.is_systemctl():
+        output, retval = utils.run(["systemctl", "status", "corosync.service"])
+    else:
+        output, retval = utils.run(["service", "corosync", "status"])
+    return retval == 0
+
+def is_pacemaker_running():
+    if utils.is_systemctl():
+        output, retval = utils.run(["systemctl", "status", "pacemaker.service"])
+    else:
+        output, retval = utils.run(["service", "pacemaker", "status"])
+    return retval == 0
+
diff --git a/pcs/usage.py b/pcs/usage.py
index 1f28f99..2e8e62a 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -12,6 +12,7 @@ def full_usage():
     out += strip_extras(constraint([],False))
     out += strip_extras(acl([],False))
     out += strip_extras(status([],False))
+    out += strip_extras(config([],False))
     print out.strip()
     print "Examples:\n" + examples.replace(" \ ","")

@@ -128,6 +129,7 @@ def sub_generate_bash_completion():
     tree["acl"] = generate_tree(acl([],False))
     tree["constraint"] = generate_tree(constraint([],False))
     tree["status"] = generate_tree(status([],False))
+    tree["config"] = generate_tree(config([],False))
     print """
_pcs()
{
@@ -201,7 +203,7 @@ Commands:
    property    Set pacemaker properties
    acl         Set pacemaker access control lists
    status      View cluster status
-    config      Print full cluster configuration
+    config      View and manage cluster configuration
 """
 # Advanced usage to possibly add later
 # --corosync_conf=<corosync file> Specify alternative corosync.conf file
@@ -962,3 +964,27 @@ Commands:
         print sub_usage(args, output)
     else:
         return output
+
+def config(args=[], pout=True):
+    output = """
+Usage: pcs config [commands]...
+View and manage cluster configuration
+
+Commands:
+    [show]
+        View full cluster configuration
+
+    backup [filename]
+        Creates the tarball containing the cluster configuration files.
+        If filename is not specified the standard output will be used.
+
+    restore [--local] [filename]
+        Restores the cluster configuration files on all nodes from the backup.
+        If filename is not specified the standard input will be used.
+        If --local is specified only the files on the current node will
+        be restored.
+"""
+    if pout:
+        print sub_usage(args, output)
+    else:
+        return output
diff --git a/pcs/utils.py b/pcs/utils.py
index c8f511a..d85aba1 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -47,8 +47,7 @@ def checkAndUpgradeCIB(major,minor,rev):

 # Check status of node
 def checkStatus(node):
-    out = sendHTTPRequest(node, 'remote/status', None, False)
-    return out
+    return sendHTTPRequest(node, 'remote/status', None, False, False)

 # Check and see if we're authorized (faster than a status check)
 def checkAuthorization(node):
@@ -215,6 +214,10 @@ def disableCluster(node):
 def destroyCluster(node):
     return sendHTTPRequest(node, 'remote/cluster_destroy')

+def restoreConfig(node, tarball_data):
+    data = urllib.urlencode({"tarball": tarball_data})
+    return sendHTTPRequest(node, "remote/config_restore", data, False, True)
+
 def canAddNodeToCluster(node):
     retval, output = sendHTTPRequest(node, 'remote/node_available', [], False, False)
     if retval == 0:
@@ -304,10 +307,13 @@ def sendHTTPRequest(host, request, data = None, printResult = True, printSuccess
         print "Unable to connect to %s (%s)" % (host, e.reason)
         return (2,"Unable to connect to %s (%s)" % (host, e.reason))

-def getNodesFromCorosyncConf():
+def getNodesFromCorosyncConf(conf_text=None):
     if is_rhel6():
         try:
-            dom = parse(settings.cluster_conf_file)
+            dom = (
+                parse(settings.cluster_conf_file) if conf_text is None
+                else parseString(conf_text)
+            )
         except IOError:
             err("Unable to open cluster.conf file to get nodes list")
         return [
@@ -316,7 +322,8 @@ def getNodesFromCorosyncConf():
         ]

     nodes = []
-    lines = getCorosyncConf().strip().split('\n')
+    corosync_conf = getCorosyncConf() if conf_text is None else conf_text
+    lines = corosync_conf.strip().split('\n')
     preg = re.compile(r'.*ring0_addr: (.*)')
     for line in lines:
         match = preg.match(line)
@@ -891,6 +898,20 @@ def dom_get_element_with_id(dom, tag_name, element_id):
             return elem
     return None

+def dom_get_children_by_tag_name(dom_el, tag_name):
+    return [
+        node
+        for node in dom_el.childNodes
+        if node.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
+        and node.tagName == tag_name
+    ]
+
+def dom_get_child_by_tag_name(dom_el, tag_name):
+    children = dom_get_children_by_tag_name(dom_el, tag_name)
+    if children:
+        return children[0]
+    return None
+
 # Check if resoure is started (or stopped) for 'wait' seconds
 def is_resource_started(resource,wait,stopped=False):
     expire_time = int(time.time()) + wait
@@ -1508,3 +1529,19 @@ def disableServices():
     else:
         run(["chkconfig", "corosync", "off"])
         run(["chkconfig", "pacemaker", "off"])
+
+def write_file(path, data):
+    if os.path.exists(path):
+        if not "--force" in pcs_options:
+            return False, "'%s' already exists, use --force to overwrite" % path
+        else:
+            try:
+                os.remove(path)
+            except EnvironmentError as e:
+                return False, "unable to remove '%s': %s" % (path, e)
+    try:
+        with open(path, "w") as outfile:
+            outfile.write(data)
+    except EnvironmentError as e:
+        return False, "unable to write to '%s': %s" % (path, e)
+    return True, ""
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 11397fe..7cc9b2e 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -391,6 +391,15 @@ def pacemaker_enabled?()
   return $?.success?
 end

+def cman_running?()
+  if not ISRHEL6
+    `systemctl status cman.service`
+  else
+    `service cman status`
+  end
+  return $?.success?
+end
+
 def pcsd_enabled?()
   if not ISRHEL6
     `systemctl is-enabled pcsd.service`
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index b9b76a7..756ab8e 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -77,6 +77,7 @@ configure do
   else
     COROSYNC_CMAPCTL = "/usr/sbin/corosync-cmapctl"
   end
+  CMAN_TOOL = "/usr/sbin/cman_tool"
   COROSYNC_CONF = "/etc/corosync/corosync.conf"
   CLUSTER_CONF = "/etc/cluster/cluster.conf"
   CIBADMIN = "/usr/sbin/cibadmin"
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index f4257c4..8fcf981 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -4,6 +4,7 @@ require 'uri'
 require 'pcs.rb'
 require 'resource.rb'
 require 'open3'
+require 'open4'

 # Commands for remote access
 def remote(params,request)
@@ -42,6 +43,10 @@ def remote(params,request)
     return cluster_start(params)
   when "cluster_stop"
     return cluster_stop(params)
+  when "config_backup"
+    return config_backup(params)
+  when "config_restore"
+    return config_restore(params)
   when "node_restart"
     return node_restart(params)
   when "node_standby"
@@ -143,6 +148,54 @@ def cluster_stop(params)
   end
 end

+def config_backup(params)
+  if params[:name]
+    code, response = send_request_with_token(
+      params[:name], 'config_backup', true
+    )
+  else
+    $logger.info "Backup node configuration"
+    stdout, stderr, retval = run_cmd(PCS, "config", "backup")
+    if retval == 0
+      $logger.info "Backup successful"
+      return [200, stdout]
+    end
+    $logger.info "Error during backup: #{stderr.join(' ').strip()}"
+    return [400, "Unable to backup node: #{stderr.join(' ')}"]
+  end
+end
+
+def config_restore(params)
+  if params[:name]
+    code, response = send_request_with_token(
+      params[:name], 'config_restore', true, {:tarball => params[:tarball]}
+    )
+  else
+    $logger.info "Restore node configuration"
+    if params[:tarball] != nil and params[:tarball] != ""
+      out = ""
+      errout = ""
+      status = Open4::popen4(PCS, "config", "restore", "--local") { |pid, stdin, stdout, stderr|
+        stdin.print(params[:tarball])
+        stdin.close()
+        out = stdout.readlines()
+        errout = stderr.readlines()
+      }
+      retval = status.exitstatus
+      if retval == 0
+        $logger.info "Restore successful"
+        return "Succeeded"
+      else
+        $logger.info "Error during restore: #{errout.join(' ').strip()}"
+        return errout.length > 0 ? errout.join(' ').strip() : "Error"
+      end
+    else
+      $logger.info "Error: Invalid tarball"
+      return "Error: Invalid tarball"
+    end
+  end
+end
+
 def node_restart(params)
   if params[:name]
     code, response = send_request_with_token(params[:name], 'node_restart', true)
@@ -372,6 +425,7 @@ def node_status(params)
   corosync_enabled = corosync_enabled?
   pacemaker_status = pacemaker_running?
   pacemaker_enabled = pacemaker_enabled?
+  cman_status = cman_running?
   pcsd_enabled = pcsd_enabled?

   corosync_online = []
@@ -431,6 +485,7 @@ def node_status(params)
   cluster_settings = getAllSettings()
   node_attributes = get_node_attributes()[$cur_node_name]
   status = {"uptime" => uptime, "corosync" => corosync_status, "pacemaker" => pacemaker_status,
+            "cman" => cman_status,
             "corosync_enabled" => corosync_enabled, "pacemaker_enabled" => pacemaker_enabled,
             "pcsd_enabled" => pcsd_enabled,
             "corosync_online" => corosync_online, "corosync_offline" => corosync_offline,
@@ -953,6 +1008,22 @@ def wizard_submit(params)
 end

 def get_local_node_id
+  if ISRHEL6
+    out, errout, retval = run_cmd(COROSYNC_CMAPCTL, "cluster.cman")
+    if retval != 0
+      return ""
+    end
+    match = /cluster\.nodename=(.*)/.match(out.join("\n"))
+    if not match
+      return ""
+    end
+    local_node_name = match[1]
+    out, errout, retval = run_cmd(CMAN_TOOL, "nodes", "-F", "id", "-n", local_node_name)
+    if retval != 0
+      return ""
+    end
+    return out[0].strip()
+  end
   out, errout, retval = run_cmd(COROSYNC_CMAPCTL, "-g", "runtime.votequorum.this_node_id")
   if retval != 0
     return ""
diff --git a/pcsd/resource.rb b/pcsd/resource.rb
index 8fcfa6e..084f1e7 100644
--- a/pcsd/resource.rb
+++ b/pcsd/resource.rb
@@ -52,7 +52,7 @@ def getResourcesGroups(get_fence_devices = false, get_all_options = false)
     group_list.push(e.attributes["id"])
   end

-  resource_list.sort_by!{|a| (a.group ? "1" : "0").to_s + a.group.to_s + "-" + a.id}
+  resource_list = resource_list.sort_by{|a| (a.group ? "1" : "0").to_s + a.group.to_s + "-" + a.id}

   if get_all_options
     stdout, stderror, retval = run_cmd("cibadmin", "-Q", "-l")
--
1.9.1
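For reference, the restore side of the patch reads version.txt back out of the tarball and refuses to restore a backup newer than it understands (config_backup_check_version). Below is a hedged standalone sketch of that check in Python 3; check_backup_version and SUPPORTED_VERSION are illustrative names, not the patch's API.

# Sketch (Python 3, illustrative only): validate a backup tarball's version.txt
# before restoring, mirroring the check performed by the patch.
import tarfile

SUPPORTED_VERSION = 1  # matches config_backup_version() in the patch

def check_backup_version(tar_path):
    with tarfile.open(tar_path, "r:*") as tarball:
        try:
            member = tarball.extractfile("version.txt")
        except KeyError:
            raise ValueError("backup is missing version.txt")
        version = int(member.read())
    if version > SUPPORTED_VERSION:
        raise ValueError(
            "unsupported backup version %d, supported version is %d"
            % (version, SUPPORTED_VERSION)
        )
    if version < SUPPORTED_VERSION:
        print("Warning: restoring from older backup version %d" % version)
    return version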