+++ This bug was initially created as a clone of Bug #1268822 +++

Description of problem:
In the 'gluster v info --xml' output for a tiered volume, the cold tier is always reported as a single distribute subvolume, no matter how many bricks it actually contains: <numberOfBricks> stays at "1 x 2 = 2" and <coldBrickType> shows "Replicate" instead of the correct "3 x 2 = 6" / "Distributed-Replicate".

Version-Release number of selected component (if applicable):

How reproducible:
100%

Steps to Reproduce:
1. Create a gluster tier volume with more than one replica pair of bricks in the cold tier (a command-line sketch of these steps is included at the end of this comment).
2. Issue 'gluster v info --xml'.
3. Check the <coldBricks> section of the output.

Actual results:
<coldBricks>
  <coldBrickType>Replicate</coldBrickType>
  <numberOfBricks>1 x 2 = 2</numberOfBricks>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b1_1<name>10.70.42.203:/data/gluster/tier/b1_1</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b1_2<name>10.70.42.203:/data/gluster/tier/b1_2</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b2_1<name>10.70.42.203:/data/gluster/tier/b2_1</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b2_2<name>10.70.42.203:/data/gluster/tier/b2_2</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b3_1<name>10.70.42.203:/data/gluster/tier/b3_1</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b3_2<name>10.70.42.203:/data/gluster/tier/b3_2</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
</coldBricks>

Expected results:
<coldBricks>
  <coldBrickType>Distributed-Replicate</coldBrickType>
  <numberOfBricks>3 x 2 = 6</numberOfBricks>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b1_1<name>10.70.42.203:/data/gluster/tier/b1_1</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b1_2<name>10.70.42.203:/data/gluster/tier/b1_2</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b2_1<name>10.70.42.203:/data/gluster/tier/b2_1</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b2_2<name>10.70.42.203:/data/gluster/tier/b2_2</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b3_1<name>10.70.42.203:/data/gluster/tier/b3_1</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
  <brick uuid="149ac603-8078-41c5-8f71-7373f2a3016f">10.70.42.203:/data/gluster/tier/b3_2<name>10.70.42.203:/data/gluster/tier/b3_2</name><hostUuid>149ac603-8078-41c5-8f71-7373f2a3016f</hostUuid></brick>
</coldBricks>

Additional info:
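For reference, a minimal reproduction sketch (not part of the original report): the volume name, hostname and brick paths are illustrative, and the attach-tier syntax follows the GlusterFS 3.7-era tiering CLI; adjust to your setup.

# Hypothetical single-node reproduction; hostname, volume name and brick
# paths are illustrative (loosely based on the output above).
gluster volume create tiervol replica 2 \
    10.70.42.203:/data/gluster/tier/b1_1 10.70.42.203:/data/gluster/tier/b1_2 \
    10.70.42.203:/data/gluster/tier/b2_1 10.70.42.203:/data/gluster/tier/b2_2 \
    10.70.42.203:/data/gluster/tier/b3_1 10.70.42.203:/data/gluster/tier/b3_2 force
gluster volume start tiervol
# Attach a hot tier (3.7-era attach-tier syntax); the original 3 x 2 volume
# becomes the cold tier.
gluster volume attach-tier tiervol replica 2 \
    10.70.42.203:/data/gluster/tier/hot_1 10.70.42.203:/data/gluster/tier/hot_2
# Inspect the <coldBricks> section of the XML output.
gluster v info tiervol --xml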
Working now:

[root@zod ~]# rpm -qa|grep gluster
glusterfs-libs-3.7.5-5.el7rhgs.x86_64
glusterfs-fuse-3.7.5-5.el7rhgs.x86_64
glusterfs-3.7.5-5.el7rhgs.x86_64
glusterfs-server-3.7.5-5.el7rhgs.x86_64
glusterfs-client-xlators-3.7.5-5.el7rhgs.x86_64
glusterfs-cli-3.7.5-5.el7rhgs.x86_64
glusterfs-api-3.7.5-5.el7rhgs.x86_64
glusterfs-debuginfo-3.7.5-5.el7rhgs.x86_64

[root@zod ~]# gluster v status quota_one --xml
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volStatus>
    <volumes>
      <volume>
        <volName>quota_one</volName>
        <nodeCount>14</nodeCount>
        <hotBricks>
          <node> <hostname>yarrow</hostname> <path>/dummy/brick101/quota_one_hot</path> <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid> <status>1</status> <port>49185</port> <ports> <tcp>49185</tcp> <rdma>N/A</rdma> </ports> <pid>18811</pid> </node>
          <node> <hostname>zod</hostname> <path>/dummy/brick101/quota_one_hot</path> <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid> <status>1</status> <port>49185</port> <ports> <tcp>49185</tcp> <rdma>N/A</rdma> </ports> <pid>20257</pid> </node>
          <node> <hostname>yarrow</hostname> <path>/dummy/brick100/quota_one_hot</path> <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid> <status>1</status> <port>49184</port> <ports> <tcp>49184</tcp> <rdma>N/A</rdma> </ports> <pid>18854</pid> </node>
          <node> <hostname>zod</hostname> <path>/dummy/brick100/quota_one_hot</path> <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid> <status>1</status> <port>49184</port> <ports> <tcp>49184</tcp> <rdma>N/A</rdma> </ports> <pid>20275</pid> </node>
        </hotBricks>
        <coldBricks>
          <node> <hostname>zod</hostname> <path>/rhs/brick1/quota_one</path> <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid> <status>1</status> <port>49182</port> <ports> <tcp>49182</tcp> <rdma>N/A</rdma> </ports> <pid>20293</pid> </node>
          <node> <hostname>yarrow</hostname> <path>/rhs/brick1/quota_one</path> <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid> <status>1</status> <port>49182</port> <ports> <tcp>49182</tcp> <rdma>N/A</rdma> </ports> <pid>18883</pid> </node>
          <node> <hostname>zod</hostname> <path>/rhs/brick2/quota_one</path> <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid> <status>1</status> <port>49183</port> <ports> <tcp>49183</tcp> <rdma>N/A</rdma> </ports> <pid>20311</pid> </node>
          <node> <hostname>yarrow</hostname> <path>/rhs/brick2/quota_one</path> <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid> <status>1</status> <port>49183</port> <ports> <tcp>49183</tcp> <rdma>N/A</rdma> </ports> <pid>18901</pid> </node>
          <node> <hostname>NFS Server</hostname> <path>localhost</path> <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid> <status>0</status> <port>N/A</port> <ports> <tcp>N/A</tcp> <rdma>N/A</rdma> </ports> <pid>-1</pid> </node>
          <node> <hostname>Self-heal Daemon</hostname> <path>localhost</path> <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid> <status>1</status> <port>N/A</port> <ports> <tcp>N/A</tcp> <rdma>N/A</rdma> </ports> <pid>20347</pid> </node>
          <node> <hostname>Quota Daemon</hostname> <path>localhost</path> <peerid>ad002db4-bdc0-43e3-aae7-c209012140b0</peerid> <status>1</status> <port>N/A</port> <ports> <tcp>N/A</tcp> <rdma>N/A</rdma> </ports> <pid>20356</pid> </node>
          <node> <hostname>NFS Server</hostname> <path>10.70.34.43</path> <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid> <status>0</status> <port>N/A</port> <ports> <tcp>N/A</tcp> <rdma>N/A</rdma> </ports> <pid>-1</pid> </node>
          <node> <hostname>Self-heal Daemon</hostname> <path>10.70.34.43</path> <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid> <status>1</status> <port>N/A</port> <ports> <tcp>N/A</tcp> <rdma>N/A</rdma> </ports> <pid>19003</pid> </node>
          <node> <hostname>Quota Daemon</hostname> <path>10.70.34.43</path> <peerid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</peerid> <status>1</status> <port>N/A</port> <ports> <tcp>N/A</tcp> <rdma>N/A</rdma> </ports> <pid>19012</pid> </node>
        </coldBricks>
        <tasks>
          <task>
            <type>Tier migration</type>
            <id>eae47ea7-aea5-4220-8f1d-c6cfc145875d</id>
            <status>1</status>
            <statusStr>in progress</statusStr>
          </task>
        </tasks>
      </volume>
    </volumes>
  </volStatus>
</cliOutput>

[root@zod ~]# gluster v info quota_one --xml
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
  <opRet>0</opRet>
  <opErrno>0</opErrno>
  <opErrstr/>
  <volInfo>
    <volumes>
      <volume>
        <name>quota_one</name>
        <id>1f7be42a-0213-4e7c-9721-392a3747a19a</id>
        <status>1</status>
        <statusStr>Started</statusStr>
        <brickCount>8</brickCount>
        <distCount>2</distCount>
        <stripeCount>1</stripeCount>
        <replicaCount>2</replicaCount>
        <disperseCount>0</disperseCount>
        <redundancyCount>0</redundancyCount>
        <type>5</type>
        <typeStr>Tier</typeStr>
        <transport>0</transport>
        <xlators/>
        <bricks>
          <hotBricks>
            <hotBrickType>Distributed-Replicate</hotBrickType>
            <numberOfBricks>2 x 2 = 4</numberOfBricks>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/dummy/brick101/quota_one_hot<name>yarrow:/dummy/brick101/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/dummy/brick101/quota_one_hot<name>zod:/dummy/brick101/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/dummy/brick100/quota_one_hot<name>yarrow:/dummy/brick100/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/dummy/brick100/quota_one_hot<name>zod:/dummy/brick100/quota_one_hot</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
          </hotBricks>
          <coldBricks>
            <coldBrickType>Distributed-Replicate</coldBrickType>
            <numberOfBricks>2 x 2 = 4</numberOfBricks>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/rhs/brick1/quota_one<name>zod:/rhs/brick1/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/rhs/brick1/quota_one<name>yarrow:/rhs/brick1/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">zod:/rhs/brick2/quota_one<name>zod:/rhs/brick2/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
            <brick uuid="236f7068-8b99-4aa0-a0b5-40b76146cdf4">yarrow:/rhs/brick2/quota_one<name>yarrow:/rhs/brick2/quota_one</name><hostUuid>236f7068-8b99-4aa0-a0b5-40b76146cdf4</hostUuid></brick>
          </coldBricks>
        </bricks>
        <optCount>6</optCount>
        <options>
          <option> <name>diagnostics.brick-log-level</name> <value>TRACE</value> </option>
          <option> <name>features.quota-deem-statfs</name> <value>on</value> </option>
          <option> <name>features.ctr-enabled</name> <value>on</value> </option>
          <option> <name>features.inode-quota</name> <value>on</value> </option>
          <option> <name>features.quota</name> <value>on</value> </option>
          <option> <name>performance.readdir-ahead</name> <value>on</value> </option>
        </options>
      </volume>
      <count>1</count>
    </volumes>
  </volInfo>
</cliOutput>
[root@zod ~]#
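To check just the cold-tier fields without reading the whole document, the relevant values can be pulled out of the XML with xmllint. This is an illustrative sketch (not part of the verification above), using the quota_one volume from the output:

# Dump the volume info XML and extract the cold-tier type and brick count;
# on the fixed build these should print "Distributed-Replicate" and "2 x 2 = 4".
gluster v info quota_one --xml > /tmp/quota_one_info.xml
xmllint --xpath '//coldBricks/coldBrickType/text()' /tmp/quota_one_info.xml; echo
xmllint --xpath '//coldBricks/numberOfBricks/text()' /tmp/quota_one_info.xml; echo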
Since the problem described in this bug report should be resolved in a recent advisory, it has been closed with a resolution of ERRATA. For information on the advisory, and where to find the updated files, follow the link below. If the solution does not work for you, open a new bug report. https://rhn.redhat.com/errata/RHBA-2016-0193.html