Bug 763370 (GLUSTER-1638)

Summary: 3 replica creation creates only 2 replicas in volfile for nfs
Product: [Community] GlusterFS
Reporter: Shehjar Tikoo <shehjart>
Component: cli
Assignee: Amar Tumballi <amarts>
Status: CLOSED CURRENTRELEASE
Severity: high
Priority: low
Version: 3.1-alpha
CC: gluster-bugs, rabhat, vraman
Hardware: All
OS: Linux
Doc Type: Bug Fix
Regression: RTP
Mount Type: All
Attachments:
    Generated volume file (flags: none)

Description Shehjar Tikoo 2010-09-18 05:57:56 UTC
Created attachment 311 (generated volume file)

Comment 1 Shehjar Tikoo 2010-09-18 08:57:17 UTC
Take 4 servers on which the exports are structured as:

/testdirs/4d-3r-master/disk1
/testdirs/4d-3r-master/disk2
/testdirs/4d-3r-master/disk3

Together these form 12 bricks (4 servers x 3 bricks each).

On qa26, to create a distributed-replicated volume with 3 replicas, I run:

# gluster volume create 4dist-3repl replica 3 \
      192.168.1.78:/testdirs/4d-3r-master/disk1/ \
      192.168.1.78:/testdirs/4d-3r-master/disk2/ \
      192.168.1.78:/testdirs/4d-3r-master/disk3/ \
      192.168.1.79:/testdirs/4d-3r-master/disk1/ \
      192.168.1.79:/testdirs/4d-3r-master/disk2/ \
      192.168.1.79:/testdirs/4d-3r-master/disk3/ \
      192.168.1.80:/testdirs/4d-3r-master/disk1/ \
      192.168.1.80:/testdirs/4d-3r-master/disk2/ \
      192.168.1.80:/testdirs/4d-3r-master/disk3/ \
      192.168.1.77:/testdirs/4d-3r-master/disk1/ \
      192.168.1.77:/testdirs/4d-3r-master/disk2/ \
      192.168.1.77:/testdirs/4d-3r-master/disk3/

After starting the volume, the generated volume file has the first replicate defined as:

volume dr-client-0
    type protocol/client
    option transport-type tcp
    option remote-host 192.168.1.77
    option transport.socket.nodelay on
    option remote-subvolume /testdirs/4d-3r-master/disk1
end-volume

volume dr-client-1
    type protocol/client
    option transport-type tcp
    option remote-host 192.168.1.78
    option transport.socket.nodelay on
    option remote-subvolume /testdirs/4d-3r-master/disk1
end-volume

volume dr-client-2
    type protocol/client
    option transport-type tcp
    option remote-host 192.168.1.79
    option transport.socket.nodelay on
    option remote-subvolume /testdirs/4d-3r-master/disk1
end-volume

volume dr-client-3
    type protocol/client
    option transport-type tcp
    option remote-host 192.168.1.80
    option transport.socket.nodelay on
    option remote-subvolume /testdirs/4d-3r-master/disk1
end-volume

volume dr-replicate-0
    type cluster/replicate
#   option read-subvolume on
#   option favorite-child on
#   option background-self-heal-count on
#   option data-self-heal on
#   option data-self-heal-algorithm on
#   option data-self-heal-window-size on
#   option metadata-self-heal on
#   option entry-self-heal on
#   option data-change-log on
#   option metadata-change-log on
#   option entry-change-log on
#   option strict-readdir on
    subvolumes dr-client-0 dr-client-1
end-volume
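
The problem is visible above: dr-replicate-0 lists only two subvolumes (dr-client-0 and dr-client-1) even though replica 3 was requested. For comparison, a rough sketch of what the first replicate would be expected to look like with replica 3 (volume names reused from the generated file above; which three clients end up grouped together depends on the CLI's grouping, and the commented option block is elided):

volume dr-replicate-0
    type cluster/replicate
    subvolumes dr-client-0 dr-client-1 dr-client-2
end-volume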

See the attached file for the complete volume file. To reproduce, I think we will not need 4 servers; three bricks on a single server may suffice to show the above behaviour.
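
A minimal single-server reproduction sketch, assuming three local brick directories and a placeholder hostname (SERVER and the /testdirs/repro paths below are not from this report, and the location of the generated volume file depends on the installation):

# mkdir -p /testdirs/repro/disk1 /testdirs/repro/disk2 /testdirs/repro/disk3
# gluster volume create repro replica 3 SERVER:/testdirs/repro/disk1 SERVER:/testdirs/repro/disk2 SERVER:/testdirs/repro/disk3
# gluster volume start repro

Then inspect the generated client volume file and count the subvolumes listed under the cluster/replicate section; with replica 3 it should list all three client volumes.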

Comment 2 Amar Tumballi 2010-09-21 08:45:22 UTC
Worked for me on a single system. Will try on more machines and see if the behaviour is different.

Comment 3 Raghavendra Bhat 2010-09-23 09:29:01 UTC
Checked on 4 machines. It worked fine.

Comment 4 Shehjar Tikoo 2010-09-23 09:52:40 UTC
I checked it again with 12 bricks and it worked this time. Will re-open if I see it again. Thanks.