| Summary: | [cd24be007c64bd10d8c28e8e9b1c988478a32c8c]: glusterfs client crashed after starting | |||
|---|---|---|---|---|
| Product: | [Community] GlusterFS | Reporter: | Raghavendra Bhat <rabhat> | |
| Component: | protocol | Assignee: | Amar Tumballi <amarts> | |
| Status: | CLOSED CURRENTRELEASE | QA Contact: | ||
| Severity: | high | Docs Contact: | ||
| Priority: | high | |||
| Version: | mainline | CC: | gluster-bugs, vraman | |
| Target Milestone: | --- | |||
| Target Release: | --- | |||
| Hardware: | Unspecified | |||
| OS: | Linux | |||
| Whiteboard: | ||||
| Fixed In Version: | glusterfs-3.4.0 | Doc Type: | Bug Fix | |
| Doc Text: | Story Points: | --- | ||
| Clone Of: | ||||
| : | 815028 (view as bug list) | Environment: | ||
| Last Closed: | 2013-07-24 17:47:02 UTC | Type: | --- | |
| Regression: | --- | Mount Type: | --- | |
| Documentation: | --- | CRM: | ||
| Verified Versions: | glusterfs-3.3.0qa40 | Category: | --- | |
| oVirt Team: | --- | RHEL 7.3 requirements from Atomic Host: | ||
| Cloudforms Team: | --- | Target Upstream Version: | ||
| Bug Depends On: | ||||
| Bug Blocks: | 811632, 815028, 817967 | |||
|
Description
Raghavendra Bhat
2011-12-19 05:45:32 UTC
This is the backtrace of the core generated.
Core was generated by `glusterfs --volfile-server=Nightly-Sanity --volfile-id=vol /export/nightly/moun'.
Program terminated with signal 6, Aborted.
#0 0x0000003e58c30265 in raise () from /lib64/libc.so.6
(gdb) bt
#0 0x0000003e58c30265 in raise () from /lib64/libc.so.6
#1 0x0000003e58c31d10 in abort () from /lib64/libc.so.6
#2 0x0000003e58c296e6 in __assert_fail () from /lib64/libc.so.6
#3 0x00002aaaab1c0991 in client3_1_stat (frame=0x2b5edec1a5ac, this=0x1cc35f50, data=0x7fff367b0520)
at ../../../../../xlators/protocol/client/src/client3_1-fops.c:2676
#4 0x00002aaaab1acfd6 in client_stat (frame=0x2b5edec1a5ac, this=0x1cc35f50, loc=0x7fff367b0680)
at ../../../../../xlators/protocol/client/src/client.c:296
#5 0x00002aaaab3fb4df in stripe_readdirp_cbk (frame=0x2b5edec1a454, cookie=0x2b5edec1a500, this=0x1cc3a110, op_ret=9, op_errno=2,
orig_entries=0x7fff367b0790) at ../../../../../xlators/cluster/stripe/src/stripe.c:4226
#6 0x00002aaaab1be0cd in client3_1_readdirp_cbk (req=0x2aaaacae904c, iov=0x2aaaacae908c, count=1, myframe=0x2b5edec1a500)
at ../../../../../xlators/protocol/client/src/client3_1-fops.c:1955
#7 0x00002b5eddb2725e in rpc_clnt_handle_reply (clnt=0x1cc4df30, pollin=0x1cc4f140) at ../../../../rpc/rpc-lib/src/rpc-clnt.c:789
#8 0x00002b5eddb275b6 in rpc_clnt_notify (trans=0x1cc4e200, mydata=0x1cc4df60, event=RPC_TRANSPORT_MSG_RECEIVED, data=0x1cc4f140)
at ../../../../rpc/rpc-lib/src/rpc-clnt.c:908
#9 0x00002b5eddb239f3 in rpc_transport_notify (this=0x1cc4e200, event=RPC_TRANSPORT_MSG_RECEIVED, data=0x1cc4f140)
at ../../../../rpc/rpc-lib/src/rpc-transport.c:498
#10 0x00002aaaaad7dee3 in socket_event_poll_in (this=0x1cc4e200) at ../../../../../rpc/rpc-transport/socket/src/socket.c:1675
#11 0x00002aaaaad7e425 in socket_event_handler (fd=14, idx=4, data=0x1cc4e200, poll_in=1, poll_out=0, poll_err=0)
at ../../../../../rpc/rpc-transport/socket/src/socket.c:1790
#12 0x00002b5edd8cb854 in event_dispatch_epoll_handler (event_pool=0x1cc2a9a0, events=0x1cc30310, i=0)
at ../../../libglusterfs/src/event.c:794
#13 0x00002b5edd8cba59 in event_dispatch_epoll (event_pool=0x1cc2a9a0) at ../../../libglusterfs/src/event.c:856
#14 0x00002b5edd8cbdb3 in event_dispatch (event_pool=0x1cc2a9a0) at ../../../libglusterfs/src/event.c:956
#15 0x00000000004078c2 in main (argc=6, argv=0x7fff367b0d98) at ../../../glusterfsd/src/glusterfsd.c:1601
(gdb) f 3
#3 0x00002aaaab1c0991 in client3_1_stat (frame=0x2b5edec1a5ac, this=0x1cc35f50, data=0x7fff367b0520)
at ../../../../../xlators/protocol/client/src/client3_1-fops.c:2676
2676 GF_ASSERT_AND_GOTO_WITH_ERROR (this->name,
(gdb) l
2671 if (!uuid_is_null (args->loc->inode->gfid))
2672 memcpy (req.gfid, args->loc->inode->gfid, 16);
2673 else
2674 memcpy (req.gfid, args->loc->gfid, 16);
2675
2676 GF_ASSERT_AND_GOTO_WITH_ERROR (this->name,
2677 !uuid_is_null (*((uuid_t*)req.gfid)),
2678 unwind, op_errno, EINVAL);
2679 req.path = (char *)args->loc->path;
2680 conf = this->private;
(gdb) p req.gfid
$1 = '\000' <repeats 15 times>
(gdb) p *args->loc
$2 = {path = 0x1cc51cd0 "/run8724/coverage/dir/hardlink", name = 0x1cc51ce6 "hardlink", inode = 0x2aaaacdb329c, parent = 0x2aaaacdb3208,
gfid = '\000' <repeats 15 times>, pargfid = '\000' <repeats 15 times>}
(gdb) p *args->loc->inode
$3 = {table = 0x1cc50270, gfid = '\000' <repeats 15 times>, lock = 1, nlookup = 0, ref = 1, ia_type = IA_INVAL, fd_list = {
next = 0x2aaaacdb32cc, prev = 0x2aaaacdb32cc}, dentry_list = {next = 0x2aaaacdb32dc, prev = 0x2aaaacdb32dc}, hash = {
next = 0x2aaaacdb32ec, prev = 0x2aaaacdb32ec}, list = {next = 0x2aaaacdb3268, prev = 0x1cc502d0}, _ctx = 0x1cc53870}
(gdb) f 6
#6 0x00002aaaab1be0cd in client3_1_readdirp_cbk (req=0x2aaaacae904c, iov=0x2aaaacae908c, count=1, myframe=0x2b5edec1a500)
at ../../../../../xlators/protocol/client/src/client3_1-fops.c:1955
1955 STACK_UNWIND_STRICT (readdirp, frame, rsp.op_ret,
(gdb) up
#7 0x00002b5eddb2725e in rpc_clnt_handle_reply (clnt=0x1cc4df30, pollin=0x1cc4f140) at ../../../../rpc/rpc-lib/src/rpc-clnt.c:789
789 req->cbkfn (req, req->rsp, req->rspcnt, saved_frame->frame);
(gdb) down
#6 0x00002aaaab1be0cd in client3_1_readdirp_cbk (req=0x2aaaacae904c, iov=0x2aaaacae908c, count=1, myframe=0x2b5edec1a500)
at ../../../../../xlators/protocol/client/src/client3_1-fops.c:1955
1955 STACK_UNWIND_STRICT (readdirp, frame, rsp.op_ret,
(gdb) down
#5 0x00002aaaab3fb4df in stripe_readdirp_cbk (frame=0x2b5edec1a454, cookie=0x2b5edec1a500, this=0x1cc3a110, op_ret=9, op_errno=2,
orig_entries=0x7fff367b0790) at ../../../../../xlators/cluster/stripe/src/stripe.c:4226
4226 STACK_WIND_COOKIE (frame, stripe_readdirp_entry_stat_cbk,
(gdb)
4204                 inode = inode_new (local->fd->inode->table);
(gdb)
4205 if (!inode)
4206 goto out;
4207
4208 loc.inode = inode;
4209 loc.parent = local->fd->inode;
4210 ret = inode_path (local->fd->inode, local_entry->d_name, &path);
4211 if (ret != -1) {
4212 loc.path = path;
4213 } else if (inode) {
4214 ret = inode_path (inode, NULL, &path);
(gdb)
4215 if (ret != -1) {
4216 loc.path = path;
4217 } else {
4218 goto out;
4219 }
4220 }
4221
4222 loc.name = strrchr (loc.path, '/');
4223 loc.name++;
4224 trav = this->children;
(gdb)
4225 while (trav) {
4226 STACK_WIND_COOKIE (frame, stripe_readdirp_entry_stat_cbk,
4227 local_entry, trav->xlator,
4228 trav->xlator->fops->stat, &loc)
In stripe_readdirp_cbk we are creating a new inode, without any gfid set, and stack-winding to the xlators below. protocol/client cannot see the gfid in either loc or loc->inode and thus asserts.
CHANGE: http://review.gluster.com/806 (cluster/stripe: copy the gfid from the dentry structure to loc) merged in master by Vijay Bellur (vijay). CHANGE: http://review.gluster.com/2544 (cluster/stripe: copy the gfid from the dentry structure to loc) merged in release-3.2 by Vijay Bellur (vijay). Checked with glusterfs-3.3.0qa40. Now the client does not assert, since it can find the gfid. |