Bug 1233657 - [Backup]: Delete entry logged in the outfile for a symbolic link, without the corresponding NEW entry
Summary: [Backup]: Delete entry logged in the outfile for a symbolic link, without the...
Keywords:
Status: CLOSED WONTFIX
Alias: None
Product: Red Hat Gluster Storage
Classification: Red Hat Storage
Component: glusterfind
Version: rhgs-3.1
Hardware: Unspecified
OS: Unspecified
unspecified
medium
Target Milestone: ---
: ---
Assignee: Bug Updates Notification Mailing List
QA Contact: Sweta Anandpara
URL:
Whiteboard:
Depends On:
Blocks: 1223636
TreeView+ depends on / blocked
 
Reported: 2015-06-19 11:43 UTC by Sweta Anandpara
Modified: 2018-04-16 03:03 UTC (History)
5 users (show)

Fixed In Version:
Doc Type: Bug Fix
Doc Text:
Clone Of:
Environment:
Last Closed: 2018-04-16 03:03:57 UTC
Embargoed:


Attachments (Terms of Use)

Description Sweta Anandpara 2015-06-19 11:43:15 UTC
Description of problem:
If a symbolic link (or any file/directory for that matter) is created and then deleted within the same interval, the entry should not get captured at all in the output file. 

1) It is sub-optimal to get:
NEW link1
DELETE link1

2) And it is incorrect to get only this:
DELETE link1

I am seeing (2) for a link file that was wrongly created. Pasted below are the logs:

##########   client logs      ################

[root@dhcp43-59 ozone2]# 
[root@dhcp43-59 ozone2]# mount | grep ozone
10.70.42.30:/ozone on /mnt/ozone type nfs (rw,addr=10.70.42.30)
10.70.43.191:/ozone on /mnt/ozone2 type nfs (rw,addr=10.70.43.191)
[root@dhcp43-59 ozone2]# 
[root@dhcp43-59 ozone2]# 
[root@dhcp43-59 ozone2]# mkdir dir1
[root@dhcp43-59 ozone2]# mkdir dir1/dir2
[root@dhcp43-59 ozone2]# touch dir1/dir2/a
[root@dhcp43-59 ozone2]# ln -s dir1/dir2/a dir1/a_link
[root@dhcp43-59 ozone2]# ls -lrt
total 308
-rw-r--r--.   1 root root 102728 Jun 18 18:07 5582ba18%%FTZ39AW4RV.tar.gz
-rwxr-xr-x.   1  501  500 102400 Jun 18 18:36 V6MO_newhdln
-rw-r--r--.   1 root root 102400 Jun 18 20:47 5582baeb%%SNRP8ENTE9
lrwxrwxrwx.   1 root root      7 Jun 18 21:00 level20_sln2 -> level20
-rw-r--r--.   1 root root      0 Jun 19 15:46 a
-rw-r--r--.   1 root root      0 Jun 19 15:47 b
drwxr-xr-x.   3 root root     20 Jun 19 18:04 level10
drwxr-xr-x.   3 root root     20 Jun 19 18:04 level02
drwxr-xr-x.   2 root root      6 Jun 19 20:18 level00
drwxr-xr-x. 100 root root   4096 Jun 19 22:21 etc
drwxr-xr-x.   3  502  500    115 Jun 19 22:21 level01
drwxr-xr-x.   3 root root     30 Jun 19 22:34 dir1
[root@dhcp43-59 ozone2]# ls -lrt dir1
total 1
drwxr-xr-x. 2 root root  6 Jun 19 22:34 dir2
lrwxrwxrwx. 1 root root 11 Jun 19 22:34 a_link -> dir1/dir2/a
[root@dhcp43-59 ozone2]# mv a_link dir1/dir2/
mv: cannot stat `a_link': No such file or directory
[root@dhcp43-59 ozone2]# mv dir1/a_link dir1/dir2/
[root@dhcp43-59 ozone2]# ls -lrt dir1/
total 1
drwxr-xr-x. 2 root root 19 Jun 19 22:35 dir2
[root@dhcp43-59 ozone2]# ls -lrt dir1/dir2
total 1
-rw-r--r--. 1 root root  0 Jun 19 22:34 a
lrwxrwxrwx. 1 root root 11 Jun 19 22:34 a_link -> dir1/dir2/a
[root@dhcp43-59 ozone2]# rm dir1/dir2/a_link 
rm: remove symbolic link `dir1/dir2/a_link'? y
[root@dhcp43-59 ozone2]# ls -lrt
total 326
-rw-r--r--.   1 root root 102728 Jun 18 18:07 5582ba18%%FTZ39AW4RV.tar.gz
-rwxr-xr-x.   1  501  500 102400 Jun 18 18:36 V6MO_newhdln
-rw-r--r--.   1 root root 102400 Jun 18 20:47 5582baeb%%SNRP8ENTE9
lrwxrwxrwx.   1 root root      7 Jun 18 21:00 level20_sln2 -> level20
-rw-r--r--.   1 root root      0 Jun 19 15:46 a
-rw-r--r--.   1 root root      0 Jun 19 15:47 b
drwxr-xr-x.   3 root root    128 Jun 19 18:04 level10
drwxr-xr-x.   2 root root     86 Jun 19 21:14 level00
drwxr-xr-x.   3 root root    128 Jun 19 21:23 level02
drwxr-xr-x. 100 root root  12288 Jun 19 22:21 etc
drwxr-xr-x.   3  502  500   8227 Jun 19 22:21 level01
drwxr-xr-x.   3 root root     51 Jun 19 22:35 dir1
[root@dhcp43-59 ozone2]# ls -lrt dir1/
total 2
drwxr-xr-x. 2 root root 26 Jun 19 22:35 dir2
[root@dhcp43-59 ozone2]# ls -lrt dir1/dir2/
total 0
-rw-r--r--. 1 root root 0 Jun 19 22:34 a
[root@dhcp43-59 ozone2]# 


##################   server logs       ###################

[root@dhcp43-191 ~]# glusterfind post sesso1 ozone
Session sesso1 with volume ozone updated
[root@dhcp43-191 ~]# glusterfind pre sesso1 ozone /tmp/outo1.txt 
Generated output file /tmp/outo1.txt
[root@dhcp43-191 ~]# cat /tmp/outo1.txt 
NEW dir1 
NEW dir1%2F%2Fdir2 
NEW dir1%2Fdir2%2F%2Fa 
NEW dir1%2Fdir2%2F%2Fa_link 
DELETE dir1%2Fdir2%2Fa_link 
[root@dhcp43-191 ~]# glusterfind pre sesso1 ozone /tmp/outo1.txt --regenerate-outfile
Generated output file /tmp/outo1.txt
[root@dhcp43-191 ~]# cat /tmp/outo1.txt 
NEW dir1 
NEW dir1%2F%2Fdir2 
NEW dir1%2Fdir2%2F%2Fa 
DELETE dir1%2Fdir2%2Fa_link 
[root@dhcp43-191 ~]# date
Fri Jun 19 22:36:50 IST 2015
[root@dhcp43-191 ~]# glusterfind pre sesso1 ozone /tmp/outo1.txt --regenerate-outfile
Generated output file /tmp/outo1.txt
[root@dhcp43-191 ~]# cat /tmp/outo1.txt 
NEW dir1 
NEW dir1%2F%2Fdir2 
NEW dir1%2Fdir2%2F%2Fa 
DELETE dir1%2Fdir2%2Fa_link 
[root@dhcp43-191 ~]# date
Fri Jun 19 22:37:37 IST 2015
[root@dhcp43-191 ~]# glusterfind pre sesso1 ozone /tmp/outo1.txt --regenerate-outfile
Generated output file /tmp/outo1.txt
[root@dhcp43-191 ~]# cat /tmp/outo1.txt 
NEW dir1 
NEW dir1%2F%2Fdir2 
NEW dir1%2Fdir2%2F%2Fa 
DELETE dir1%2Fdir2%2Fa_link 
[root@dhcp43-191 ~]# 
[root@dhcp43-191 ~]# 
[root@dhcp43-191 ~]# gluster v status ozone
Status of volume: ozone
Gluster process                             TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick 10.70.43.191:/rhs/thinbrick1/ozone    49153     0          Y       6807 
Brick 10.70.42.202:/rhs/thinbrick1/ozone    49153     0          Y       31482
Brick 10.70.43.191:/rhs/thinbrick2/ozone    49154     0          Y       6815 
Brick 10.70.42.202:/rhs/thinbrick2/ozone    49154     0          Y       31489
Brick 10.70.42.30:/rhs/thinbrick1/ozone     49153     0          Y       1999 
Brick 10.70.42.147:/rhs/thinbrick1/ozone    49152     0          Y       31451
NFS Server on localhost                     2049      0          Y       32179
Self-heal Daemon on localhost               N/A       N/A        Y       32187
NFS Server on 10.70.42.147                  2049      0          Y       22317
Self-heal Daemon on 10.70.42.147            N/A       N/A        Y       22325
NFS Server on 10.70.42.202                  2049      0          Y       22628
Self-heal Daemon on 10.70.42.202            N/A       N/A        Y       22636
NFS Server on 10.70.42.30                   2049      0          Y       15583
Self-heal Daemon on 10.70.42.30             N/A       N/A        Y       15591
 
Task Status of Volume ozone
------------------------------------------------------------------------------
There are no active volume tasks
 
[root@dhcp43-191 ~]# gluster v info ozone
 
Volume Name: ozone
Type: Distributed-Replicate
Volume ID: 9ef1ace8-505d-4d97-aa23-4296aa685f76
Status: Started
Number of Bricks: 3 x 2 = 6
Transport-type: tcp
Bricks:
Brick1: 10.70.43.191:/rhs/thinbrick1/ozone
Brick2: 10.70.42.202:/rhs/thinbrick1/ozone
Brick3: 10.70.43.191:/rhs/thinbrick2/ozone
Brick4: 10.70.42.202:/rhs/thinbrick2/ozone
Brick5: 10.70.42.30:/rhs/thinbrick1/ozone
Brick6: 10.70.42.147:/rhs/thinbrick1/ozone
Options Reconfigured:
performance.readdir-ahead: on
storage.build-pgfid: on
changelog.changelog: on
changelog.capture-del-path: on
[root@dhcp43-191 ~]# 
[root@dhcp43-191 ~]# rpm -qa | grep gluster
glusterfs-libs-3.7.1-4.el6rhs.x86_64
glusterfs-api-3.7.1-4.el6rhs.x86_64
glusterfs-client-xlators-3.7.1-4.el6rhs.x86_64
glusterfs-fuse-3.7.1-4.el6rhs.x86_64
glusterfs-cli-3.7.1-4.el6rhs.x86_64
glusterfs-3.7.1-4.el6rhs.x86_64
glusterfs-server-3.7.1-4.el6rhs.x86_64
[root@dhcp43-191 ~]# 
[root@dhcp43-191 ~]# 
[root@dhcp43-191 ~]# glusterfind list
SESSION                   VOLUME                    SESSION TIME             
---------------------------------------------------------------------------
sesso3                    ozone                     2015-06-18 16:27:30      
sesso1                    ozone                     2015-06-19 22:32:29      
sesso2                    ozone                     2015-06-19 22:44:40      
sesso4                    ozone                     2015-06-18 16:27:38      
[root@dhcp43-191 ~]# 


Version-Release number of selected component (if applicable):
glusterfs-3.7.1-4.el6rhs.x86_64

How reproducible: 1:1

Comment 7 Amar Tumballi 2018-04-16 03:03:57 UTC
Feel free to open this bug if the issue still persists and you require a fix. Closing this as WONTFIX as we are not working on this bug, and treating it as a 'TIMEOUT'.


Note You need to log in before you can comment on or make changes to this bug.