Bug 1218488 - Brick and nfs processes gets killed with OOM
Summary: Brick and nfs processes gets killed with OOM
Keywords:
Status: CLOSED CURRENTRELEASE
Alias: None
Product: GlusterFS
Classification: Community
Component: nfs
Version: mainline
Hardware: Unspecified
OS: Unspecified
Priority: unspecified
Severity: unspecified
Target Milestone: ---
Assignee: Pranith Kumar K
QA Contact:
URL:
Whiteboard:
Depends On:
Blocks: qe_tracker_everglades 1224198
 
Reported: 2015-05-05 05:13 UTC by Bhaskarakiran
Modified: 2018-11-20 08:32 UTC
CC: 4 users

Fixed In Version: v3.7.0
Clone Of:
: 1224198
Environment:
Last Closed: 2018-11-20 08:32:25 UTC
Regression: ---
Mount Type: ---
Documentation: ---
CRM:
Verified Versions:
Embargoed:



Description Bhaskarakiran 2015-05-05 05:13:22 UTC
Description of problem:
=======================

A distributed-disperse EC volume (2x(8+4)) is NFS-mounted on a client. I/O (plain directory and file creation) is run from the client. Around 100000 directories and 10000 files already exist, and more data is being added. Running 'ls' on the directory to list it makes the mount hang. The sequence of events is as follows (a sketch of commands that could confirm these symptoms appears after the list):

1. mount process hangs
2. bricks crash
3. NFS server doesn't respond
4. rpcinfo doesn't show the volume export
5. NFS server crashes with OOM
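
A minimal sketch of commands that could be used to confirm these symptoms from the server side, assuming a hypothetical volume name "testvol" and server "server1" (both placeholders, not taken from this report):

# Check whether the gluster NFS server still registers the export (symptoms 3 and 4)
showmount -e server1
rpcinfo -p server1 | grep nfs

# Check brick and NFS server process status for the volume (symptoms 2 and 3)
gluster volume status testvol

# Look for OOM kills of glusterfsd (brick) or glusterfs (NFS) processes (symptoms 2 and 5)
dmesg | grep -iE 'out of memory|oom-killer|killed process'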

This is being tested on master with Pranith's heal-related patches applied.

Version-Release number of selected component (if applicable):
============================================================
[root@vertigo bricks]# gluster --version
glusterfs 3.8dev built on Apr 30 2015 11:14:47
Repository revision: git://git.gluster.com/glusterfs.git
Copyright (c) 2006-2011 Gluster Inc. <http://www.gluster.com>
GlusterFS comes with ABSOLUTELY NO WARRANTY.
You may redistribute copies of GlusterFS under the terms of the GNU General Public License.

How reproducible:
=================
100%

Steps to Reproduce:
1. Create a 2x(8+4) disperse (EC) volume and NFS-mount it on the client.
2. Create around 200k directories and 10k files.
3. Run 'ls' on the directories to list them (a sketch of the corresponding commands follows below).
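
A minimal reproduction sketch, assuming hypothetical server names (server1..server3), brick paths and volume name; the brick layout and mount options are illustrative only:

# 2x(8+4) distributed-disperse volume: 24 bricks, 12 per disperse set, redundancy 4
# ('force' may be needed depending on brick placement)
gluster volume create testvol disperse 12 redundancy 4 \
        server{1..3}:/bricks/brick{1..8}/testvol force
gluster volume start testvol

# NFS-mount on the client (gluster NFS serves NFSv3)
mount -t nfs -o vers=3 server1:/testvol /mnt/testvol

# Create the directory/file load, then list it
cd /mnt/testvol
for i in $(seq 1 200000); do mkdir "dir$i"; done
for i in $(seq 1 10000); do touch "file$i"; done
ls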

Actual results:
===============
The client mount hung, and the brick and NFS server processes crashed with OOM.

Expected results:


Additional info:
sosreports will be copied to rhsqe-repo:/sosreports/<bugid>

Comment 3 Pranith Kumar K 2015-05-18 17:02:01 UTC
These are the leaks that were found; the fixes were merged as part of: http://review.gluster.com/#/c/10693/

From 78c8532a0b3f5a856c7aebaffdff80fa712b6817 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K <pkarampu>
Date: Tue, 5 May 2015 14:57:30 +0530
Subject: [PATCH 1/1] cluster/ec: Fix memory leaks

Change-Id: Ie4e353ec3e1a92627c5f3fa0afee37c9e602c847
Signed-off-by: Pranith Kumar K <pkarampu>
---
 xlators/cluster/ec/src/ec-heal.c       |  7 ++++---
 xlators/cluster/ec/src/ec-heald.c      | 14 +++++++++-----
 xlators/cluster/ec/src/ec-inode-read.c |  2 +-
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
index ac796b2..a517fc7 100644
--- a/xlators/cluster/ec/src/ec-heal.c
+++ b/xlators/cluster/ec/src/ec-heal.c
@@ -2315,9 +2315,9 @@ ec_create_name (call_frame_t *frame, ec_t *ec, inode_t *parent, char *name,
 
         ret = 0;
 out:
+        cluster_replies_wipe (replies, ec->nodes);
         loc_wipe (&loc);
         loc_wipe (&srcloc);
-        EC_REPLIES_ALLOC (replies, ec->nodes);
         if (xdata)
                 dict_unref (xdata);
         return ret;
@@ -2340,6 +2340,7 @@ __ec_heal_name (call_frame_t *frame, ec_t *ec, inode_t *parent, char *name,
         unsigned char      *same     = NULL;
         unsigned char      *gfidless = NULL;
 
+        EC_REPLIES_ALLOC (replies, ec->nodes);
         loc.parent = inode_ref (parent);
         loc.inode = inode_new (parent->table);
         gf_uuid_copy (loc.pargfid, parent->gfid);
@@ -2360,7 +2361,6 @@ __ec_heal_name (call_frame_t *frame, ec_t *ec, inode_t *parent, char *name,
         output = alloca0 (ec->nodes);
         gfidless = alloca0 (ec->nodes);
         enoent = alloca0 (ec->nodes);
-        EC_REPLIES_ALLOC (replies, ec->nodes);
         ret = cluster_lookup (ec->xl_list, participants, ec->nodes, replies,
                               output, frame, ec->xl, &loc, NULL);
         for (i = 0; i < ec->nodes; i++) {
@@ -2529,6 +2529,7 @@ ec_heal_names (call_frame_t *frame, ec_t *ec, inode_t *inode,
                 if (EC_COUNT (participants, ec->nodes) <= ec->fragments)
                         return -ENOTCONN;
         }
+        loc_wipe (&loc);
         return 0;
 }
 
@@ -3348,7 +3349,7 @@ ec_heal_data (call_frame_t *frame, ec_t *ec, gf_boolean_t block, inode_t *inode,
         locked_on  = alloca0(ec->nodes);
         output     = alloca0(ec->nodes);
         up_subvols = alloca0(ec->nodes);
-        loc. inode = inode_ref (inode);
+        loc.inode = inode_ref (inode);
         gf_uuid_copy (loc.gfid, inode->gfid);
 
         fd = fd_create (inode, 0);
diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
index 53b3996..a7cf8f7 100644
--- a/xlators/cluster/ec/src/ec-heald.c
+++ b/xlators/cluster/ec/src/ec-heald.c
@@ -18,7 +18,7 @@
 #include "syncop-utils.h"
 #include "protocol-common.h"
 
-#define SHD_INODE_LRU_LIMIT          2048
+#define SHD_INODE_LRU_LIMIT          10
 #define ASSERT_LOCAL(this, healer)				        \
         do {                                                            \
                 if (!ec_shd_is_subvol_local (this, healer->subvol)) {	\
@@ -224,8 +224,8 @@ ec_shd_index_heal (xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
                    void *data)
 {
         struct subvol_healer *healer = data;
-        ec_t                 *ec = NULL;
-        loc_t                loc = {0};
+        ec_t                 *ec     = NULL;
+        loc_t                loc     = {0};
         int                  ret     = 0;
 
         ec = healer->this->private;
@@ -254,6 +254,8 @@ ec_shd_index_heal (xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
         ec_shd_selfheal (healer, healer->subvol, &loc);
 
 out:
+        if (loc.inode)
+                inode_forget (loc.inode, 0);
         loc_wipe (&loc);
 
         return 0;
@@ -280,7 +282,7 @@ ec_shd_index_sweep (struct subvol_healer *healer)
         ret = syncop_dir_scan (subvol, &loc, GF_CLIENT_PID_AFR_SELF_HEALD,
                                healer, ec_shd_index_heal);
 
-        inode_forget (loc.inode, 1);
+        inode_forget (loc.inode, 0);
         loc_wipe (&loc);
 
         return ret;
@@ -318,10 +320,12 @@ ec_shd_full_heal (xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
 
         ec_shd_selfheal (healer, healer->subvol, &loc);
 
-        loc_wipe (&loc);
         ret = 0;
 
 out:
+        if (loc.inode)
+                inode_forget (loc.inode, 0);
+        loc_wipe (&loc);
         return ret;
 }
 
diff --git a/xlators/cluster/ec/src/ec-inode-read.c b/xlators/cluster/ec/src/ec-inode-read.c
index 3483dfb..273d50f 100644
--- a/xlators/cluster/ec/src/ec-inode-read.c
+++ b/xlators/cluster/ec/src/ec-inode-read.c
@@ -374,7 +374,7 @@ int32_t ec_getxattr_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *xl,
                 goto out;
             }
 
-            if (dict_set_str(dict, EC_XATTR_HEAL, str) != 0) {
+            if (dict_set_dynstr(dict, EC_XATTR_HEAL, str) != 0) {
                 GF_FREE(str);
                 dict_unref(dict);
                 dict = NULL;
-- 
1.9.0

Comment 4 Bhaskarakiran 2015-05-22 07:05:20 UTC
The fix is specific to the NFS crash, but the brick processes are still getting killed with OOM messages. Moving the bug back.
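
A minimal sketch, assuming the same hypothetical volume name "testvol", of how brick memory growth could be tracked while the workload runs; the statedump location shown is the default and may differ:

# Watch resident memory of the brick (glusterfsd) and gluster NFS (glusterfs) processes
ps -C glusterfsd,glusterfs -o pid,rss,cmd

# Capture allocation statistics from the brick processes for leak analysis
gluster volume statedump testvol
ls /var/run/gluster/*.dump.*    # default statedump location

# Confirm OOM kills after a crash
dmesg | grep -i 'killed process'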

Comment 6 Ashish Pandey 2018-11-20 08:25:18 UTC
The patch that was sent should fix some of the issues.
As this is a very old bug and we have not seen the issue in recent times, it is better to close it for now.

