Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 306208 Details for
Bug 432057
GFS2: d_doio stuck in readv() waiting for pagelock.
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
My patch from today
20May2008.bob.patch (text/plain), 97.33 KB, created by
Robert Peterson
on 2008-05-21 03:57:56 UTC
(
hide
)
Description:
My patch from today
Filename:
MIME Type:
Creator:
Robert Peterson
Created:
2008-05-21 03:57:56 UTC
Size:
97.33 KB
patch
obsolete
>diff -pur a/fs/dlm/lock.c b/fs/dlm/lock.c >--- a/fs/dlm/lock.c 2008-05-02 09:33:41.000000000 -0500 >+++ b/fs/dlm/lock.c 2008-05-20 16:05:05.000000000 -0500 >@@ -1783,7 +1783,8 @@ static void grant_pending_locks(struct d > > list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) { > if (lkb->lkb_bastaddr && lock_requires_bast(lkb, high, cw)) { >- if (cw && high == DLM_LOCK_PR) >+ if (cw && high == DLM_LOCK_PR && >+ lkb->lkb_grmode == DLM_LOCK_PR) > queue_bast(r, lkb, DLM_LOCK_CW); > else > queue_bast(r, lkb, high); >diff -pur a/fs/gfs2/glock.c b/fs/gfs2/glock.c >--- a/fs/gfs2/glock.c 2008-04-29 13:28:50.000000000 -0500 >+++ b/fs/gfs2/glock.c 2008-05-20 20:59:42.000000000 -0500 >@@ -47,21 +47,20 @@ struct gfs2_gl_hash_bucket { > struct hlist_head hb_list; > }; > >-struct glock_iter { >- int hash; /* hash bucket index */ >- struct gfs2_sbd *sdp; /* incore superblock */ >- struct gfs2_glock *gl; /* current glock struct */ >- struct seq_file *seq; /* sequence file for debugfs */ >- char string[512]; /* scratch space */ >+struct gfs2_glock_iter { >+ int hash; /* hash bucket index */ >+ struct gfs2_sbd *sdp; /* incore superblock */ >+ struct gfs2_glock *gl; /* current glock struct */ >+ char string[512]; /* scratch space */ > }; > > typedef void (*glock_examiner) (struct gfs2_glock * gl); > > static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); >-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl); >-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh); >-static void gfs2_glock_drop_th(struct gfs2_glock *gl); >-static void run_queue(struct gfs2_glock *gl); >+static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl); >+#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0) >+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); >+ > static DECLARE_RWSEM(gfs2_umount_flush_sem); > > #define GFS2_GL_HASH_SHIFT 15 >@@ -122,31 
+121,36 @@ static inline rwlock_t *gl_lock_addr(uns > } > #endif > >-/** >- * relaxed_state_ok - is a requested lock compatible with the current lock mode? >- * @actual: the current state of the lock >- * @requested: the lock state that was requested by the caller >- * @flags: the modifier flags passed in by the caller >- * >- * Returns: 1 if the locks are compatible, 0 otherwise >- */ >- >-static inline int relaxed_state_ok(unsigned int actual, unsigned requested, >- int flags) >+/* copied from upstream kernel: */ >+void *__seq_open_private(struct file *f, struct seq_operations *ops, >+ int psize) > { >- if (actual == requested) >- return 1; >+ int rc; >+ void *private; >+ struct seq_file *seq; > >- if (flags & GL_EXACT) >- return 0; >+ private = kzalloc(psize, GFP_KERNEL); >+ if (private == NULL) >+ goto out; > >- if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED) >- return 1; >+ rc = seq_open(f, ops); >+ if (rc < 0) >+ goto out_free; >+ >+ seq = f->private_data; >+ seq->private = private; >+ return private; > >- if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY)) >- return 1; >+out_free: >+ kfree(private); >+out: >+ return NULL; >+} > >- return 0; >+int seq_open_private(struct file *filp, struct seq_operations *ops, >+ int psize) >+{ >+ return __seq_open_private(filp, ops, psize) ? 
0 : -ENOMEM; > } > > /** >@@ -210,18 +214,14 @@ void gfs2_glock_hold(struct gfs2_glock * > int gfs2_glock_put(struct gfs2_glock *gl) > { > int rv = 0; >- struct gfs2_sbd *sdp = gl->gl_sbd; > > write_lock(gl_lock_addr(gl->gl_hash)); > if (atomic_dec_and_test(&gl->gl_ref)) { > hlist_del(&gl->gl_list); > write_unlock(gl_lock_addr(gl->gl_hash)); >- BUG_ON(spin_is_locked(&gl->gl_spin)); >- gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED); >- gfs2_assert(sdp, list_empty(&gl->gl_reclaim)); >- gfs2_assert(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert(sdp, list_empty(&gl->gl_waiters1)); >- gfs2_assert(sdp, list_empty(&gl->gl_waiters3)); >+ GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED); >+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim)); >+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); > glock_free(gl); > rv = 1; > goto out; >@@ -281,16 +281,389 @@ static struct gfs2_glock *gfs2_glock_fin > return gl; > } > >+/** >+ * may_grant - check if its ok to grant a new lock >+ * @gl: The glock >+ * @gh: The lock request which we wish to grant >+ * >+ * Returns: true if its ok to grant the lock >+ */ >+ >+static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) >+{ >+ const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list); >+ if ((gh->gh_state == LM_ST_EXCLUSIVE || >+ gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) >+ return 0; >+ if (gl->gl_state == gh->gh_state) >+ return 1; >+ if (gh->gh_flags & GL_EXACT) >+ return 0; >+ if (gh->gh_state == LM_ST_SHARED && gl->gl_state == LM_ST_EXCLUSIVE) >+ return 1; >+ if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) >+ return 1; >+ return 0; >+} >+ >+static void gfs2_holder_wake(struct gfs2_holder *gh) >+{ >+ clear_bit(HIF_WAIT, &gh->gh_iflags); >+ smp_mb__after_clear_bit(); >+ wake_up_bit(&gh->gh_iflags, HIF_WAIT); >+} >+ >+/** >+ * do_promote - promote as many requests as possible on the current queue >+ * @gl: The glock >+ * >+ * 
Returns: true if there is a blocked holder at the head of the list >+ */ >+ >+static int do_promote(struct gfs2_glock *gl) >+{ >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ struct gfs2_holder *gh, *tmp; >+ int ret; >+ >+restart: >+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { >+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ continue; >+ if (may_grant(gl, gh)) { >+ if (gh->gh_list.prev == &gl->gl_holders && >+ glops->go_lock) { >+ spin_unlock(&gl->gl_spin); >+ /* FIXME: eliminate this eventually */ >+ ret = glops->go_lock(gh); >+ spin_lock(&gl->gl_spin); >+ if (ret) { >+ gh->gh_error = ret; >+ list_del_init(&gh->gh_list); >+ gfs2_holder_wake(gh); >+ goto restart; >+ } >+ set_bit(HIF_HOLDER, &gh->gh_iflags); >+ gfs2_holder_wake(gh); >+ goto restart; >+ } >+ set_bit(HIF_HOLDER, &gh->gh_iflags); >+ gfs2_holder_wake(gh); >+ continue; >+ } >+ if (gh->gh_list.prev == &gl->gl_holders) >+ return 1; >+ break; >+ } >+ return 0; >+} >+ >+/** >+ * do_error - Something unexpected has happened during a lock request >+ * >+ */ >+ >+static inline void do_error(struct gfs2_glock *gl, const int ret) >+{ >+ struct gfs2_holder *gh, *tmp; >+ >+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { >+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ continue; >+ if (ret & LM_OUT_ERROR) >+ gh->gh_error = -EIO; >+ else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { >+ set_bit(GLF_TRYFAILED, &gl->gl_flags); >+ gh->gh_error = GLR_TRYFAILED; >+ } else >+ continue; >+ list_del_init(&gh->gh_list); >+ gfs2_holder_wake(gh); >+ } >+} >+ >+/** >+ * find_first_waiter - find the first gh that's waiting for the glock >+ * @gl: the glock >+ */ >+ >+static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) >+{ >+ struct gfs2_holder *gh; >+ >+ list_for_each_entry(gh, &gl->gl_holders, gh_list) { >+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ return gh; >+ } >+ return NULL; >+} >+ >+/** >+ * state_change - record that the glock is now in a 
different state >+ * @gl: the glock >+ * @new_state the new state >+ * >+ */ >+ >+static void state_change(struct gfs2_glock *gl, unsigned int new_state) >+{ >+ int held1, held2; >+ >+ held1 = (gl->gl_state != LM_ST_UNLOCKED); >+ held2 = (new_state != LM_ST_UNLOCKED); >+ >+ if (held1 != held2) { >+ if (held2) >+ gfs2_glock_hold(gl); >+ else >+ gfs2_glock_put(gl); >+ } >+ >+ gl->gl_state = new_state; >+ gl->gl_tchange = jiffies; >+} >+ >+static void gfs2_demote_wake(struct gfs2_glock *gl) >+{ >+ gl->gl_prevdemote_state = gl->gl_demote_state; >+ gl->gl_lastdemote_place = "gfs2_demote_wake"; >+ gl->gl_demote_state = LM_ST_EXCLUSIVE; >+ clear_bit(GLF_DEMOTE, &gl->gl_flags); >+ smp_mb__after_clear_bit(); >+ wake_up_bit(&gl->gl_flags, GLF_DEMOTE); >+} >+ >+/** >+ * finish_xmote - The DLM has replied to one of our lock requests >+ * @gl: The glock >+ * @ret: The status from the DLM >+ * >+ */ >+ >+static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) >+{ >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ struct gfs2_holder *gh; >+ unsigned state = ret & LM_OUT_ST_MASK; >+ >+ spin_lock(&gl->gl_spin); >+ state_change(gl, state); >+ gh = find_first_waiter(gl); >+ >+ /* Demote to UN request arrived during demote to SH or DF */ >+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && >+ state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) >+ gl->gl_target = LM_ST_UNLOCKED; >+ >+ /* Check for state != intended state */ >+ if (unlikely(state != gl->gl_target)) { >+ if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { >+ /* move to back of queue and try next entry */ >+ if (ret & LM_OUT_CANCELED) { >+ if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) >+ list_move_tail(&gh->gh_list, &gl->gl_holders); >+ gh = find_first_waiter(gl); >+ gl->gl_target = gh->gh_state; >+ goto retry; >+ } >+ /* Some error or failed "try lock" - report it */ >+ if ((ret & LM_OUT_ERROR) || >+ (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { >+ gl->gl_target = 
gl->gl_state; >+ do_error(gl, ret); >+ goto out; >+ } >+ } >+ switch(state) { >+ /* Unlocked due to conversion deadlock, try again */ >+ case LM_ST_UNLOCKED: >+retry: >+ do_xmote(gl, gh, gl->gl_target); >+ break; >+ /* Conversion fails, unlock and try again */ >+ case LM_ST_SHARED: >+ case LM_ST_DEFERRED: >+ do_xmote(gl, gh, LM_ST_UNLOCKED); >+ break; >+ default: /* Everything else */ >+ printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state); >+ GLOCK_BUG_ON(gl, 1); >+ } >+ spin_unlock(&gl->gl_spin); >+ gfs2_glock_put(gl); >+ return; >+ } >+ >+ /* Fast path - we got what we asked for */ >+ if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { >+ set_bit(GLF_DEMOTE_FROM_FINISH_XMOTE, &gl->gl_flags); >+ gfs2_demote_wake(gl); >+ } >+ if (state != LM_ST_UNLOCKED) { >+ if (glops->go_xmote_bh) { >+ int rv; >+ spin_unlock(&gl->gl_spin); >+ rv = glops->go_xmote_bh(gl, gh); >+ if (rv == -EAGAIN) >+ return; >+ spin_lock(&gl->gl_spin); >+ if (rv) { >+ do_error(gl, rv); >+ goto out; >+ } >+ } >+ do_promote(gl); >+ } >+out: >+ clear_bit(GLF_LOCK, &gl->gl_flags); >+ spin_unlock(&gl->gl_spin); >+ gfs2_glock_put(gl); >+} >+ >+static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, >+ unsigned int cur_state, unsigned int req_state, >+ unsigned int flags) >+{ >+ int ret = LM_OUT_ERROR; >+ if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >+ ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state, >+ req_state, flags); >+ return ret; >+} >+ >+/** >+ * do_xmote - Calls the DLM to change the state of a lock >+ * @gl: The lock state >+ * @gh: The holder (only for promotes) >+ * @target: The target lock state >+ * >+ */ >+ >+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) >+{ >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ struct gfs2_sbd *sdp = gl->gl_sbd; >+ unsigned int lck_flags = gh ? 
gh->gh_flags : 0; >+ int ret; >+ >+ lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | >+ LM_FLAG_PRIORITY); >+ BUG_ON(gl->gl_state == target); >+ BUG_ON(gl->gl_state == gl->gl_target); >+ if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) && >+ glops->go_inval) { >+ set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); >+ do_error(gl, 0); /* Fail queued try locks */ >+ } >+ spin_unlock(&gl->gl_spin); >+ if (glops->go_xmote_th) >+ glops->go_xmote_th(gl); >+ if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) >+ glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); >+ clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); >+ >+ gfs2_glock_hold(gl); >+ if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED || >+ gl->gl_state == LM_ST_DEFERRED) && >+ !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) >+ lck_flags |= LM_FLAG_TRY_1CB; >+ ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags); >+ >+ if (!(ret & LM_OUT_ASYNC)) { >+ finish_xmote(gl, ret); >+ gfs2_glock_hold(gl); >+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); >+ } else { >+ GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC); >+ } >+ spin_lock(&gl->gl_spin); >+} >+ >+/** >+ * find_first_holder - find the first "holder" gh >+ * @gl: the glock >+ */ >+ >+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) >+{ >+ struct gfs2_holder *gh; >+ >+ if (!list_empty(&gl->gl_holders)) { >+ gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); >+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ return gh; >+ } >+ return NULL; >+} >+ >+/** >+ * run_queue - do all outstanding tasks related to a glock >+ * @gl: The glock in question >+ * @nonblock: True if we must not block in run_queue >+ * >+ */ >+ >+static void run_queue(struct gfs2_glock *gl, const int nonblock) >+{ >+ struct gfs2_holder *gh = NULL; >+ >+ if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) >+ return; >+ >+ GLOCK_BUG_ON(gl, 
test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); >+ >+ if (test_bit(GLF_DEMOTE, &gl->gl_flags) && >+ gl->gl_demote_state != gl->gl_state) { >+ if (find_first_holder(gl)) >+ goto out; >+ if (nonblock) >+ goto out_sched; >+ set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); >+ gl->gl_target = gl->gl_demote_state; >+ } else { >+ if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { >+ clear_bit(GLF_DEMOTE_FROM_FINISH_XMOTE, &gl->gl_flags); >+ gfs2_demote_wake(gl); >+ } >+ if (do_promote(gl) == 0) >+ goto out; >+ gh = find_first_waiter(gl); >+ gl->gl_target = gh->gh_state; >+ if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) >+ do_error(gl, 0); /* Fail queued try locks */ >+ } >+ do_xmote(gl, gh, gl->gl_target); >+ return; >+ >+out_sched: >+ gfs2_glock_hold(gl); >+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); >+out: >+ clear_bit(GLF_LOCK, &gl->gl_flags); >+} >+ > static void glock_work_func(void *data) > { >+ unsigned long delay = 0; > struct gfs2_glock *gl = (struct gfs2_glock *)data; > >+ if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) >+ finish_xmote(gl, gl->gl_reply); > spin_lock(&gl->gl_spin); >- if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)) >- set_bit(GLF_DEMOTE, &gl->gl_flags); >- run_queue(gl); >+ if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)) { >+ unsigned long holdtime, now = jiffies; >+ holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; >+ if (time_before(now, holdtime)) >+ delay = holdtime - now; >+ set_bit(delay ? 
GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags); >+ } >+ run_queue(gl, 0); > spin_unlock(&gl->gl_spin); >- gfs2_glock_put(gl); >+ if (!delay || >+ queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) >+ gfs2_glock_put(gl); > } > > /** >@@ -332,12 +705,12 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, > gl->gl_name = name; > atomic_set(&gl->gl_ref, 1); > gl->gl_state = LM_ST_UNLOCKED; >+ gl->gl_target = LM_ST_UNLOCKED; > gl->gl_demote_state = LM_ST_EXCLUSIVE; >+ gl->gl_prevdemote_state = LM_ST_EXCLUSIVE; >+ gl->gl_lastdemote_place = "gfs2_glock_get"; > gl->gl_hash = hash; >- gl->gl_owner_pid = 0; >- gl->gl_ip = 0; > gl->gl_ops = glops; >- gl->gl_req_gh = NULL; > gl->gl_stamp = jiffies; > gl->gl_tchange = jiffies; > gl->gl_object = NULL; >@@ -430,646 +803,88 @@ void gfs2_holder_reinit(unsigned int sta > * > */ > >-void gfs2_holder_uninit(struct gfs2_holder *gh) >-{ >- gfs2_glock_put(gh->gh_gl); >- gh->gh_gl = NULL; >- gh->gh_ip = 0; >-} >- >-static void gfs2_holder_wake(struct gfs2_holder *gh) >-{ >- clear_bit(HIF_WAIT, &gh->gh_iflags); >- smp_mb__after_clear_bit(); >- wake_up_bit(&gh->gh_iflags, HIF_WAIT); >-} >- >-static int just_schedule(void *word) >-{ >- schedule(); >- return 0; >-} >- >-static void wait_on_holder(struct gfs2_holder *gh) >-{ >- might_sleep(); >- wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE); >-} >- >-static void gfs2_demote_wake(struct gfs2_glock *gl) >-{ >- BUG_ON(!spin_is_locked(&gl->gl_spin)); >- gl->gl_demote_state = LM_ST_EXCLUSIVE; >- clear_bit(GLF_DEMOTE, &gl->gl_flags); >- smp_mb__after_clear_bit(); >- wake_up_bit(&gl->gl_flags, GLF_DEMOTE); >-} >- >-static void wait_on_demote(struct gfs2_glock *gl) >-{ >- might_sleep(); >- wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE); >-} >- >-/** >- * rq_mutex - process a mutex request in the queue >- * @gh: the glock holder >- * >- * Returns: 1 if the queue is blocked >- */ >- >-static int rq_mutex(struct gfs2_holder *gh) >-{ >- struct 
gfs2_glock *gl = gh->gh_gl; >- >- list_del_init(&gh->gh_list); >- /* gh->gh_error never examined. */ >- set_bit(GLF_LOCK, &gl->gl_flags); >- clear_bit(HIF_WAIT, &gh->gh_iflags); >- smp_mb(); >- wake_up_bit(&gh->gh_iflags, HIF_WAIT); >- >- return 1; >-} >- >-/** >- * rq_promote - process a promote request in the queue >- * @gh: the glock holder >- * >- * Acquire a new inter-node lock, or change a lock state to more restrictive. >- * >- * Returns: 1 if the queue is blocked >- */ >- >-static int rq_promote(struct gfs2_holder *gh) >-{ >- struct gfs2_glock *gl = gh->gh_gl; >- struct gfs2_sbd *sdp = gl->gl_sbd; >- >- if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { >- if (list_empty(&gl->gl_holders)) { >- gl->gl_req_gh = gh; >- set_bit(GLF_LOCK, &gl->gl_flags); >- spin_unlock(&gl->gl_spin); >- >- if (atomic_read(&sdp->sd_reclaim_count) > >- gfs2_tune_get(sdp, gt_reclaim_limit) && >- !(gh->gh_flags & LM_FLAG_PRIORITY)) { >- gfs2_reclaim_glock(sdp); >- gfs2_reclaim_glock(sdp); >- } >- >- gfs2_glock_xmote_th(gh->gh_gl, gh); >- spin_lock(&gl->gl_spin); >- } >- return 1; >- } >- >- if (list_empty(&gl->gl_holders)) { >- set_bit(HIF_FIRST, &gh->gh_iflags); >- set_bit(GLF_LOCK, &gl->gl_flags); >- } else { >- struct gfs2_holder *next_gh; >- if (gh->gh_state == LM_ST_EXCLUSIVE) >- return 1; >- next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder, >- gh_list); >- if (next_gh->gh_state == LM_ST_EXCLUSIVE) >- return 1; >- } >- >- list_move_tail(&gh->gh_list, &gl->gl_holders); >- gh->gh_error = 0; >- set_bit(HIF_HOLDER, &gh->gh_iflags); >- >- gfs2_holder_wake(gh); >- >- return 0; >-} >- >-/** >- * rq_demote - process a demote request in the queue >- * @gh: the glock holder >- * >- * Returns: 1 if the queue is blocked >- */ >- >-static int rq_demote(struct gfs2_glock *gl) >-{ >- if (!list_empty(&gl->gl_holders)) >- return 1; >- >- if (gl->gl_state == gl->gl_demote_state || >- gl->gl_state == LM_ST_UNLOCKED) { >- gfs2_demote_wake(gl); >- return 0; >- } >- >- 
set_bit(GLF_LOCK, &gl->gl_flags); >- set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); >- >- if (gl->gl_demote_state == LM_ST_UNLOCKED || >- gl->gl_state != LM_ST_EXCLUSIVE) { >- spin_unlock(&gl->gl_spin); >- gfs2_glock_drop_th(gl); >- } else { >- spin_unlock(&gl->gl_spin); >- gfs2_glock_xmote_th(gl, NULL); >- } >- >- spin_lock(&gl->gl_spin); >- clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); >- >- return 0; >-} >- >-/** >- * run_queue - process holder structures on a glock >- * @gl: the glock >- * >- */ >-static void run_queue(struct gfs2_glock *gl) >-{ >- struct gfs2_holder *gh; >- int blocked = 1; >- >- for (;;) { >- if (test_bit(GLF_LOCK, &gl->gl_flags)) >- break; >- >- if (!list_empty(&gl->gl_waiters1)) { >- gh = list_entry(gl->gl_waiters1.next, >- struct gfs2_holder, gh_list); >- >- blocked = rq_mutex(gh); >- } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { >- blocked = rq_demote(gl); >- if (test_bit(GLF_WAITERS2, &gl->gl_flags) && >- !blocked) { >- set_bit(GLF_DEMOTE, &gl->gl_flags); >- gl->gl_demote_state = LM_ST_UNLOCKED; >- } >- clear_bit(GLF_WAITERS2, &gl->gl_flags); >- } else if (!list_empty(&gl->gl_waiters3)) { >- gh = list_entry(gl->gl_waiters3.next, >- struct gfs2_holder, gh_list); >- >- if (test_bit(HIF_PROMOTE, &gh->gh_iflags)) >- blocked = rq_promote(gh); >- else >- gfs2_assert_warn(gl->gl_sbd, 0); >- >- } else >- break; >- >- if (blocked) >- break; >- } >-} >- >-/** >- * gfs2_glmutex_lock - acquire a local lock on a glock >- * @gl: the glock >- * >- * Gives caller exclusive access to manipulate a glock structure. 
>- */ >- >-static void gfs2_glmutex_lock(struct gfs2_glock *gl) >-{ >- spin_lock(&gl->gl_spin); >- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { >- struct gfs2_holder gh; >- >- gfs2_holder_init(gl, 0, 0, &gh); >- set_bit(HIF_WAIT, &gh.gh_iflags); >- list_add_tail(&gh.gh_list, &gl->gl_waiters1); >- spin_unlock(&gl->gl_spin); >- wait_on_holder(&gh); >- gfs2_holder_uninit(&gh); >- } else { >- gl->gl_owner_pid = current->pid; >- gl->gl_ip = (unsigned long)__builtin_return_address(0); >- spin_unlock(&gl->gl_spin); >- } >-} >- >-/** >- * gfs2_glmutex_trylock - try to acquire a local lock on a glock >- * @gl: the glock >- * >- * Returns: 1 if the glock is acquired >- */ >- >-static int gfs2_glmutex_trylock(struct gfs2_glock *gl) >-{ >- int acquired = 1; >- >- spin_lock(&gl->gl_spin); >- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { >- acquired = 0; >- } else { >- gl->gl_owner_pid = current->pid; >- gl->gl_ip = (unsigned long)__builtin_return_address(0); >- } >- spin_unlock(&gl->gl_spin); >- >- return acquired; >-} >- >-/** >- * gfs2_glmutex_unlock - release a local lock on a glock >- * @gl: the glock >- * >- */ >- >-static void gfs2_glmutex_unlock(struct gfs2_glock *gl) >-{ >- spin_lock(&gl->gl_spin); >- clear_bit(GLF_LOCK, &gl->gl_flags); >- gl->gl_owner_pid = 0; >- gl->gl_ip = 0; >- run_queue(gl); >- BUG_ON(!spin_is_locked(&gl->gl_spin)); >- spin_unlock(&gl->gl_spin); >-} >- >-/** >- * handle_callback - process a demote request >- * @gl: the glock >- * @state: the state the caller wants us to change to >- * >- * There are only two requests that we are going to see in actual >- * practise: LM_ST_SHARED and LM_ST_UNLOCKED >- */ >- >-static void handle_callback(struct gfs2_glock *gl, unsigned int state, >- int remote, unsigned long delay) >-{ >- int bit = delay ? 
GLF_PENDING_DEMOTE : GLF_DEMOTE; >- >- spin_lock(&gl->gl_spin); >- set_bit(bit, &gl->gl_flags); >- if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { >- gl->gl_demote_state = state; >- gl->gl_demote_time = jiffies; >- if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN && >- gl->gl_object) { >- gfs2_glock_schedule_for_reclaim(gl); >- spin_unlock(&gl->gl_spin); >- return; >- } >- } else if (gl->gl_demote_state != LM_ST_UNLOCKED && >- gl->gl_demote_state != state) { >- if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) >- set_bit(GLF_WAITERS2, &gl->gl_flags); >- else >- gl->gl_demote_state = LM_ST_UNLOCKED; >- } >- spin_unlock(&gl->gl_spin); >-} >- >-/** >- * state_change - record that the glock is now in a different state >- * @gl: the glock >- * @new_state the new state >- * >- */ >- >-static void state_change(struct gfs2_glock *gl, unsigned int new_state) >-{ >- int held1, held2; >- >- held1 = (gl->gl_state != LM_ST_UNLOCKED); >- held2 = (new_state != LM_ST_UNLOCKED); >- >- if (held1 != held2) { >- if (held2) >- gfs2_glock_hold(gl); >- else >- gfs2_glock_put(gl); >- } >- >- gl->gl_state = new_state; >- gl->gl_tchange = jiffies; >-} >- >-/** >- * drop_bh - Called after a lock module unlock completes >- * @gl: the glock >- * @ret: the return status >- * >- * Doesn't wake up the process waiting on the struct gfs2_holder (if any) >- * Doesn't drop the reference on the glock the top half took out >- * >- */ >- >-static void drop_bh(struct gfs2_glock *gl, unsigned int ret) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, !ret); >- >- state_change(gl, LM_ST_UNLOCKED); >- >- if (glops->go_inval) >- glops->go_inval(gl, DIO_METADATA); >- >- spin_lock(&gl->gl_spin); >- gfs2_demote_wake(gl); >- clear_bit(GLF_LOCK, &gl->gl_flags); >- spin_unlock(&gl->gl_spin); >- gfs2_glock_put(gl); >-} 
>- >-/** >- * xmote_bh - Called after the lock module is done acquiring a lock >- * @gl: The glock in question >- * @ret: the int returned from the lock module >- * >- */ >- >-static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- struct gfs2_holder *gh = gl->gl_req_gh; >- int prev_state = gl->gl_state; >- int op_done = 1; >- >- if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) { >- drop_bh(gl, ret); >- return; >- } >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); >- >- state_change(gl, ret & LM_OUT_ST_MASK); >- >- if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) { >- if (glops->go_inval) >- glops->go_inval(gl, DIO_METADATA); >- } else if (gl->gl_state == LM_ST_DEFERRED) { >- /* We might not want to do this here. >- Look at moving to the inode glops. 
*/ >- if (glops->go_inval) >- glops->go_inval(gl, 0); >- } >- >- /* Deal with each possible exit condition */ >- >- if (!gh) { >- gl->gl_stamp = jiffies; >- if (ret & LM_OUT_CANCELED) { >- op_done = 0; >- } else { >- spin_lock(&gl->gl_spin); >- if (gl->gl_state != gl->gl_demote_state) { >- spin_unlock(&gl->gl_spin); >- gfs2_glock_drop_th(gl); >- gfs2_glock_put(gl); >- return; >- } >- gfs2_demote_wake(gl); >- spin_unlock(&gl->gl_spin); >- } >- } else { >- spin_lock(&gl->gl_spin); >- list_del_init(&gh->gh_list); >- gh->gh_error = -EIO; >- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- goto out; >- gh->gh_error = GLR_CANCELED; >- if (ret & LM_OUT_CANCELED) >- goto out; >- if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { >- list_add_tail(&gh->gh_list, &gl->gl_holders); >- gh->gh_error = 0; >- set_bit(HIF_HOLDER, &gh->gh_iflags); >- set_bit(HIF_FIRST, &gh->gh_iflags); >- op_done = 0; >- goto out; >- } >- gh->gh_error = GLR_TRYFAILED; >- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) >- goto out; >- gh->gh_error = -EINVAL; >- if (gfs2_assert_withdraw(sdp, 0) == -1) >- fs_err(sdp, "ret = 0x%.8X\n", ret); >-out: >- spin_unlock(&gl->gl_spin); >- } >- >- if (glops->go_xmote_bh) >- glops->go_xmote_bh(gl); >- >- if (op_done) { >- spin_lock(&gl->gl_spin); >- gl->gl_req_gh = NULL; >- clear_bit(GLF_LOCK, &gl->gl_flags); >- spin_unlock(&gl->gl_spin); >- } >- >- gfs2_glock_put(gl); >- >- if (gh) >- gfs2_holder_wake(gh); >-} >- >-/** >- * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock >- * @gl: The glock in question >- * @state: the requested state >- * @flags: modifier flags to the lock call >- * >- */ >- >-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- int flags = gh ? gh->gh_flags : 0; >- unsigned state = gh ? 
gh->gh_state : gl->gl_demote_state; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | >- LM_FLAG_NOEXP | LM_FLAG_ANY | >- LM_FLAG_PRIORITY); >- unsigned int lck_ret; >- >- if (glops->go_xmote_th) >- glops->go_xmote_th(gl); >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED); >- gfs2_assert_warn(sdp, state != gl->gl_state); >- >- gfs2_glock_hold(gl); >- >- lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags); >- >- if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR))) >- return; >- >- if (lck_ret & LM_OUT_ASYNC) >- gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC); >- else >- xmote_bh(gl, lck_ret); >-} >- >-/** >- * gfs2_glock_drop_th - call into the lock module to unlock a lock >- * @gl: the glock >- * >- */ >- >-static void gfs2_glock_drop_th(struct gfs2_glock *gl) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- unsigned int ret; >- >- if (glops->go_drop_th) >- glops->go_drop_th(gl); >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); >- >- gfs2_glock_hold(gl); >+void gfs2_holder_uninit(struct gfs2_holder *gh) >+{ >+ gfs2_glock_put(gh->gh_gl); >+ gh->gh_gl = NULL; >+ gh->gh_ip = 0; >+} > >- ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state); >+static int just_schedule(void *word) >+{ >+ schedule(); >+ return 0; >+} > >- if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR))) >- return; >+static void wait_on_holder(struct gfs2_holder *gh) >+{ >+ might_sleep(); >+ wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE); >+} > >- if (!ret) >- drop_bh(gl, ret); >- else >- gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC); >+static void wait_on_demote(struct gfs2_glock *gl) 
>+{ >+ might_sleep(); >+ wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE); > } > > /** >- * do_cancels - cancel requests for locks stuck waiting on an expire flag >- * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock >+ * handle_callback - process a demote request >+ * @gl: the glock >+ * @state: the state the caller wants us to change to > * >- * Don't cancel GL_NOCANCEL requests. >+ * There are only two requests that we are going to see in actual >+ * practise: LM_ST_SHARED and LM_ST_UNLOCKED > */ > >-static void do_cancels(struct gfs2_holder *gh) >+static void handle_callback(struct gfs2_glock *gl, unsigned int state, >+ int remote, unsigned long delay) > { >- struct gfs2_glock *gl = gh->gh_gl; >- >- spin_lock(&gl->gl_spin); >+ int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE; > >- while (gl->gl_req_gh != gh && >- !test_bit(HIF_HOLDER, &gh->gh_iflags) && >- !list_empty(&gh->gh_list)) { >- if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) { >- spin_unlock(&gl->gl_spin); >- gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock); >- msleep(100); >- spin_lock(&gl->gl_spin); >- } else { >- spin_unlock(&gl->gl_spin); >- msleep(100); >- spin_lock(&gl->gl_spin); >- } >+ set_bit(bit, &gl->gl_flags); >+ if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { >+ gl->gl_prevdemote_state = gl->gl_demote_state; >+ gl->gl_lastdemote_place = "handle_callback EX"; >+ gl->gl_demote_state = state; >+ gl->gl_demote_time = jiffies; >+ if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN && >+ gl->gl_object) >+ gfs2_glock_schedule_for_reclaim(gl); >+ } else if (gl->gl_demote_state != LM_ST_UNLOCKED && >+ gl->gl_demote_state != state) { >+ gl->gl_prevdemote_state = gl->gl_demote_state; >+ gl->gl_lastdemote_place = "handle_callback UN"; >+ gl->gl_demote_state = LM_ST_UNLOCKED; > } >- >- spin_unlock(&gl->gl_spin); > } > > /** >- * glock_wait_internal - wait on a glock acquisition >+ * gfs2_glock_wait - wait on a glock acquisition > * @gh: the glock holder > * 
> * Returns: 0 on success > */ > >-static int glock_wait_internal(struct gfs2_holder *gh) >+int gfs2_glock_wait(struct gfs2_holder *gh) > { >- struct gfs2_glock *gl = gh->gh_gl; >- struct gfs2_sbd *sdp = gl->gl_sbd; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- >- if (test_bit(HIF_ABORTED, &gh->gh_iflags)) >- return -EIO; >- >- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { >- spin_lock(&gl->gl_spin); >- if (gl->gl_req_gh != gh && >- !test_bit(HIF_HOLDER, &gh->gh_iflags) && >- !list_empty(&gh->gh_list)) { >- list_del_init(&gh->gh_list); >- gh->gh_error = GLR_TRYFAILED; >- run_queue(gl); >- spin_unlock(&gl->gl_spin); >- return gh->gh_error; >- } >- spin_unlock(&gl->gl_spin); >- } >- >- if (gh->gh_flags & LM_FLAG_PRIORITY) >- do_cancels(gh); >- > wait_on_holder(gh); >- if (gh->gh_error) >- return gh->gh_error; >- >- gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags)); >- gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state, >- gh->gh_flags)); >- >- if (test_bit(HIF_FIRST, &gh->gh_iflags)) { >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- >- if (glops->go_lock) { >- gh->gh_error = glops->go_lock(gh); >- if (gh->gh_error) { >- spin_lock(&gl->gl_spin); >- list_del_init(&gh->gh_list); >- spin_unlock(&gl->gl_spin); >- } >- } >- >- spin_lock(&gl->gl_spin); >- gl->gl_req_gh = NULL; >- clear_bit(GLF_LOCK, &gl->gl_flags); >- run_queue(gl); >- spin_unlock(&gl->gl_spin); >- } >- > return gh->gh_error; > } > >-static inline struct gfs2_holder * >-find_holder_by_owner(struct list_head *head, pid_t pid) >-{ >- struct gfs2_holder *gh; >- >- list_for_each_entry(gh, head, gh_list) { >- if (gh->gh_owner_pid == pid) >- return gh; >- } >- >- return NULL; >-} >- >-static void print_dbg(struct glock_iter *gi, const char *fmt, ...) >+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) 
> { > va_list args; > > va_start(args, fmt); >- if (gi) { >+ if (seq) { >+ struct gfs2_glock_iter *gi = seq->private; > vsprintf(gi->string, fmt, args); >- seq_printf(gi->seq, gi->string); >- } >- else >+ seq_printf(seq, gi->string); >+ } else { >+ printk(KERN_ERR " "); > vprintk(fmt, args); >+ } > va_end(args); > } > >@@ -1077,48 +892,80 @@ static void print_dbg(struct glock_iter > * add_to_queue - Add a holder to the wait queue (but look for recursion) > * @gh: the holder structure to add > * >+ * Eventually we should move the recursive locking trap to a >+ * debugging option or something like that. This is the fast >+ * path and needs to have the minimum number of distractions. >+ * > */ > >-static void add_to_queue(struct gfs2_holder *gh) >+static inline void add_to_queue(struct gfs2_holder *gh) > { > struct gfs2_glock *gl = gh->gh_gl; >- struct gfs2_holder *existing; >+ struct gfs2_sbd *sdp = gl->gl_sbd; >+ struct list_head *insert_pt = NULL; >+ struct gfs2_holder *gh2; >+ int try_lock = 0; > >- BUG_ON(!gh->gh_owner_pid); >+ BUG_ON(gh->gh_owner_pid == 0); > if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) >- BUG(); >+ GLOCK_BUG_ON(gl, 1); > >- if (!(gh->gh_flags & GL_FLOCK)) { >- existing = find_holder_by_owner(&gl->gl_holders, >- gh->gh_owner_pid); >- if (existing) { >- print_symbol(KERN_WARNING "original: %s\n", >- existing->gh_ip); >- printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid); >- printk(KERN_INFO "lock type : %d lock state : %d\n", >- existing->gh_gl->gl_name.ln_type, >- existing->gh_gl->gl_state); >- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); >- printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid); >- printk(KERN_INFO "lock type : %d lock state : %d\n", >- gl->gl_name.ln_type, gl->gl_state); >- BUG(); >- } >- >- existing = find_holder_by_owner(&gl->gl_waiters3, >- gh->gh_owner_pid); >- if (existing) { >- print_symbol(KERN_WARNING "original: %s\n", >- existing->gh_ip); >- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); >- BUG(); >+ if 
(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { >+ if (test_bit(GLF_LOCK, &gl->gl_flags)) >+ try_lock = 1; >+ if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) >+ goto fail; >+ } >+ >+ list_for_each_entry(gh2, &gl->gl_holders, gh_list) { >+ if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && >+ (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) >+ goto trap_recursive; >+ if (try_lock && >+ !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) && >+ !may_grant(gl, gh)) { >+fail: >+ gh->gh_error = GLR_TRYFAILED; >+ set_bit(GLF_TRYFAILED, &gl->gl_flags); >+ gfs2_holder_wake(gh); >+ return; > } >+ if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) >+ continue; >+ if (unlikely(!insert_pt && >+ (gh->gh_flags & LM_FLAG_PRIORITY || >+ (try_lock && >+ test_and_clear_bit(GLF_TRYFAILED, >+ &gl->gl_flags))))) >+ insert_pt = &gh2->gh_list; >+ } >+ if (likely(insert_pt == NULL)) { >+ list_add_tail(&gh->gh_list, &gl->gl_holders); >+ if (likely(!(gh->gh_flags & LM_FLAG_PRIORITY))) >+ return; >+ } else { >+ list_add_tail(&gh->gh_list, insert_pt); >+ } >+ >+ gh2 = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); >+ if (gh != gh2) { >+ spin_unlock(&gl->gl_spin); >+ sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock); >+ spin_lock(&gl->gl_spin); > } >+ return; > >- if (gh->gh_flags & LM_FLAG_PRIORITY) >- list_add(&gh->gh_list, &gl->gl_waiters3); >- else >- list_add_tail(&gh->gh_list, &gl->gl_waiters3); >+trap_recursive: >+ print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip); >+ printk(KERN_ERR "pid : %d\n", gh2->gh_owner_pid); >+ printk(KERN_ERR "lock type: %d req lock state : %d\n", >+ gh2->gh_gl->gl_name.ln_type, gh2->gh_state); >+ print_symbol(KERN_ERR "new: %s\n", gh->gh_ip); >+ printk(KERN_ERR "pid: %d\n", gh->gh_owner_pid); >+ printk(KERN_ERR "lock type: %d req lock state : %d\n", >+ gh->gh_gl->gl_name.ln_type, gh->gh_state); >+ __dump_glock(NULL, gl); >+ BUG(); > } > > /** >@@ -1136,26 +983,16 @@ int gfs2_glock_nq(struct gfs2_holder *gh > struct gfs2_sbd *sdp = 
gl->gl_sbd; > int error = 0; > >-restart: >- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { >- set_bit(HIF_ABORTED, &gh->gh_iflags); >+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) > return -EIO; >- } >- >- set_bit(HIF_PROMOTE, &gh->gh_iflags); > > spin_lock(&gl->gl_spin); > add_to_queue(gh); >- run_queue(gl); >+ run_queue(gl, 1); > spin_unlock(&gl->gl_spin); > >- if (!(gh->gh_flags & GL_ASYNC)) { >- error = glock_wait_internal(gh); >- if (error == GLR_CANCELED) { >- msleep(100); >- goto restart; >- } >- } >+ if (!(gh->gh_flags & GL_ASYNC)) >+ error = gfs2_glock_wait(gh); > > return error; > } >@@ -1169,48 +1006,7 @@ restart: > > int gfs2_glock_poll(struct gfs2_holder *gh) > { >- struct gfs2_glock *gl = gh->gh_gl; >- int ready = 0; >- >- spin_lock(&gl->gl_spin); >- >- if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >- ready = 1; >- else if (list_empty(&gh->gh_list)) { >- if (gh->gh_error == GLR_CANCELED) { >- spin_unlock(&gl->gl_spin); >- msleep(100); >- if (gfs2_glock_nq(gh)) >- return 1; >- return 0; >- } else >- ready = 1; >- } >- >- spin_unlock(&gl->gl_spin); >- >- return ready; >-} >- >-/** >- * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC >- * @gh: the holder structure >- * >- * Returns: 0, GLR_TRYFAILED, or errno on failure >- */ >- >-int gfs2_glock_wait(struct gfs2_holder *gh) >-{ >- int error; >- >- error = glock_wait_internal(gh); >- if (error == GLR_CANCELED) { >- msleep(100); >- gh->gh_flags &= ~GL_ASYNC; >- error = gfs2_glock_nq(gh); >- } >- >- return error; >+ return test_bit(HIF_WAIT, &gh->gh_iflags) ? 
0 : 1; > } > > /** >@@ -1224,26 +1020,30 @@ void gfs2_glock_dq(struct gfs2_holder *g > struct gfs2_glock *gl = gh->gh_gl; > const struct gfs2_glock_operations *glops = gl->gl_ops; > unsigned delay = 0; >+ int fast_path = 0; > >+ spin_lock(&gl->gl_spin); > if (gh->gh_flags & GL_NOCACHE) > handle_callback(gl, LM_ST_UNLOCKED, 0, 0); > >- gfs2_glmutex_lock(gl); >- >- spin_lock(&gl->gl_spin); > list_del_init(&gh->gh_list); >- >- if (list_empty(&gl->gl_holders)) { >+ if (find_first_holder(gl) == NULL) { > if (glops->go_unlock) { >+ GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags)); > spin_unlock(&gl->gl_spin); > glops->go_unlock(gh); > spin_lock(&gl->gl_spin); >+ clear_bit(GLF_LOCK, &gl->gl_flags); > } > gl->gl_stamp = jiffies; >+ if (list_empty(&gl->gl_holders) && >+ !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && >+ !test_bit(GLF_DEMOTE, &gl->gl_flags)) >+ fast_path = 1; > } >- >- clear_bit(GLF_LOCK, &gl->gl_flags); > spin_unlock(&gl->gl_spin); >+ if (likely(fast_path)) >+ return; > > gfs2_glock_hold(gl); > if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && >@@ -1424,6 +1224,14 @@ void gfs2_glock_dq_uninit_m(unsigned int > gfs2_glock_dq_uninit(&ghs[x]); > } > >+static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp) >+{ >+ int error = -EIO; >+ if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >+ error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp); >+ return error; >+} >+ > /** > * gfs2_lvb_hold - attach a LVB from a glock > * @gl: The glock in question >@@ -1434,20 +1242,14 @@ int gfs2_lvb_hold(struct gfs2_glock *gl) > { > int error; > >- gfs2_glmutex_lock(gl); >- > if (!atomic_read(&gl->gl_lvb_count)) { > error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb); >- if (error) { >- gfs2_glmutex_unlock(gl); >+ if (error) > return error; >- } > gfs2_glock_hold(gl); > } > atomic_inc(&gl->gl_lvb_count); > >- gfs2_glmutex_unlock(gl); >- > return 0; > } > >@@ -1459,17 +1261,16 @@ int gfs2_lvb_hold(struct gfs2_glock *gl) > > void 
gfs2_lvb_unhold(struct gfs2_glock *gl) > { >- gfs2_glock_hold(gl); >- gfs2_glmutex_lock(gl); >+ struct gfs2_sbd *sdp = gl->gl_sbd; > >+ gfs2_glock_hold(gl); > gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); > if (atomic_dec_and_test(&gl->gl_lvb_count)) { >- gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb); >+ if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >+ sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb); > gl->gl_lvb = NULL; > gfs2_glock_put(gl); > } >- >- gfs2_glmutex_unlock(gl); > gfs2_glock_put(gl); > } > >@@ -1489,7 +1290,9 @@ static void blocking_cb(struct gfs2_sbd > if (time_before(now, holdtime)) > delay = holdtime - now; > >+ spin_lock(&gl->gl_spin); > handle_callback(gl, state, 1, delay); >+ spin_unlock(&gl->gl_spin); > if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) > gfs2_glock_put(gl); > } >@@ -1530,7 +1333,8 @@ void gfs2_glock_cb(void *cb_data, unsign > gl = gfs2_glock_find(sdp, &async->lc_name); > if (gfs2_assert_warn(sdp, gl)) > return; >- xmote_bh(gl, async->lc_ret); >+ gl->gl_reply = async->lc_ret; >+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags); > if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) > gfs2_glock_put(gl); > up_read(&gfs2_umount_flush_sem); >@@ -1588,11 +1392,10 @@ void gfs2_glock_schedule_for_reclaim(str > if (list_empty(&gl->gl_reclaim)) { > gfs2_glock_hold(gl); > list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list); >- atomic_inc(&sdp->sd_reclaim_count); >+ atomic_inc(&sdp->sd_reclaim_count); > spin_unlock(&sdp->sd_reclaim_lock); > wake_up(&sdp->sd_reclaim_wq); >- } >- else >+ } else > spin_unlock(&sdp->sd_reclaim_lock); > } > >@@ -1609,6 +1412,7 @@ void gfs2_glock_schedule_for_reclaim(str > void gfs2_reclaim_glock(struct gfs2_sbd *sdp) > { > struct gfs2_glock *gl; >+ int done_callback = 0; > > spin_lock(&sdp->sd_reclaim_lock); > if (list_empty(&sdp->sd_reclaim_list)) { >@@ -1623,14 +1427,16 @@ void gfs2_reclaim_glock(struct gfs2_sbd > 
atomic_dec(&sdp->sd_reclaim_count); > atomic_inc(&sdp->sd_reclaimed); > >- if (gfs2_glmutex_trylock(gl)) { >- if (list_empty(&gl->gl_holders) && >- gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) >- handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >- gfs2_glmutex_unlock(gl); >+ spin_lock(&gl->gl_spin); >+ if (find_first_holder(gl) == NULL && >+ gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) { >+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >+ done_callback = 1; > } >- >- gfs2_glock_put(gl); >+ spin_unlock(&gl->gl_spin); >+ if (!done_callback || >+ queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); > } > > /** >@@ -1687,18 +1493,14 @@ static void scan_glock(struct gfs2_glock > { > if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) > return; >+ if (test_bit(GLF_LOCK, &gl->gl_flags)) >+ return; > >- if (gfs2_glmutex_trylock(gl)) { >- if (list_empty(&gl->gl_holders) && >- gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) >- goto out_schedule; >- gfs2_glmutex_unlock(gl); >- } >- return; >- >-out_schedule: >- gfs2_glmutex_unlock(gl); >- gfs2_glock_schedule_for_reclaim(gl); >+ spin_lock(&gl->gl_spin); >+ if (find_first_holder(gl) == NULL && >+ gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) >+ gfs2_glock_schedule_for_reclaim(gl); >+ spin_unlock(&gl->gl_spin); > } > > /** >@@ -1723,12 +1525,13 @@ static void clear_glock(struct gfs2_gloc > spin_unlock(&sdp->sd_reclaim_lock); > } > >- if (gfs2_glmutex_trylock(gl)) { >- if (list_empty(&gl->gl_holders) && >- gl->gl_state != LM_ST_UNLOCKED) >- handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >- gfs2_glmutex_unlock(gl); >- } >+ spin_lock(&gl->gl_spin); >+ if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) >+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >+ spin_unlock(&gl->gl_spin); >+ gfs2_glock_hold(gl); >+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); > } > > /** >@@ -1773,181 +1576,169 @@ void gfs2_gl_hash_clear(struct gfs2_sbd > } > } > >-/* >- * 
Diagnostic routines to help debug distributed deadlock >- */ >- >-static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt, >- unsigned long address) >+static const char *state2str(unsigned state) > { >- char buffer[KSYM_SYMBOL_LEN]; >- >- sprint_symbol(buffer, address); >- print_dbg(gi, fmt, buffer); >+ switch(state) { >+ case LM_ST_UNLOCKED: >+ return "UN"; >+ case LM_ST_SHARED: >+ return "SH"; >+ case LM_ST_DEFERRED: >+ return "DF"; >+ case LM_ST_EXCLUSIVE: >+ return "EX"; >+ } >+ return "??"; >+} >+ >+static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags) >+{ >+ char *p = buf; >+ if (flags & LM_FLAG_TRY) >+ *p++ = 't'; >+ if (flags & LM_FLAG_TRY_1CB) >+ *p++ = 'T'; >+ if (flags & LM_FLAG_NOEXP) >+ *p++ = 'e'; >+ if (flags & LM_FLAG_ANY) >+ *p++ = 'a'; >+ if (flags & LM_FLAG_PRIORITY) >+ *p++ = 'p'; >+ if (flags & GL_ASYNC) >+ *p++ = 'a'; >+ if (flags & GL_EXACT) >+ *p++ = 'E'; >+ if (flags & GL_ATIME) >+ *p++ = 'a'; >+ if (flags & GL_NOCACHE) >+ *p++ = 'c'; >+ if (test_bit(HIF_HOLDER, &iflags)) >+ *p++ = 'H'; >+ if (test_bit(HIF_WAIT, &iflags)) >+ *p++ = 'W'; >+ if (test_bit(HIF_FIRST, &iflags)) >+ *p++ = 'F'; >+ *p = 0; >+ return buf; > } > > /** > * dump_holder - print information about a glock holder >- * @str: a string naming the type of holder >+ * @seq: the seq_file struct > * @gh: the glock holder > * > * Returns: 0 on success, -ENOBUFS when we run out of space > */ > >-static int dump_holder(struct glock_iter *gi, char *str, >- struct gfs2_holder *gh) >+static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh) > { >- unsigned int x; >- struct task_struct *gh_owner; >+ struct task_struct *gh_owner = NULL; >+ char buffer[KSYM_SYMBOL_LEN]; >+ char flags_buf[32]; > >- print_dbg(gi, " %s\n", str); >- if (gh->gh_owner_pid) { >- print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid); >+ sprint_symbol(buffer, gh->gh_ip); >+ if (gh->gh_owner_pid) > gh_owner = find_task_by_pid(gh->gh_owner_pid); >- if 
(gh_owner) >- print_dbg(gi, "(%s)\n", gh_owner->comm); >- else >- print_dbg(gi, "(ended)\n"); >- } else >- print_dbg(gi, " owner = -1\n"); >- print_dbg(gi, " gh_state = %u\n", gh->gh_state); >- print_dbg(gi, " gh_flags ="); >- for (x = 0; x < 32; x++) >- if (gh->gh_flags & (1 << x)) >- print_dbg(gi, " %u", x); >- print_dbg(gi, " \n"); >- print_dbg(gi, " error = %d\n", gh->gh_error); >- print_dbg(gi, " gh_iflags ="); >- for (x = 0; x < 32; x++) >- if (test_bit(x, &gh->gh_iflags)) >- print_dbg(gi, " %u", x); >- print_dbg(gi, " \n"); >- gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip); >- >+ gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n", >+ state2str(gh->gh_state), >+ hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), >+ gh->gh_error, >+ gh->gh_owner_pid ? gh->gh_owner_pid : -1, >+ gh_owner ? gh_owner->comm : "(ended)", buffer); > return 0; > } > >-/** >- * dump_inode - print information about an inode >- * @ip: the inode >- * >- * Returns: 0 on success, -ENOBUFS when we run out of space >- */ >- >-static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip) >+static const char *gflags2str(char *buf, const unsigned long *gflags) > { >- unsigned int x; >- >- print_dbg(gi, " Inode:\n"); >- print_dbg(gi, " num = %llu/%llu\n", >- (unsigned long long)ip->i_no_formal_ino, >- (unsigned long long)ip->i_no_addr); >- print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode)); >- print_dbg(gi, " i_flags ="); >- for (x = 0; x < 32; x++) >- if (test_bit(x, &ip->i_flags)) >- print_dbg(gi, " %u", x); >- print_dbg(gi, " \n"); >- return 0; >+ char *p = buf; >+ if (test_bit(GLF_LOCK, gflags)) >+ *p++ = 'l'; >+ if (test_bit(GLF_STICKY, gflags)) >+ *p++ = 's'; >+ if (test_bit(GLF_DEMOTE, gflags)) >+ *p++ = 'D'; >+ if (test_bit(GLF_PENDING_DEMOTE, gflags)) >+ *p++ = 'd'; >+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags)) >+ *p++ = 'p'; >+ if (test_bit(GLF_DIRTY, gflags)) >+ *p++ = 'y'; >+ if (test_bit(GLF_LFLUSH, gflags)) >+ *p++ = 'f'; >+ if 
(test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) >+ *p++ = 'i'; >+ if (test_bit(GLF_REPLY_PENDING, gflags)) >+ *p++ = 'r'; >+ if (test_bit(GLF_TRYFAILED, gflags)) >+ *p++ = 't'; >+ if (test_bit(GLF_DEMOTE_FROM_FINISH_XMOTE, gflags)) >+ *p++ = 'X'; >+ *p = 0; >+ return buf; > } > > /** >- * dump_glock - print information about a glock >+ * __dump_glock - print information about a glock >+ * @seq: The seq_file struct > * @gl: the glock >- * @count: where we are in the buffer >+ * >+ * The file format is as follows: >+ * One line per object, capital letters are used to indicate objects >+ * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented, >+ * other objects are indented by a single space and follow the glock to >+ * which they are related. Fields are indicated by lower case letters >+ * followed by a colon and the field value, except for strings which are in >+ * [] so that its possible to see if they are composed of spaces for >+ * example. The field's are n = number (id of the object), f = flags, >+ * t = type, s = state, r = refcount, e = error, p = pid. 
> * > * Returns: 0 on success, -ENOBUFS when we run out of space > */ > >-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl) >+static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) > { >- struct gfs2_holder *gh; >- unsigned int x; >- int error = -ENOBUFS; >- struct task_struct *gl_owner; >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ unsigned long long dtime; >+ const struct gfs2_holder *gh; >+ char gflags_buf[32]; >+ int error = 0; > >- spin_lock(&gl->gl_spin); >+ dtime = jiffies - gl->gl_demote_time; >+ dtime *= 1000000/HZ; /* demote time in uSec */ >+ if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) >+ dtime = 0; >+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu l:%d a:%d r:%d %s/%s\n", >+ state2str(gl->gl_state), >+ gl->gl_name.ln_type, >+ (unsigned long long)gl->gl_name.ln_number, >+ gflags2str(gflags_buf, &gl->gl_flags), >+ state2str(gl->gl_target), >+ state2str(gl->gl_demote_state), dtime, >+ atomic_read(&gl->gl_lvb_count), >+ atomic_read(&gl->gl_ail_count), >+ atomic_read(&gl->gl_ref), >+ state2str(gl->gl_prevdemote_state), gl->gl_lastdemote_place); > >- print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type, >- (unsigned long long)gl->gl_name.ln_number); >- print_dbg(gi, " gl_flags ="); >- for (x = 0; x < 32; x++) { >- if (test_bit(x, &gl->gl_flags)) >- print_dbg(gi, " %u", x); >- } >- if (!test_bit(GLF_LOCK, &gl->gl_flags)) >- print_dbg(gi, " (unlocked)"); >- print_dbg(gi, " \n"); >- print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); >- print_dbg(gi, " gl_state = %u\n", gl->gl_state); >- if (gl->gl_owner_pid) { >- gl_owner = find_task_by_pid(gl->gl_owner_pid); >- if (gl_owner) >- print_dbg(gi, " gl_owner = pid %d (%s)\n", >- gl->gl_owner_pid, gl_owner->comm); >- else >- print_dbg(gi, " gl_owner = %d (ended)\n", >- gl->gl_owner_pid); >- } else >- print_dbg(gi, " gl_owner = -1\n"); >- print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); >- print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? 
"yes" : "no"); >- print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); >- print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no"); >- print_dbg(gi, " le = %s\n", >- (list_empty(&gl->gl_le.le_list)) ? "no" : "yes"); >- print_dbg(gi, " reclaim = %s\n", >- (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); >- if (gl->gl_aspace) >- print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, >- gl->gl_aspace->i_mapping->nrpages); >- else >- print_dbg(gi, " aspace = no\n"); >- print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count)); >- if (gl->gl_req_gh) { >- error = dump_holder(gi, "Request", gl->gl_req_gh); >- if (error) >- goto out; >- } > list_for_each_entry(gh, &gl->gl_holders, gh_list) { >- error = dump_holder(gi, "Holder", gh); >- if (error) >- goto out; >- } >- list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { >- error = dump_holder(gi, "Waiter1", gh); >- if (error) >- goto out; >- } >- list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { >- error = dump_holder(gi, "Waiter3", gh); >+ error = dump_holder(seq, gh); > if (error) > goto out; > } >- if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { >- print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n", >- gl->gl_demote_state, (unsigned long long) >- (jiffies - gl->gl_demote_time)*(1000000/HZ)); >- } >- if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { >- if (!test_bit(GLF_LOCK, &gl->gl_flags) && >- list_empty(&gl->gl_holders)) { >- error = dump_inode(gi, gl->gl_object); >- if (error) >- goto out; >- } else { >- error = -ENOBUFS; >- print_dbg(gi, " Inode: busy\n"); >- } >- } >- >- error = 0; >- >+ if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) >+ error = glops->go_dump(seq, gl); > out: >- spin_unlock(&gl->gl_spin); > return error; > } > >+static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl) >+{ >+ int ret; >+ spin_lock(&gl->gl_spin); >+ ret = __dump_glock(seq, gl); >+ spin_unlock(&gl->gl_spin); >+ return ret; >+} >+ > /** > * gfs2_dump_lockstate - print out the 
current lockstate > * @sdp: the filesystem >@@ -2050,7 +1841,7 @@ void gfs2_glock_exit(void) > module_param(scand_secs, uint, S_IRUGO|S_IWUSR); > MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); > >-static int gfs2_glock_iter_next(struct glock_iter *gi) >+static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) > { > struct gfs2_glock *gl; > >@@ -2068,7 +1859,7 @@ restart: > gfs2_glock_put(gl); > if (gl && gi->gl == NULL) > gi->hash++; >- while(gi->gl == NULL) { >+ while (gi->gl == NULL) { > if (gi->hash >= GFS2_GL_HASH_SIZE) > return 1; > read_lock(gl_lock_addr(gi->hash)); >@@ -2086,58 +1877,34 @@ restart: > return 0; > } > >-static void gfs2_glock_iter_free(struct glock_iter *gi) >+static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi) > { > if (gi->gl) > gfs2_glock_put(gi->gl); >- kfree(gi); >-} >- >-static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) >-{ >- struct glock_iter *gi; >- >- gi = kmalloc(sizeof (*gi), GFP_KERNEL); >- if (!gi) >- return NULL; >- >- gi->sdp = sdp; >- gi->hash = 0; >- gi->seq = NULL; > gi->gl = NULL; >- memset(gi->string, 0, sizeof(gi->string)); >- >- if (gfs2_glock_iter_next(gi)) { >- gfs2_glock_iter_free(gi); >- return NULL; >- } >- >- return gi; > } > >-static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) >+static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos) > { >- struct glock_iter *gi; >+ struct gfs2_glock_iter *gi = seq->private; > loff_t n = *pos; > >- gi = gfs2_glock_iter_init(file->private); >- if (!gi) >- return NULL; >+ gi->hash = 0; > >- while(n--) { >+ do { > if (gfs2_glock_iter_next(gi)) { > gfs2_glock_iter_free(gi); > return NULL; > } >- } >+ } while (n--); > >- return gi; >+ return gi->gl; > } > >-static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, >+static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr, > loff_t *pos) > { >- struct glock_iter *gi = iter_ptr; >+ struct gfs2_glock_iter *gi = 
seq->private; > > (*pos)++; > >@@ -2146,24 +1913,18 @@ static void *gfs2_glock_seq_next(struct > return NULL; > } > >- return gi; >+ return gi->gl; > } > >-static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr) >+static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr) > { >- struct glock_iter *gi = iter_ptr; >- if (gi) >- gfs2_glock_iter_free(gi); >+ struct gfs2_glock_iter *gi = seq->private; >+ gfs2_glock_iter_free(gi); > } > >-static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr) >+static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr) > { >- struct glock_iter *gi = iter_ptr; >- >- gi->seq = file; >- dump_glock(gi, gi->gl); >- >- return 0; >+ return dump_glock(seq, iter_ptr); > } > > static struct seq_operations gfs2_glock_seq_ops = { >@@ -2175,17 +1936,14 @@ static struct seq_operations gfs2_glock_ > > static int gfs2_debugfs_open(struct inode *inode, struct file *file) > { >- struct seq_file *seq; >- int ret; >- >- ret = seq_open(file, &gfs2_glock_seq_ops); >- if (ret) >- return ret; >- >- seq = file->private_data; >- seq->private = inode->i_private; >- >- return 0; >+ int ret = seq_open_private(file, &gfs2_glock_seq_ops, >+ sizeof(struct gfs2_glock_iter)); >+ if (ret == 0) { >+ struct seq_file *seq = file->private_data; >+ struct gfs2_glock_iter *gi = seq->private; >+ gi->sdp = inode->i_private; >+ } >+ return ret; > } > > static const struct file_operations gfs2_debug_fops = { >@@ -2193,7 +1951,7 @@ static const struct file_operations gfs2 > .open = gfs2_debugfs_open, > .read = seq_read, > .llseek = seq_lseek, >- .release = seq_release >+ .release = seq_release_private, > }; > > int gfs2_create_debugfs_file(struct gfs2_sbd *sdp) >diff -pur a/fs/gfs2/glock.h b/fs/gfs2/glock.h >--- a/fs/gfs2/glock.h 2008-04-29 13:28:24.000000000 -0500 >+++ b/fs/gfs2/glock.h 2008-05-16 10:21:04.000000000 -0500 >@@ -25,28 +25,26 @@ > #define GL_SKIP 0x00000100 > #define GL_ATIME 0x00000200 > #define GL_NOCACHE 0x00000400 
>-#define GL_FLOCK 0x00000800 >-#define GL_NOCANCEL 0x00001000 > > #define GLR_TRYFAILED 13 >-#define GLR_CANCELED 14 > >-static inline int gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) >+static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) > { > struct gfs2_holder *gh; >- int locked = 0; > > /* Look in glock's list of holders for one with current task as owner */ > spin_lock(&gl->gl_spin); > list_for_each_entry(gh, &gl->gl_holders, gh_list) { >- if (gh->gh_owner_pid == current->pid) { >- locked = 1; >+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) > break; >- } >+ if (gh->gh_owner_pid == current->pid) >+ goto out; > } >+ gh = NULL; >+out: > spin_unlock(&gl->gl_spin); > >- return locked; >+ return gh; > } > > static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl) >@@ -68,7 +66,7 @@ static inline int gfs2_glock_is_blocking > { > int ret; > spin_lock(&gl->gl_spin); >- ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3); >+ ret = test_bit(GLF_DEMOTE, &gl->gl_flags); > spin_unlock(&gl->gl_spin); > return ret; > } >@@ -97,6 +95,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *s > int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs); > void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs); > void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); >+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...); > > /** > * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock >@@ -129,7 +128,6 @@ int gfs2_lvb_hold(struct gfs2_glock *gl) > void gfs2_lvb_unhold(struct gfs2_glock *gl); > > void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); >- > void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl); > void gfs2_reclaim_glock(struct gfs2_sbd *sdp); > void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait); >diff -pur a/fs/gfs2/glops.c b/fs/gfs2/glops.c >--- a/fs/gfs2/glops.c 2008-04-29 13:34:10.000000000 -0500 >+++ b/fs/gfs2/glops.c 
2008-05-16 16:00:04.000000000 -0500 >@@ -13,6 +13,7 @@ > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> > #include <linux/lm_interface.h> >+#include <linux/bio.h> > > #include "gfs2.h" > #include "incore.h" >@@ -90,7 +91,6 @@ static void gfs2_pte_inval(struct gfs2_g > return; > > unmap_shared_mapping_range(inode->i_mapping, 0, 0); >- > if (test_bit(GIF_SW_PAGED, &ip->i_flags)) > set_bit(GLF_DIRTY, &gl->gl_flags); > >@@ -171,6 +171,11 @@ static void inode_go_sync(struct gfs2_gl > { > struct gfs2_inode *ip = gl->gl_object; > >+ if (gl->gl_state != LM_ST_UNLOCKED) >+ gfs2_pte_inval(gl); >+ if (gl->gl_state != LM_ST_EXCLUSIVE) >+ return; >+ > if (ip && !S_ISREG(ip->i_inode.i_mode)) > ip = NULL; > >@@ -195,58 +200,6 @@ static void inode_go_sync(struct gfs2_gl > } > > /** >- * inode_go_xmote_th - promote/demote a glock >- * @gl: the glock >- * @state: the requested state >- * @flags: >- * >- */ >- >-static void inode_go_xmote_th(struct gfs2_glock *gl) >-{ >- if (gl->gl_state != LM_ST_UNLOCKED) >- gfs2_pte_inval(gl); >- if (gl->gl_state == LM_ST_EXCLUSIVE) >- inode_go_sync(gl); >-} >- >-/** >- * inode_go_xmote_bh - After promoting/demoting a glock >- * @gl: the glock >- * >- */ >- >-static void inode_go_xmote_bh(struct gfs2_glock *gl) >-{ >- struct gfs2_holder *gh = gl->gl_req_gh; >- struct buffer_head *bh; >- int error; >- >- if (gl->gl_state != LM_ST_UNLOCKED && >- (!gh || !(gh->gh_flags & GL_SKIP))) { >- error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh); >- if (!error) >- brelse(bh); >- } >-} >- >-/** >- * inode_go_drop_th - unlock a glock >- * @gl: the glock >- * >- * Invoked from rq_demote(). >- * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long) >- * is being purged from our node's glock cache; we're dropping lock. 
>- */ >- >-static void inode_go_drop_th(struct gfs2_glock *gl) >-{ >- gfs2_pte_inval(gl); >- if (gl->gl_state == LM_ST_EXCLUSIVE) >- inode_go_sync(gl); >-} >- >-/** > * inode_go_inval - prepare a inode glock to be released > * @gl: the glock > * @flags: >@@ -324,6 +277,26 @@ static int inode_go_lock(struct gfs2_hol > } > > /** >+ * inode_go_dump - print information about an inode >+ * @gi: The iterator >+ * @ip: the inode >+ * >+ * Returns: 0 on success, -ENOBUFS when we run out of space >+ */ >+ >+static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) >+{ >+ const struct gfs2_inode *ip = gl->gl_object; >+ if (ip == NULL) >+ return 0; >+ gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n", >+ (unsigned long long)ip->i_no_formal_ino, >+ (unsigned long long)ip->i_no_addr, >+ IF2DT(ip->i_inode.i_mode), ip->i_flags); >+ return 0; >+} >+ >+/** > * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock > * @gl: the glock > * >@@ -363,7 +336,23 @@ static void rgrp_go_unlock(struct gfs2_h > } > > /** >- * trans_go_xmote_th - promote/demote the transaction glock >+ * rgrp_go_dump - print out an rgrp >+ * @seq: The iterator >+ * @gl: The glock in question >+ * >+ */ >+ >+static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl) >+{ >+ const struct gfs2_rgrpd *rgd = gl->gl_object; >+ if (rgd == NULL) >+ return 0; >+ gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr); >+ return 0; >+} >+ >+/** >+ * trans_go_sync - promote/demote the transaction glock > * @gl: the glock > * @state: the requested state > * @flags: >@@ -387,7 +376,7 @@ static void trans_go_xmote_th(struct gfs > * > */ > >-static void trans_go_xmote_bh(struct gfs2_glock *gl) >+static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) > { > struct gfs2_sbd *sdp = gl->gl_sbd; > struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); >@@ -395,8 +384,7 @@ static void trans_go_xmote_bh(struct gfs > struct gfs2_log_header_host 
head; > int error; > >- if (gl->gl_state != LM_ST_UNLOCKED && >- test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { >+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { > j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); > > error = gfs2_find_jhead(sdp->sd_jdesc, &head); >@@ -411,24 +399,7 @@ static void trans_go_xmote_bh(struct gfs > gfs2_log_pointers_init(sdp, head.lh_blkno); > } > } >-} >- >-/** >- * trans_go_drop_th - unlock the transaction glock >- * @gl: the glock >- * >- * We want to sync the device even with localcaching. Remember >- * that localcaching journal replay only marks buffers dirty. >- */ >- >-static void trans_go_drop_th(struct gfs2_glock *gl) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- >- if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { >- gfs2_meta_syncfs(sdp); >- gfs2_log_shutdown(sdp); >- } >+ return 0; > } > > /** >@@ -445,36 +416,33 @@ static int quota_go_demote_ok(struct gfs > > const struct gfs2_glock_operations gfs2_meta_glops = { > .go_xmote_th = meta_go_sync, >- .go_drop_th = meta_go_sync, > .go_type = LM_TYPE_META, > }; > > const struct gfs2_glock_operations gfs2_inode_glops = { >- .go_xmote_th = inode_go_xmote_th, >- .go_xmote_bh = inode_go_xmote_bh, >- .go_drop_th = inode_go_drop_th, >+ .go_xmote_th = inode_go_sync, > .go_inval = inode_go_inval, > .go_demote_ok = inode_go_demote_ok, > .go_lock = inode_go_lock, >+ .go_dump = inode_go_dump, > .go_type = LM_TYPE_INODE, >- .go_min_hold_time = HZ / 10, >+ .go_min_hold_time = HZ / 5, > }; > > const struct gfs2_glock_operations gfs2_rgrp_glops = { > .go_xmote_th = meta_go_sync, >- .go_drop_th = meta_go_sync, > .go_inval = meta_go_inval, > .go_demote_ok = rgrp_go_demote_ok, > .go_lock = rgrp_go_lock, > .go_unlock = rgrp_go_unlock, >+ .go_dump = rgrp_go_dump, > .go_type = LM_TYPE_RGRP, >- .go_min_hold_time = HZ / 10, >+ .go_min_hold_time = HZ / 5, > }; > > const struct gfs2_glock_operations gfs2_trans_glops = { > .go_xmote_th = trans_go_xmote_th, > .go_xmote_bh = trans_go_xmote_bh, >- .go_drop_th = 
trans_go_drop_th, > .go_type = LM_TYPE_NONDISK, > }; > >diff -pur a/fs/gfs2/incore.h b/fs/gfs2/incore.h >--- a/fs/gfs2/incore.h 2008-04-29 13:34:10.000000000 -0500 >+++ b/fs/gfs2/incore.h 2008-05-20 18:02:46.000000000 -0500 >@@ -128,24 +128,20 @@ struct gfs2_bufdata { > > struct gfs2_glock_operations { > void (*go_xmote_th) (struct gfs2_glock *gl); >- void (*go_xmote_bh) (struct gfs2_glock *gl); >- void (*go_drop_th) (struct gfs2_glock *gl); >+ int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh); > void (*go_inval) (struct gfs2_glock *gl, int flags); > int (*go_demote_ok) (struct gfs2_glock *gl); > int (*go_lock) (struct gfs2_holder *gh); > void (*go_unlock) (struct gfs2_holder *gh); >+ int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl); > const int go_type; > const unsigned long go_min_hold_time; > }; > > enum { >- /* Actions */ >- HIF_PROMOTE = 1, >- > /* States */ >- HIF_HOLDER = 6, >+ HIF_HOLDER = 6, /* Set for gh that "holds" the glock */ > HIF_FIRST = 7, >- HIF_ABORTED = 9, > HIF_WAIT = 10, > }; > >@@ -158,19 +154,22 @@ struct gfs2_holder { > unsigned gh_flags; > > int gh_error; >- unsigned long gh_iflags; >+ unsigned long gh_iflags; /* HIF_... 
*/ > unsigned long gh_ip; > }; > > enum { >- GLF_LOCK = 1, >- GLF_STICKY = 2, >- GLF_DEMOTE = 3, >- GLF_PENDING_DEMOTE = 4, >- GLF_DIRTY = 5, >- GLF_DEMOTE_IN_PROGRESS = 6, >- GLF_LFLUSH = 7, >- GLF_WAITERS2 = 8, >+ GLF_LOCK = 1, >+ GLF_STICKY = 2, >+ GLF_DEMOTE = 3, >+ GLF_PENDING_DEMOTE = 4, >+ GLF_DEMOTE_IN_PROGRESS = 5, >+ GLF_DIRTY = 6, >+ GLF_LFLUSH = 7, >+ GLF_INVALIDATE_IN_PROGRESS = 8, >+ GLF_REPLY_PENDING = 9, >+ GLF_TRYFAILED = 10, >+ GLF_DEMOTE_FROM_FINISH_XMOTE = 11, > }; > > struct gfs2_glock { >@@ -182,19 +181,16 @@ struct gfs2_glock { > spinlock_t gl_spin; > > unsigned int gl_state; >+ unsigned int gl_target; >+ unsigned int gl_reply; > unsigned int gl_hash; > unsigned int gl_demote_state; /* state requested by remote node */ >+ unsigned int gl_prevdemote_state; /* state requested by remote node */ >+ const char *gl_lastdemote_place; /* last place gl_demote_state set */ > unsigned long gl_demote_time; /* time of first demote request */ >- pid_t gl_owner_pid; >- unsigned long gl_ip; > struct list_head gl_holders; >- struct list_head gl_waiters1; /* HIF_MUTEX */ >- struct list_head gl_waiters3; /* HIF_PROMOTE */ > > const struct gfs2_glock_operations *gl_ops; >- >- struct gfs2_holder *gl_req_gh; >- > void *gl_lock; > char *gl_lvb; > atomic_t gl_lvb_count; >diff -pur a/fs/gfs2/lm.h b/fs/gfs2/lm.h >--- a/fs/gfs2/lm.h 2008-04-29 13:27:44.000000000 -0500 >+++ b/fs/gfs2/lm.h 2008-04-21 15:23:20.000000000 -0500 >@@ -22,13 +22,9 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sd > int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name, > void **lockp); > void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock); >-unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, >- unsigned int cur_state, unsigned int req_state, >- unsigned int flags); > unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock, > unsigned int cur_state); > void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock); >-int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char 
**lvbp); > void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb); > int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name, > struct file *file, struct file_lock *fl); >diff -pur a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c >--- a/fs/gfs2/locking/dlm/lock.c 2008-04-29 13:28:06.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/lock.c 2008-05-12 11:13:44.000000000 -0500 >@@ -11,46 +11,63 @@ > > static char junk_lvb[GDLM_LVB_SIZE]; > >-static void queue_complete(struct gdlm_lock *lp) >+ >+/* convert dlm lock-mode to gfs lock-state */ >+ >+static s16 gdlm_make_lmstate(s16 dlmmode) > { >- struct gdlm_ls *ls = lp->ls; >+ switch (dlmmode) { >+ case DLM_LOCK_IV: >+ case DLM_LOCK_NL: >+ return LM_ST_UNLOCKED; >+ case DLM_LOCK_EX: >+ return LM_ST_EXCLUSIVE; >+ case DLM_LOCK_CW: >+ return LM_ST_DEFERRED; >+ case DLM_LOCK_PR: >+ return LM_ST_SHARED; >+ } >+ gdlm_assert(0, "unknown DLM mode %d", dlmmode); >+ return -1; >+} > >- clear_bit(LFL_ACTIVE, &lp->flags); >+/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm >+ thread gets to it. 
*/ >+ >+static void queue_submit(struct gdlm_lock *lp) >+{ >+ struct gdlm_ls *ls = lp->ls; > > spin_lock(&ls->async_lock); >- list_add_tail(&lp->clist, &ls->complete); >+ list_add_tail(&lp->delay_list, &ls->submit); > spin_unlock(&ls->async_lock); > wake_up(&ls->thread_wait); > } > >-static inline void gdlm_ast(void *astarg) >+static void wake_up_ast(struct gdlm_lock *lp) > { >- queue_complete(astarg); >+ clear_bit(LFL_AST_WAIT, &lp->flags); >+ smp_mb__after_clear_bit(); >+ wake_up_bit(&lp->flags, LFL_AST_WAIT); > } > >-static inline void gdlm_bast(void *astarg, int mode) >+static void gdlm_delete_lp(struct gdlm_lock *lp) > { >- struct gdlm_lock *lp = astarg; > struct gdlm_ls *ls = lp->ls; > >- if (!mode) { >- printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- return; >- } >- > spin_lock(&ls->async_lock); >- if (!lp->bast_mode) { >- list_add_tail(&lp->blist, &ls->blocking); >- lp->bast_mode = mode; >- } else if (lp->bast_mode < mode) >- lp->bast_mode = mode; >+ if (!list_empty(&lp->delay_list)) >+ list_del_init(&lp->delay_list); >+ gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number); >+ list_del_init(&lp->all_list); >+ ls->all_locks_count--; > spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >+ >+ kfree(lp); > } > >-void gdlm_queue_delayed(struct gdlm_lock *lp) >+static void gdlm_queue_delayed(struct gdlm_lock *lp) > { > struct gdlm_ls *ls = lp->ls; > >@@ -59,6 +76,247 @@ void gdlm_queue_delayed(struct gdlm_lock > spin_unlock(&ls->async_lock); > } > >+static void process_complete(struct gdlm_lock *lp) >+{ >+ struct gdlm_ls *ls = lp->ls; >+ struct lm_async_cb acb; >+ s16 prev_mode = lp->cur; >+ >+ memset(&acb, 0, sizeof(acb)); >+ >+ if (lp->lksb.sb_status == -DLM_ECANCEL) { >+ log_info("complete dlm cancel %x,%llx flags %lx", >+ lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number, >+ 
lp->flags); >+ >+ lp->req = lp->cur; >+ acb.lc_ret |= LM_OUT_CANCELED; >+ if (lp->cur == DLM_LOCK_IV) >+ lp->lksb.sb_lkid = 0; >+ goto out; >+ } >+ >+ if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) { >+ if (lp->lksb.sb_status != -DLM_EUNLOCK) { >+ log_info("unlock sb_status %d %x,%llx flags %lx", >+ lp->lksb.sb_status, lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number, >+ lp->flags); >+ return; >+ } >+ >+ lp->cur = DLM_LOCK_IV; >+ lp->req = DLM_LOCK_IV; >+ lp->lksb.sb_lkid = 0; >+ >+ if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) { >+ gdlm_delete_lp(lp); >+ return; >+ } >+ goto out; >+ } >+ >+ if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID) >+ memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE); >+ >+ if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) { >+ if (lp->req == DLM_LOCK_PR) >+ lp->req = DLM_LOCK_CW; >+ else if (lp->req == DLM_LOCK_CW) >+ lp->req = DLM_LOCK_PR; >+ } >+ >+ /* >+ * A canceled lock request. The lock was just taken off the delayed >+ * list and was never even submitted to dlm. >+ */ >+ >+ if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) { >+ log_info("complete internal cancel %x,%llx", >+ lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number); >+ lp->req = lp->cur; >+ acb.lc_ret |= LM_OUT_CANCELED; >+ goto out; >+ } >+ >+ /* >+ * An error occurred. >+ */ >+ >+ if (lp->lksb.sb_status) { >+ /* a "normal" error */ >+ if ((lp->lksb.sb_status == -EAGAIN) && >+ (lp->lkf & DLM_LKF_NOQUEUE)) { >+ lp->req = lp->cur; >+ if (lp->cur == DLM_LOCK_IV) >+ lp->lksb.sb_lkid = 0; >+ goto out; >+ } >+ >+ /* this could only happen with cancels I think */ >+ log_info("ast sb_status %d %x,%llx flags %lx", >+ lp->lksb.sb_status, lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number, >+ lp->flags); >+ if (lp->lksb.sb_status == -EDEADLOCK) { >+ lp->req = lp->cur; >+ if (lp->cur == DLM_LOCK_IV) >+ lp->lksb.sb_lkid = 0; >+ goto out; >+ } else >+ return; >+ } >+ >+ /* >+ * This is an AST for an EX->EX conversion for sync_lvb from GFS. 
>+ */ >+ >+ if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) { >+ wake_up_ast(lp); >+ return; >+ } >+ >+ /* >+ * A lock has been demoted to NL because it initially completed during >+ * BLOCK_LOCKS. Now it must be requested in the originally requested >+ * mode. >+ */ >+ >+ if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) { >+ gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx", >+ lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number); >+ gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx", >+ lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number); >+ >+ lp->cur = DLM_LOCK_NL; >+ lp->req = lp->prev_req; >+ lp->prev_req = DLM_LOCK_IV; >+ lp->lkf &= ~DLM_LKF_CONVDEADLK; >+ >+ set_bit(LFL_NOCACHE, &lp->flags); >+ >+ if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >+ !test_bit(LFL_NOBLOCK, &lp->flags)) >+ gdlm_queue_delayed(lp); >+ else >+ queue_submit(lp); >+ return; >+ } >+ >+ /* >+ * A request is granted during dlm recovery. It may be granted >+ * because the locks of a failed node were cleared. In that case, >+ * there may be inconsistent data beneath this lock and we must wait >+ * for recovery to complete to use it. When gfs recovery is done this >+ * granted lock will be converted to NL and then reacquired in this >+ * granted state. >+ */ >+ >+ if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >+ !test_bit(LFL_NOBLOCK, &lp->flags) && >+ lp->req != DLM_LOCK_NL) { >+ >+ lp->cur = lp->req; >+ lp->prev_req = lp->req; >+ lp->req = DLM_LOCK_NL; >+ lp->lkf |= DLM_LKF_CONVERT; >+ lp->lkf &= ~DLM_LKF_CONVDEADLK; >+ >+ log_debug("rereq %x,%llx id %x %d,%d", >+ lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number, >+ lp->lksb.sb_lkid, lp->cur, lp->req); >+ >+ set_bit(LFL_REREQUEST, &lp->flags); >+ queue_submit(lp); >+ return; >+ } >+ >+ /* >+ * DLM demoted the lock to NL before it was granted so GFS must be >+ * told it cannot cache data for this lock. 
>+ */ >+ >+ if (lp->lksb.sb_flags & DLM_SBF_DEMOTED) >+ set_bit(LFL_NOCACHE, &lp->flags); >+ >+out: >+ /* >+ * This is an internal lock_dlm lock >+ */ >+ >+ if (test_bit(LFL_INLOCK, &lp->flags)) { >+ clear_bit(LFL_NOBLOCK, &lp->flags); >+ lp->cur = lp->req; >+ wake_up_ast(lp); >+ return; >+ } >+ >+ /* >+ * Normal completion of a lock request. Tell GFS it now has the lock. >+ */ >+ >+ clear_bit(LFL_NOBLOCK, &lp->flags); >+ lp->cur = lp->req; >+ >+ acb.lc_name = lp->lockname; >+ acb.lc_ret |= gdlm_make_lmstate(lp->cur); >+ >+ if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) && >+ (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL)) >+ acb.lc_ret |= LM_OUT_CACHEABLE; >+ >+ ls->fscb(ls->sdp, LM_CB_ASYNC, &acb); >+} >+ >+static void gdlm_ast(void *astarg) >+{ >+ struct gdlm_lock *lp = astarg; >+ clear_bit(LFL_ACTIVE, &lp->flags); >+ process_complete(lp); >+} >+ >+static void process_blocking(struct gdlm_lock *lp, int bast_mode) >+{ >+ struct gdlm_ls *ls = lp->ls; >+ unsigned int cb = 0; >+ >+ switch (gdlm_make_lmstate(bast_mode)) { >+ case LM_ST_EXCLUSIVE: >+ cb = LM_CB_NEED_E; >+ break; >+ case LM_ST_DEFERRED: >+ cb = LM_CB_NEED_D; >+ break; >+ case LM_ST_SHARED: >+ cb = LM_CB_NEED_S; >+ break; >+ default: >+ gdlm_assert(0, "unknown bast mode %u", bast_mode); >+ } >+ >+ ls->fscb(ls->sdp, cb, &lp->lockname); >+} >+ >+ >+static void gdlm_bast(void *astarg, int mode) >+{ >+ struct gdlm_lock *lp = astarg; >+ >+ if (!mode) { >+ printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n", >+ lp->lockname.ln_type, >+ (unsigned long long)lp->lockname.ln_number); >+ return; >+ } >+ >+ process_blocking(lp, mode); >+} >+ > /* convert gfs lock-state to dlm lock-mode */ > > static s16 make_mode(s16 lmstate) >@@ -77,24 +335,6 @@ static s16 make_mode(s16 lmstate) > return -1; > } > >-/* convert dlm lock-mode to gfs lock-state */ >- >-s16 gdlm_make_lmstate(s16 dlmmode) >-{ >- switch (dlmmode) { >- case DLM_LOCK_IV: >- case DLM_LOCK_NL: >- return LM_ST_UNLOCKED; >- case DLM_LOCK_EX: >- 
return LM_ST_EXCLUSIVE; >- case DLM_LOCK_CW: >- return LM_ST_DEFERRED; >- case DLM_LOCK_PR: >- return LM_ST_SHARED; >- } >- gdlm_assert(0, "unknown DLM mode %d", dlmmode); >- return -1; >-} > > /* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and > DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */ >@@ -164,7 +404,7 @@ static int gdlm_create_lp(struct gdlm_ls > { > struct gdlm_lock *lp; > >- lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL); >+ lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS); > if (!lp) > return -ENOMEM; > >@@ -172,10 +412,6 @@ static int gdlm_create_lp(struct gdlm_ls > make_strname(name, &lp->strname); > lp->ls = ls; > lp->cur = DLM_LOCK_IV; >- lp->lvb = NULL; >- lp->hold_null = NULL; >- INIT_LIST_HEAD(&lp->clist); >- INIT_LIST_HEAD(&lp->blist); > INIT_LIST_HEAD(&lp->delay_list); > > spin_lock(&ls->async_lock); >@@ -187,26 +423,6 @@ static int gdlm_create_lp(struct gdlm_ls > return 0; > } > >-void gdlm_delete_lp(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- spin_lock(&ls->async_lock); >- if (!list_empty(&lp->clist)) >- list_del_init(&lp->clist); >- if (!list_empty(&lp->blist)) >- list_del_init(&lp->blist); >- if (!list_empty(&lp->delay_list)) >- list_del_init(&lp->delay_list); >- gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- list_del_init(&lp->all_list); >- ls->all_locks_count--; >- spin_unlock(&ls->async_lock); >- >- kfree(lp); >-} >- > int gdlm_get_lock(void *lockspace, struct lm_lockname *name, > void **lockp) > { >@@ -260,7 +476,7 @@ unsigned int gdlm_do_lock(struct gdlm_lo > > if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) { > lp->lksb.sb_status = -EAGAIN; >- queue_complete(lp); >+ gdlm_ast(lp); > error = 0; > } > >@@ -307,6 +523,9 @@ unsigned int gdlm_lock(void *lock, unsig > { > struct gdlm_lock *lp = lock; > >+ if (req_state == LM_ST_UNLOCKED) >+ return gdlm_unlock(lock, cur_state); >+ > 
clear_bit(LFL_DLM_CANCEL, &lp->flags); > if (flags & LM_FLAG_NOEXP) > set_bit(LFL_NOBLOCK, &lp->flags); >@@ -350,7 +569,7 @@ void gdlm_cancel(void *lock) > if (delay_list) { > set_bit(LFL_CANCEL, &lp->flags); > set_bit(LFL_ACTIVE, &lp->flags); >- queue_complete(lp); >+ gdlm_ast(lp); > return; > } > >@@ -382,7 +601,7 @@ static int gdlm_add_lvb(struct gdlm_lock > { > char *lvb; > >- lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL); >+ lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); > if (!lvb) > return -ENOMEM; > >diff -pur a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h >--- a/fs/gfs2/locking/dlm/lock_dlm.h 2008-04-29 13:28:06.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/lock_dlm.h 2008-05-12 11:12:59.000000000 -0500 >@@ -13,7 +13,6 @@ > #include <linux/module.h> > #include <linux/slab.h> > #include <linux/spinlock.h> >-#include <linux/module.h> > #include <linux/types.h> > #include <linux/string.h> > #include <linux/list.h> >@@ -72,15 +71,12 @@ struct gdlm_ls { > int recover_jid_done; > int recover_jid_status; > spinlock_t async_lock; >- struct list_head complete; >- struct list_head blocking; > struct list_head delayed; > struct list_head submit; > struct list_head all_locks; > u32 all_locks_count; > wait_queue_head_t wait_control; >- struct task_struct *thread1; >- struct task_struct *thread2; >+ struct task_struct *thread; > wait_queue_head_t thread_wait; > unsigned long drop_time; > int drop_locks_count; >@@ -117,10 +113,6 @@ struct gdlm_lock { > u32 lkf; /* dlm flags DLM_LKF_ */ > unsigned long flags; /* lock_dlm flags LFL_ */ > >- int bast_mode; /* protected by async_lock */ >- >- struct list_head clist; /* complete */ >- struct list_head blist; /* blocking */ > struct list_head delay_list; /* delayed */ > struct list_head all_list; /* all locks for the fs */ > struct gdlm_lock *hold_null; /* NL lock for hold_lvb */ >@@ -159,11 +151,8 @@ void gdlm_release_threads(struct gdlm_ls > > /* lock.c */ > >-s16 gdlm_make_lmstate(s16); >-void gdlm_queue_delayed(struct 
gdlm_lock *); > void gdlm_submit_delayed(struct gdlm_ls *); > int gdlm_release_all_locks(struct gdlm_ls *); >-void gdlm_delete_lp(struct gdlm_lock *); > unsigned int gdlm_do_lock(struct gdlm_lock *); > > int gdlm_get_lock(void *, struct lm_lockname *, void **); >@@ -174,6 +163,10 @@ void gdlm_cancel(void *); > int gdlm_hold_lvb(void *, char **); > void gdlm_unhold_lvb(void *, char *); > >+/* mount.c */ >+ >+extern const struct lm_lockops gdlm_ops; >+ > /* plock.c */ > > int gdlm_plock_init(void); >diff -pur a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c >--- a/fs/gfs2/locking/dlm/main.c 2008-04-29 13:28:00.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/main.c 2008-05-12 10:37:16.000000000 -0500 >@@ -11,8 +11,6 @@ > > #include "lock_dlm.h" > >-extern struct lm_lockops gdlm_ops; >- > static int __init init_lock_dlm(void) > { > int error; >diff -pur a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c >--- a/fs/gfs2/locking/dlm/mount.c 2008-04-29 13:28:11.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/mount.c 2008-05-12 10:44:58.000000000 -0500 >@@ -28,15 +28,11 @@ static struct gdlm_ls *init_gdlm(lm_call > ls->sdp = sdp; > ls->fsflags = flags; > spin_lock_init(&ls->async_lock); >- INIT_LIST_HEAD(&ls->complete); >- INIT_LIST_HEAD(&ls->blocking); > INIT_LIST_HEAD(&ls->delayed); > INIT_LIST_HEAD(&ls->submit); > INIT_LIST_HEAD(&ls->all_locks); > init_waitqueue_head(&ls->thread_wait); > init_waitqueue_head(&ls->wait_control); >- ls->thread1 = NULL; >- ls->thread2 = NULL; > ls->drop_time = jiffies; > ls->jid = -1; > >@@ -67,6 +63,11 @@ static int make_args(struct gdlm_ls *ls, > memset(data, 0, 256); > strncpy(data, data_arg, 255); > >+ if (!strlen(data)) { >+ log_error("no mount options, (u)mount helpers not installed"); >+ return -EINVAL; >+ } >+ > for (options = data; (x = strsep(&options, ":")); ) { > if (!*x) > continue; >diff -pur a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c >--- a/fs/gfs2/locking/dlm/sysfs.c 2008-04-29 13:28:00.000000000 
-0500 >+++ b/fs/gfs2/locking/dlm/sysfs.c 2008-05-12 10:45:36.000000000 -0500 >@@ -12,8 +12,6 @@ > > #include "lock_dlm.h" > >-extern struct lm_lockops gdlm_ops; >- > static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf) > { > return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name); >diff -pur a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c >--- a/fs/gfs2/locking/dlm/thread.c 2008-04-29 13:28:29.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/thread.c 2008-05-12 09:39:04.000000000 -0500 >@@ -9,247 +9,12 @@ > > #include "lock_dlm.h" > >-/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm >- thread gets to it. */ >- >-static void queue_submit(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- spin_lock(&ls->async_lock); >- list_add_tail(&lp->delay_list, &ls->submit); >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-static void process_blocking(struct gdlm_lock *lp, int bast_mode) >-{ >- struct gdlm_ls *ls = lp->ls; >- unsigned int cb = 0; >- >- switch (gdlm_make_lmstate(bast_mode)) { >- case LM_ST_EXCLUSIVE: >- cb = LM_CB_NEED_E; >- break; >- case LM_ST_DEFERRED: >- cb = LM_CB_NEED_D; >- break; >- case LM_ST_SHARED: >- cb = LM_CB_NEED_S; >- break; >- default: >- gdlm_assert(0, "unknown bast mode %u", lp->bast_mode); >- } >- >- ls->fscb(ls->sdp, cb, &lp->lockname); >-} >- >-static void wake_up_ast(struct gdlm_lock *lp) >-{ >- clear_bit(LFL_AST_WAIT, &lp->flags); >- smp_mb__after_clear_bit(); >- wake_up_bit(&lp->flags, LFL_AST_WAIT); >-} >- >-static void process_complete(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- struct lm_async_cb acb; >- s16 prev_mode = lp->cur; >- >- memset(&acb, 0, sizeof(acb)); >- >- if (lp->lksb.sb_status == -DLM_ECANCEL) { >- log_info("complete dlm cancel %x,%llx flags %lx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->flags); >- >- lp->req = lp->cur; >- acb.lc_ret |= LM_OUT_CANCELED; >- if (lp->cur == DLM_LOCK_IV) >- 
lp->lksb.sb_lkid = 0; >- goto out; >- } >- >- if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) { >- if (lp->lksb.sb_status != -DLM_EUNLOCK) { >- log_info("unlock sb_status %d %x,%llx flags %lx", >- lp->lksb.sb_status, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->flags); >- return; >- } >- >- lp->cur = DLM_LOCK_IV; >- lp->req = DLM_LOCK_IV; >- lp->lksb.sb_lkid = 0; >- >- if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) { >- gdlm_delete_lp(lp); >- return; >- } >- goto out; >- } >- >- if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID) >- memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE); >- >- if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) { >- if (lp->req == DLM_LOCK_PR) >- lp->req = DLM_LOCK_CW; >- else if (lp->req == DLM_LOCK_CW) >- lp->req = DLM_LOCK_PR; >- } >- >- /* >- * A canceled lock request. The lock was just taken off the delayed >- * list and was never even submitted to dlm. >- */ >- >- if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) { >- log_info("complete internal cancel %x,%llx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- lp->req = lp->cur; >- acb.lc_ret |= LM_OUT_CANCELED; >- goto out; >- } >- >- /* >- * An error occured. >- */ >- >- if (lp->lksb.sb_status) { >- /* a "normal" error */ >- if ((lp->lksb.sb_status == -EAGAIN) && >- (lp->lkf & DLM_LKF_NOQUEUE)) { >- lp->req = lp->cur; >- if (lp->cur == DLM_LOCK_IV) >- lp->lksb.sb_lkid = 0; >- goto out; >- } >- >- /* this could only happen with cancels I think */ >- log_info("ast sb_status %d %x,%llx flags %lx", >- lp->lksb.sb_status, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->flags); >- return; >- } >- >- /* >- * This is an AST for an EX->EX conversion for sync_lvb from GFS. >- */ >- >- if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) { >- wake_up_ast(lp); >- return; >- } >- >- /* >- * A lock has been demoted to NL because it initially completed during >- * BLOCK_LOCKS. 
Now it must be requested in the originally requested >- * mode. >- */ >- >- if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) { >- gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- >- lp->cur = DLM_LOCK_NL; >- lp->req = lp->prev_req; >- lp->prev_req = DLM_LOCK_IV; >- lp->lkf &= ~DLM_LKF_CONVDEADLK; >- >- set_bit(LFL_NOCACHE, &lp->flags); >- >- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >- !test_bit(LFL_NOBLOCK, &lp->flags)) >- gdlm_queue_delayed(lp); >- else >- queue_submit(lp); >- return; >- } >- >- /* >- * A request is granted during dlm recovery. It may be granted >- * because the locks of a failed node were cleared. In that case, >- * there may be inconsistent data beneath this lock and we must wait >- * for recovery to complete to use it. When gfs recovery is done this >- * granted lock will be converted to NL and then reacquired in this >- * granted state. >- */ >- >- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >- !test_bit(LFL_NOBLOCK, &lp->flags) && >- lp->req != DLM_LOCK_NL) { >- >- lp->cur = lp->req; >- lp->prev_req = lp->req; >- lp->req = DLM_LOCK_NL; >- lp->lkf |= DLM_LKF_CONVERT; >- lp->lkf &= ~DLM_LKF_CONVDEADLK; >- >- log_debug("rereq %x,%llx id %x %d,%d", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->lksb.sb_lkid, lp->cur, lp->req); >- >- set_bit(LFL_REREQUEST, &lp->flags); >- queue_submit(lp); >- return; >- } >- >- /* >- * DLM demoted the lock to NL before it was granted so GFS must be >- * told it cannot cache data for this lock. 
>- */ >- >- if (lp->lksb.sb_flags & DLM_SBF_DEMOTED) >- set_bit(LFL_NOCACHE, &lp->flags); >- >-out: >- /* >- * This is an internal lock_dlm lock >- */ >- >- if (test_bit(LFL_INLOCK, &lp->flags)) { >- clear_bit(LFL_NOBLOCK, &lp->flags); >- lp->cur = lp->req; >- wake_up_ast(lp); >- return; >- } >- >- /* >- * Normal completion of a lock request. Tell GFS it now has the lock. >- */ >- >- clear_bit(LFL_NOBLOCK, &lp->flags); >- lp->cur = lp->req; >- >- acb.lc_name = lp->lockname; >- acb.lc_ret |= gdlm_make_lmstate(lp->cur); >- >- if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) && >- (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL)) >- acb.lc_ret |= LM_OUT_CACHEABLE; >- >- ls->fscb(ls->sdp, LM_CB_ASYNC, &acb); >-} >- >-static inline int no_work(struct gdlm_ls *ls, int blocking) >+static inline int no_work(struct gdlm_ls *ls) > { > int ret; > > spin_lock(&ls->async_lock); >- ret = list_empty(&ls->complete) && list_empty(&ls->submit); >- if (ret && blocking) >- ret = list_empty(&ls->blocking); >+ ret = list_empty(&ls->submit); > spin_unlock(&ls->async_lock); > > return ret; >@@ -268,100 +33,55 @@ static inline int check_drop(struct gdlm > return 0; > } > >-static int gdlm_thread(void *data, int blist) >+static int gdlm_thread(void *data) > { > struct gdlm_ls *ls = (struct gdlm_ls *) data; > struct gdlm_lock *lp = NULL; >- uint8_t complete, blocking, submit, drop; >- >- /* Only thread1 is allowed to do blocking callbacks since gfs >- may wait for a completion callback within a blocking cb. 
*/ > > while (!kthread_should_stop()) { > wait_event_interruptible(ls->thread_wait, >- !no_work(ls, blist) || kthread_should_stop()); >- >- complete = blocking = submit = drop = 0; >+ !no_work(ls) || kthread_should_stop()); > > spin_lock(&ls->async_lock); > >- if (blist && !list_empty(&ls->blocking)) { >- lp = list_entry(ls->blocking.next, struct gdlm_lock, >- blist); >- list_del_init(&lp->blist); >- blocking = lp->bast_mode; >- lp->bast_mode = 0; >- } else if (!list_empty(&ls->complete)) { >- lp = list_entry(ls->complete.next, struct gdlm_lock, >- clist); >- list_del_init(&lp->clist); >- complete = 1; >- } else if (!list_empty(&ls->submit)) { >+ if (!list_empty(&ls->submit)) { > lp = list_entry(ls->submit.next, struct gdlm_lock, > delay_list); > list_del_init(&lp->delay_list); >- submit = 1; >- } >- >- drop = check_drop(ls); >- spin_unlock(&ls->async_lock); >- >- if (complete) >- process_complete(lp); >- >- else if (blocking) >- process_blocking(lp, blocking); >- >- else if (submit) >+ spin_unlock(&ls->async_lock); > gdlm_do_lock(lp); >- >- if (drop) >+ spin_lock(&ls->async_lock); >+ } >+ /* Does this ever happen these days? 
I hope not anyway */ >+ if (check_drop(ls)) { >+ spin_unlock(&ls->async_lock); > ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL); >- >- schedule(); >+ spin_lock(&ls->async_lock); >+ } >+ spin_unlock(&ls->async_lock); > } > > return 0; > } > >-static int gdlm_thread1(void *data) >-{ >- return gdlm_thread(data, 1); >-} >- >-static int gdlm_thread2(void *data) >-{ >- return gdlm_thread(data, 0); >-} >- > int gdlm_init_threads(struct gdlm_ls *ls) > { > struct task_struct *p; > int error; > >- p = kthread_run(gdlm_thread1, ls, "lock_dlm1"); >- error = IS_ERR(p); >- if (error) { >- log_error("can't start lock_dlm1 thread %d", error); >- return error; >- } >- ls->thread1 = p; >- >- p = kthread_run(gdlm_thread2, ls, "lock_dlm2"); >+ p = kthread_run(gdlm_thread, ls, "lock_dlm"); > error = IS_ERR(p); > if (error) { >- log_error("can't start lock_dlm2 thread %d", error); >- kthread_stop(ls->thread1); >+ log_error("can't start lock_dlm thread %d", error); > return error; > } >- ls->thread2 = p; >+ ls->thread = p; > > return 0; > } > > void gdlm_release_threads(struct gdlm_ls *ls) > { >- kthread_stop(ls->thread1); >- kthread_stop(ls->thread2); >+ kthread_stop(ls->thread); > } > >diff -pur a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c >--- a/fs/gfs2/locking/nolock/main.c 2008-04-29 13:28:31.000000000 -0500 >+++ b/fs/gfs2/locking/nolock/main.c 2008-05-08 13:29:25.000000000 -0500 >@@ -109,6 +109,8 @@ static void nolock_put_lock(void *lock) > static unsigned int nolock_lock(void *lock, unsigned int cur_state, > unsigned int req_state, unsigned int flags) > { >+ if (req_state == LM_ST_UNLOCKED) >+ return 0; > return req_state | LM_OUT_CACHEABLE; > } > >diff -pur a/fs/gfs2/main.c b/fs/gfs2/main.c >--- a/fs/gfs2/main.c 2008-04-29 13:28:50.000000000 -0500 >+++ b/fs/gfs2/main.c 2008-05-16 10:44:56.000000000 -0500 >@@ -32,6 +32,7 @@ static void gfs2_init_inode_once(void *f > inode_init_once(&ip->i_inode); > init_rwsem(&ip->i_rw_mutex); > ip->i_alloc = NULL; >+ ip->i_gh.gh_gl = 
NULL; > } > } > >@@ -43,8 +44,6 @@ static void gfs2_init_glock_once(void *f > INIT_HLIST_NODE(&gl->gl_list); > spin_lock_init(&gl->gl_spin); > INIT_LIST_HEAD(&gl->gl_holders); >- INIT_LIST_HEAD(&gl->gl_waiters1); >- INIT_LIST_HEAD(&gl->gl_waiters3); > gl->gl_lvb = NULL; > atomic_set(&gl->gl_lvb_count, 0); > INIT_LIST_HEAD(&gl->gl_reclaim); >diff -pur a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c >--- a/fs/gfs2/meta_io.c 2008-04-29 13:34:11.000000000 -0500 >+++ b/fs/gfs2/meta_io.c 2008-05-14 11:42:25.000000000 -0500 >@@ -115,7 +115,7 @@ void gfs2_meta_sync(struct gfs2_glock *g > * Returns: the buffer > */ > >-static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create) >+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) > { > struct address_space *mapping = gl->gl_aspace->i_mapping; > struct gfs2_sbd *sdp = gl->gl_sbd; >@@ -183,7 +183,7 @@ static void meta_prep_new(struct buffer_ > struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) > { > struct buffer_head *bh; >- bh = getbuf(gl, blkno, CREATE); >+ bh = gfs2_getbuf(gl, blkno, CREATE); > meta_prep_new(bh); > return bh; > } >@@ -201,7 +201,7 @@ struct buffer_head *gfs2_meta_new(struct > int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, > struct buffer_head **bhp) > { >- *bhp = getbuf(gl, blkno, CREATE); >+ *bhp = gfs2_getbuf(gl, blkno, CREATE); > if (!buffer_uptodate(*bhp)) { > ll_rw_block(READ, 1, bhp); > if (flags & DIO_WAIT) { >@@ -288,7 +288,7 @@ void gfs2_meta_wipe(struct gfs2_inode *i > struct buffer_head *bh; > > while (blen) { >- bh = getbuf(ip->i_gl, bstart, NO_CREATE); >+ bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE); > if (bh) { > lock_buffer(bh); > gfs2_log_lock(sdp); >@@ -364,7 +364,7 @@ struct buffer_head *gfs2_meta_ra(struct > if (extlen > max_ra) > extlen = max_ra; > >- first_bh = getbuf(gl, dblock, CREATE); >+ first_bh = gfs2_getbuf(gl, dblock, CREATE); > > if (buffer_uptodate(first_bh)) > goto out; >@@ -375,7 +375,7 @@ struct 
buffer_head *gfs2_meta_ra(struct > extlen--; > > while (extlen) { >- bh = getbuf(gl, dblock, CREATE); >+ bh = gfs2_getbuf(gl, dblock, CREATE); > > if (!buffer_uptodate(bh) && !buffer_locked(bh)) > ll_rw_block(READA, 1, &bh); >diff -pur a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h >--- a/fs/gfs2/meta_io.h 2008-04-29 13:28:47.000000000 -0500 >+++ b/fs/gfs2/meta_io.h 2008-05-14 11:43:22.000000000 -0500 >@@ -46,6 +46,7 @@ struct buffer_head *gfs2_meta_new(struct > int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, > int flags, struct buffer_head **bhp); > int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh); >+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create); > > void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, > int meta); >diff -pur a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c >--- a/fs/gfs2/ops_dentry.c 2008-04-29 13:28:10.000000000 -0500 >+++ b/fs/gfs2/ops_dentry.c 2008-04-21 14:06:31.000000000 -0500 >@@ -55,7 +55,7 @@ static int gfs2_drevalidate(struct dentr > if (sdp->sd_args.ar_localcaching) > goto valid; > >- had_lock = gfs2_glock_is_locked_by_me(dip->i_gl); >+ had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); > if (!had_lock) { > error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); > if (error) >diff -pur a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c >--- a/fs/gfs2/ops_file.c 2008-04-29 13:28:31.000000000 -0500 >+++ b/fs/gfs2/ops_file.c 2008-04-18 15:43:11.000000000 -0500 >@@ -561,8 +561,7 @@ static int do_flock(struct file *file, i > int error = 0; > > state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; >- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE >- | GL_FLOCK; >+ flags = (IS_SETLKW(cmd) ? 
0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; > > mutex_lock(&fp->f_fl_mutex); > >@@ -575,9 +574,8 @@ static int do_flock(struct file *file, i > gfs2_glock_dq_wait(fl_gh); > gfs2_holder_reinit(state, flags, fl_gh); > } else { >- error = gfs2_glock_get(GFS2_SB(&ip->i_inode), >- ip->i_no_addr, &gfs2_flock_glops, >- CREATE, &gl); >+ error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, >+ &gfs2_flock_glops, CREATE, &gl); > if (error) > goto out; > gfs2_holder_init(gl, state, flags, fl_gh); >diff -pur a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c >--- a/fs/gfs2/recovery.c 2008-04-29 13:28:47.000000000 -0500 >+++ b/fs/gfs2/recovery.c 2008-04-18 15:44:58.000000000 -0500 >@@ -450,7 +450,7 @@ int gfs2_recover_journal(struct gfs2_jde > fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n", > jd->jd_jid); > >- /* Aquire the journal lock so we can do recovery */ >+ /* Acquire the journal lock so we can do recovery */ > > error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops, > LM_ST_EXCLUSIVE, >@@ -496,7 +496,7 @@ int gfs2_recover_journal(struct gfs2_jde > > error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, > LM_FLAG_NOEXP | LM_FLAG_PRIORITY | >- GL_NOCANCEL | GL_NOCACHE, &t_gh); >+ GL_NOCACHE, &t_gh); > if (error) > goto fail_gunlock_ji; > >diff -pur a/fs/gfs2/super.c b/fs/gfs2/super.c >--- a/fs/gfs2/super.c 2008-04-29 13:28:47.000000000 -0500 >+++ b/fs/gfs2/super.c 2008-04-18 15:11:57.000000000 -0500 >@@ -948,8 +948,7 @@ static int gfs2_lock_fs_check_clean(stru > } > > error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED, >- LM_FLAG_PRIORITY | GL_NOCACHE, >- t_gh); >+ GL_NOCACHE, t_gh); > > list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { > error = gfs2_jdesc_check(jd); >diff -pur a/fs/gfs2/trans.c b/fs/gfs2/trans.c >--- a/fs/gfs2/trans.c 2008-04-29 13:28:47.000000000 -0500 >+++ b/fs/gfs2/trans.c 2008-05-14 12:02:13.000000000 -0500 >@@ -126,8 +126,8 @@ void gfs2_trans_add_gl(struct gfs2_glock > * @meta: Flag to indicate whether its 
metadata or not > */ > >-static void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, >- int meta) >+void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, >+ int meta) > { > struct gfs2_bufdata *bd; > >diff -pur a/fs/gfs2/trans.h b/fs/gfs2/trans.h >--- a/fs/gfs2/trans.h 2008-04-29 13:28:24.000000000 -0500 >+++ b/fs/gfs2/trans.h 2008-05-14 11:53:05.000000000 -0500 >@@ -35,7 +35,8 @@ void gfs2_trans_add_bh(struct gfs2_glock > void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); > void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno); > void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd); >- >+void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh, >+ int meta); > > static inline int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, > unsigned int revokes)
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 432057
:
294383
|
295614
|
296569
|
297894
|
298191
|
302891
|
303203
|
303208
|
303209
|
303500
|
303542
|
303631
|
303637
|
303646
|
304026
|
304098
|
304112
|
304161
|
304398
|
304409
|
304572
|
304636
|
304637
|
304749
|
304755
|
304811
|
304837
|
304876
|
304898
|
304946
|
304955
|
304976
|
304984
|
305126
|
305185
|
305237
|
305266
|
305377
|
305387
|
305404
|
305506
|
305513
|
305678
|
305702
|
305727
|
306084
|
306111
|
306191
|
306207
|
306208
|
306275
|
306287
|
306832
|
306972
|
307077
|
307096
|
307158