Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 304098 Details for
Bug 432057
GFS2: d_doio stuck in readv() waiting for pagelock.
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
Latest upstream patch
glock-fix-7.diff (text/plain), 57.78 KB, created by
Steve Whitehouse
on 2008-04-29 10:43:32 UTC
(
hide
)
Description:
Latest upstream patch
Filename:
MIME Type:
Creator:
Steve Whitehouse
Created:
2008-04-29 10:43:32 UTC
Size:
57.78 KB
patch
obsolete
>diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c >index d636b3e..628aed6 100644 >--- a/fs/gfs2/glock.c >+++ b/fs/gfs2/glock.c >@@ -45,21 +45,11 @@ struct gfs2_gl_hash_bucket { > struct hlist_head hb_list; > }; > >-struct glock_iter { >- int hash; /* hash bucket index */ >- struct gfs2_sbd *sdp; /* incore superblock */ >- struct gfs2_glock *gl; /* current glock struct */ >- struct seq_file *seq; /* sequence file for debugfs */ >- char string[512]; /* scratch space */ >-}; >- > typedef void (*glock_examiner) (struct gfs2_glock * gl); > > static int gfs2_dump_lockstate(struct gfs2_sbd *sdp); >-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl); >-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh); >-static void gfs2_glock_drop_th(struct gfs2_glock *gl); >-static void run_queue(struct gfs2_glock *gl); >+static int dump_glock(struct gfs2_glock_iter *gi, struct gfs2_glock *gl); >+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); > > static DECLARE_RWSEM(gfs2_umount_flush_sem); > static struct dentry *gfs2_root; >@@ -123,33 +113,6 @@ static inline rwlock_t *gl_lock_addr(unsigned int x) > #endif > > /** >- * relaxed_state_ok - is a requested lock compatible with the current lock mode? 
>- * @actual: the current state of the lock >- * @requested: the lock state that was requested by the caller >- * @flags: the modifier flags passed in by the caller >- * >- * Returns: 1 if the locks are compatible, 0 otherwise >- */ >- >-static inline int relaxed_state_ok(unsigned int actual, unsigned requested, >- int flags) >-{ >- if (actual == requested) >- return 1; >- >- if (flags & GL_EXACT) >- return 0; >- >- if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED) >- return 1; >- >- if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY)) >- return 1; >- >- return 0; >-} >- >-/** > * gl_hash() - Turn glock number into hash bucket number > * @lock: The glock number > * >@@ -220,8 +183,6 @@ int gfs2_glock_put(struct gfs2_glock *gl) > gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED); > gfs2_assert(sdp, list_empty(&gl->gl_reclaim)); > gfs2_assert(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert(sdp, list_empty(&gl->gl_waiters1)); >- gfs2_assert(sdp, list_empty(&gl->gl_waiters3)); > glock_free(gl); > rv = 1; > goto out; >@@ -281,6 +242,341 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp, > return gl; > } > >+/** >+ * may_grant - check if its ok to grant a new lock >+ * @gl: The glock >+ * @gh: The lock request which we wish to grant >+ * >+ * Returns: true if its ok to grant the lock >+ */ >+ >+static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) >+{ >+ const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list); >+ if ((gh->gh_state == LM_ST_EXCLUSIVE || >+ gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) >+ return 0; >+ if (gl->gl_state == gh->gh_state) >+ return 1; >+ if (gh->gh_flags & GL_EXACT) >+ return 0; >+ if (gh->gh_state == LM_ST_SHARED && gl->gl_state == LM_ST_EXCLUSIVE) >+ return 1; >+ if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY)) >+ return 1; >+ return 0; >+} >+ >+static void gfs2_holder_wake(struct gfs2_holder *gh) 
>+{ >+ clear_bit(HIF_WAIT, &gh->gh_iflags); >+ smp_mb__after_clear_bit(); >+ wake_up_bit(&gh->gh_iflags, HIF_WAIT); >+} >+ >+/** >+ * do_promote - promote as many requests as possible on the current queue >+ * @gl: The glock >+ * >+ * Returns: true if there is a blocked holder at the head of the list >+ */ >+ >+static int do_promote(struct gfs2_glock *gl) >+{ >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ struct gfs2_holder *gh, *tmp; >+ int ret; >+ >+restart: >+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { >+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ continue; >+ if (may_grant(gl, gh)) { >+ if (gh->gh_list.prev == &gl->gl_holders && >+ glops->go_lock) { >+ spin_unlock(&gl->gl_spin); >+ /* FIXME: eliminate this eventually */ >+ ret = glops->go_lock(gh); >+ spin_lock(&gl->gl_spin); >+ if (ret) { >+ gh->gh_error = ret; >+ list_del_init(&gh->gh_list); >+ gfs2_holder_wake(gh); >+ } >+ set_bit(HIF_HOLDER, &gh->gh_iflags); >+ gfs2_holder_wake(gh); >+ goto restart; >+ } >+ set_bit(HIF_HOLDER, &gh->gh_iflags); >+ gfs2_holder_wake(gh); >+ continue; >+ } >+ if (gh->gh_list.prev == &gl->gl_holders) >+ return 1; >+ break; >+ } >+ return 0; >+} >+ >+/** >+ * do_error - Something unexpected has happened during a lock request >+ * >+ */ >+ >+static inline void do_error(struct gfs2_glock *gl, const int ret) >+{ >+ struct gfs2_holder *gh, *tmp; >+ >+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) { >+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ continue; >+ if (ret & LM_OUT_ERROR) >+ gh->gh_error = -EIO; >+ else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) >+ gh->gh_error = GLR_TRYFAILED; >+ else >+ continue; >+ list_del_init(&gh->gh_list); >+ gfs2_holder_wake(gh); >+ } >+} >+ >+/** >+ * find_first_waiter - find the first gh that's waiting for the glock >+ * @gl: the glock >+ */ >+ >+static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl) >+{ >+ struct gfs2_holder *gh; >+ >+ list_for_each_entry(gh, 
&gl->gl_holders, gh_list) { >+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ return gh; >+ } >+ return NULL; >+} >+ >+/** >+ * state_change - record that the glock is now in a different state >+ * @gl: the glock >+ * @new_state the new state >+ * >+ */ >+ >+static void state_change(struct gfs2_glock *gl, unsigned int new_state) >+{ >+ int held1, held2; >+ >+ held1 = (gl->gl_state != LM_ST_UNLOCKED); >+ held2 = (new_state != LM_ST_UNLOCKED); >+ >+ if (held1 != held2) { >+ if (held2) >+ gfs2_glock_hold(gl); >+ else >+ gfs2_glock_put(gl); >+ } >+ >+ gl->gl_state = new_state; >+ gl->gl_tchange = jiffies; >+} >+ >+static void gfs2_demote_wake(struct gfs2_glock *gl) >+{ >+ gl->gl_demote_state = LM_ST_EXCLUSIVE; >+ clear_bit(GLF_DEMOTE, &gl->gl_flags); >+ smp_mb__after_clear_bit(); >+ wake_up_bit(&gl->gl_flags, GLF_DEMOTE); >+} >+ >+/** >+ * finish_xmote - The DLM has replied to one of our lock requests >+ * @gl: The glock >+ * @ret: The status from the DLM >+ * >+ */ >+ >+static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) >+{ >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ struct gfs2_holder *gh; >+ unsigned state = ret & LM_OUT_ST_MASK; >+ >+ spin_lock(&gl->gl_spin); >+ state_change(gl, state); >+ gh = find_first_waiter(gl); >+ >+ /* Demote to UN request arrived during demote to SH or DF */ >+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && >+ state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED) >+ gl->gl_target = LM_ST_UNLOCKED; >+ >+ /* Check for state != intended state */ >+ if (unlikely(state != gl->gl_target)) { >+ if (gh) { >+ /* move to back of queue and try next entry */ >+ if (ret & LM_OUT_CANCELED) { >+ if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0) >+ list_move_tail(&gh->gh_list, &gl->gl_holders); >+ gh = find_first_waiter(gl); >+ gl->gl_target = gh->gh_state; >+ goto retry; >+ } >+ /* Some error or failed "try lock" - report it */ >+ if ((ret & LM_OUT_ERROR) || >+ (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) 
{ >+ gl->gl_target = gl->gl_state; >+ do_error(gl, ret); >+ goto out; >+ } >+ } >+ switch(state) { >+ /* Unlocked due to conversion deadlock, try again */ >+ case LM_ST_UNLOCKED: >+retry: >+ do_xmote(gl, gh, gl->gl_target); >+ break; >+ /* Conversion fails, unlock and try again */ >+ case LM_ST_SHARED: >+ case LM_ST_DEFERRED: >+ do_xmote(gl, gh, LM_ST_UNLOCKED); >+ break; >+ default: /* Everything else */ >+ printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state); >+ BUG(); >+ } >+ spin_unlock(&gl->gl_spin); >+ gfs2_glock_put(gl); >+ return; >+ } >+ >+ /* Fast path - we got what we asked for */ >+ if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) >+ gfs2_demote_wake(gl); >+ if (state != LM_ST_UNLOCKED) { >+ if (glops->go_xmote_bh) { >+ spin_unlock(&gl->gl_spin); >+ glops->go_xmote_bh(gl, gh); >+ spin_lock(&gl->gl_spin); >+ } >+ do_promote(gl); >+ } >+out: >+ clear_bit(GLF_LOCK, &gl->gl_flags); >+ spin_unlock(&gl->gl_spin); >+ gfs2_glock_put(gl); >+} >+ >+static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, >+ unsigned int cur_state, unsigned int req_state, >+ unsigned int flags) >+{ >+ int ret = LM_OUT_ERROR; >+ if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >+ ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state, >+ req_state, flags); >+ return ret; >+} >+ >+/** >+ * do_xmote - Calls the DLM to change the state of a lock >+ * @gl: The lock state >+ * @gh: The holder (only for promotes) >+ * @target: The target lock state >+ * >+ */ >+ >+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) >+{ >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ struct gfs2_sbd *sdp = gl->gl_sbd; >+ unsigned int lck_flags = gh ? 
gh->gh_flags : 0; >+ int ret; >+ >+ lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | >+ LM_FLAG_PRIORITY); >+ BUG_ON(gl->gl_state == target); >+ BUG_ON(gl->gl_state == gl->gl_target); >+ spin_unlock(&gl->gl_spin); >+ if (glops->go_xmote_th) >+ glops->go_xmote_th(gl); >+ if ((target == LM_ST_UNLOCKED || >+ target == LM_ST_DEFERRED) && glops->go_inval) >+ glops->go_inval(gl, DIO_METADATA); >+ gfs2_glock_hold(gl); >+ if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED || >+ gl->gl_state == LM_ST_DEFERRED) && >+ !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) >+ lck_flags |= LM_FLAG_TRY_1CB; >+ ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags); >+ >+ if (!(ret & LM_OUT_ASYNC)) >+ finish_xmote(gl, ret); >+ else >+ gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC); >+ spin_lock(&gl->gl_spin); >+} >+ >+/** >+ * find_first_holder - find the first "holder" gh >+ * @gl: the glock >+ */ >+ >+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) >+{ >+ struct gfs2_holder *gh; >+ >+ if (!list_empty(&gl->gl_holders)) { >+ gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); >+ if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ return gh; >+ } >+ return NULL; >+} >+ >+/** >+ * run_queue - do all outstanding tasks related to a glock >+ * @gl: The glock in question >+ * @nonblock: True if we must not block in run_queue >+ * >+ */ >+ >+static void run_queue(struct gfs2_glock *gl, const int nonblock) >+{ >+ struct gfs2_holder *gh; >+ >+ if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { >+ if (gl->gl_demote_state == gl->gl_state) { >+ gfs2_demote_wake(gl); >+ goto promote; >+ } >+ do_error(gl, 0); /* Fail queued try locks */ >+ if (nonblock) { >+ gfs2_glock_hold(gl); >+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); >+ return; >+ } >+ gh = find_first_holder(gl); >+ if (gh || test_and_set_bit(GLF_LOCK, &gl->gl_flags)) >+ return; >+ BUG_ON(test_and_set_bit(GLF_DEMOTE_IN_PROGRESS, 
&gl->gl_flags)); >+ gl->gl_target = gl->gl_demote_state; >+ do_xmote(gl, NULL, gl->gl_demote_state); >+ } >+promote: >+ if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) >+ return; >+ if (do_promote(gl) == 0) { >+ clear_bit(GLF_LOCK, &gl->gl_flags); >+ return; >+ } >+ gh = find_first_waiter(gl); >+ gl->gl_target = gh->gh_state; >+ do_xmote(gl, gh, gh->gh_state); >+} >+ > static void glock_work_func(struct work_struct *work) > { > struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); >@@ -288,7 +584,7 @@ static void glock_work_func(struct work_struct *work) > spin_lock(&gl->gl_spin); > if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)) > set_bit(GLF_DEMOTE, &gl->gl_flags); >- run_queue(gl); >+ run_queue(gl, 0); > spin_unlock(&gl->gl_spin); > gfs2_glock_put(gl); > } >@@ -342,12 +638,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, > gl->gl_name = name; > atomic_set(&gl->gl_ref, 1); > gl->gl_state = LM_ST_UNLOCKED; >+ gl->gl_target = LM_ST_UNLOCKED; > gl->gl_demote_state = LM_ST_EXCLUSIVE; > gl->gl_hash = hash; >- gl->gl_owner_pid = NULL; >- gl->gl_ip = 0; > gl->gl_ops = glops; >- gl->gl_req_gh = NULL; > gl->gl_stamp = jiffies; > gl->gl_tchange = jiffies; > gl->gl_object = NULL; >@@ -447,13 +741,6 @@ void gfs2_holder_uninit(struct gfs2_holder *gh) > gh->gh_ip = 0; > } > >-static void gfs2_holder_wake(struct gfs2_holder *gh) >-{ >- clear_bit(HIF_WAIT, &gh->gh_iflags); >- smp_mb__after_clear_bit(); >- wake_up_bit(&gh->gh_iflags, HIF_WAIT); >-} >- > static int just_schedule(void *word) > { > schedule(); >@@ -466,14 +753,6 @@ static void wait_on_holder(struct gfs2_holder *gh) > wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE); > } > >-static void gfs2_demote_wake(struct gfs2_glock *gl) >-{ >- gl->gl_demote_state = LM_ST_EXCLUSIVE; >- clear_bit(GLF_DEMOTE, &gl->gl_flags); >- smp_mb__after_clear_bit(); >- wake_up_bit(&gl->gl_flags, GLF_DEMOTE); >-} >- > static void wait_on_demote(struct gfs2_glock *gl) > { > 
might_sleep(); >@@ -481,217 +760,6 @@ static void wait_on_demote(struct gfs2_glock *gl) > } > > /** >- * rq_mutex - process a mutex request in the queue >- * @gh: the glock holder >- * >- * Returns: 1 if the queue is blocked >- */ >- >-static int rq_mutex(struct gfs2_holder *gh) >-{ >- struct gfs2_glock *gl = gh->gh_gl; >- >- list_del_init(&gh->gh_list); >- /* gh->gh_error never examined. */ >- set_bit(GLF_LOCK, &gl->gl_flags); >- clear_bit(HIF_WAIT, &gh->gh_iflags); >- smp_mb(); >- wake_up_bit(&gh->gh_iflags, HIF_WAIT); >- >- return 1; >-} >- >-/** >- * rq_promote - process a promote request in the queue >- * @gh: the glock holder >- * >- * Acquire a new inter-node lock, or change a lock state to more restrictive. >- * >- * Returns: 1 if the queue is blocked >- */ >- >-static int rq_promote(struct gfs2_holder *gh) >-{ >- struct gfs2_glock *gl = gh->gh_gl; >- >- if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { >- if (list_empty(&gl->gl_holders)) { >- gl->gl_req_gh = gh; >- set_bit(GLF_LOCK, &gl->gl_flags); >- spin_unlock(&gl->gl_spin); >- gfs2_glock_xmote_th(gh->gh_gl, gh); >- spin_lock(&gl->gl_spin); >- } >- return 1; >- } >- >- if (list_empty(&gl->gl_holders)) { >- set_bit(HIF_FIRST, &gh->gh_iflags); >- set_bit(GLF_LOCK, &gl->gl_flags); >- } else { >- struct gfs2_holder *next_gh; >- if (gh->gh_state == LM_ST_EXCLUSIVE) >- return 1; >- next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder, >- gh_list); >- if (next_gh->gh_state == LM_ST_EXCLUSIVE) >- return 1; >- } >- >- list_move_tail(&gh->gh_list, &gl->gl_holders); >- gh->gh_error = 0; >- set_bit(HIF_HOLDER, &gh->gh_iflags); >- >- gfs2_holder_wake(gh); >- >- return 0; >-} >- >-/** >- * rq_demote - process a demote request in the queue >- * @gh: the glock holder >- * >- * Returns: 1 if the queue is blocked >- */ >- >-static int rq_demote(struct gfs2_glock *gl) >-{ >- if (!list_empty(&gl->gl_holders)) >- return 1; >- >- if (gl->gl_state == gl->gl_demote_state || >- gl->gl_state == 
LM_ST_UNLOCKED) { >- gfs2_demote_wake(gl); >- return 0; >- } >- >- set_bit(GLF_LOCK, &gl->gl_flags); >- set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); >- >- if (gl->gl_demote_state == LM_ST_UNLOCKED || >- gl->gl_state != LM_ST_EXCLUSIVE) { >- spin_unlock(&gl->gl_spin); >- gfs2_glock_drop_th(gl); >- } else { >- spin_unlock(&gl->gl_spin); >- gfs2_glock_xmote_th(gl, NULL); >- } >- >- spin_lock(&gl->gl_spin); >- clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); >- >- return 0; >-} >- >-/** >- * run_queue - process holder structures on a glock >- * @gl: the glock >- * >- */ >-static void run_queue(struct gfs2_glock *gl) >-{ >- struct gfs2_holder *gh; >- int blocked = 1; >- >- for (;;) { >- if (test_bit(GLF_LOCK, &gl->gl_flags)) >- break; >- >- if (!list_empty(&gl->gl_waiters1)) { >- gh = list_entry(gl->gl_waiters1.next, >- struct gfs2_holder, gh_list); >- blocked = rq_mutex(gh); >- } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { >- blocked = rq_demote(gl); >- if (test_bit(GLF_WAITERS2, &gl->gl_flags) && >- !blocked) { >- set_bit(GLF_DEMOTE, &gl->gl_flags); >- gl->gl_demote_state = LM_ST_UNLOCKED; >- } >- clear_bit(GLF_WAITERS2, &gl->gl_flags); >- } else if (!list_empty(&gl->gl_waiters3)) { >- gh = list_entry(gl->gl_waiters3.next, >- struct gfs2_holder, gh_list); >- blocked = rq_promote(gh); >- } else >- break; >- >- if (blocked) >- break; >- } >-} >- >-/** >- * gfs2_glmutex_lock - acquire a local lock on a glock >- * @gl: the glock >- * >- * Gives caller exclusive access to manipulate a glock structure. 
>- */ >- >-static void gfs2_glmutex_lock(struct gfs2_glock *gl) >-{ >- spin_lock(&gl->gl_spin); >- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { >- struct gfs2_holder gh; >- >- gfs2_holder_init(gl, 0, 0, &gh); >- set_bit(HIF_WAIT, &gh.gh_iflags); >- list_add_tail(&gh.gh_list, &gl->gl_waiters1); >- spin_unlock(&gl->gl_spin); >- wait_on_holder(&gh); >- gfs2_holder_uninit(&gh); >- } else { >- gl->gl_owner_pid = get_pid(task_pid(current)); >- gl->gl_ip = (unsigned long)__builtin_return_address(0); >- spin_unlock(&gl->gl_spin); >- } >-} >- >-/** >- * gfs2_glmutex_trylock - try to acquire a local lock on a glock >- * @gl: the glock >- * >- * Returns: 1 if the glock is acquired >- */ >- >-static int gfs2_glmutex_trylock(struct gfs2_glock *gl) >-{ >- int acquired = 1; >- >- spin_lock(&gl->gl_spin); >- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { >- acquired = 0; >- } else { >- gl->gl_owner_pid = get_pid(task_pid(current)); >- gl->gl_ip = (unsigned long)__builtin_return_address(0); >- } >- spin_unlock(&gl->gl_spin); >- >- return acquired; >-} >- >-/** >- * gfs2_glmutex_unlock - release a local lock on a glock >- * @gl: the glock >- * >- */ >- >-static void gfs2_glmutex_unlock(struct gfs2_glock *gl) >-{ >- struct pid *pid; >- >- spin_lock(&gl->gl_spin); >- clear_bit(GLF_LOCK, &gl->gl_flags); >- pid = gl->gl_owner_pid; >- gl->gl_owner_pid = NULL; >- gl->gl_ip = 0; >- run_queue(gl); >- spin_unlock(&gl->gl_spin); >- >- put_pid(pid); >-} >- >-/** > * handle_callback - process a demote request > * @gl: the glock > * @state: the state the caller wants us to change to >@@ -705,388 +773,39 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state, > { > int bit = delay ? 
GLF_PENDING_DEMOTE : GLF_DEMOTE; > >- spin_lock(&gl->gl_spin); > set_bit(bit, &gl->gl_flags); > if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { > gl->gl_demote_state = state; > gl->gl_demote_time = jiffies; > if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN && >- gl->gl_object) { >+ gl->gl_object) > gfs2_glock_schedule_for_reclaim(gl); >- spin_unlock(&gl->gl_spin); >- return; >- } > } else if (gl->gl_demote_state != LM_ST_UNLOCKED && > gl->gl_demote_state != state) { >- if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) >- set_bit(GLF_WAITERS2, &gl->gl_flags); >- else >- gl->gl_demote_state = LM_ST_UNLOCKED; >- } >- spin_unlock(&gl->gl_spin); >-} >- >-/** >- * state_change - record that the glock is now in a different state >- * @gl: the glock >- * @new_state the new state >- * >- */ >- >-static void state_change(struct gfs2_glock *gl, unsigned int new_state) >-{ >- int held1, held2; >- >- held1 = (gl->gl_state != LM_ST_UNLOCKED); >- held2 = (new_state != LM_ST_UNLOCKED); >- >- if (held1 != held2) { >- if (held2) >- gfs2_glock_hold(gl); >- else >- gfs2_glock_put(gl); >- } >- >- gl->gl_state = new_state; >- gl->gl_tchange = jiffies; >-} >- >-/** >- * drop_bh - Called after a lock module unlock completes >- * @gl: the glock >- * @ret: the return status >- * >- * Doesn't wake up the process waiting on the struct gfs2_holder (if any) >- * Doesn't drop the reference on the glock the top half took out >- * >- */ >- >-static void drop_bh(struct gfs2_glock *gl, unsigned int ret) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- struct gfs2_holder *gh = gl->gl_req_gh; >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, !ret); >- >- state_change(gl, LM_ST_UNLOCKED); >- >- if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) { >- spin_lock(&gl->gl_spin); >- gh->gh_error = 0; >- spin_unlock(&gl->gl_spin); >- gfs2_glock_xmote_th(gl, gl->gl_req_gh); >- gfs2_glock_put(gl); >- return; >- } 
>- >- spin_lock(&gl->gl_spin); >- gfs2_demote_wake(gl); >- clear_bit(GLF_LOCK, &gl->gl_flags); >- spin_unlock(&gl->gl_spin); >- gfs2_glock_put(gl); >-} >- >-/** >- * xmote_bh - Called after the lock module is done acquiring a lock >- * @gl: The glock in question >- * @ret: the int returned from the lock module >- * >- */ >- >-static void xmote_bh(struct gfs2_glock *gl, unsigned int ret) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- struct gfs2_holder *gh = gl->gl_req_gh; >- int op_done = 1; >- >- if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) { >- drop_bh(gl, ret); >- return; >- } >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC)); >- >- state_change(gl, ret & LM_OUT_ST_MASK); >- >- /* Deal with each possible exit condition */ >- >- if (!gh) { >- gl->gl_stamp = jiffies; >- if (ret & LM_OUT_CANCELED) { >- op_done = 0; >- } else { >- spin_lock(&gl->gl_spin); >- if (gl->gl_state != gl->gl_demote_state) { >- spin_unlock(&gl->gl_spin); >- gfs2_glock_drop_th(gl); >- gfs2_glock_put(gl); >- return; >- } >- gfs2_demote_wake(gl); >- spin_unlock(&gl->gl_spin); >- } >- } else { >- spin_lock(&gl->gl_spin); >- if (ret & LM_OUT_CONV_DEADLK) { >- gh->gh_error = 0; >- set_bit(GLF_CONV_DEADLK, &gl->gl_flags); >- spin_unlock(&gl->gl_spin); >- gfs2_glock_drop_th(gl); >- gfs2_glock_put(gl); >- return; >- } >- list_del_init(&gh->gh_list); >- gh->gh_error = -EIO; >- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- goto out; >- gh->gh_error = GLR_CANCELED; >- if (ret & LM_OUT_CANCELED) >- goto out; >- if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) { >- list_add_tail(&gh->gh_list, &gl->gl_holders); >- gh->gh_error = 0; >- set_bit(HIF_HOLDER, &gh->gh_iflags); >- set_bit(HIF_FIRST, &gh->gh_iflags); >- op_done = 0; >- goto out; >- } >- gh->gh_error = GLR_TRYFAILED; >- if (gh->gh_flags & 
(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) >- goto out; >- gh->gh_error = -EINVAL; >- if (gfs2_assert_withdraw(sdp, 0) == -1) >- fs_err(sdp, "ret = 0x%.8X\n", ret); >-out: >- spin_unlock(&gl->gl_spin); >+ gl->gl_demote_state = LM_ST_UNLOCKED; > } >- >- if (glops->go_xmote_bh) >- glops->go_xmote_bh(gl); >- >- if (op_done) { >- spin_lock(&gl->gl_spin); >- gl->gl_req_gh = NULL; >- clear_bit(GLF_LOCK, &gl->gl_flags); >- spin_unlock(&gl->gl_spin); >- } >- >- gfs2_glock_put(gl); >- >- if (gh) >- gfs2_holder_wake(gh); >-} >- >-static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, >- unsigned int cur_state, unsigned int req_state, >- unsigned int flags) >-{ >- int ret = 0; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state, >- req_state, flags); >- return ret; >-} >- >-/** >- * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock >- * @gl: The glock in question >- * @state: the requested state >- * @flags: modifier flags to the lock call >- * >- */ >- >-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- int flags = gh ? gh->gh_flags : 0; >- unsigned state = gh ? 
gh->gh_state : gl->gl_demote_state; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB | >- LM_FLAG_NOEXP | LM_FLAG_ANY | >- LM_FLAG_PRIORITY); >- unsigned int lck_ret; >- >- if (glops->go_xmote_th) >- glops->go_xmote_th(gl); >- if (state == LM_ST_DEFERRED && glops->go_inval) >- glops->go_inval(gl, DIO_METADATA); >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED); >- gfs2_assert_warn(sdp, state != gl->gl_state); >- >- gfs2_glock_hold(gl); >- >- lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags); >- >- if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR))) >- return; >- >- if (lck_ret & LM_OUT_ASYNC) >- gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC); >- else >- xmote_bh(gl, lck_ret); >-} >- >-static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock, >- unsigned int cur_state) >-{ >- int ret = 0; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state); >- return ret; > } > > /** >- * gfs2_glock_drop_th - call into the lock module to unlock a lock >- * @gl: the glock >- * >- */ >- >-static void gfs2_glock_drop_th(struct gfs2_glock *gl) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- unsigned int ret; >- >- if (glops->go_xmote_th) >- glops->go_xmote_th(gl); >- if (glops->go_inval) >- glops->go_inval(gl, DIO_METADATA); >- >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders)); >- gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED); >- >- gfs2_glock_hold(gl); >- >- ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state); >- >- if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR))) >- return; >- >- if (!ret) >- drop_bh(gl, ret); >- else >- gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC); 
>-} >- >-/** >- * do_cancels - cancel requests for locks stuck waiting on an expire flag >- * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock >- * >- * Don't cancel GL_NOCANCEL requests. >- */ >- >-static void do_cancels(struct gfs2_holder *gh) >-{ >- struct gfs2_glock *gl = gh->gh_gl; >- struct gfs2_sbd *sdp = gl->gl_sbd; >- >- spin_lock(&gl->gl_spin); >- >- while (gl->gl_req_gh != gh && >- !test_bit(HIF_HOLDER, &gh->gh_iflags) && >- !list_empty(&gh->gh_list)) { >- if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) { >- spin_unlock(&gl->gl_spin); >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock); >- msleep(100); >- spin_lock(&gl->gl_spin); >- } else { >- spin_unlock(&gl->gl_spin); >- msleep(100); >- spin_lock(&gl->gl_spin); >- } >- } >- >- spin_unlock(&gl->gl_spin); >-} >- >-/** >- * glock_wait_internal - wait on a glock acquisition >+ * gfs2_glock_wait - wait on a glock acquisition > * @gh: the glock holder > * > * Returns: 0 on success > */ > >-static int glock_wait_internal(struct gfs2_holder *gh) >+int gfs2_glock_wait(struct gfs2_holder *gh) > { > struct gfs2_glock *gl = gh->gh_gl; > struct gfs2_sbd *sdp = gl->gl_sbd; >- const struct gfs2_glock_operations *glops = gl->gl_ops; >- >- if (test_bit(HIF_ABORTED, &gh->gh_iflags)) >- return -EIO; >- >- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { >- spin_lock(&gl->gl_spin); >- if (gl->gl_req_gh != gh && >- !test_bit(HIF_HOLDER, &gh->gh_iflags) && >- !list_empty(&gh->gh_list)) { >- list_del_init(&gh->gh_list); >- gh->gh_error = GLR_TRYFAILED; >- run_queue(gl); >- spin_unlock(&gl->gl_spin); >- return gh->gh_error; >- } >- spin_unlock(&gl->gl_spin); >- } >- >- if (gh->gh_flags & LM_FLAG_PRIORITY) >- do_cancels(gh); > > wait_on_holder(gh); >- if (gh->gh_error) >- return gh->gh_error; >- >- gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags)); >- gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state, 
>- gh->gh_flags)); >- >- if (test_bit(HIF_FIRST, &gh->gh_iflags)) { >- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags)); >- >- if (glops->go_lock) { >- gh->gh_error = glops->go_lock(gh); >- if (gh->gh_error) { >- spin_lock(&gl->gl_spin); >- list_del_init(&gh->gh_list); >- spin_unlock(&gl->gl_spin); >- } >- } >- >- spin_lock(&gl->gl_spin); >- gl->gl_req_gh = NULL; >- clear_bit(GLF_LOCK, &gl->gl_flags); >- run_queue(gl); >- spin_unlock(&gl->gl_spin); >- } >+ if (gh->gh_error == 0) >+ gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags)); > > return gh->gh_error; > } > >-static inline struct gfs2_holder * >-find_holder_by_owner(struct list_head *head, struct pid *pid) >-{ >- struct gfs2_holder *gh; >- >- list_for_each_entry(gh, head, gh_list) { >- if (gh->gh_owner_pid == pid) >- return gh; >- } >- >- return NULL; >-} >- >-static void print_dbg(struct glock_iter *gi, const char *fmt, ...) >+void gfs2_print_dbg(struct gfs2_glock_iter *gi, const char *fmt, ...) > { > va_list args; > >@@ -1104,50 +823,54 @@ static void print_dbg(struct glock_iter *gi, const char *fmt, ...) > * add_to_queue - Add a holder to the wait queue (but look for recursion) > * @gh: the holder structure to add > * >+ * Eventually we should move the recursive locking trap to a >+ * debugging option or something like that. This is the fast >+ * path and needs to have the minimum number of distractions. 
>+ * > */ > >-static void add_to_queue(struct gfs2_holder *gh) >+static inline void add_to_queue(struct gfs2_holder *gh) > { > struct gfs2_glock *gl = gh->gh_gl; >- struct gfs2_holder *existing; >+ struct gfs2_sbd *sdp = gl->gl_sbd; >+ struct list_head *insert_pt = NULL; >+ struct gfs2_holder *gh2; > > BUG_ON(gh->gh_owner_pid == NULL); > if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) > BUG(); > >- if (!(gh->gh_flags & GL_FLOCK)) { >- existing = find_holder_by_owner(&gl->gl_holders, >- gh->gh_owner_pid); >- if (existing) { >- print_symbol(KERN_WARNING "original: %s\n", >- existing->gh_ip); >- printk(KERN_INFO "pid : %d\n", >- pid_nr(existing->gh_owner_pid)); >- printk(KERN_INFO "lock type : %d lock state : %d\n", >- existing->gh_gl->gl_name.ln_type, >- existing->gh_gl->gl_state); >- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); >- printk(KERN_INFO "pid : %d\n", >- pid_nr(gh->gh_owner_pid)); >- printk(KERN_INFO "lock type : %d lock state : %d\n", >- gl->gl_name.ln_type, gl->gl_state); >- BUG(); >- } >- >- existing = find_holder_by_owner(&gl->gl_waiters3, >- gh->gh_owner_pid); >- if (existing) { >- print_symbol(KERN_WARNING "original: %s\n", >- existing->gh_ip); >- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); >- BUG(); >- } >+ list_for_each_entry(gh2, &gl->gl_holders, gh_list) { >+ if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && >+ (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) >+ goto trap_recursive; >+ if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && >+ !insert_pt && !test_bit(HIF_HOLDER, &gh2->gh_iflags))) >+ insert_pt = &gh2->gh_list; >+ } >+ if (likely(insert_pt == NULL)) { >+ list_add_tail(&gh->gh_list, &gl->gl_holders); >+ return; >+ } >+ list_add_tail(&gh->gh_list, insert_pt); >+ if (test_bit(GLF_LOCK, &gl->gl_flags)) { >+ spin_unlock(&gl->gl_spin); >+ sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock); >+ spin_lock(&gl->gl_spin); > } >+ return; > >- if (gh->gh_flags & LM_FLAG_PRIORITY) >- list_add(&gh->gh_list, &gl->gl_waiters3); >- else >- 
list_add_tail(&gh->gh_list, &gl->gl_waiters3); >+trap_recursive: >+ print_symbol(KERN_WARNING "original: %s\n", gh2->gh_ip); >+ printk(KERN_INFO "pid: %d\n", pid_nr(gh2->gh_owner_pid)); >+ printk(KERN_INFO "lock type: %d req lock state : %d\n", >+ gh2->gh_gl->gl_name.ln_type, gh2->gh_state); >+ print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip); >+ printk(KERN_INFO "pid: %d\n", pid_nr(gh->gh_owner_pid)); >+ printk(KERN_INFO "lock type: %d req lock state : %d\n", >+ gh->gh_gl->gl_name.ln_type, gh->gh_state); >+ printk(KERN_INFO "current lock state: %u\n", gl->gl_state); >+ BUG(); > } > > /** >@@ -1165,24 +888,16 @@ int gfs2_glock_nq(struct gfs2_holder *gh) > struct gfs2_sbd *sdp = gl->gl_sbd; > int error = 0; > >-restart: >- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { >- set_bit(HIF_ABORTED, &gh->gh_iflags); >+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) > return -EIO; >- } > > spin_lock(&gl->gl_spin); > add_to_queue(gh); >- run_queue(gl); >+ run_queue(gl, 1); > spin_unlock(&gl->gl_spin); > >- if (!(gh->gh_flags & GL_ASYNC)) { >- error = glock_wait_internal(gh); >- if (error == GLR_CANCELED) { >- msleep(100); >- goto restart; >- } >- } >+ if (!(gh->gh_flags & GL_ASYNC)) >+ error = gfs2_glock_wait(gh); > > return error; > } >@@ -1196,48 +911,7 @@ restart: > > int gfs2_glock_poll(struct gfs2_holder *gh) > { >- struct gfs2_glock *gl = gh->gh_gl; >- int ready = 0; >- >- spin_lock(&gl->gl_spin); >- >- if (test_bit(HIF_HOLDER, &gh->gh_iflags)) >- ready = 1; >- else if (list_empty(&gh->gh_list)) { >- if (gh->gh_error == GLR_CANCELED) { >- spin_unlock(&gl->gl_spin); >- msleep(100); >- if (gfs2_glock_nq(gh)) >- return 1; >- return 0; >- } else >- ready = 1; >- } >- >- spin_unlock(&gl->gl_spin); >- >- return ready; >-} >- >-/** >- * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC >- * @gh: the holder structure >- * >- * Returns: 0, GLR_TRYFAILED, or errno on failure >- */ >- >-int gfs2_glock_wait(struct gfs2_holder *gh) >-{ >- int 
error; >- >- error = glock_wait_internal(gh); >- if (error == GLR_CANCELED) { >- msleep(100); >- gh->gh_flags &= ~GL_ASYNC; >- error = gfs2_glock_nq(gh); >- } >- >- return error; >+ return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; > } > > /** >@@ -1251,26 +925,30 @@ void gfs2_glock_dq(struct gfs2_holder *gh) > struct gfs2_glock *gl = gh->gh_gl; > const struct gfs2_glock_operations *glops = gl->gl_ops; > unsigned delay = 0; >+ int fast_path = 0; > >+ spin_lock(&gl->gl_spin); > if (gh->gh_flags & GL_NOCACHE) > handle_callback(gl, LM_ST_UNLOCKED, 0, 0); > >- gfs2_glmutex_lock(gl); >- >- spin_lock(&gl->gl_spin); > list_del_init(&gh->gh_list); >- >- if (list_empty(&gl->gl_holders)) { >+ if (find_first_holder(gl) == NULL) { > if (glops->go_unlock) { >+ BUG_ON(test_and_set_bit(GLF_LOCK, &gl->gl_flags)); > spin_unlock(&gl->gl_spin); > glops->go_unlock(gh); > spin_lock(&gl->gl_spin); >+ clear_bit(GLF_LOCK, &gl->gl_flags); > } > gl->gl_stamp = jiffies; >+ if (list_empty(&gl->gl_holders) && >+ !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && >+ !test_bit(GLF_DEMOTE, &gl->gl_flags)) >+ fast_path = 1; > } >- >- clear_bit(GLF_LOCK, &gl->gl_flags); > spin_unlock(&gl->gl_spin); >+ if (likely(fast_path)) >+ return; > > gfs2_glock_hold(gl); > if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && >@@ -1469,20 +1147,14 @@ int gfs2_lvb_hold(struct gfs2_glock *gl) > { > int error; > >- gfs2_glmutex_lock(gl); >- > if (!atomic_read(&gl->gl_lvb_count)) { > error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb); >- if (error) { >- gfs2_glmutex_unlock(gl); >+ if (error) > return error; >- } > gfs2_glock_hold(gl); > } > atomic_inc(&gl->gl_lvb_count); > >- gfs2_glmutex_unlock(gl); >- > return 0; > } > >@@ -1497,8 +1169,6 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl) > struct gfs2_sbd *sdp = gl->gl_sbd; > > gfs2_glock_hold(gl); >- gfs2_glmutex_lock(gl); >- > gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); > if (atomic_dec_and_test(&gl->gl_lvb_count)) { > if 
(likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >@@ -1506,8 +1176,6 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl) > gl->gl_lvb = NULL; > gfs2_glock_put(gl); > } >- >- gfs2_glmutex_unlock(gl); > gfs2_glock_put(gl); > } > >@@ -1527,7 +1195,9 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, > if (time_before(now, holdtime)) > delay = holdtime - now; > >+ spin_lock(&gl->gl_spin); > handle_callback(gl, state, 1, delay); >+ spin_unlock(&gl->gl_spin); > if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) > gfs2_glock_put(gl); > } >@@ -1568,7 +1238,7 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) > gl = gfs2_glock_find(sdp, &async->lc_name); > if (gfs2_assert_warn(sdp, gl)) > return; >- xmote_bh(gl, async->lc_ret); >+ finish_xmote(gl, async->lc_ret); > if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) > gfs2_glock_put(gl); > up_read(&gfs2_umount_flush_sem); >@@ -1646,6 +1316,7 @@ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) > void gfs2_reclaim_glock(struct gfs2_sbd *sdp) > { > struct gfs2_glock *gl; >+ int done_callback = 0; > > spin_lock(&sdp->sd_reclaim_lock); > if (list_empty(&sdp->sd_reclaim_list)) { >@@ -1660,14 +1331,16 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp) > atomic_dec(&sdp->sd_reclaim_count); > atomic_inc(&sdp->sd_reclaimed); > >- if (gfs2_glmutex_trylock(gl)) { >- if (list_empty(&gl->gl_holders) && >- gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) >- handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >- gfs2_glmutex_unlock(gl); >+ spin_lock(&gl->gl_spin); >+ if (find_first_holder(gl) == NULL && >+ gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) { >+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >+ done_callback = 1; > } >- >- gfs2_glock_put(gl); >+ spin_unlock(&gl->gl_spin); >+ if (!done_callback || >+ queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); > } > > /** >@@ -1724,18 +1397,14 @@ static void scan_glock(struct gfs2_glock *gl) > { > 
if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) > return; >+ if (test_bit(GLF_LOCK, &gl->gl_flags)) >+ return; > >- if (gfs2_glmutex_trylock(gl)) { >- if (list_empty(&gl->gl_holders) && >- gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) >- goto out_schedule; >- gfs2_glmutex_unlock(gl); >- } >- return; >- >-out_schedule: >- gfs2_glmutex_unlock(gl); >- gfs2_glock_schedule_for_reclaim(gl); >+ spin_lock(&gl->gl_spin); >+ if (find_first_holder(gl) == NULL && >+ gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) >+ gfs2_glock_schedule_for_reclaim(gl); >+ spin_unlock(&gl->gl_spin); > } > > /** >@@ -1760,12 +1429,13 @@ static void clear_glock(struct gfs2_glock *gl) > spin_unlock(&sdp->sd_reclaim_lock); > } > >- if (gfs2_glmutex_trylock(gl)) { >- if (list_empty(&gl->gl_holders) && >- gl->gl_state != LM_ST_UNLOCKED) >- handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >- gfs2_glmutex_unlock(gl); >- } >+ spin_lock(&gl->gl_spin); >+ if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) >+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0); >+ spin_unlock(&gl->gl_spin); >+ gfs2_glock_hold(gl); >+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); > } > > /** >@@ -1810,175 +1480,147 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) > } > } > >-/* >- * Diagnostic routines to help debug distributed deadlock >- */ >- >-static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt, >- unsigned long address) >+static const char *state2str(unsigned state) > { >- char buffer[KSYM_SYMBOL_LEN]; >- >- sprint_symbol(buffer, address); >- print_dbg(gi, fmt, buffer); >+ switch(state) { >+ case LM_ST_UNLOCKED: >+ return "UN"; >+ case LM_ST_SHARED: >+ return "SH"; >+ case LM_ST_DEFERRED: >+ return "DF"; >+ case LM_ST_EXCLUSIVE: >+ return "EX"; >+ } >+ return "??"; >+} >+ >+static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags) >+{ >+ char *p = buf; >+ if (flags & LM_FLAG_TRY) >+ *p++ = 't'; >+ if (flags & 
LM_FLAG_TRY_1CB) >+ *p++ = 'T'; >+ if (flags & LM_FLAG_NOEXP) >+ *p++ = 'e'; >+ if (flags & LM_FLAG_ANY) >+ *p++ = 'a'; >+ if (flags & LM_FLAG_PRIORITY) >+ *p++ = 'p'; >+ if (flags & GL_ASYNC) >+ *p++ = 'a'; >+ if (flags & GL_EXACT) >+ *p++ = 'E'; >+ if (flags & GL_ATIME) >+ *p++ = 'a'; >+ if (flags & GL_NOCACHE) >+ *p++ = 'c'; >+ if (test_bit(HIF_HOLDER, &iflags)) >+ *p++ = 'H'; >+ if (test_bit(HIF_WAIT, &iflags)) >+ *p++ = 'W'; >+ if (test_bit(HIF_FIRST, &iflags)) >+ *p++ = 'F'; >+ *p = 0; >+ return buf; > } > > /** > * dump_holder - print information about a glock holder >- * @str: a string naming the type of holder > * @gh: the glock holder > * > * Returns: 0 on success, -ENOBUFS when we run out of space > */ > >-static int dump_holder(struct glock_iter *gi, char *str, >- struct gfs2_holder *gh) >+static int dump_holder(struct gfs2_glock_iter *gi, struct gfs2_holder *gh) > { >- unsigned int x; >- struct task_struct *gh_owner; >+ struct task_struct *gh_owner = NULL; >+ char buffer[KSYM_SYMBOL_LEN]; >+ char flags_buf[32]; > >- print_dbg(gi, " %s\n", str); >- if (gh->gh_owner_pid) { >- print_dbg(gi, " owner = %ld ", >- (long)pid_nr(gh->gh_owner_pid)); >+ sprint_symbol(buffer, gh->gh_ip); >+ if (gh->gh_owner_pid) > gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID); >- if (gh_owner) >- print_dbg(gi, "(%s)\n", gh_owner->comm); >- else >- print_dbg(gi, "(ended)\n"); >- } else >- print_dbg(gi, " owner = -1\n"); >- print_dbg(gi, " gh_state = %u\n", gh->gh_state); >- print_dbg(gi, " gh_flags ="); >- for (x = 0; x < 32; x++) >- if (gh->gh_flags & (1 << x)) >- print_dbg(gi, " %u", x); >- print_dbg(gi, " \n"); >- print_dbg(gi, " error = %d\n", gh->gh_error); >- print_dbg(gi, " gh_iflags ="); >- for (x = 0; x < 32; x++) >- if (test_bit(x, &gh->gh_iflags)) >- print_dbg(gi, " %u", x); >- print_dbg(gi, " \n"); >- gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip); >- >+ gfs2_print_dbg(gi, " H: s:%s f:%s e:%d p:%ld [%s] %s\n", >+ state2str(gh->gh_state), >+ 
hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags), >+ gh->gh_error, >+ gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1, >+ gh_owner ? gh_owner->comm : "(ended)", buffer); > return 0; > } > >-/** >- * dump_inode - print information about an inode >- * @ip: the inode >- * >- * Returns: 0 on success, -ENOBUFS when we run out of space >- */ >- >-static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip) >+static const char *gflags2str(char *buf, unsigned long gflags) > { >- unsigned int x; >- >- print_dbg(gi, " Inode:\n"); >- print_dbg(gi, " num = %llu/%llu\n", >- (unsigned long long)ip->i_no_formal_ino, >- (unsigned long long)ip->i_no_addr); >- print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode)); >- print_dbg(gi, " i_flags ="); >- for (x = 0; x < 32; x++) >- if (test_bit(x, &ip->i_flags)) >- print_dbg(gi, " %u", x); >- print_dbg(gi, " \n"); >- return 0; >+ char *p = buf; >+ if (test_bit(GLF_LOCK, &gflags)) >+ *p++ = 'l'; >+ if (test_bit(GLF_STICKY, &gflags)) >+ *p++ = 's'; >+ if (test_bit(GLF_DEMOTE, &gflags)) >+ *p++ = 'D'; >+ if (test_bit(GLF_PENDING_DEMOTE, &gflags)) >+ *p++ = 'd'; >+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gflags)) >+ *p++ = 'p'; >+ if (test_bit(GLF_DIRTY, &gflags)) >+ *p++ = 'y'; >+ if (test_bit(GLF_LFLUSH, &gflags)) >+ *p++ = 'f'; >+ *p = 0; >+ return buf; > } > > /** > * dump_glock - print information about a glock >+ * @gi: The iteration state > * @gl: the glock >- * @count: where we are in the buffer >+ * >+ * The file format is as follows: >+ * One line per object, capital letters are used to indicate objects >+ * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented >+ * other objects are indented by a single space and follow the glock to >+ * which they are related. Fields are indicated by lower case letters >+ * followed by a colon and the field value, except for strings which are in >+ * [] so that its possible to see if they are composed of spaces for >+ * example. 
The field's are n = number (id of the object), f = flags, >+ * t = type, s = state, r = refcount, e = error, p = pid. > * > * Returns: 0 on success, -ENOBUFS when we run out of space > */ > >-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl) >+static int dump_glock(struct gfs2_glock_iter *gi, struct gfs2_glock *gl) > { >+ const struct gfs2_glock_operations *glops = gl->gl_ops; >+ unsigned long long dtime; > struct gfs2_holder *gh; >- unsigned int x; >- int error = -ENOBUFS; >- struct task_struct *gl_owner; >+ char gflags_buf[32]; >+ int error = 0; > > spin_lock(&gl->gl_spin); >+ dtime = jiffies - gl->gl_demote_time; >+ dtime *= 1000000/HZ; /* demote time in uSec */ >+ if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) >+ dtime = 0; >+ gfs2_print_dbg(gi, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n", >+ state2str(gl->gl_state), >+ gl->gl_name.ln_type, >+ (unsigned long long)gl->gl_name.ln_number, >+ gflags2str(gflags_buf, gl->gl_flags), >+ state2str(gl->gl_target), >+ state2str(gl->gl_demote_state), dtime, >+ atomic_read(&gl->gl_lvb_count), >+ atomic_read(&gl->gl_ail_count), >+ atomic_read(&gl->gl_ref)); > >- print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type, >- (unsigned long long)gl->gl_name.ln_number); >- print_dbg(gi, " gl_flags ="); >- for (x = 0; x < 32; x++) { >- if (test_bit(x, &gl->gl_flags)) >- print_dbg(gi, " %u", x); >- } >- if (!test_bit(GLF_LOCK, &gl->gl_flags)) >- print_dbg(gi, " (unlocked)"); >- print_dbg(gi, " \n"); >- print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref)); >- print_dbg(gi, " gl_state = %u\n", gl->gl_state); >- if (gl->gl_owner_pid) { >- gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID); >- if (gl_owner) >- print_dbg(gi, " gl_owner = pid %d (%s)\n", >- pid_nr(gl->gl_owner_pid), gl_owner->comm); >- else >- print_dbg(gi, " gl_owner = %d (ended)\n", >- pid_nr(gl->gl_owner_pid)); >- } else >- print_dbg(gi, " gl_owner = -1\n"); >- print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip); >- print_dbg(gi, " 
req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no"); >- print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count)); >- print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no"); >- print_dbg(gi, " reclaim = %s\n", >- (list_empty(&gl->gl_reclaim)) ? "no" : "yes"); >- if (gl->gl_aspace) >- print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace, >- gl->gl_aspace->i_mapping->nrpages); >- else >- print_dbg(gi, " aspace = no\n"); >- print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count)); >- if (gl->gl_req_gh) { >- error = dump_holder(gi, "Request", gl->gl_req_gh); >- if (error) >- goto out; >- } > list_for_each_entry(gh, &gl->gl_holders, gh_list) { >- error = dump_holder(gi, "Holder", gh); >+ error = dump_holder(gi, gh); > if (error) > goto out; > } >- list_for_each_entry(gh, &gl->gl_waiters1, gh_list) { >- error = dump_holder(gi, "Waiter1", gh); >- if (error) >- goto out; >- } >- list_for_each_entry(gh, &gl->gl_waiters3, gh_list) { >- error = dump_holder(gi, "Waiter3", gh); >- if (error) >- goto out; >- } >- if (test_bit(GLF_DEMOTE, &gl->gl_flags)) { >- print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n", >- gl->gl_demote_state, (unsigned long long) >- (jiffies - gl->gl_demote_time)*(1000000/HZ)); >- } >- if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) { >- if (!test_bit(GLF_LOCK, &gl->gl_flags) && >- list_empty(&gl->gl_holders)) { >- error = dump_inode(gi, gl->gl_object); >- if (error) >- goto out; >- } else { >- error = -ENOBUFS; >- print_dbg(gi, " Inode: busy\n"); >- } >- } >- >- error = 0; >- >+ if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump) >+ error = glops->go_dump(gi, gl); > out: > spin_unlock(&gl->gl_spin); > return error; >@@ -2086,7 +1728,7 @@ void gfs2_glock_exit(void) > module_param(scand_secs, uint, S_IRUGO|S_IWUSR); > MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); > >-static int gfs2_glock_iter_next(struct glock_iter *gi) >+static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) > { > 
struct gfs2_glock *gl; > >@@ -2104,7 +1746,7 @@ restart: > gfs2_glock_put(gl); > if (gl && gi->gl == NULL) > gi->hash++; >- while(gi->gl == NULL) { >+ while (gi->gl == NULL) { > if (gi->hash >= GFS2_GL_HASH_SIZE) > return 1; > read_lock(gl_lock_addr(gi->hash)); >@@ -2122,16 +1764,16 @@ restart: > return 0; > } > >-static void gfs2_glock_iter_free(struct glock_iter *gi) >+static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi) > { > if (gi->gl) > gfs2_glock_put(gi->gl); > kfree(gi); > } > >-static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) >+static struct gfs2_glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) > { >- struct glock_iter *gi; >+ struct gfs2_glock_iter *gi; > > gi = kmalloc(sizeof (*gi), GFP_KERNEL); > if (!gi) >@@ -2153,14 +1795,14 @@ static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp) > > static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) > { >- struct glock_iter *gi; >+ struct gfs2_glock_iter *gi; > loff_t n = *pos; > > gi = gfs2_glock_iter_init(file->private); > if (!gi) > return NULL; > >- while(n--) { >+ while (n--) { > if (gfs2_glock_iter_next(gi)) { > gfs2_glock_iter_free(gi); > return NULL; >@@ -2173,7 +1815,7 @@ static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos) > static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, > loff_t *pos) > { >- struct glock_iter *gi = iter_ptr; >+ struct gfs2_glock_iter *gi = iter_ptr; > > (*pos)++; > >@@ -2187,14 +1829,14 @@ static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr, > > static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr) > { >- struct glock_iter *gi = iter_ptr; >+ struct gfs2_glock_iter *gi = iter_ptr; > if (gi) > gfs2_glock_iter_free(gi); > } > > static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr) > { >- struct glock_iter *gi = iter_ptr; >+ struct gfs2_glock_iter *gi = iter_ptr; > > gi->seq = file; > dump_glock(gi, gi->gl); >diff --git 
a/fs/gfs2/glock.h b/fs/gfs2/glock.h >index cdad3e6..d68a060 100644 >--- a/fs/gfs2/glock.h >+++ b/fs/gfs2/glock.h >@@ -26,11 +26,8 @@ > #define GL_SKIP 0x00000100 > #define GL_ATIME 0x00000200 > #define GL_NOCACHE 0x00000400 >-#define GL_FLOCK 0x00000800 >-#define GL_NOCANCEL 0x00001000 > > #define GLR_TRYFAILED 13 >-#define GLR_CANCELED 14 > > static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) > { >@@ -41,6 +38,8 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock * > spin_lock(&gl->gl_spin); > pid = task_pid(current); > list_for_each_entry(gh, &gl->gl_holders, gh_list) { >+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) >+ break; > if (gh->gh_owner_pid == pid) > goto out; > } >@@ -70,7 +69,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl) > { > int ret; > spin_lock(&gl->gl_spin); >- ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3); >+ ret = test_bit(GLF_DEMOTE, &gl->gl_flags); > spin_unlock(&gl->gl_spin); > return ret; > } >@@ -98,6 +97,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *sdp, > int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs); > void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs); > void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs); >+void gfs2_print_dbg(struct gfs2_glock_iter *gi, const char *fmt, ...); > > /** > * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock >diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c >index d31bada..11c7e5e 100644 >--- a/fs/gfs2/glops.c >+++ b/fs/gfs2/glops.c >@@ -177,9 +177,8 @@ static void inode_go_sync(struct gfs2_glock *gl) > * > */ > >-static void inode_go_xmote_bh(struct gfs2_glock *gl) >+static void inode_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) > { >- struct gfs2_holder *gh = gl->gl_req_gh; > struct buffer_head *bh; > int error; > >@@ -267,6 +266,26 @@ static int inode_go_lock(struct gfs2_holder *gh) > } > > /** >+ * 
inode_go_dump - print information about an inode >+ * @gi: The iterator >+ * @ip: the inode >+ * >+ * Returns: 0 on success, -ENOBUFS when we run out of space >+ */ >+ >+static int inode_go_dump(struct gfs2_glock_iter *gi, struct gfs2_glock *gl) >+{ >+ struct gfs2_inode *ip = gl->gl_object; >+ if (ip == NULL) >+ return 0; >+ gfs2_print_dbg(gi, " I: n:%llu/%llu t:%u f:0x%08lx\n", >+ (unsigned long long)ip->i_no_formal_ino, >+ (unsigned long long)ip->i_no_addr, >+ IF2DT(ip->i_inode.i_mode), ip->i_flags); >+ return 0; >+} >+ >+/** > * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock > * @gl: the glock > * >@@ -306,6 +325,22 @@ static void rgrp_go_unlock(struct gfs2_holder *gh) > } > > /** >+ * rgrp_go_dump - print out an rgrp >+ * @gi: The iterator >+ * @gl: The glock in question >+ * >+ */ >+ >+static int rgrp_go_dump(struct gfs2_glock_iter *gi, struct gfs2_glock *gl) >+{ >+ struct gfs2_rgrpd *rgd = gl->gl_object; >+ if (rgd == NULL) >+ return 0; >+ gfs2_print_dbg(gi, " R: n:%llu\n", (unsigned long long)rgd->rd_addr); >+ return 0; >+} >+ >+/** > * trans_go_sync - promote/demote the transaction glock > * @gl: the glock > * @state: the requested state >@@ -330,7 +365,7 @@ static void trans_go_sync(struct gfs2_glock *gl) > * > */ > >-static void trans_go_xmote_bh(struct gfs2_glock *gl) >+static void trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh) > { > struct gfs2_sbd *sdp = gl->gl_sbd; > struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); >@@ -379,6 +414,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = { > .go_inval = inode_go_inval, > .go_demote_ok = inode_go_demote_ok, > .go_lock = inode_go_lock, >+ .go_dump = inode_go_dump, > .go_type = LM_TYPE_INODE, > .go_min_hold_time = HZ / 10, > }; >@@ -389,6 +425,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = { > .go_demote_ok = rgrp_go_demote_ok, > .go_lock = rgrp_go_lock, > .go_unlock = rgrp_go_unlock, >+ .go_dump = rgrp_go_dump, > .go_type = LM_TYPE_RGRP, > 
.go_min_hold_time = HZ / 10, > }; >diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h >index 9c2c0b9..0527a5c 100644 >--- a/fs/gfs2/incore.h >+++ b/fs/gfs2/incore.h >@@ -126,22 +126,30 @@ struct gfs2_bufdata { > struct list_head bd_ail_gl_list; > }; > >+struct gfs2_glock_iter { >+ int hash; /* hash bucket index */ >+ struct gfs2_sbd *sdp; /* incore superblock */ >+ struct gfs2_glock *gl; /* current glock struct */ >+ struct seq_file *seq; /* sequence file for debugfs */ >+ char string[512]; /* scratch space */ >+}; >+ > struct gfs2_glock_operations { > void (*go_xmote_th) (struct gfs2_glock *gl); >- void (*go_xmote_bh) (struct gfs2_glock *gl); >+ void (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh); > void (*go_inval) (struct gfs2_glock *gl, int flags); > int (*go_demote_ok) (struct gfs2_glock *gl); > int (*go_lock) (struct gfs2_holder *gh); > void (*go_unlock) (struct gfs2_holder *gh); >+ int (*go_dump)(struct gfs2_glock_iter *gi, struct gfs2_glock *gl); > const int go_type; > const unsigned long go_min_hold_time; > }; > > enum { > /* States */ >- HIF_HOLDER = 6, >+ HIF_HOLDER = 6, /* Set for gh that "holds" the glock */ > HIF_FIRST = 7, >- HIF_ABORTED = 9, > HIF_WAIT = 10, > }; > >@@ -154,7 +162,7 @@ struct gfs2_holder { > unsigned gh_flags; > > int gh_error; >- unsigned long gh_iflags; >+ unsigned long gh_iflags; /* HIF_... 
*/ > unsigned long gh_ip; > }; > >@@ -163,11 +171,9 @@ enum { > GLF_STICKY = 2, > GLF_DEMOTE = 3, > GLF_PENDING_DEMOTE = 4, >- GLF_DIRTY = 5, >- GLF_DEMOTE_IN_PROGRESS = 6, >+ GLF_DEMOTE_IN_PROGRESS = 5, >+ GLF_DIRTY = 6, > GLF_LFLUSH = 7, >- GLF_WAITERS2 = 8, >- GLF_CONV_DEADLK = 9, > }; > > struct gfs2_glock { >@@ -179,19 +185,13 @@ struct gfs2_glock { > spinlock_t gl_spin; > > unsigned int gl_state; >+ unsigned int gl_target; > unsigned int gl_hash; > unsigned int gl_demote_state; /* state requested by remote node */ > unsigned long gl_demote_time; /* time of first demote request */ >- struct pid *gl_owner_pid; >- unsigned long gl_ip; > struct list_head gl_holders; >- struct list_head gl_waiters1; /* HIF_MUTEX */ >- struct list_head gl_waiters3; /* HIF_PROMOTE */ > > const struct gfs2_glock_operations *gl_ops; >- >- struct gfs2_holder *gl_req_gh; >- > void *gl_lock; > char *gl_lvb; > atomic_t gl_lvb_count; >diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c >index cf7ea8a..fed9a67 100644 >--- a/fs/gfs2/locking/dlm/lock.c >+++ b/fs/gfs2/locking/dlm/lock.c >@@ -308,6 +308,9 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state, > { > struct gdlm_lock *lp = lock; > >+ if (req_state == LM_ST_UNLOCKED) >+ return gdlm_unlock(lock, cur_state); >+ > clear_bit(LFL_DLM_CANCEL, &lp->flags); > if (flags & LM_FLAG_NOEXP) > set_bit(LFL_NOBLOCK, &lp->flags); >diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c >index 284a5ec..627bfb7 100644 >--- a/fs/gfs2/locking/nolock/main.c >+++ b/fs/gfs2/locking/nolock/main.c >@@ -107,6 +107,8 @@ static void nolock_put_lock(void *lock) > static unsigned int nolock_lock(void *lock, unsigned int cur_state, > unsigned int req_state, unsigned int flags) > { >+ if (req_state == LM_ST_UNLOCKED) >+ return 0; > return req_state | LM_OUT_CACHEABLE; > } > >diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c >index 053e2eb..bcc668d 100644 >--- a/fs/gfs2/main.c >+++ b/fs/gfs2/main.c >@@ -40,8 +40,6 @@ static 
void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo) > INIT_HLIST_NODE(&gl->gl_list); > spin_lock_init(&gl->gl_spin); > INIT_LIST_HEAD(&gl->gl_holders); >- INIT_LIST_HEAD(&gl->gl_waiters1); >- INIT_LIST_HEAD(&gl->gl_waiters3); > gl->gl_lvb = NULL; > atomic_set(&gl->gl_lvb_count, 0); > INIT_LIST_HEAD(&gl->gl_reclaim); >diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c >index 90a04a6..aba0df1 100644 >--- a/fs/gfs2/ops_address.c >+++ b/fs/gfs2/ops_address.c >@@ -507,26 +507,23 @@ static int __gfs2_readpage(void *file, struct page *page) > static int gfs2_readpage(struct file *file, struct page *page) > { > struct gfs2_inode *ip = GFS2_I(page->mapping->host); >- struct gfs2_holder *gh; >+ struct gfs2_holder gh; > int error; > >- gh = gfs2_glock_is_locked_by_me(ip->i_gl); >- if (!gh) { >- gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS); >- if (!gh) >- return -ENOBUFS; >- gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh); >+ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh); >+ error = gfs2_glock_nq_atime(&gh); >+ if (unlikely(error)) { > unlock_page(page); >- error = gfs2_glock_nq_atime(gh); >- if (likely(error != 0)) >- goto out; >- return AOP_TRUNCATED_PAGE; >+ goto out; > } > error = __gfs2_readpage(file, page); >- gfs2_glock_dq(gh); >+ gfs2_glock_dq(&gh); > out: >- gfs2_holder_uninit(gh); >- kfree(gh); >+ gfs2_holder_uninit(&gh); >+ if (error == GLR_TRYFAILED) { >+ yield(); >+ return AOP_TRUNCATED_PAGE; >+ } > return error; > } > >diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c >index e1b7d52..0ff512a 100644 >--- a/fs/gfs2/ops_file.c >+++ b/fs/gfs2/ops_file.c >@@ -669,8 +669,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) > int error = 0; > > state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; >- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE >- | GL_FLOCK; >+ flags = (IS_SETLKW(cmd) ? 
0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; > > mutex_lock(&fp->f_fl_mutex); > >@@ -683,9 +682,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl) > gfs2_glock_dq_wait(fl_gh); > gfs2_holder_reinit(state, flags, fl_gh); > } else { >- error = gfs2_glock_get(GFS2_SB(&ip->i_inode), >- ip->i_no_addr, &gfs2_flock_glops, >- CREATE, &gl); >+ error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, >+ &gfs2_flock_glops, CREATE, &gl); > if (error) > goto out; > gfs2_holder_init(gl, state, flags, fl_gh); >diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c >index 2888e4b..fdd3f0f 100644 >--- a/fs/gfs2/recovery.c >+++ b/fs/gfs2/recovery.c >@@ -505,7 +505,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd) > > error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, > LM_FLAG_NOEXP | LM_FLAG_PRIORITY | >- GL_NOCANCEL | GL_NOCACHE, &t_gh); >+ GL_NOCACHE, &t_gh); > if (error) > goto fail_gunlock_ji; > >diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c >index 7aeacbc..12fe38f 100644 >--- a/fs/gfs2/super.c >+++ b/fs/gfs2/super.c >@@ -941,8 +941,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp, > } > > error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED, >- LM_FLAG_PRIORITY | GL_NOCACHE, >- t_gh); >+ GL_NOCACHE, t_gh); > > list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { > error = gfs2_jdesc_check(jd);
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 432057
:
294383
|
295614
|
296569
|
297894
|
298191
|
302891
|
303203
|
303208
|
303209
|
303500
|
303542
|
303631
|
303637
|
303646
|
304026
|
304098
|
304112
|
304161
|
304398
|
304409
|
304572
|
304636
|
304637
|
304749
|
304755
|
304811
|
304837
|
304876
|
304898
|
304946
|
304955
|
304976
|
304984
|
305126
|
305185
|
305237
|
305266
|
305377
|
305387
|
305404
|
305506
|
305513
|
305678
|
305702
|
305727
|
306084
|
306111
|
306191
|
306207
|
306208
|
306275
|
306287
|
306832
|
306972
|
307077
|
307096
|
307158