Login
[x]
Log in using an account from:
Fedora Account System
Red Hat Associate
Red Hat Customer
Or log in using a Red Hat Bugzilla account
Forgot Password
Login:
Hide Forgot
Create an Account
Red Hat Bugzilla – Attachment 309545 Details for
Bug 447748
GFS2: lock_dlm is not always delivering callbacks in the right order
[?]
New
Simple Search
Advanced Search
My Links
Browse
Requests
Reports
Current State
Search
Tabular reports
Graphical reports
Duplicates
Other Reports
User Changes
Plotly Reports
Bug Status
Bug Severity
Non-Defaults
|
Product Dashboard
Help
Page Help!
Bug Writing Guidelines
What's new
Browser Support Policy
5.0.4.rh83 Release notes
FAQ
Guides index
User guide
Web Services
Contact
Legal
This site requires JavaScript to be enabled to function correctly, please enable it.
[patch]
RHEL5 patch - Try #1
447748.16Jun2008.patch (text/plain), 135.11 KB, created by
Robert Peterson
on 2008-06-16 22:21:04 UTC
(
hide
)
Description:
RHEL5 patch - Try #1
Filename:
MIME Type:
Creator:
Robert Peterson
Created:
2008-06-16 22:21:04 UTC
Size:
135.11 KB
patch
obsolete
>diff -purN a/fs/gfs2/acl.c b/fs/gfs2/acl.c >--- a/fs/gfs2/acl.c 2008-04-30 16:26:34.000000000 -0500 >+++ b/fs/gfs2/acl.c 2008-06-16 09:14:31.000000000 -0500 >@@ -15,7 +15,6 @@ > #include <linux/posix_acl.h> > #include <linux/posix_acl_xattr.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c >--- a/fs/gfs2/bmap.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/bmap.c 2008-06-16 09:14:31.000000000 -0500 >@@ -13,7 +13,6 @@ > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/daemon.c b/fs/gfs2/daemon.c >--- a/fs/gfs2/daemon.c 2008-04-30 16:26:38.000000000 -0500 >+++ b/fs/gfs2/daemon.c 2008-06-16 09:22:19.000000000 -0500 >@@ -15,7 +15,6 @@ > #include <linux/kthread.h> > #include <linux/delay.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/dir.c b/fs/gfs2/dir.c >--- a/fs/gfs2/dir.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/dir.c 2008-06-16 09:14:31.000000000 -0500 >@@ -60,7 +60,6 @@ > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> > #include <linux/vmalloc.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c >--- a/fs/gfs2/eaops.c 2008-04-30 16:26:39.000000000 -0500 >+++ b/fs/gfs2/eaops.c 2008-06-16 09:14:31.000000000 -0500 >@@ -13,7 +13,6 @@ > #include <linux/buffer_head.h> > #include <linux/xattr.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <asm/uaccess.h> > > #include "gfs2.h" >diff -purN a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c >--- a/fs/gfs2/eattr.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/eattr.c 2008-06-16 09:14:31.000000000 -0500 >@@ -13,7 +13,6 @@ > #include <linux/buffer_head.h> > #include 
<linux/xattr.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <asm/uaccess.h> > > #include "gfs2.h" >diff -purN a/fs/gfs2/gfs2.h b/fs/gfs2/gfs2.h >--- a/fs/gfs2/gfs2.h 2008-04-30 16:26:29.000000000 -0500 >+++ b/fs/gfs2/gfs2.h 2008-06-13 15:03:19.000000000 -0500 >@@ -16,11 +16,6 @@ enum { > }; > > enum { >- NO_WAIT = 0, >- WAIT = 1, >-}; >- >-enum { > NO_FORCE = 0, > FORCE = 1, > }; >diff -purN a/fs/gfs2/glock.c b/fs/gfs2/glock.c >--- a/fs/gfs2/glock.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/glock.c 2008-06-16 10:51:56.000000000 -0500 >@@ -10,7 +10,6 @@ > #include <linux/sched.h> > #include <linux/slab.h> > #include <linux/spinlock.h> >-#include <linux/completion.h> > #include <linux/buffer_head.h> > #include <linux/delay.h> > #include <linux/sort.h> >@@ -18,7 +17,6 @@ > #include <linux/kallsyms.h> > #include <linux/gfs2_ondisk.h> > #include <linux/list.h> >-#include <linux/lm_interface.h> > #include <linux/wait.h> > #include <linux/module.h> > #include <linux/rwsem.h> >@@ -36,7 +34,6 @@ > #include "glock.h" > #include "glops.h" > #include "inode.h" >-#include "lm.h" > #include "lops.h" > #include "meta_io.h" > #include "quota.h" >@@ -186,12 +183,10 @@ static void glock_free(struct gfs2_glock > struct gfs2_sbd *sdp = gl->gl_sbd; > struct inode *aspace = gl->gl_aspace; > >- gfs2_lm_put_lock(sdp, gl->gl_lock); >- > if (aspace) > gfs2_aspace_put(aspace); > >- kmem_cache_free(gfs2_glock_cachep, gl); >+ sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl); > } > > /** >@@ -261,27 +256,6 @@ static struct gfs2_glock *search_bucket( > } > > /** >- * gfs2_glock_find() - Find glock by lock number >- * @sdp: The GFS2 superblock >- * @name: The lock name >- * >- * Returns: NULL, or the struct gfs2_glock with the requested number >- */ >- >-static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp, >- const struct lm_lockname *name) >-{ >- unsigned int hash = gl_hash(sdp, name); >- struct gfs2_glock *gl; >- >- 
read_lock(gl_lock_addr(hash)); >- gl = search_bucket(hash, sdp, name); >- read_unlock(gl_lock_addr(hash)); >- >- return gl; >-} >- >-/** > * may_grant - check if its ok to grant a new lock > * @gl: The glock > * @gh: The lock request which we wish to grant >@@ -516,12 +490,16 @@ out: > } > > static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, >- unsigned int cur_state, unsigned int req_state, >+ unsigned int req_state, > unsigned int flags) > { > int ret = LM_OUT_ERROR; >+ >+ if (!sdp->sd_lockstruct.ls_ops->lm_lock) >+ return req_state == LM_ST_UNLOCKED ? 0 : req_state; >+ > if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state, >+ ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, > req_state, flags); > return ret; > } >@@ -562,7 +540,7 @@ static void do_xmote(struct gfs2_glock * > gl->gl_state == LM_ST_DEFERRED) && > !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) > lck_flags |= LM_FLAG_TRY_1CB; >- ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags); >+ ret = gfs2_lm_lock(sdp, gl, target, lck_flags); > > if (!(ret & LM_OUT_ASYNC)) { > finish_xmote(gl, ret); >@@ -702,6 +680,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, > gl->gl_demote_state = LM_ST_EXCLUSIVE; > gl->gl_hash = hash; > gl->gl_ops = glops; >+ sprintf(gl->gl_strname, "%8x%16llx", name.ln_type, (unsigned long long)number); >+ memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); > gl->gl_stamp = jiffies; > gl->gl_tchange = jiffies; > gl->gl_object = NULL; >@@ -720,10 +700,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, > } > } > >- error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock); >- if (error) >- goto fail_aspace; >- > write_lock(gl_lock_addr(hash)); > tmp = search_bucket(hash, sdp, &name); > if (tmp) { >@@ -739,9 +715,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, > > return 0; > >-fail_aspace: >- if (gl->gl_aspace) >- gfs2_aspace_put(gl->gl_aspace); > fail: > kmem_cache_free(gfs2_glock_cachep, gl); > return error; >@@ 
-932,7 +905,8 @@ do_cancel: > gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); > if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { > spin_unlock(&gl->gl_spin); >- sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock); >+ if (sdp->sd_lockstruct.ls_ops->lm_cancel) >+ sdp->sd_lockstruct.ls_ops->lm_cancel(gl); > spin_lock(&gl->gl_spin); > } > return; >@@ -1206,68 +1180,12 @@ void gfs2_glock_dq_uninit_m(unsigned int > gfs2_glock_dq_uninit(&ghs[x]); > } > >-static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp) >-{ >- int error = -EIO; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp); >- return error; >-} >- >-/** >- * gfs2_lvb_hold - attach a LVB from a glock >- * @gl: The glock in question >- * >- */ >- >-int gfs2_lvb_hold(struct gfs2_glock *gl) >-{ >- int error; >- >- if (!atomic_read(&gl->gl_lvb_count)) { >- error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb); >- if (error) >- return error; >- gfs2_glock_hold(gl); >- } >- atomic_inc(&gl->gl_lvb_count); >- >- return 0; >-} >- >-/** >- * gfs2_lvb_unhold - detach a LVB from a glock >- * @gl: The glock in question >- * >- */ >- >-void gfs2_lvb_unhold(struct gfs2_glock *gl) >-{ >- struct gfs2_sbd *sdp = gl->gl_sbd; >- >- gfs2_glock_hold(gl); >- gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0); >- if (atomic_dec_and_test(&gl->gl_lvb_count)) { >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb); >- gl->gl_lvb = NULL; >- gfs2_glock_put(gl); >- } >- gfs2_glock_put(gl); >-} >- >-static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name, >- unsigned int state) >+void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) > { >- struct gfs2_glock *gl; > unsigned long delay = 0; > unsigned long holdtime; > unsigned long now = jiffies; > >- gl = gfs2_glock_find(sdp, name); >- if (!gl) >- return; >- > holdtime = gl->gl_tchange + 
gl->gl_ops->go_min_hold_time; > if (time_before(now, holdtime)) > delay = holdtime - now; >@@ -1280,64 +1198,35 @@ static void blocking_cb(struct gfs2_sbd > } > > /** >- * gfs2_glock_cb - Callback used by locking module >- * @sdp: Pointer to the superblock >- * @type: Type of callback >- * @data: Type dependent data pointer >+ * gfs2_glock_complete - Callback used by locking >+ * @gl: Pointer to the glock >+ * @ret: The return value from the dlm > * >- * Called by the locking module when it wants to tell us something. >- * Either we need to drop a lock, one of our ASYNC requests completed, or >- * a journal from another client needs to be recovered. > */ > >-void gfs2_glock_cb(void *cb_data, unsigned int type, void *data) >+void gfs2_glock_complete(struct gfs2_glock *gl, int ret) > { >- struct gfs2_sbd *sdp = cb_data; >- >- switch (type) { >- case LM_CB_NEED_E: >- blocking_cb(sdp, data, LM_ST_UNLOCKED); >- return; >- >- case LM_CB_NEED_D: >- blocking_cb(sdp, data, LM_ST_DEFERRED); >- return; >- >- case LM_CB_NEED_S: >- blocking_cb(sdp, data, LM_ST_SHARED); >- return; >- >- case LM_CB_ASYNC: { >- struct lm_async_cb *async = data; >- struct gfs2_glock *gl; >- >- down_read(&gfs2_umount_flush_sem); >- gl = gfs2_glock_find(sdp, &async->lc_name); >- if (gfs2_assert_warn(sdp, gl)) >+ struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct; >+ down_read(&gfs2_umount_flush_sem); >+ gl->gl_reply = ret; >+ if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) { >+ struct gfs2_holder *gh; >+ spin_lock(&gl->gl_spin); >+ gh = find_first_waiter(gl); >+ if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) && >+ (gl->gl_target != LM_ST_UNLOCKED)) || >+ ((ret & ~LM_OUT_ST_MASK) != 0)) >+ set_bit(GLF_FROZEN, &gl->gl_flags); >+ spin_unlock(&gl->gl_spin); >+ if (test_bit(GLF_FROZEN, &gl->gl_flags)) { >+ up_read(&gfs2_umount_flush_sem); > return; >- gl->gl_reply = async->lc_ret; >- set_bit(GLF_REPLY_PENDING, &gl->gl_flags); >- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >- 
gfs2_glock_put(gl); >- up_read(&gfs2_umount_flush_sem); >- return; >- } >- >- case LM_CB_NEED_RECOVERY: >- gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data); >- if (sdp->sd_recoverd_process) >- wake_up_process(sdp->sd_recoverd_process); >- return; >- >- case LM_CB_DROPLOCKS: >- gfs2_gl_hash_clear(sdp, NO_WAIT); >- gfs2_quota_scan(sdp); >- return; >- >- default: >- gfs2_assert_warn(sdp, 0); >- return; >+ } > } >+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags); >+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); >+ up_read(&gfs2_umount_flush_sem); > } > > /** >@@ -1465,6 +1354,26 @@ out: > return has_entries; > } > >+ >+/** >+ * thaw_glock - thaw out a glock which has an unprocessed reply waiting >+ * @gl: The glock to thaw >+ * >+ * N.B. When we freeze a glock, we leave a ref to the glock outstanding, >+ * so this has to result in the ref count being dropped by one. >+ */ >+ >+static void thaw_glock(struct gfs2_glock *gl) >+{ >+ if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) >+ return; >+ down_read(&gfs2_umount_flush_sem); >+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags); >+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) >+ gfs2_glock_put(gl); >+ up_read(&gfs2_umount_flush_sem); >+} >+ > /** > * scan_glock - look at a glock and see if we can reclaim it > * @gl: the glock to look at >@@ -1517,15 +1426,28 @@ static void clear_glock(struct gfs2_gloc > } > > /** >+ * gfs2_glock_thaw - Thaw any frozen glocks >+ * @sdp: The super block >+ * >+ */ >+ >+void gfs2_glock_thaw(struct gfs2_sbd *sdp) >+{ >+ unsigned x; >+ >+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++) >+ examine_bucket(thaw_glock, sdp, x); >+} >+ >+/** > * gfs2_gl_hash_clear - Empty out the glock hash table > * @sdp: the filesystem > * @wait: wait until it's all gone > * >- * Called when unmounting the filesystem, or when inter-node lock manager >- * requests DROPLOCKS because it is running out of capacity. >+ * Called when unmounting the filesystem. 
> */ > >-void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait) >+void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) > { > unsigned long t; > unsigned int x; >@@ -1540,7 +1462,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd > cont = 1; > } > >- if (!wait || !cont) >+ if (!cont) > break; > > if (time_after_eq(jiffies, >@@ -1583,7 +1505,7 @@ static const char *hflags2str(char *buf, > if (flags & LM_FLAG_NOEXP) > *p++ = 'e'; > if (flags & LM_FLAG_ANY) >- *p++ = 'a'; >+ *p++ = 'A'; > if (flags & LM_FLAG_PRIORITY) > *p++ = 'p'; > if (flags & GL_ASYNC) >@@ -1651,6 +1573,10 @@ static const char *gflags2str(char *buf, > *p++ = 'i'; > if (test_bit(GLF_REPLY_PENDING, gflags)) > *p++ = 'r'; >+ if (test_bit(GLF_INITIAL, gflags)) >+ *p++ = 'I'; >+ if (test_bit(GLF_FROZEN, gflags)) >+ *p++ = 'F'; > *p = 0; > return buf; > } >@@ -1685,14 +1611,13 @@ static int __dump_glock(struct seq_file > dtime *= 1000000/HZ; /* demote time in uSec */ > if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) > dtime = 0; >- gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n", >+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n", > state2str(gl->gl_state), > gl->gl_name.ln_type, > (unsigned long long)gl->gl_name.ln_number, > gflags2str(gflags_buf, &gl->gl_flags), > state2str(gl->gl_target), > state2str(gl->gl_demote_state), dtime, >- atomic_read(&gl->gl_lvb_count), > atomic_read(&gl->gl_ail_count), > atomic_read(&gl->gl_ref)); > >@@ -1782,8 +1707,6 @@ static int gfs2_scand(void *data) > return 0; > } > >- >- > int __init gfs2_glock_init(void) > { > unsigned i; >diff -purN a/fs/gfs2/glock.h b/fs/gfs2/glock.h >--- a/fs/gfs2/glock.h 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/glock.h 2008-06-16 16:09:46.000000000 -0500 >@@ -10,15 +10,77 @@ > #ifndef __GLOCK_DOT_H__ > #define __GLOCK_DOT_H__ > >+#include <linux/parser.h> > #include "incore.h" > >-/* Flags for lock requests; used in gfs2_holder gh_flag field. 
>- From lm_interface.h: >+enum { >+ Opt_jid, >+ Opt_id, >+ Opt_first, >+ Opt_nodir, >+ Opt_err, >+}; >+ >+/* >+ * lm_lockname types >+ */ >+ >+#define LM_TYPE_RESERVED 0x00 >+#define LM_TYPE_NONDISK 0x01 >+#define LM_TYPE_INODE 0x02 >+#define LM_TYPE_RGRP 0x03 >+#define LM_TYPE_META 0x04 >+#define LM_TYPE_IOPEN 0x05 >+#define LM_TYPE_FLOCK 0x06 >+#define LM_TYPE_PLOCK 0x07 >+#define LM_TYPE_QUOTA 0x08 >+#define LM_TYPE_JOURNAL 0x09 >+ >+/* >+ * lm_lock() states >+ * >+ * SHARED is compatible with SHARED, not with DEFERRED or EX. >+ * DEFERRED is compatible with DEFERRED, not with SHARED or EX. >+ */ >+ >+#define LM_ST_UNLOCKED 0 >+#define LM_ST_EXCLUSIVE 1 >+#define LM_ST_DEFERRED 2 >+#define LM_ST_SHARED 3 >+ >+/* >+ * lm_lock() flags >+ * >+ * LM_FLAG_TRY >+ * Don't wait to acquire the lock if it can't be granted immediately. >+ * >+ * LM_FLAG_TRY_1CB >+ * Send one blocking callback if TRY is set and the lock is not granted. >+ * >+ * LM_FLAG_NOEXP >+ * GFS sets this flag on lock requests it makes while doing journal recovery. >+ * These special requests should not be blocked due to the recovery like >+ * ordinary locks would be. >+ * >+ * LM_FLAG_ANY >+ * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may >+ * also be granted in SHARED. The preferred state is whichever is compatible >+ * with other granted locks, or the specified state if no other locks exist. >+ * >+ * LM_FLAG_PRIORITY >+ * Override fairness considerations. Suppose a lock is held in a shared state >+ * and there is a pending request for the deferred state. A shared lock >+ * request with the priority flag would be allowed to bypass the deferred >+ * request and directly join the other shared lock. A shared lock request >+ * without the priority flag might be forced to wait until the deferred >+ * requested had acquired and released the lock. 
>+ */ >+ > #define LM_FLAG_TRY 0x00000001 > #define LM_FLAG_TRY_1CB 0x00000002 > #define LM_FLAG_NOEXP 0x00000004 > #define LM_FLAG_ANY 0x00000008 >-#define LM_FLAG_PRIORITY 0x00000010 */ >+#define LM_FLAG_PRIORITY 0x00000010 > > #define GL_ASYNC 0x00000040 > #define GL_EXACT 0x00000080 >@@ -26,8 +88,46 @@ > #define GL_ATIME 0x00000200 > #define GL_NOCACHE 0x00000400 > >+/* >+ * lm_lock() and lm_async_cb return flags >+ * >+ * LM_OUT_ST_MASK >+ * Masks the lower two bits of lock state in the returned value. >+ * >+ * LM_OUT_CANCELED >+ * The lock request was canceled. >+ * >+ * LM_OUT_ASYNC >+ * The result of the request will be returned in an LM_CB_ASYNC callback. >+ * >+ */ >+ >+#define LM_OUT_ST_MASK 0x00000003 >+#define LM_OUT_CANCELED 0x00000008 >+#define LM_OUT_ASYNC 0x00000080 >+#define LM_OUT_ERROR 0x00000100 >+ >+/* >+ * lm_recovery_done() messages >+ */ >+ >+#define LM_RD_GAVEUP 308 >+#define LM_RD_SUCCESS 309 >+ > #define GLR_TRYFAILED 13 > >+struct lm_lockops { >+ const char *lm_proto_name; >+ int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname); >+ void (*lm_unmount) (struct gfs2_sbd *sdp); >+ void (*lm_withdraw) (struct gfs2_sbd *sdp); >+ void (*lm_put_lock) (struct kmem_cache *cachep, void *gl); >+ unsigned int (*lm_lock) (struct gfs2_glock *gl, >+ unsigned int req_state, unsigned int flags); >+ void (*lm_cancel) (struct gfs2_glock *gl); >+ match_table_t *lm_tokens; >+}; >+ > static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl) > { > struct gfs2_holder *gh; >@@ -122,15 +222,12 @@ static inline int gfs2_glock_nq_init(str > return error; > } > >-/* Lock Value Block functions */ >- >-int gfs2_lvb_hold(struct gfs2_glock *gl); >-void gfs2_lvb_unhold(struct gfs2_glock *gl); >- >-void gfs2_glock_cb(void *cb_data, unsigned int type, void *data); >+void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state); >+void gfs2_glock_complete(struct gfs2_glock *gl, int ret); > void gfs2_glock_schedule_for_reclaim(struct 
gfs2_glock *gl); > void gfs2_reclaim_glock(struct gfs2_sbd *sdp); >-void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait); >+void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); >+void gfs2_glock_thaw(struct gfs2_sbd *sdp); > > int __init gfs2_glock_init(void); > void gfs2_glock_exit(void); >@@ -140,4 +237,6 @@ void gfs2_delete_debugfs_file(struct gfs > int gfs2_register_debugfs(void); > void gfs2_unregister_debugfs(void); > >+extern const struct lm_lockops gfs2_dlm_ops; >+ > #endif /* __GLOCK_DOT_H__ */ >diff -purN a/fs/gfs2/glops.c b/fs/gfs2/glops.c >--- a/fs/gfs2/glops.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/glops.c 2008-06-16 09:14:31.000000000 -0500 >@@ -12,7 +12,6 @@ > #include <linux/completion.h> > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <linux/bio.h> > > #include "gfs2.h" >@@ -395,18 +394,6 @@ static int trans_go_xmote_bh(struct gfs2 > return 0; > } > >-/** >- * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock >- * @gl: the glock >- * >- * Returns: 1 if it's ok >- */ >- >-static int quota_go_demote_ok(struct gfs2_glock *gl) >-{ >- return !atomic_read(&gl->gl_lvb_count); >-} >- > const struct gfs2_glock_operations gfs2_meta_glops = { > .go_xmote_th = meta_go_sync, > .go_type = LM_TYPE_META, >@@ -452,7 +439,6 @@ const struct gfs2_glock_operations gfs2_ > }; > > const struct gfs2_glock_operations gfs2_quota_glops = { >- .go_demote_ok = quota_go_demote_ok, > .go_type = LM_TYPE_QUOTA, > }; > >diff -purN a/fs/gfs2/incore.h b/fs/gfs2/incore.h >--- a/fs/gfs2/incore.h 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/incore.h 2008-06-16 17:02:01.000000000 -0500 >@@ -12,6 +12,8 @@ > > #include <linux/fs.h> > #include <linux/workqueue.h> >+#include <linux/dlm.h> >+#include <linux/buffer_head.h> > > #define DIO_WAIT 0x00000010 > #define DIO_METADATA 0x00000020 >@@ -26,6 +28,7 @@ struct gfs2_trans; > struct gfs2_ail; > struct gfs2_jdesc; > struct gfs2_sbd; >+struct 
lm_lockops; > > typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret); > >@@ -126,6 +129,31 @@ struct gfs2_bufdata { > struct list_head bd_ail_gl_list; > }; > >+/* >+ * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a >+ * prefix of lock_dlm_ gets awkward. >+ */ >+ >+#define GDLM_STRNAME_BYTES 24 >+#define GDLM_LVB_SIZE 32 >+#define GDLM_NAME_LEN 128 >+ >+enum { >+ DFL_BLOCK_LOCKS = 0, >+ DFL_SPECTATOR = 1, >+ DFL_WITHDRAW = 2, >+}; >+ >+struct lm_lockname { >+ u64 ln_number; >+ unsigned int ln_type; >+}; >+ >+#define lm_name_equal(name1, name2) \ >+ (((name1)->ln_number == (name2)->ln_number) && \ >+ ((name1)->ln_type == (name2)->ln_type)) >+ >+ > struct gfs2_glock_operations { > void (*go_xmote_th) (struct gfs2_glock *gl); > int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh); >@@ -168,6 +196,8 @@ enum { > GLF_LFLUSH = 7, > GLF_INVALIDATE_IN_PROGRESS = 8, > GLF_REPLY_PENDING = 9, >+ GLF_INITIAL = 10, >+ GLF_FROZEN = 11, > }; > > struct gfs2_glock { >@@ -187,10 +217,9 @@ struct gfs2_glock { > struct list_head gl_holders; > > const struct gfs2_glock_operations *gl_ops; >- void *gl_lock; >- char *gl_lvb; >- atomic_t gl_lvb_count; >- >+ char gl_strname[GDLM_STRNAME_BYTES]; >+ struct dlm_lksb gl_lksb; >+ char gl_lvb[32]; > unsigned long gl_stamp; > unsigned long gl_tchange; > void *gl_object; >@@ -206,6 +235,8 @@ struct gfs2_glock { > struct work_struct gl_work; > }; > >+#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */ >+ > struct gfs2_alloc { > /* Quota stuff */ > >@@ -467,6 +498,31 @@ struct gfs2_sb_host { > char sb_locktable[GFS2_LOCKNAME_LEN]; > }; > >+/* >+ * lm_mount() return values >+ * >+ * ls_jid - the journal ID this node should use >+ * ls_first - this node is the first to mount the file system >+ * ls_lockspace - lock module's context for this file system >+ * ls_ops - lock module's functions >+ */ >+ >+struct lm_lockstruct { >+ u32 ls_id; >+ unsigned int ls_jid; >+ unsigned int 
ls_first; >+ unsigned int ls_first_done; >+ unsigned int ls_nodir; >+ struct lm_lockops *ls_ops; >+ unsigned long ls_flags; >+ struct kobject ls_kobj; >+ dlm_lockspace_t *ls_dlm; >+ >+ int ls_recover_jid; >+ int ls_recover_jid_done; >+ int ls_recover_jid_status; >+}; >+ > struct gfs2_sbd { > struct super_block *sd_vfs; > struct super_block *sd_vfs_meta; >diff -purN a/fs/gfs2/inode.c b/fs/gfs2/inode.c >--- a/fs/gfs2/inode.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/inode.c 2008-06-16 09:14:31.000000000 -0500 >@@ -16,7 +16,6 @@ > #include <linux/sort.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > #include <linux/security.h> > > #include "gfs2.h" >@@ -137,16 +136,16 @@ void gfs2_set_iop(struct inode *inode) > > if (S_ISREG(mode)) { > inode->i_op = &gfs2_file_iops; >- if (sdp->sd_args.ar_localflocks) >- inode->i_fop = &gfs2_file_fops_nolock; >+ if (gfs2_localflocks(sdp)) >+ inode->i_fop = gfs2_file_fops_nolock; > else >- inode->i_fop = &gfs2_file_fops; >+ inode->i_fop = gfs2_file_fops; > } else if (S_ISDIR(mode)) { > inode->i_op = &gfs2_dir_iops; >- if (sdp->sd_args.ar_localflocks) >- inode->i_fop = &gfs2_dir_fops_nolock; >+ if (gfs2_localflocks(sdp)) >+ inode->i_fop = gfs2_dir_fops_nolock; > else >- inode->i_fop = &gfs2_dir_fops; >+ inode->i_fop = gfs2_dir_fops; > } else if (S_ISLNK(mode)) { > inode->i_op = &gfs2_symlink_iops; > } else { >diff -purN a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig >--- a/fs/gfs2/Kconfig 2008-04-30 16:26:34.000000000 -0500 >+++ b/fs/gfs2/Kconfig 2008-06-16 08:58:34.000000000 -0500 >@@ -1,6 +1,10 @@ > config GFS2_FS > tristate "GFS2 file system support" > depends on EXPERIMENTAL >+ select DLM if GFS2_FS_LOCKING_DLM >+ select CONFIGFS_FS if GFS2_FS_LOCKING_DLM >+ select SYSFS if GFS2_FS_LOCKING_DLM >+ select IP_SCTP if DLM_SCTP > select FS_POSIX_ACL > select CRC32 > help >@@ -33,11 +37,8 @@ config GFS2_FS_LOCKING_NOLOCK > module. 
> > config GFS2_FS_LOCKING_DLM >- tristate "GFS2 DLM locking module" >- depends on GFS2_FS && SYSFS && NET && INET && (IPV6 || IPV6=n) >-# select IP_SCTP if DLM_SCTP >- select CONFIGFS_FS >- select DLM >+ bool "GFS2 DLM locking" >+ depends on GFS2_FS && NET && INET && (IPV6 || IPV6=n) > help > Multiple node locking module for GFS2 > >diff -purN a/fs/gfs2/lm.c b/fs/gfs2/lm.c >--- a/fs/gfs2/lm.c 2008-04-30 16:26:34.000000000 -0500 >+++ b/fs/gfs2/lm.c 1969-12-31 18:00:00.000000000 -0600 >@@ -1,210 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. >- */ >- >-#include <linux/slab.h> >-#include <linux/spinlock.h> >-#include <linux/completion.h> >-#include <linux/buffer_head.h> >-#include <linux/delay.h> >-#include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> >- >-#include "gfs2.h" >-#include "incore.h" >-#include "glock.h" >-#include "lm.h" >-#include "super.h" >-#include "util.h" >- >-/** >- * gfs2_lm_mount - mount a locking protocol >- * @sdp: the filesystem >- * @args: mount arguements >- * @silent: if 1, don't complain if the FS isn't a GFS2 fs >- * >- * Returns: errno >- */ >- >-int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) >-{ >- char *proto = sdp->sd_proto_name; >- char *table = sdp->sd_table_name; >- int flags = 0; >- int error; >- >- if (sdp->sd_args.ar_spectator) >- flags |= LM_MFLAG_SPECTATOR; >- >- fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table); >- >- error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata, >- gfs2_glock_cb, sdp, >- GFS2_MIN_LVB_SIZE, flags, >- &sdp->sd_lockstruct, &sdp->sd_kobj); >- if (error) { >- fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n", >- proto, table, 
sdp->sd_args.ar_hostdata); >- goto out; >- } >- >- if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) || >- gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) || >- gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >= >- GFS2_MIN_LVB_SIZE)) { >- gfs2_unmount_lockproto(&sdp->sd_lockstruct); >- goto out; >- } >- >- if (sdp->sd_args.ar_spectator) >- snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table); >- else >- snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table, >- sdp->sd_lockstruct.ls_jid); >- >- fs_info(sdp, "Joined cluster. Now mounting FS...\n"); >- >- if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) && >- !sdp->sd_args.ar_ignore_local_fs) { >- sdp->sd_args.ar_localflocks = 1; >- sdp->sd_args.ar_localcaching = 1; >- } >- >-out: >- return error; >-} >- >-void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp) >-{ >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- sdp->sd_lockstruct.ls_ops->lm_others_may_mount( >- sdp->sd_lockstruct.ls_lockspace); >-} >- >-void gfs2_lm_unmount(struct gfs2_sbd *sdp) >-{ >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- gfs2_unmount_lockproto(&sdp->sd_lockstruct); >-} >- >-int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...) 
>-{ >- va_list args; >- >- if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags)) >- return 0; >- >- va_start(args, fmt); >- vprintk(fmt, args); >- va_end(args); >- >- fs_err(sdp, "about to withdraw this file system\n"); >- BUG_ON(sdp->sd_args.ar_debug); >- >- fs_err(sdp, "telling LM to withdraw\n"); >- gfs2_withdraw_lockproto(&sdp->sd_lockstruct); >- fs_err(sdp, "withdrawn\n"); >- dump_stack(); >- >- return -1; >-} >- >-int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name, >- void **lockp) >-{ >- int error = -EIO; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- error = sdp->sd_lockstruct.ls_ops->lm_get_lock( >- sdp->sd_lockstruct.ls_lockspace, name, lockp); >- return error; >-} >- >-void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock) >-{ >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- sdp->sd_lockstruct.ls_ops->lm_put_lock(lock); >-} >- >-unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock, >- unsigned int cur_state, unsigned int req_state, >- unsigned int flags) >-{ >- int ret = 0; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state, >- req_state, flags); >- return ret; >-} >- >-unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock, >- unsigned int cur_state) >-{ >- int ret = 0; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state); >- return ret; >-} >- >-void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock) >-{ >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- sdp->sd_lockstruct.ls_ops->lm_cancel(lock); >-} >- >-int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp) >-{ >- int error = -EIO; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp); >- return error; >-} >- >-void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb) >-{ >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- 
sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(lock, lvb); >-} >- >-int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name, >- struct file *file, struct file_lock *fl) >-{ >- int error = -EIO; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- error = sdp->sd_lockstruct.ls_ops->lm_plock_get( >- sdp->sd_lockstruct.ls_lockspace, name, file, fl); >- return error; >-} >- >-int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name, >- struct file *file, int cmd, struct file_lock *fl) >-{ >- int error = -EIO; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- error = sdp->sd_lockstruct.ls_ops->lm_plock( >- sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl); >- return error; >-} >- >-int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name, >- struct file *file, struct file_lock *fl) >-{ >- int error = -EIO; >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- error = sdp->sd_lockstruct.ls_ops->lm_punlock( >- sdp->sd_lockstruct.ls_lockspace, name, file, fl); >- return error; >-} >- >-void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid, >- unsigned int message) >-{ >- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >- sdp->sd_lockstruct.ls_ops->lm_recovery_done( >- sdp->sd_lockstruct.ls_lockspace, jid, message); >-} >- >diff -purN a/fs/gfs2/lm.h b/fs/gfs2/lm.h >--- a/fs/gfs2/lm.h 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/lm.h 1969-12-31 18:00:00.000000000 -0600 >@@ -1,38 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. 
>- */ >- >-#ifndef __LM_DOT_H__ >-#define __LM_DOT_H__ >- >-struct gfs2_sbd; >- >-#define GFS2_MIN_LVB_SIZE 32 >- >-int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent); >-void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp); >-void gfs2_lm_unmount(struct gfs2_sbd *sdp); >-int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...) >- __attribute__ ((format(printf, 2, 3))); >-int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name, >- void **lockp); >-void gfs2_lm_put_lock(struct gfs2_sbd *sdp, void *lock); >-unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock, >- unsigned int cur_state); >-void gfs2_lm_cancel(struct gfs2_sbd *sdp, void *lock); >-void gfs2_lm_unhold_lvb(struct gfs2_sbd *sdp, void *lock, char *lvb); >-int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name, >- struct file *file, struct file_lock *fl); >-int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name, >- struct file *file, int cmd, struct file_lock *fl); >-int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name, >- struct file *file, struct file_lock *fl); >-void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid, >- unsigned int message); >- >-#endif /* __LM_DOT_H__ */ >diff -purN a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c >--- a/fs/gfs2/locking/dlm/lock.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/lock.c 1969-12-31 18:00:00.000000000 -0600 >@@ -1,530 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. 
>- */ >- >-#include "lock_dlm.h" >- >-static char junk_lvb[GDLM_LVB_SIZE]; >- >-static void queue_complete(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- clear_bit(LFL_ACTIVE, &lp->flags); >- >- spin_lock(&ls->async_lock); >- list_add_tail(&lp->clist, &ls->complete); >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-static inline void gdlm_ast(void *astarg) >-{ >- queue_complete(astarg); >-} >- >-static inline void gdlm_bast(void *astarg, int mode) >-{ >- struct gdlm_lock *lp = astarg; >- struct gdlm_ls *ls = lp->ls; >- >- if (!mode) { >- printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- return; >- } >- >- spin_lock(&ls->async_lock); >- if (!lp->bast_mode) { >- list_add_tail(&lp->blist, &ls->blocking); >- lp->bast_mode = mode; >- } else if (lp->bast_mode < mode) >- lp->bast_mode = mode; >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-void gdlm_queue_delayed(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- spin_lock(&ls->async_lock); >- list_add_tail(&lp->delay_list, &ls->delayed); >- spin_unlock(&ls->async_lock); >-} >- >-/* convert gfs lock-state to dlm lock-mode */ >- >-static s16 make_mode(s16 lmstate) >-{ >- switch (lmstate) { >- case LM_ST_UNLOCKED: >- return DLM_LOCK_NL; >- case LM_ST_EXCLUSIVE: >- return DLM_LOCK_EX; >- case LM_ST_DEFERRED: >- return DLM_LOCK_CW; >- case LM_ST_SHARED: >- return DLM_LOCK_PR; >- } >- gdlm_assert(0, "unknown LM state %d", lmstate); >- return -1; >-} >- >-/* convert dlm lock-mode to gfs lock-state */ >- >-s16 gdlm_make_lmstate(s16 dlmmode) >-{ >- switch (dlmmode) { >- case DLM_LOCK_IV: >- case DLM_LOCK_NL: >- return LM_ST_UNLOCKED; >- case DLM_LOCK_EX: >- return LM_ST_EXCLUSIVE; >- case DLM_LOCK_CW: >- return LM_ST_DEFERRED; >- case DLM_LOCK_PR: >- return LM_ST_SHARED; >- } >- gdlm_assert(0, "unknown DLM mode %d", dlmmode); >- return -1; >-} >- >-/* verify agreement with 
GFS on the current lock state, NB: DLM_LOCK_NL and >- DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */ >- >-static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state) >-{ >- s16 cur = make_mode(cur_state); >- if (lp->cur != DLM_LOCK_IV) >- gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur); >-} >- >-static inline unsigned int make_flags(struct gdlm_lock *lp, >- unsigned int gfs_flags, >- s16 cur, s16 req) >-{ >- unsigned int lkf = 0; >- >- if (gfs_flags & LM_FLAG_TRY) >- lkf |= DLM_LKF_NOQUEUE; >- >- if (gfs_flags & LM_FLAG_TRY_1CB) { >- lkf |= DLM_LKF_NOQUEUE; >- lkf |= DLM_LKF_NOQUEUEBAST; >- } >- >- if (gfs_flags & LM_FLAG_PRIORITY) { >- lkf |= DLM_LKF_NOORDER; >- lkf |= DLM_LKF_HEADQUE; >- } >- >- if (gfs_flags & LM_FLAG_ANY) { >- if (req == DLM_LOCK_PR) >- lkf |= DLM_LKF_ALTCW; >- else if (req == DLM_LOCK_CW) >- lkf |= DLM_LKF_ALTPR; >- } >- >- if (lp->lksb.sb_lkid != 0) { >- lkf |= DLM_LKF_CONVERT; >- >- /* Conversion deadlock avoidance by DLM */ >- >- if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) && >- !(lkf & DLM_LKF_NOQUEUE) && >- cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req) >- lkf |= DLM_LKF_CONVDEADLK; >- } >- >- if (lp->lvb) >- lkf |= DLM_LKF_VALBLK; >- >- return lkf; >-} >- >-/* make_strname - convert GFS lock numbers to a string */ >- >-static inline void make_strname(const struct lm_lockname *lockname, >- struct gdlm_strname *str) >-{ >- sprintf(str->name, "%8x%16llx", lockname->ln_type, >- (unsigned long long)lockname->ln_number); >- str->namelen = GDLM_STRNAME_BYTES; >-} >- >-static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name, >- struct gdlm_lock **lpp) >-{ >- struct gdlm_lock *lp; >- >- lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL); >- if (!lp) >- return -ENOMEM; >- >- lp->lockname = *name; >- make_strname(name, &lp->strname); >- lp->ls = ls; >- lp->cur = DLM_LOCK_IV; >- lp->lvb = NULL; >- lp->hold_null = NULL; >- INIT_LIST_HEAD(&lp->clist); >- INIT_LIST_HEAD(&lp->blist); >- 
INIT_LIST_HEAD(&lp->delay_list); >- >- spin_lock(&ls->async_lock); >- list_add(&lp->all_list, &ls->all_locks); >- ls->all_locks_count++; >- spin_unlock(&ls->async_lock); >- >- *lpp = lp; >- return 0; >-} >- >-void gdlm_delete_lp(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- spin_lock(&ls->async_lock); >- if (!list_empty(&lp->clist)) >- list_del_init(&lp->clist); >- if (!list_empty(&lp->blist)) >- list_del_init(&lp->blist); >- if (!list_empty(&lp->delay_list)) >- list_del_init(&lp->delay_list); >- gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- list_del_init(&lp->all_list); >- ls->all_locks_count--; >- spin_unlock(&ls->async_lock); >- >- kfree(lp); >-} >- >-int gdlm_get_lock(void *lockspace, struct lm_lockname *name, >- void **lockp) >-{ >- struct gdlm_lock *lp; >- int error; >- >- error = gdlm_create_lp(lockspace, name, &lp); >- >- *lockp = lp; >- return error; >-} >- >-void gdlm_put_lock(void *lock) >-{ >- gdlm_delete_lp(lock); >-} >- >-unsigned int gdlm_do_lock(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- int error, bast = 1; >- >- /* >- * When recovery is in progress, delay lock requests for submission >- * once recovery is done. Requests for recovery (NOEXP) and unlocks >- * can pass. >- */ >- >- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >- !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) { >- gdlm_queue_delayed(lp); >- return LM_OUT_ASYNC; >- } >- >- /* >- * Submit the actual lock request. >- */ >- >- if (test_bit(LFL_NOBAST, &lp->flags)) >- bast = 0; >- >- set_bit(LFL_ACTIVE, &lp->flags); >- >- log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid, >- lp->cur, lp->req, lp->lkf); >- >- error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf, >- lp->strname.name, lp->strname.namelen, 0, gdlm_ast, >- lp, bast ? 
gdlm_bast : NULL); >- >- if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) { >- lp->lksb.sb_status = -EAGAIN; >- queue_complete(lp); >- error = 0; >- } >- >- if (error) { >- log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x " >- "flags=%lx", ls->fsname, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, error, >- lp->cur, lp->req, lp->lkf, lp->flags); >- return LM_OUT_ERROR; >- } >- return LM_OUT_ASYNC; >-} >- >-static unsigned int gdlm_do_unlock(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- unsigned int lkf = 0; >- int error; >- >- set_bit(LFL_DLM_UNLOCK, &lp->flags); >- set_bit(LFL_ACTIVE, &lp->flags); >- >- if (lp->lvb) >- lkf = DLM_LKF_VALBLK; >- >- log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->lksb.sb_lkid, lp->cur, lkf); >- >- error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp); >- >- if (error) { >- log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x " >- "flags=%lx", ls->fsname, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, error, >- lp->cur, lp->req, lp->lkf, lp->flags); >- return LM_OUT_ERROR; >- } >- return LM_OUT_ASYNC; >-} >- >-unsigned int gdlm_lock(void *lock, unsigned int cur_state, >- unsigned int req_state, unsigned int flags) >-{ >- struct gdlm_lock *lp = lock; >- >- if (req_state == LM_ST_UNLOCKED) >- return gdlm_unlock(lock, cur_state); >- >- clear_bit(LFL_DLM_CANCEL, &lp->flags); >- if (flags & LM_FLAG_NOEXP) >- set_bit(LFL_NOBLOCK, &lp->flags); >- >- check_cur_state(lp, cur_state); >- lp->req = make_mode(req_state); >- lp->lkf = make_flags(lp, flags, lp->cur, lp->req); >- >- return gdlm_do_lock(lp); >-} >- >-unsigned int gdlm_unlock(void *lock, unsigned int cur_state) >-{ >- struct gdlm_lock *lp = lock; >- >- clear_bit(LFL_DLM_CANCEL, &lp->flags); >- if (lp->cur == DLM_LOCK_IV) >- return 0; >- return gdlm_do_unlock(lp); >-} >- >-void gdlm_cancel(void *lock) >-{ >- struct gdlm_lock 
*lp = lock; >- struct gdlm_ls *ls = lp->ls; >- int error, delay_list = 0; >- >- if (test_bit(LFL_DLM_CANCEL, &lp->flags)) >- return; >- >- log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->flags); >- >- spin_lock(&ls->async_lock); >- if (!list_empty(&lp->delay_list)) { >- list_del_init(&lp->delay_list); >- delay_list = 1; >- } >- spin_unlock(&ls->async_lock); >- >- if (delay_list) { >- set_bit(LFL_CANCEL, &lp->flags); >- set_bit(LFL_ACTIVE, &lp->flags); >- queue_complete(lp); >- return; >- } >- >- if (!test_bit(LFL_ACTIVE, &lp->flags) || >- test_bit(LFL_DLM_UNLOCK, &lp->flags)) { >- log_info("gdlm_cancel skip %x,%llx flags %lx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->flags); >- return; >- } >- >- /* the lock is blocked in the dlm */ >- >- set_bit(LFL_DLM_CANCEL, &lp->flags); >- set_bit(LFL_ACTIVE, &lp->flags); >- >- error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL, >- NULL, lp); >- >- log_info("gdlm_cancel rv %d %x,%llx flags %lx", error, >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->flags); >- >- if (error == -EBUSY) >- clear_bit(LFL_DLM_CANCEL, &lp->flags); >-} >- >-static int gdlm_add_lvb(struct gdlm_lock *lp) >-{ >- char *lvb; >- >- lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL); >- if (!lvb) >- return -ENOMEM; >- >- lp->lksb.sb_lvbptr = lvb; >- lp->lvb = lvb; >- return 0; >-} >- >-static void gdlm_del_lvb(struct gdlm_lock *lp) >-{ >- kfree(lp->lvb); >- lp->lvb = NULL; >- lp->lksb.sb_lvbptr = NULL; >-} >- >-static int gdlm_ast_wait(void *word) >-{ >- schedule(); >- return 0; >-} >- >-/* This can do a synchronous dlm request (requiring a lock_dlm thread to get >- the completion) because gfs won't call hold_lvb() during a callback (from >- the context of a lock_dlm thread). 
*/ >- >-static int hold_null_lock(struct gdlm_lock *lp) >-{ >- struct gdlm_lock *lpn = NULL; >- int error; >- >- if (lp->hold_null) { >- printk(KERN_INFO "lock_dlm: lvb already held\n"); >- return 0; >- } >- >- error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn); >- if (error) >- goto out; >- >- lpn->lksb.sb_lvbptr = junk_lvb; >- lpn->lvb = junk_lvb; >- >- lpn->req = DLM_LOCK_NL; >- lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE; >- set_bit(LFL_NOBAST, &lpn->flags); >- set_bit(LFL_INLOCK, &lpn->flags); >- set_bit(LFL_AST_WAIT, &lpn->flags); >- >- gdlm_do_lock(lpn); >- wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait, TASK_UNINTERRUPTIBLE); >- error = lpn->lksb.sb_status; >- if (error) { >- printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n", >- error); >- gdlm_delete_lp(lpn); >- lpn = NULL; >- } >-out: >- lp->hold_null = lpn; >- return error; >-} >- >-/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get >- the completion) because gfs may call unhold_lvb() during a callback (from >- the context of a lock_dlm thread) which could cause a deadlock since the >- other lock_dlm thread could be engaged in recovery. */ >- >-static void unhold_null_lock(struct gdlm_lock *lp) >-{ >- struct gdlm_lock *lpn = lp->hold_null; >- >- gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- lpn->lksb.sb_lvbptr = NULL; >- lpn->lvb = NULL; >- set_bit(LFL_UNLOCK_DELETE, &lpn->flags); >- gdlm_do_unlock(lpn); >- lp->hold_null = NULL; >-} >- >-/* Acquire a NL lock because gfs requires the value block to remain >- intact on the resource while the lvb is "held" even if it's holding no locks >- on the resource. 
*/ >- >-int gdlm_hold_lvb(void *lock, char **lvbp) >-{ >- struct gdlm_lock *lp = lock; >- int error; >- >- error = gdlm_add_lvb(lp); >- if (error) >- return error; >- >- *lvbp = lp->lvb; >- >- error = hold_null_lock(lp); >- if (error) >- gdlm_del_lvb(lp); >- >- return error; >-} >- >-void gdlm_unhold_lvb(void *lock, char *lvb) >-{ >- struct gdlm_lock *lp = lock; >- >- unhold_null_lock(lp); >- gdlm_del_lvb(lp); >-} >- >-void gdlm_submit_delayed(struct gdlm_ls *ls) >-{ >- struct gdlm_lock *lp, *safe; >- >- spin_lock(&ls->async_lock); >- list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) { >- list_del_init(&lp->delay_list); >- list_add_tail(&lp->delay_list, &ls->submit); >- } >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-int gdlm_release_all_locks(struct gdlm_ls *ls) >-{ >- struct gdlm_lock *lp, *safe; >- int count = 0; >- >- spin_lock(&ls->async_lock); >- list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) { >- list_del_init(&lp->all_list); >- >- if (lp->lvb && lp->lvb != junk_lvb) >- kfree(lp->lvb); >- kfree(lp); >- count++; >- } >- spin_unlock(&ls->async_lock); >- >- return count; >-} >- >diff -purN a/fs/gfs2/locking/dlm/lock.c.bz432057-comment115 b/fs/gfs2/locking/dlm/lock.c.bz432057-comment115 >--- a/fs/gfs2/locking/dlm/lock.c.bz432057-comment115 2008-04-30 16:26:35.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/lock.c.bz432057-comment115 1969-12-31 18:00:00.000000000 -0600 >@@ -1,527 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. 
>- */ >- >-#include "lock_dlm.h" >- >-static char junk_lvb[GDLM_LVB_SIZE]; >- >-static void queue_complete(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- clear_bit(LFL_ACTIVE, &lp->flags); >- >- spin_lock(&ls->async_lock); >- list_add_tail(&lp->clist, &ls->complete); >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-static inline void gdlm_ast(void *astarg) >-{ >- queue_complete(astarg); >-} >- >-static inline void gdlm_bast(void *astarg, int mode) >-{ >- struct gdlm_lock *lp = astarg; >- struct gdlm_ls *ls = lp->ls; >- >- if (!mode) { >- printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- return; >- } >- >- spin_lock(&ls->async_lock); >- if (!lp->bast_mode) { >- list_add_tail(&lp->blist, &ls->blocking); >- lp->bast_mode = mode; >- } else if (lp->bast_mode < mode) >- lp->bast_mode = mode; >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-void gdlm_queue_delayed(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- spin_lock(&ls->async_lock); >- list_add_tail(&lp->delay_list, &ls->delayed); >- spin_unlock(&ls->async_lock); >-} >- >-/* convert gfs lock-state to dlm lock-mode */ >- >-static s16 make_mode(s16 lmstate) >-{ >- switch (lmstate) { >- case LM_ST_UNLOCKED: >- return DLM_LOCK_NL; >- case LM_ST_EXCLUSIVE: >- return DLM_LOCK_EX; >- case LM_ST_DEFERRED: >- return DLM_LOCK_CW; >- case LM_ST_SHARED: >- return DLM_LOCK_PR; >- } >- gdlm_assert(0, "unknown LM state %d", lmstate); >- return -1; >-} >- >-/* convert dlm lock-mode to gfs lock-state */ >- >-s16 gdlm_make_lmstate(s16 dlmmode) >-{ >- switch (dlmmode) { >- case DLM_LOCK_IV: >- case DLM_LOCK_NL: >- return LM_ST_UNLOCKED; >- case DLM_LOCK_EX: >- return LM_ST_EXCLUSIVE; >- case DLM_LOCK_CW: >- return LM_ST_DEFERRED; >- case DLM_LOCK_PR: >- return LM_ST_SHARED; >- } >- gdlm_assert(0, "unknown DLM mode %d", dlmmode); >- return -1; >-} >- >-/* verify agreement with 
GFS on the current lock state, NB: DLM_LOCK_NL and >- DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */ >- >-static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state) >-{ >- s16 cur = make_mode(cur_state); >- if (lp->cur != DLM_LOCK_IV) >- gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur); >-} >- >-static inline unsigned int make_flags(struct gdlm_lock *lp, >- unsigned int gfs_flags, >- s16 cur, s16 req) >-{ >- unsigned int lkf = 0; >- >- if (gfs_flags & LM_FLAG_TRY) >- lkf |= DLM_LKF_NOQUEUE; >- >- if (gfs_flags & LM_FLAG_TRY_1CB) { >- lkf |= DLM_LKF_NOQUEUE; >- lkf |= DLM_LKF_NOQUEUEBAST; >- } >- >- if (gfs_flags & LM_FLAG_PRIORITY) { >- lkf |= DLM_LKF_NOORDER; >- lkf |= DLM_LKF_HEADQUE; >- } >- >- if (gfs_flags & LM_FLAG_ANY) { >- if (req == DLM_LOCK_PR) >- lkf |= DLM_LKF_ALTCW; >- else if (req == DLM_LOCK_CW) >- lkf |= DLM_LKF_ALTPR; >- } >- >- if (lp->lksb.sb_lkid != 0) { >- lkf |= DLM_LKF_CONVERT; >- >- /* Conversion deadlock avoidance by DLM */ >- >- if (!test_bit(LFL_FORCE_PROMOTE, &lp->flags) && >- !(lkf & DLM_LKF_NOQUEUE) && >- cur > DLM_LOCK_NL && req > DLM_LOCK_NL && cur != req) >- lkf |= DLM_LKF_CONVDEADLK; >- } >- >- if (lp->lvb) >- lkf |= DLM_LKF_VALBLK; >- >- return lkf; >-} >- >-/* make_strname - convert GFS lock numbers to a string */ >- >-static inline void make_strname(const struct lm_lockname *lockname, >- struct gdlm_strname *str) >-{ >- sprintf(str->name, "%8x%16llx", lockname->ln_type, >- (unsigned long long)lockname->ln_number); >- str->namelen = GDLM_STRNAME_BYTES; >-} >- >-static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name, >- struct gdlm_lock **lpp) >-{ >- struct gdlm_lock *lp; >- >- lp = kzalloc(sizeof(struct gdlm_lock), GFP_KERNEL); >- if (!lp) >- return -ENOMEM; >- >- lp->lockname = *name; >- make_strname(name, &lp->strname); >- lp->ls = ls; >- lp->cur = DLM_LOCK_IV; >- lp->lvb = NULL; >- lp->hold_null = NULL; >- INIT_LIST_HEAD(&lp->clist); >- INIT_LIST_HEAD(&lp->blist); >- 
INIT_LIST_HEAD(&lp->delay_list); >- >- spin_lock(&ls->async_lock); >- list_add(&lp->all_list, &ls->all_locks); >- ls->all_locks_count++; >- spin_unlock(&ls->async_lock); >- >- *lpp = lp; >- return 0; >-} >- >-void gdlm_delete_lp(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- spin_lock(&ls->async_lock); >- if (!list_empty(&lp->clist)) >- list_del_init(&lp->clist); >- if (!list_empty(&lp->blist)) >- list_del_init(&lp->blist); >- if (!list_empty(&lp->delay_list)) >- list_del_init(&lp->delay_list); >- gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- list_del_init(&lp->all_list); >- ls->all_locks_count--; >- spin_unlock(&ls->async_lock); >- >- kfree(lp); >-} >- >-int gdlm_get_lock(void *lockspace, struct lm_lockname *name, >- void **lockp) >-{ >- struct gdlm_lock *lp; >- int error; >- >- error = gdlm_create_lp(lockspace, name, &lp); >- >- *lockp = lp; >- return error; >-} >- >-void gdlm_put_lock(void *lock) >-{ >- gdlm_delete_lp(lock); >-} >- >-unsigned int gdlm_do_lock(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- int error, bast = 1; >- >- /* >- * When recovery is in progress, delay lock requests for submission >- * once recovery is done. Requests for recovery (NOEXP) and unlocks >- * can pass. >- */ >- >- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >- !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) { >- gdlm_queue_delayed(lp); >- return LM_OUT_ASYNC; >- } >- >- /* >- * Submit the actual lock request. >- */ >- >- if (test_bit(LFL_NOBAST, &lp->flags)) >- bast = 0; >- >- set_bit(LFL_ACTIVE, &lp->flags); >- >- log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid, >- lp->cur, lp->req, lp->lkf); >- >- error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf, >- lp->strname.name, lp->strname.namelen, 0, gdlm_ast, >- lp, bast ? 
gdlm_bast : NULL); >- >- if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) { >- lp->lksb.sb_status = -EAGAIN; >- queue_complete(lp); >- error = 0; >- } >- >- if (error) { >- log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x " >- "flags=%lx", ls->fsname, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, error, >- lp->cur, lp->req, lp->lkf, lp->flags); >- return LM_OUT_ERROR; >- } >- return LM_OUT_ASYNC; >-} >- >-static unsigned int gdlm_do_unlock(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- unsigned int lkf = 0; >- int error; >- >- set_bit(LFL_DLM_UNLOCK, &lp->flags); >- set_bit(LFL_ACTIVE, &lp->flags); >- >- if (lp->lvb) >- lkf = DLM_LKF_VALBLK; >- >- log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->lksb.sb_lkid, lp->cur, lkf); >- >- error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp); >- >- if (error) { >- log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x " >- "flags=%lx", ls->fsname, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, error, >- lp->cur, lp->req, lp->lkf, lp->flags); >- return LM_OUT_ERROR; >- } >- return LM_OUT_ASYNC; >-} >- >-unsigned int gdlm_lock(void *lock, unsigned int cur_state, >- unsigned int req_state, unsigned int flags) >-{ >- struct gdlm_lock *lp = lock; >- >- clear_bit(LFL_DLM_CANCEL, &lp->flags); >- if (flags & LM_FLAG_NOEXP) >- set_bit(LFL_NOBLOCK, &lp->flags); >- >- check_cur_state(lp, cur_state); >- lp->req = make_mode(req_state); >- lp->lkf = make_flags(lp, flags, lp->cur, lp->req); >- >- return gdlm_do_lock(lp); >-} >- >-unsigned int gdlm_unlock(void *lock, unsigned int cur_state) >-{ >- struct gdlm_lock *lp = lock; >- >- clear_bit(LFL_DLM_CANCEL, &lp->flags); >- if (lp->cur == DLM_LOCK_IV) >- return 0; >- return gdlm_do_unlock(lp); >-} >- >-void gdlm_cancel(void *lock) >-{ >- struct gdlm_lock *lp = lock; >- struct gdlm_ls *ls = lp->ls; >- int error, delay_list = 0; >- 
>- if (test_bit(LFL_DLM_CANCEL, &lp->flags)) >- return; >- >- log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->flags); >- >- spin_lock(&ls->async_lock); >- if (!list_empty(&lp->delay_list)) { >- list_del_init(&lp->delay_list); >- delay_list = 1; >- } >- spin_unlock(&ls->async_lock); >- >- if (delay_list) { >- set_bit(LFL_CANCEL, &lp->flags); >- set_bit(LFL_ACTIVE, &lp->flags); >- queue_complete(lp); >- return; >- } >- >- if (!test_bit(LFL_ACTIVE, &lp->flags) || >- test_bit(LFL_DLM_UNLOCK, &lp->flags)) { >- log_info("gdlm_cancel skip %x,%llx flags %lx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->flags); >- return; >- } >- >- /* the lock is blocked in the dlm */ >- >- set_bit(LFL_DLM_CANCEL, &lp->flags); >- set_bit(LFL_ACTIVE, &lp->flags); >- >- error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL, >- NULL, lp); >- >- log_info("gdlm_cancel rv %d %x,%llx flags %lx", error, >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, lp->flags); >- >- if (error == -EBUSY) >- clear_bit(LFL_DLM_CANCEL, &lp->flags); >-} >- >-static int gdlm_add_lvb(struct gdlm_lock *lp) >-{ >- char *lvb; >- >- lvb = kzalloc(GDLM_LVB_SIZE, GFP_KERNEL); >- if (!lvb) >- return -ENOMEM; >- >- lp->lksb.sb_lvbptr = lvb; >- lp->lvb = lvb; >- return 0; >-} >- >-static void gdlm_del_lvb(struct gdlm_lock *lp) >-{ >- kfree(lp->lvb); >- lp->lvb = NULL; >- lp->lksb.sb_lvbptr = NULL; >-} >- >-static int gdlm_ast_wait(void *word) >-{ >- schedule(); >- return 0; >-} >- >-/* This can do a synchronous dlm request (requiring a lock_dlm thread to get >- the completion) because gfs won't call hold_lvb() during a callback (from >- the context of a lock_dlm thread). 
*/ >- >-static int hold_null_lock(struct gdlm_lock *lp) >-{ >- struct gdlm_lock *lpn = NULL; >- int error; >- >- if (lp->hold_null) { >- printk(KERN_INFO "lock_dlm: lvb already held\n"); >- return 0; >- } >- >- error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn); >- if (error) >- goto out; >- >- lpn->lksb.sb_lvbptr = junk_lvb; >- lpn->lvb = junk_lvb; >- >- lpn->req = DLM_LOCK_NL; >- lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE; >- set_bit(LFL_NOBAST, &lpn->flags); >- set_bit(LFL_INLOCK, &lpn->flags); >- set_bit(LFL_AST_WAIT, &lpn->flags); >- >- gdlm_do_lock(lpn); >- wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait, TASK_UNINTERRUPTIBLE); >- error = lpn->lksb.sb_status; >- if (error) { >- printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n", >- error); >- gdlm_delete_lp(lpn); >- lpn = NULL; >- } >-out: >- lp->hold_null = lpn; >- return error; >-} >- >-/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get >- the completion) because gfs may call unhold_lvb() during a callback (from >- the context of a lock_dlm thread) which could cause a deadlock since the >- other lock_dlm thread could be engaged in recovery. */ >- >-static void unhold_null_lock(struct gdlm_lock *lp) >-{ >- struct gdlm_lock *lpn = lp->hold_null; >- >- gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- lpn->lksb.sb_lvbptr = NULL; >- lpn->lvb = NULL; >- set_bit(LFL_UNLOCK_DELETE, &lpn->flags); >- gdlm_do_unlock(lpn); >- lp->hold_null = NULL; >-} >- >-/* Acquire a NL lock because gfs requires the value block to remain >- intact on the resource while the lvb is "held" even if it's holding no locks >- on the resource. 
*/ >- >-int gdlm_hold_lvb(void *lock, char **lvbp) >-{ >- struct gdlm_lock *lp = lock; >- int error; >- >- error = gdlm_add_lvb(lp); >- if (error) >- return error; >- >- *lvbp = lp->lvb; >- >- error = hold_null_lock(lp); >- if (error) >- gdlm_del_lvb(lp); >- >- return error; >-} >- >-void gdlm_unhold_lvb(void *lock, char *lvb) >-{ >- struct gdlm_lock *lp = lock; >- >- unhold_null_lock(lp); >- gdlm_del_lvb(lp); >-} >- >-void gdlm_submit_delayed(struct gdlm_ls *ls) >-{ >- struct gdlm_lock *lp, *safe; >- >- spin_lock(&ls->async_lock); >- list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) { >- list_del_init(&lp->delay_list); >- list_add_tail(&lp->delay_list, &ls->submit); >- } >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-int gdlm_release_all_locks(struct gdlm_ls *ls) >-{ >- struct gdlm_lock *lp, *safe; >- int count = 0; >- >- spin_lock(&ls->async_lock); >- list_for_each_entry_safe(lp, safe, &ls->all_locks, all_list) { >- list_del_init(&lp->all_list); >- >- if (lp->lvb && lp->lvb != junk_lvb) >- kfree(lp->lvb); >- kfree(lp); >- count++; >- } >- spin_unlock(&ls->async_lock); >- >- return count; >-} >- >diff -purN a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h >--- a/fs/gfs2/locking/dlm/lock_dlm.h 2008-04-30 16:26:35.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/lock_dlm.h 1969-12-31 18:00:00.000000000 -0600 >@@ -1,188 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. 
>- */ >- >-#ifndef LOCK_DLM_DOT_H >-#define LOCK_DLM_DOT_H >- >-#include <linux/module.h> >-#include <linux/slab.h> >-#include <linux/spinlock.h> >-#include <linux/module.h> >-#include <linux/types.h> >-#include <linux/string.h> >-#include <linux/list.h> >-#include <linux/socket.h> >-#include <linux/delay.h> >-#include <linux/kthread.h> >-#include <linux/kobject.h> >-#include <linux/fcntl.h> >-#include <linux/wait.h> >-#include <net/sock.h> >- >-#include <linux/dlm.h> >-#include <linux/lm_interface.h> >- >-/* >- * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a >- * prefix of lock_dlm_ gets awkward. Externally, GFS refers to this module >- * as "lock_dlm". >- */ >- >-#define GDLM_STRNAME_BYTES 24 >-#define GDLM_LVB_SIZE 32 >-#define GDLM_DROP_COUNT 0 >-#define GDLM_DROP_PERIOD 60 >-#define GDLM_NAME_LEN 128 >- >-/* GFS uses 12 bytes to identify a resource (32 bit type + 64 bit number). >- We sprintf these numbers into a 24 byte string of hex values to make them >- human-readable (to make debugging simpler.) 
*/ >- >-struct gdlm_strname { >- unsigned char name[GDLM_STRNAME_BYTES]; >- unsigned short namelen; >-}; >- >-enum { >- DFL_BLOCK_LOCKS = 0, >- DFL_SPECTATOR = 1, >- DFL_WITHDRAW = 2, >-}; >- >-struct gdlm_ls { >- u32 id; >- int jid; >- int first; >- int first_done; >- unsigned long flags; >- struct kobject kobj; >- char clustername[GDLM_NAME_LEN]; >- char fsname[GDLM_NAME_LEN]; >- int fsflags; >- dlm_lockspace_t *dlm_lockspace; >- lm_callback_t fscb; >- struct gfs2_sbd *sdp; >- int recover_jid; >- int recover_jid_done; >- int recover_jid_status; >- spinlock_t async_lock; >- struct list_head complete; >- struct list_head blocking; >- struct list_head delayed; >- struct list_head submit; >- struct list_head all_locks; >- u32 all_locks_count; >- wait_queue_head_t wait_control; >- struct task_struct *thread1; >- struct task_struct *thread2; >- wait_queue_head_t thread_wait; >- unsigned long drop_time; >- int drop_locks_count; >- int drop_locks_period; >-}; >- >-enum { >- LFL_NOBLOCK = 0, >- LFL_NOCACHE = 1, >- LFL_DLM_UNLOCK = 2, >- LFL_DLM_CANCEL = 3, >- LFL_SYNC_LVB = 4, >- LFL_FORCE_PROMOTE = 5, >- LFL_REREQUEST = 6, >- LFL_ACTIVE = 7, >- LFL_INLOCK = 8, >- LFL_CANCEL = 9, >- LFL_NOBAST = 10, >- LFL_HEADQUE = 11, >- LFL_UNLOCK_DELETE = 12, >- LFL_AST_WAIT = 13, >-}; >- >-struct gdlm_lock { >- struct gdlm_ls *ls; >- struct lm_lockname lockname; >- struct gdlm_strname strname; >- char *lvb; >- struct dlm_lksb lksb; >- >- s16 cur; >- s16 req; >- s16 prev_req; >- u32 lkf; /* dlm flags DLM_LKF_ */ >- unsigned long flags; /* lock_dlm flags LFL_ */ >- >- int bast_mode; /* protected by async_lock */ >- >- struct list_head clist; /* complete */ >- struct list_head blist; /* blocking */ >- struct list_head delay_list; /* delayed */ >- struct list_head all_list; /* all locks for the fs */ >- struct gdlm_lock *hold_null; /* NL lock for hold_lvb */ >-}; >- >-#define gdlm_assert(assertion, fmt, args...) 
\ >-do { \ >- if (unlikely(!(assertion))) { \ >- printk(KERN_EMERG "lock_dlm: fatal assertion failed \"%s\"\n" \ >- "lock_dlm: " fmt "\n", \ >- #assertion, ##args); \ >- BUG(); \ >- } \ >-} while (0) >- >-#define log_print(lev, fmt, arg...) printk(lev "lock_dlm: " fmt "\n" , ## arg) >-#define log_info(fmt, arg...) log_print(KERN_INFO , fmt , ## arg) >-#define log_error(fmt, arg...) log_print(KERN_ERR , fmt , ## arg) >-#ifdef LOCK_DLM_LOG_DEBUG >-#define log_debug(fmt, arg...) log_print(KERN_DEBUG , fmt , ## arg) >-#else >-#define log_debug(fmt, arg...) >-#endif >- >-/* sysfs.c */ >- >-int gdlm_sysfs_init(void); >-void gdlm_sysfs_exit(void); >-int gdlm_kobject_setup(struct gdlm_ls *, struct kobject *); >-void gdlm_kobject_release(struct gdlm_ls *); >- >-/* thread.c */ >- >-int gdlm_init_threads(struct gdlm_ls *); >-void gdlm_release_threads(struct gdlm_ls *); >- >-/* lock.c */ >- >-s16 gdlm_make_lmstate(s16); >-void gdlm_queue_delayed(struct gdlm_lock *); >-void gdlm_submit_delayed(struct gdlm_ls *); >-int gdlm_release_all_locks(struct gdlm_ls *); >-void gdlm_delete_lp(struct gdlm_lock *); >-unsigned int gdlm_do_lock(struct gdlm_lock *); >- >-int gdlm_get_lock(void *, struct lm_lockname *, void **); >-void gdlm_put_lock(void *); >-unsigned int gdlm_lock(void *, unsigned int, unsigned int, unsigned int); >-unsigned int gdlm_unlock(void *, unsigned int); >-void gdlm_cancel(void *); >-int gdlm_hold_lvb(void *, char **); >-void gdlm_unhold_lvb(void *, char *); >- >-/* plock.c */ >- >-int gdlm_plock_init(void); >-void gdlm_plock_exit(void); >-int gdlm_plock(void *, struct lm_lockname *, struct file *, int, >- struct file_lock *); >-int gdlm_plock_get(void *, struct lm_lockname *, struct file *, >- struct file_lock *); >-int gdlm_punlock(void *, struct lm_lockname *, struct file *, >- struct file_lock *); >-#endif >- >diff -purN a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c >--- a/fs/gfs2/locking/dlm/main.c 2008-04-30 16:26:31.000000000 -0500 >+++ 
b/fs/gfs2/locking/dlm/main.c 1969-12-31 18:00:00.000000000 -0600 >@@ -1,58 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. >- */ >- >-#include <linux/init.h> >- >-#include "lock_dlm.h" >- >-extern struct lm_lockops gdlm_ops; >- >-static int __init init_lock_dlm(void) >-{ >- int error; >- >- error = gfs2_register_lockproto(&gdlm_ops); >- if (error) { >- printk(KERN_WARNING "lock_dlm: can't register protocol: %d\n", >- error); >- return error; >- } >- >- error = gdlm_sysfs_init(); >- if (error) { >- gfs2_unregister_lockproto(&gdlm_ops); >- return error; >- } >- >- error = gdlm_plock_init(); >- if (error) { >- gdlm_sysfs_exit(); >- gfs2_unregister_lockproto(&gdlm_ops); >- return error; >- } >- >- printk(KERN_INFO >- "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__); >- return 0; >-} >- >-static void __exit exit_lock_dlm(void) >-{ >- gdlm_plock_exit(); >- gdlm_sysfs_exit(); >- gfs2_unregister_lockproto(&gdlm_ops); >-} >- >-module_init(init_lock_dlm); >-module_exit(exit_lock_dlm); >- >-MODULE_DESCRIPTION("GFS DLM Locking Module"); >-MODULE_AUTHOR("Red Hat, Inc."); >-MODULE_LICENSE("GPL"); >- >diff -purN a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile >--- a/fs/gfs2/locking/dlm/Makefile 2008-04-30 16:26:29.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/Makefile 1969-12-31 18:00:00.000000000 -0600 >@@ -1,3 +0,0 @@ >-obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o >-lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o plock.o >- >diff -purN a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c >--- a/fs/gfs2/locking/dlm/mount.c 2008-04-30 16:26:35.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/mount.c 1969-12-31 18:00:00.000000000 -0600 >@@ -1,253 +0,0 
@@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. >- */ >- >-#include "lock_dlm.h" >- >-const struct lm_lockops gdlm_ops; >- >- >-static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp, >- int flags, char *table_name) >-{ >- struct gdlm_ls *ls; >- char buf[256], *p; >- >- ls = kzalloc(sizeof(struct gdlm_ls), GFP_KERNEL); >- if (!ls) >- return NULL; >- >- ls->drop_locks_count = GDLM_DROP_COUNT; >- ls->drop_locks_period = GDLM_DROP_PERIOD; >- ls->fscb = cb; >- ls->sdp = sdp; >- ls->fsflags = flags; >- spin_lock_init(&ls->async_lock); >- INIT_LIST_HEAD(&ls->complete); >- INIT_LIST_HEAD(&ls->blocking); >- INIT_LIST_HEAD(&ls->delayed); >- INIT_LIST_HEAD(&ls->submit); >- INIT_LIST_HEAD(&ls->all_locks); >- init_waitqueue_head(&ls->thread_wait); >- init_waitqueue_head(&ls->wait_control); >- ls->thread1 = NULL; >- ls->thread2 = NULL; >- ls->drop_time = jiffies; >- ls->jid = -1; >- >- strncpy(buf, table_name, 256); >- buf[255] = '\0'; >- >- p = strchr(buf, ':'); >- if (!p) { >- log_info("invalid table_name \"%s\"", table_name); >- kfree(ls); >- return NULL; >- } >- *p = '\0'; >- p++; >- >- strncpy(ls->clustername, buf, GDLM_NAME_LEN); >- strncpy(ls->fsname, p, GDLM_NAME_LEN); >- >- return ls; >-} >- >-static int make_args(struct gdlm_ls *ls, char *data_arg, int *nodir) >-{ >- char data[256]; >- char *options, *x, *y; >- int error = 0; >- >- memset(data, 0, 256); >- strncpy(data, data_arg, 255); >- >- for (options = data; (x = strsep(&options, ":")); ) { >- if (!*x) >- continue; >- >- y = strchr(x, '='); >- if (y) >- *y++ = 0; >- >- if (!strcmp(x, "jid")) { >- if (!y) { >- log_error("need argument to jid"); >- error = -EINVAL; >- break; >- } >- sscanf(y, "%u", 
&ls->jid); >- >- } else if (!strcmp(x, "first")) { >- if (!y) { >- log_error("need argument to first"); >- error = -EINVAL; >- break; >- } >- sscanf(y, "%u", &ls->first); >- >- } else if (!strcmp(x, "id")) { >- if (!y) { >- log_error("need argument to id"); >- error = -EINVAL; >- break; >- } >- sscanf(y, "%u", &ls->id); >- >- } else if (!strcmp(x, "nodir")) { >- if (!y) { >- log_error("need argument to nodir"); >- error = -EINVAL; >- break; >- } >- sscanf(y, "%u", nodir); >- >- } else { >- log_error("unkonwn option: %s", x); >- error = -EINVAL; >- break; >- } >- } >- >- return error; >-} >- >-static int gdlm_mount(char *table_name, char *host_data, >- lm_callback_t cb, void *cb_data, >- unsigned int min_lvb_size, int flags, >- struct lm_lockstruct *lockstruct, >- struct kobject *fskobj) >-{ >- struct gdlm_ls *ls; >- int error = -ENOMEM, nodir = 0; >- >- if (min_lvb_size > GDLM_LVB_SIZE) >- goto out; >- >- ls = init_gdlm(cb, cb_data, flags, table_name); >- if (!ls) >- goto out; >- >- error = make_args(ls, host_data, &nodir); >- if (error) >- goto out; >- >- error = gdlm_init_threads(ls); >- if (error) >- goto out_free; >- >- error = gdlm_kobject_setup(ls, fskobj); >- if (error) >- goto out_thread; >- >- error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname), >- &ls->dlm_lockspace, >- DLM_LSFL_FS | (nodir ? 
DLM_LSFL_NODIR : 0), >- GDLM_LVB_SIZE); >- if (error) { >- log_error("dlm_new_lockspace error %d", error); >- goto out_kobj; >- } >- >- lockstruct->ls_jid = ls->jid; >- lockstruct->ls_first = ls->first; >- lockstruct->ls_lockspace = ls; >- lockstruct->ls_ops = &gdlm_ops; >- lockstruct->ls_flags = 0; >- lockstruct->ls_lvb_size = GDLM_LVB_SIZE; >- return 0; >- >-out_kobj: >- gdlm_kobject_release(ls); >-out_thread: >- gdlm_release_threads(ls); >-out_free: >- kfree(ls); >-out: >- return error; >-} >- >-static void gdlm_unmount(void *lockspace) >-{ >- struct gdlm_ls *ls = lockspace; >- int rv; >- >- log_debug("unmount flags %lx", ls->flags); >- >- /* FIXME: serialize unmount and withdraw in case they >- happen at once. Also, if unmount follows withdraw, >- wait for withdraw to finish. */ >- >- if (test_bit(DFL_WITHDRAW, &ls->flags)) >- goto out; >- >- gdlm_kobject_release(ls); >- dlm_release_lockspace(ls->dlm_lockspace, 2); >- gdlm_release_threads(ls); >- rv = gdlm_release_all_locks(ls); >- if (rv) >- log_info("gdlm_unmount: %d stray locks freed", rv); >-out: >- kfree(ls); >-} >- >-static void gdlm_recovery_done(void *lockspace, unsigned int jid, >- unsigned int message) >-{ >- struct gdlm_ls *ls = lockspace; >- ls->recover_jid_done = jid; >- ls->recover_jid_status = message; >- kobject_uevent(&ls->kobj, KOBJ_CHANGE); >-} >- >-static void gdlm_others_may_mount(void *lockspace) >-{ >- struct gdlm_ls *ls = lockspace; >- ls->first_done = 1; >- kobject_uevent(&ls->kobj, KOBJ_CHANGE); >-} >- >-/* Userspace gets the offline uevent, blocks new gfs locks on >- other mounters, and lets us know (sets WITHDRAW flag). Then, >- userspace leaves the mount group while we leave the lockspace. 
*/ >- >-static void gdlm_withdraw(void *lockspace) >-{ >- struct gdlm_ls *ls = lockspace; >- >- kobject_uevent(&ls->kobj, KOBJ_OFFLINE); >- >- wait_event_interruptible(ls->wait_control, >- test_bit(DFL_WITHDRAW, &ls->flags)); >- >- dlm_release_lockspace(ls->dlm_lockspace, 2); >- gdlm_release_threads(ls); >- gdlm_release_all_locks(ls); >- gdlm_kobject_release(ls); >-} >- >-const struct lm_lockops gdlm_ops = { >- .lm_proto_name = "lock_dlm", >- .lm_mount = gdlm_mount, >- .lm_others_may_mount = gdlm_others_may_mount, >- .lm_unmount = gdlm_unmount, >- .lm_withdraw = gdlm_withdraw, >- .lm_get_lock = gdlm_get_lock, >- .lm_put_lock = gdlm_put_lock, >- .lm_lock = gdlm_lock, >- .lm_unlock = gdlm_unlock, >- .lm_plock = gdlm_plock, >- .lm_punlock = gdlm_punlock, >- .lm_plock_get = gdlm_plock_get, >- .lm_cancel = gdlm_cancel, >- .lm_hold_lvb = gdlm_hold_lvb, >- .lm_unhold_lvb = gdlm_unhold_lvb, >- .lm_recovery_done = gdlm_recovery_done, >- .lm_owner = THIS_MODULE, >-}; >- >diff -purN a/fs/gfs2/locking/dlm/plock.c b/fs/gfs2/locking/dlm/plock.c >--- a/fs/gfs2/locking/dlm/plock.c 2008-04-30 16:26:40.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/plock.c 1969-12-31 18:00:00.000000000 -0600 >@@ -1,408 +0,0 @@ >-/* >- * Copyright (C) 2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. 
>- */ >- >-#include <linux/miscdevice.h> >-#include <linux/lock_dlm_plock.h> >- >-#include "lock_dlm.h" >- >- >-static spinlock_t ops_lock; >-static struct list_head send_list; >-static struct list_head recv_list; >-static wait_queue_head_t send_wq; >-static wait_queue_head_t recv_wq; >- >-struct plock_op { >- struct list_head list; >- int done; >- struct gdlm_plock_info info; >-}; >- >-struct plock_xop { >- struct plock_op xop; >- void *callback; >- void *fl; >- void *file; >- struct file_lock flc; >-}; >- >- >-static inline void set_version(struct gdlm_plock_info *info) >-{ >- info->version[0] = GDLM_PLOCK_VERSION_MAJOR; >- info->version[1] = GDLM_PLOCK_VERSION_MINOR; >- info->version[2] = GDLM_PLOCK_VERSION_PATCH; >-} >- >-static int check_version(struct gdlm_plock_info *info) >-{ >- if ((GDLM_PLOCK_VERSION_MAJOR != info->version[0]) || >- (GDLM_PLOCK_VERSION_MINOR < info->version[1])) { >- log_error("plock device version mismatch: " >- "kernel (%u.%u.%u), user (%u.%u.%u)", >- GDLM_PLOCK_VERSION_MAJOR, >- GDLM_PLOCK_VERSION_MINOR, >- GDLM_PLOCK_VERSION_PATCH, >- info->version[0], >- info->version[1], >- info->version[2]); >- return -EINVAL; >- } >- return 0; >-} >- >-static void send_op(struct plock_op *op) >-{ >- set_version(&op->info); >- INIT_LIST_HEAD(&op->list); >- spin_lock(&ops_lock); >- list_add_tail(&op->list, &send_list); >- spin_unlock(&ops_lock); >- wake_up(&send_wq); >-} >- >-int gdlm_plock(void *lockspace, struct lm_lockname *name, >- struct file *file, int cmd, struct file_lock *fl) >-{ >- struct gdlm_ls *ls = lockspace; >- struct plock_op *op; >- struct plock_xop *xop; >- int rv; >- >- xop = kzalloc(sizeof(*xop), GFP_KERNEL); >- if (!xop) >- return -ENOMEM; >- >- op = &xop->xop; >- op->info.optype = GDLM_PLOCK_OP_LOCK; >- op->info.pid = fl->fl_pid; >- op->info.ex = (fl->fl_type == F_WRLCK); >- op->info.wait = IS_SETLKW(cmd); >- op->info.fsid = ls->id; >- op->info.number = name->ln_number; >- op->info.start = fl->fl_start; >- op->info.end = 
fl->fl_end; >- if ((fl->fl_flags & FL_GRANT) && >- fl->fl_lmops && fl->fl_lmops->fl_grant) { >- /* fl_owner is lockd which doesn't distinguish >- processes on the nfs client */ >- op->info.owner = (__u64) fl->fl_pid; >- xop->callback = fl->fl_lmops->fl_grant; >- locks_init_lock(&xop->flc); >- locks_copy_lock(&xop->flc, fl); >- xop->fl = fl; >- xop->file = file; >- } else { >- xop->callback = NULL; >- op->info.owner = (__u64)(long) fl->fl_owner; >- } >- >- send_op(op); >- >- if (xop->callback == NULL) >- wait_event(recv_wq, (op->done != 0)); >- else >- return -EINPROGRESS; >- >- spin_lock(&ops_lock); >- if (!list_empty(&op->list)) { >- printk(KERN_INFO "plock op on list\n"); >- list_del(&op->list); >- } >- spin_unlock(&ops_lock); >- >- rv = op->info.rv; >- >- if (!rv) { >- if (posix_lock_file_wait(file, fl) < 0) >- log_error("gdlm_plock: vfs lock error %x,%llx", >- name->ln_type, >- (unsigned long long)name->ln_number); >- } >- >- kfree(xop); >- return rv; >-} >- >-/* Returns failure iff a succesful lock operation should be canceled */ >-static int gdlm_plock_callback(struct plock_op *op) >-{ >- struct file *file; >- struct file_lock *fl; >- struct file_lock *flc; >- int (*notify)(void *, void *, int) = NULL; >- struct plock_xop *xop = (struct plock_xop *)op; >- int rv = 0; >- >- spin_lock(&ops_lock); >- if (!list_empty(&op->list)) { >- printk(KERN_INFO "plock op on list\n"); >- list_del(&op->list); >- } >- spin_unlock(&ops_lock); >- >- /* check if the following 2 are still valid or make a copy */ >- file = xop->file; >- flc = &xop->flc; >- fl = xop->fl; >- notify = xop->callback; >- >- if (op->info.rv) { >- notify(flc, NULL, op->info.rv); >- goto out; >- } >- >- /* got fs lock; bookkeep locally as well: */ >- flc->fl_flags &= ~FL_SLEEP; >- if (posix_lock_file(file, flc)) { >- /* >- * This can only happen in the case of kmalloc() failure. >- * The filesystem's own lock is the authoritative lock, >- * so a failure to get the lock locally is not a disaster. 
>- * As long as GFS cannot reliably cancel locks (especially >- * in a low-memory situation), we're better off ignoring >- * this failure than trying to recover. >- */ >- log_error("gdlm_plock: vfs lock error file %p fl %p", >- file, fl); >- } >- >- rv = notify(flc, NULL, 0); >- if (rv) { >- /* XXX: We need to cancel the fs lock here: */ >- printk(KERN_ERR "gfs2 lock granted after lock request failed;" >- " dangling lock!\n"); >- goto out; >- } >- >-out: >- kfree(xop); >- return rv; >-} >- >-int gdlm_punlock(void *lockspace, struct lm_lockname *name, >- struct file *file, struct file_lock *fl) >-{ >- struct gdlm_ls *ls = lockspace; >- struct plock_op *op; >- int rv; >- >- op = kzalloc(sizeof(*op), GFP_KERNEL); >- if (!op) >- return -ENOMEM; >- >- if (posix_lock_file_wait(file, fl) < 0) >- log_error("gdlm_punlock: vfs unlock error %x,%llx", >- name->ln_type, (unsigned long long)name->ln_number); >- >- op->info.optype = GDLM_PLOCK_OP_UNLOCK; >- op->info.pid = fl->fl_pid; >- op->info.fsid = ls->id; >- op->info.number = name->ln_number; >- op->info.start = fl->fl_start; >- op->info.end = fl->fl_end; >- if ((fl->fl_flags & FL_GRANT) && >- fl->fl_lmops && fl->fl_lmops->fl_grant) >- op->info.owner = (__u64) fl->fl_pid; >- else >- op->info.owner = (__u64)(long) fl->fl_owner; >- >- send_op(op); >- wait_event(recv_wq, (op->done != 0)); >- >- spin_lock(&ops_lock); >- if (!list_empty(&op->list)) { >- printk(KERN_INFO "punlock op on list\n"); >- list_del(&op->list); >- } >- spin_unlock(&ops_lock); >- >- rv = op->info.rv; >- >- if (rv == -ENOENT) >- rv = 0; >- >- kfree(op); >- return rv; >-} >- >-int gdlm_plock_get(void *lockspace, struct lm_lockname *name, >- struct file *file, struct file_lock *fl) >-{ >- struct gdlm_ls *ls = lockspace; >- struct plock_op *op; >- int rv; >- >- op = kzalloc(sizeof(*op), GFP_KERNEL); >- if (!op) >- return -ENOMEM; >- >- op->info.optype = GDLM_PLOCK_OP_GET; >- op->info.pid = fl->fl_pid; >- op->info.ex = (fl->fl_type == F_WRLCK); >- op->info.fsid 
= ls->id; >- op->info.number = name->ln_number; >- op->info.start = fl->fl_start; >- op->info.end = fl->fl_end; >- if ((fl->fl_flags & FL_GRANT) && >- fl->fl_lmops && fl->fl_lmops->fl_grant) >- op->info.owner = (__u64) fl->fl_pid; >- else >- op->info.owner = (__u64)(long) fl->fl_owner; >- >- >- send_op(op); >- wait_event(recv_wq, (op->done != 0)); >- >- spin_lock(&ops_lock); >- if (!list_empty(&op->list)) { >- printk(KERN_INFO "plock_get op on list\n"); >- list_del(&op->list); >- } >- spin_unlock(&ops_lock); >- >- /* info.rv from userspace is 1 for conflict, 0 for no-conflict, >- -ENOENT if there are no locks on the file */ >- >- rv = op->info.rv; >- >- fl->fl_type = F_UNLCK; >- if (rv == -ENOENT) >- rv = 0; >- else if (rv > 0) { >- fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK; >- fl->fl_pid = op->info.pid; >- fl->fl_start = op->info.start; >- fl->fl_end = op->info.end; >- rv = 0; >- } >- >- kfree(op); >- return rv; >-} >- >-/* a read copies out one plock request from the send list */ >-static ssize_t dev_read(struct file *file, char __user *u, size_t count, >- loff_t *ppos) >-{ >- struct gdlm_plock_info info; >- struct plock_op *op = NULL; >- >- if (count < sizeof(info)) >- return -EINVAL; >- >- spin_lock(&ops_lock); >- if (!list_empty(&send_list)) { >- op = list_entry(send_list.next, struct plock_op, list); >- list_move(&op->list, &recv_list); >- memcpy(&info, &op->info, sizeof(info)); >- } >- spin_unlock(&ops_lock); >- >- if (!op) >- return -EAGAIN; >- >- if (copy_to_user(u, &info, sizeof(info))) >- return -EFAULT; >- return sizeof(info); >-} >- >-/* a write copies in one plock result that should match a plock_op >- on the recv list */ >-static ssize_t dev_write(struct file *file, const char __user *u, size_t count, >- loff_t *ppos) >-{ >- struct gdlm_plock_info info; >- struct plock_op *op; >- int found = 0; >- >- if (count != sizeof(info)) >- return -EINVAL; >- >- if (copy_from_user(&info, u, sizeof(info))) >- return -EFAULT; >- >- if (check_version(&info)) 
>- return -EINVAL; >- >- spin_lock(&ops_lock); >- list_for_each_entry(op, &recv_list, list) { >- if (op->info.fsid == info.fsid && op->info.number == info.number && >- op->info.owner == info.owner) { >- list_del_init(&op->list); >- found = 1; >- op->done = 1; >- memcpy(&op->info, &info, sizeof(info)); >- break; >- } >- } >- spin_unlock(&ops_lock); >- >- if (found) { >- struct plock_xop *xop; >- xop = (struct plock_xop *)op; >- if (xop->callback) >- count = gdlm_plock_callback(op); >- else >- wake_up(&recv_wq); >- } else >- printk(KERN_INFO "gdlm dev_write no op %x %llx\n", info.fsid, >- (unsigned long long)info.number); >- return count; >-} >- >-static unsigned int dev_poll(struct file *file, poll_table *wait) >-{ >- poll_wait(file, &send_wq, wait); >- >- spin_lock(&ops_lock); >- if (!list_empty(&send_list)) { >- spin_unlock(&ops_lock); >- return POLLIN | POLLRDNORM; >- } >- spin_unlock(&ops_lock); >- return 0; >-} >- >-static struct file_operations dev_fops = { >- .read = dev_read, >- .write = dev_write, >- .poll = dev_poll, >- .owner = THIS_MODULE >-}; >- >-static struct miscdevice plock_dev_misc = { >- .minor = MISC_DYNAMIC_MINOR, >- .name = GDLM_PLOCK_MISC_NAME, >- .fops = &dev_fops >-}; >- >-int gdlm_plock_init(void) >-{ >- int rv; >- >- spin_lock_init(&ops_lock); >- INIT_LIST_HEAD(&send_list); >- INIT_LIST_HEAD(&recv_list); >- init_waitqueue_head(&send_wq); >- init_waitqueue_head(&recv_wq); >- >- rv = misc_register(&plock_dev_misc); >- if (rv) >- printk(KERN_INFO "gdlm_plock_init: misc_register failed %d", >- rv); >- return rv; >-} >- >-void gdlm_plock_exit(void) >-{ >- if (misc_deregister(&plock_dev_misc) < 0) >- printk(KERN_INFO "gdlm_plock_exit: misc_deregister failed"); >-} >- >diff -purN a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c >--- a/fs/gfs2/locking/dlm/sysfs.c 2008-04-30 16:26:31.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/sysfs.c 1969-12-31 18:00:00.000000000 -0600 >@@ -1,239 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 
1997-2003 All rights reserved. >- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. >- */ >- >-#include <linux/ctype.h> >-#include <linux/stat.h> >- >-#include "lock_dlm.h" >- >-extern struct lm_lockops gdlm_ops; >- >-static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name); >-} >- >-static ssize_t block_show(struct gdlm_ls *ls, char *buf) >-{ >- ssize_t ret; >- int val = 0; >- >- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags)) >- val = 1; >- ret = sprintf(buf, "%d\n", val); >- return ret; >-} >- >-static ssize_t block_store(struct gdlm_ls *ls, const char *buf, size_t len) >-{ >- ssize_t ret = len; >- int val; >- >- val = simple_strtol(buf, NULL, 0); >- >- if (val == 1) >- set_bit(DFL_BLOCK_LOCKS, &ls->flags); >- else if (val == 0) { >- clear_bit(DFL_BLOCK_LOCKS, &ls->flags); >- gdlm_submit_delayed(ls); >- } else { >- ret = -EINVAL; >- } >- return ret; >-} >- >-static ssize_t withdraw_show(struct gdlm_ls *ls, char *buf) >-{ >- ssize_t ret; >- int val = 0; >- >- if (test_bit(DFL_WITHDRAW, &ls->flags)) >- val = 1; >- ret = sprintf(buf, "%d\n", val); >- return ret; >-} >- >-static ssize_t withdraw_store(struct gdlm_ls *ls, const char *buf, size_t len) >-{ >- ssize_t ret = len; >- int val; >- >- val = simple_strtol(buf, NULL, 0); >- >- if (val == 1) >- set_bit(DFL_WITHDRAW, &ls->flags); >- else >- ret = -EINVAL; >- wake_up(&ls->wait_control); >- return ret; >-} >- >-static ssize_t id_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%u\n", ls->id); >-} >- >-static ssize_t jid_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%d\n", ls->jid); >-} >- >-static ssize_t first_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%d\n", ls->first); >-} >- >-static 
ssize_t first_done_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%d\n", ls->first_done); >-} >- >-static ssize_t recover_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%d\n", ls->recover_jid); >-} >- >-static ssize_t recover_store(struct gdlm_ls *ls, const char *buf, size_t len) >-{ >- ls->recover_jid = simple_strtol(buf, NULL, 0); >- ls->fscb(ls->sdp, LM_CB_NEED_RECOVERY, &ls->recover_jid); >- return len; >-} >- >-static ssize_t recover_done_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%d\n", ls->recover_jid_done); >-} >- >-static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%d\n", ls->recover_jid_status); >-} >- >-static ssize_t drop_count_show(struct gdlm_ls *ls, char *buf) >-{ >- return sprintf(buf, "%d\n", ls->drop_locks_count); >-} >- >-static ssize_t drop_count_store(struct gdlm_ls *ls, const char *buf, size_t len) >-{ >- ls->drop_locks_count = simple_strtol(buf, NULL, 0); >- return len; >-} >- >-struct gdlm_attr { >- struct attribute attr; >- ssize_t (*show)(struct gdlm_ls *, char *); >- ssize_t (*store)(struct gdlm_ls *, const char *, size_t); >-}; >- >-#define GDLM_ATTR(_name,_mode,_show,_store) \ >-static struct gdlm_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) >- >-GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); >-GDLM_ATTR(block, 0644, block_show, block_store); >-GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store); >-GDLM_ATTR(id, 0444, id_show, NULL); >-GDLM_ATTR(jid, 0444, jid_show, NULL); >-GDLM_ATTR(first, 0444, first_show, NULL); >-GDLM_ATTR(first_done, 0444, first_done_show, NULL); >-GDLM_ATTR(recover, 0644, recover_show, recover_store); >-GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); >-GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); >-GDLM_ATTR(drop_count, 0644, drop_count_show, drop_count_store); >- >-static struct attribute *gdlm_attrs[] = { >- &gdlm_attr_proto_name.attr, >- &gdlm_attr_block.attr, >- 
&gdlm_attr_withdraw.attr, >- &gdlm_attr_id.attr, >- &gdlm_attr_jid.attr, >- &gdlm_attr_first.attr, >- &gdlm_attr_first_done.attr, >- &gdlm_attr_recover.attr, >- &gdlm_attr_recover_done.attr, >- &gdlm_attr_recover_status.attr, >- &gdlm_attr_drop_count.attr, >- NULL, >-}; >- >-static ssize_t gdlm_attr_show(struct kobject *kobj, struct attribute *attr, >- char *buf) >-{ >- struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj); >- struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr); >- return a->show ? a->show(ls, buf) : 0; >-} >- >-static ssize_t gdlm_attr_store(struct kobject *kobj, struct attribute *attr, >- const char *buf, size_t len) >-{ >- struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj); >- struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr); >- return a->store ? a->store(ls, buf, len) : len; >-} >- >-static struct sysfs_ops gdlm_attr_ops = { >- .show = gdlm_attr_show, >- .store = gdlm_attr_store, >-}; >- >-static struct kobj_type gdlm_ktype = { >- .default_attrs = gdlm_attrs, >- .sysfs_ops = &gdlm_attr_ops, >-}; >- >-static struct kset gdlm_kset = { >- .subsys = &kernel_subsys, >- .kobj = {.name = "lock_dlm",}, >- .ktype = &gdlm_ktype, >-}; >- >-int gdlm_kobject_setup(struct gdlm_ls *ls, struct kobject *fskobj) >-{ >- int error; >- >- error = kobject_set_name(&ls->kobj, "%s", "lock_module"); >- if (error) { >- log_error("can't set kobj name %d", error); >- return error; >- } >- >- ls->kobj.kset = &gdlm_kset; >- ls->kobj.ktype = &gdlm_ktype; >- ls->kobj.parent = fskobj; >- >- error = kobject_register(&ls->kobj); >- if (error) >- log_error("can't register kobj %d", error); >- >- return error; >-} >- >-void gdlm_kobject_release(struct gdlm_ls *ls) >-{ >- kobject_unregister(&ls->kobj); >-} >- >-int gdlm_sysfs_init(void) >-{ >- int error; >- >- error = kset_register(&gdlm_kset); >- if (error) >- printk("lock_dlm: cannot register kset %d\n", error); >- >- return error; >-} >- >-void gdlm_sysfs_exit(void) >-{ >- 
kset_unregister(&gdlm_kset); >-} >- >diff -purN a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c >--- a/fs/gfs2/locking/dlm/thread.c 2008-04-30 16:26:39.000000000 -0500 >+++ b/fs/gfs2/locking/dlm/thread.c 1969-12-31 18:00:00.000000000 -0600 >@@ -1,367 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. >- */ >- >-#include "lock_dlm.h" >- >-/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm >- thread gets to it. */ >- >-static void queue_submit(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- >- spin_lock(&ls->async_lock); >- list_add_tail(&lp->delay_list, &ls->submit); >- spin_unlock(&ls->async_lock); >- wake_up(&ls->thread_wait); >-} >- >-static void process_blocking(struct gdlm_lock *lp, int bast_mode) >-{ >- struct gdlm_ls *ls = lp->ls; >- unsigned int cb = 0; >- >- switch (gdlm_make_lmstate(bast_mode)) { >- case LM_ST_EXCLUSIVE: >- cb = LM_CB_NEED_E; >- break; >- case LM_ST_DEFERRED: >- cb = LM_CB_NEED_D; >- break; >- case LM_ST_SHARED: >- cb = LM_CB_NEED_S; >- break; >- default: >- gdlm_assert(0, "unknown bast mode %u", lp->bast_mode); >- } >- >- ls->fscb(ls->sdp, cb, &lp->lockname); >-} >- >-static void wake_up_ast(struct gdlm_lock *lp) >-{ >- clear_bit(LFL_AST_WAIT, &lp->flags); >- smp_mb__after_clear_bit(); >- wake_up_bit(&lp->flags, LFL_AST_WAIT); >-} >- >-static void process_complete(struct gdlm_lock *lp) >-{ >- struct gdlm_ls *ls = lp->ls; >- struct lm_async_cb acb; >- s16 prev_mode = lp->cur; >- >- memset(&acb, 0, sizeof(acb)); >- >- if (lp->lksb.sb_status == -DLM_ECANCEL) { >- log_info("complete dlm cancel %x,%llx flags %lx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- 
lp->flags); >- >- lp->req = lp->cur; >- acb.lc_ret |= LM_OUT_CANCELED; >- if (lp->cur == DLM_LOCK_IV) >- lp->lksb.sb_lkid = 0; >- goto out; >- } >- >- if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) { >- if (lp->lksb.sb_status != -DLM_EUNLOCK) { >- log_info("unlock sb_status %d %x,%llx flags %lx", >- lp->lksb.sb_status, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->flags); >- return; >- } >- >- lp->cur = DLM_LOCK_IV; >- lp->req = DLM_LOCK_IV; >- lp->lksb.sb_lkid = 0; >- >- if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) { >- gdlm_delete_lp(lp); >- return; >- } >- goto out; >- } >- >- if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID) >- memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE); >- >- if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) { >- if (lp->req == DLM_LOCK_PR) >- lp->req = DLM_LOCK_CW; >- else if (lp->req == DLM_LOCK_CW) >- lp->req = DLM_LOCK_PR; >- } >- >- /* >- * A canceled lock request. The lock was just taken off the delayed >- * list and was never even submitted to dlm. >- */ >- >- if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) { >- log_info("complete internal cancel %x,%llx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- lp->req = lp->cur; >- acb.lc_ret |= LM_OUT_CANCELED; >- goto out; >- } >- >- /* >- * An error occured. >- */ >- >- if (lp->lksb.sb_status) { >- /* a "normal" error */ >- if ((lp->lksb.sb_status == -EAGAIN) && >- (lp->lkf & DLM_LKF_NOQUEUE)) { >- lp->req = lp->cur; >- if (lp->cur == DLM_LOCK_IV) >- lp->lksb.sb_lkid = 0; >- goto out; >- } >- >- /* this could only happen with cancels I think */ >- log_info("ast sb_status %d %x,%llx flags %lx", >- lp->lksb.sb_status, lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->flags); >- return; >- } >- >- /* >- * This is an AST for an EX->EX conversion for sync_lvb from GFS. 
>- */ >- >- if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) { >- wake_up_ast(lp); >- return; >- } >- >- /* >- * A lock has been demoted to NL because it initially completed during >- * BLOCK_LOCKS. Now it must be requested in the originally requested >- * mode. >- */ >- >- if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) { >- gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number); >- >- lp->cur = DLM_LOCK_NL; >- lp->req = lp->prev_req; >- lp->prev_req = DLM_LOCK_IV; >- lp->lkf &= ~DLM_LKF_CONVDEADLK; >- >- set_bit(LFL_NOCACHE, &lp->flags); >- >- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >- !test_bit(LFL_NOBLOCK, &lp->flags)) >- gdlm_queue_delayed(lp); >- else >- queue_submit(lp); >- return; >- } >- >- /* >- * A request is granted during dlm recovery. It may be granted >- * because the locks of a failed node were cleared. In that case, >- * there may be inconsistent data beneath this lock and we must wait >- * for recovery to complete to use it. When gfs recovery is done this >- * granted lock will be converted to NL and then reacquired in this >- * granted state. >- */ >- >- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) && >- !test_bit(LFL_NOBLOCK, &lp->flags) && >- lp->req != DLM_LOCK_NL) { >- >- lp->cur = lp->req; >- lp->prev_req = lp->req; >- lp->req = DLM_LOCK_NL; >- lp->lkf |= DLM_LKF_CONVERT; >- lp->lkf &= ~DLM_LKF_CONVDEADLK; >- >- log_debug("rereq %x,%llx id %x %d,%d", >- lp->lockname.ln_type, >- (unsigned long long)lp->lockname.ln_number, >- lp->lksb.sb_lkid, lp->cur, lp->req); >- >- set_bit(LFL_REREQUEST, &lp->flags); >- queue_submit(lp); >- return; >- } >- >- /* >- * DLM demoted the lock to NL before it was granted so GFS must be >- * told it cannot cache data for this lock. 
>- */ >- >- if (lp->lksb.sb_flags & DLM_SBF_DEMOTED) >- set_bit(LFL_NOCACHE, &lp->flags); >- >-out: >- /* >- * This is an internal lock_dlm lock >- */ >- >- if (test_bit(LFL_INLOCK, &lp->flags)) { >- clear_bit(LFL_NOBLOCK, &lp->flags); >- lp->cur = lp->req; >- wake_up_ast(lp); >- return; >- } >- >- /* >- * Normal completion of a lock request. Tell GFS it now has the lock. >- */ >- >- clear_bit(LFL_NOBLOCK, &lp->flags); >- lp->cur = lp->req; >- >- acb.lc_name = lp->lockname; >- acb.lc_ret |= gdlm_make_lmstate(lp->cur); >- >- if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) && >- (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL)) >- acb.lc_ret |= LM_OUT_CACHEABLE; >- >- ls->fscb(ls->sdp, LM_CB_ASYNC, &acb); >-} >- >-static inline int no_work(struct gdlm_ls *ls, int blocking) >-{ >- int ret; >- >- spin_lock(&ls->async_lock); >- ret = list_empty(&ls->complete) && list_empty(&ls->submit); >- if (ret && blocking) >- ret = list_empty(&ls->blocking); >- spin_unlock(&ls->async_lock); >- >- return ret; >-} >- >-static inline int check_drop(struct gdlm_ls *ls) >-{ >- if (!ls->drop_locks_count) >- return 0; >- >- if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) { >- ls->drop_time = jiffies; >- if (ls->all_locks_count >= ls->drop_locks_count) >- return 1; >- } >- return 0; >-} >- >-static int gdlm_thread(void *data, int blist) >-{ >- struct gdlm_ls *ls = (struct gdlm_ls *) data; >- struct gdlm_lock *lp = NULL; >- uint8_t complete, blocking, submit, drop; >- >- /* Only thread1 is allowed to do blocking callbacks since gfs >- may wait for a completion callback within a blocking cb. 
*/ >- >- while (!kthread_should_stop()) { >- wait_event_interruptible(ls->thread_wait, >- !no_work(ls, blist) || kthread_should_stop()); >- >- complete = blocking = submit = drop = 0; >- >- spin_lock(&ls->async_lock); >- >- if (blist && !list_empty(&ls->blocking)) { >- lp = list_entry(ls->blocking.next, struct gdlm_lock, >- blist); >- list_del_init(&lp->blist); >- blocking = lp->bast_mode; >- lp->bast_mode = 0; >- } else if (!list_empty(&ls->complete)) { >- lp = list_entry(ls->complete.next, struct gdlm_lock, >- clist); >- list_del_init(&lp->clist); >- complete = 1; >- } else if (!list_empty(&ls->submit)) { >- lp = list_entry(ls->submit.next, struct gdlm_lock, >- delay_list); >- list_del_init(&lp->delay_list); >- submit = 1; >- } >- >- drop = check_drop(ls); >- spin_unlock(&ls->async_lock); >- >- if (complete) >- process_complete(lp); >- >- else if (blocking) >- process_blocking(lp, blocking); >- >- else if (submit) >- gdlm_do_lock(lp); >- >- if (drop) >- ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL); >- >- schedule(); >- } >- >- return 0; >-} >- >-static int gdlm_thread1(void *data) >-{ >- return gdlm_thread(data, 1); >-} >- >-static int gdlm_thread2(void *data) >-{ >- return gdlm_thread(data, 0); >-} >- >-int gdlm_init_threads(struct gdlm_ls *ls) >-{ >- struct task_struct *p; >- int error; >- >- p = kthread_run(gdlm_thread1, ls, "lock_dlm1"); >- error = IS_ERR(p); >- if (error) { >- log_error("can't start lock_dlm1 thread %d", error); >- return error; >- } >- ls->thread1 = p; >- >- p = kthread_run(gdlm_thread2, ls, "lock_dlm2"); >- error = IS_ERR(p); >- if (error) { >- log_error("can't start lock_dlm2 thread %d", error); >- kthread_stop(ls->thread1); >- return error; >- } >- ls->thread2 = p; >- >- return 0; >-} >- >-void gdlm_release_threads(struct gdlm_ls *ls) >-{ >- kthread_stop(ls->thread1); >- kthread_stop(ls->thread2); >-} >- >diff -purN a/fs/gfs2/locking.c b/fs/gfs2/locking.c >--- a/fs/gfs2/locking.c 2008-04-30 16:26:29.000000000 -0500 >+++ b/fs/gfs2/locking.c 
1969-12-31 18:00:00.000000000 -0600 >@@ -1,186 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. >- */ >- >-#include <linux/module.h> >-#include <linux/init.h> >-#include <linux/string.h> >-#include <linux/slab.h> >-#include <linux/wait.h> >-#include <linux/sched.h> >-#include <linux/kmod.h> >-#include <linux/fs.h> >-#include <linux/delay.h> >-#include <linux/lm_interface.h> >- >-struct lmh_wrapper { >- struct list_head lw_list; >- const struct lm_lockops *lw_ops; >-}; >- >-/* List of registered low-level locking protocols. A file system selects one >- of them by name at mount time, e.g. lock_nolock, lock_dlm. */ >- >-static LIST_HEAD(lmh_list); >-static DEFINE_MUTEX(lmh_lock); >- >-/** >- * gfs2_register_lockproto - Register a low-level locking protocol >- * @proto: the protocol definition >- * >- * Returns: 0 on success, -EXXX on failure >- */ >- >-int gfs2_register_lockproto(const struct lm_lockops *proto) >-{ >- struct lmh_wrapper *lw; >- >- mutex_lock(&lmh_lock); >- >- list_for_each_entry(lw, &lmh_list, lw_list) { >- if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) { >- mutex_unlock(&lmh_lock); >- printk(KERN_INFO "GFS2: protocol %s already exists\n", >- proto->lm_proto_name); >- return -EEXIST; >- } >- } >- >- lw = kzalloc(sizeof(struct lmh_wrapper), GFP_KERNEL); >- if (!lw) { >- mutex_unlock(&lmh_lock); >- return -ENOMEM; >- } >- >- lw->lw_ops = proto; >- list_add(&lw->lw_list, &lmh_list); >- >- mutex_unlock(&lmh_lock); >- >- return 0; >-} >- >-/** >- * gfs2_unregister_lockproto - Unregister a low-level locking protocol >- * @proto: the protocol definition >- * >- */ >- >-void gfs2_unregister_lockproto(const struct lm_lockops *proto) >-{ >- struct 
lmh_wrapper *lw; >- >- mutex_lock(&lmh_lock); >- >- list_for_each_entry(lw, &lmh_list, lw_list) { >- if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) { >- list_del(&lw->lw_list); >- mutex_unlock(&lmh_lock); >- kfree(lw); >- return; >- } >- } >- >- mutex_unlock(&lmh_lock); >- >- printk(KERN_WARNING "GFS2: can't unregister lock protocol %s\n", >- proto->lm_proto_name); >-} >- >-/** >- * gfs2_mount_lockproto - Mount a lock protocol >- * @proto_name - the name of the protocol >- * @table_name - the name of the lock space >- * @host_data - data specific to this host >- * @cb - the callback to the code using the lock module >- * @sdp - The GFS2 superblock >- * @min_lvb_size - the mininum LVB size that the caller can deal with >- * @flags - LM_MFLAG_* >- * @lockstruct - a structure returned describing the mount >- * >- * Returns: 0 on success, -EXXX on failure >- */ >- >-int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data, >- lm_callback_t cb, void *cb_data, >- unsigned int min_lvb_size, int flags, >- struct lm_lockstruct *lockstruct, >- struct kobject *fskobj) >-{ >- struct lmh_wrapper *lw = NULL; >- int try = 0; >- int error, found; >- >-retry: >- mutex_lock(&lmh_lock); >- >- found = 0; >- list_for_each_entry(lw, &lmh_list, lw_list) { >- if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) { >- found = 1; >- break; >- } >- } >- >- if (!found) { >- if (!try && capable(CAP_SYS_MODULE)) { >- try = 1; >- mutex_unlock(&lmh_lock); >- request_module(proto_name); >- goto retry; >- } >- printk(KERN_INFO "GFS2: can't find protocol %s\n", proto_name); >- error = -ENOENT; >- goto out; >- } >- >- if (!try_module_get(lw->lw_ops->lm_owner)) { >- try = 0; >- mutex_unlock(&lmh_lock); >- msleep(1000); >- goto retry; >- } >- >- error = lw->lw_ops->lm_mount(table_name, host_data, cb, cb_data, >- min_lvb_size, flags, lockstruct, fskobj); >- if (error) >- module_put(lw->lw_ops->lm_owner); >-out: >- mutex_unlock(&lmh_lock); >- return error; >-} >- >-void 
gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct) >-{ >- mutex_lock(&lmh_lock); >- lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace); >- if (lockstruct->ls_ops->lm_owner) >- module_put(lockstruct->ls_ops->lm_owner); >- mutex_unlock(&lmh_lock); >-} >- >-/** >- * gfs2_withdraw_lockproto - abnormally unmount a lock module >- * @lockstruct: the lockstruct passed into mount >- * >- */ >- >-void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct) >-{ >- mutex_lock(&lmh_lock); >- lockstruct->ls_ops->lm_withdraw(lockstruct->ls_lockspace); >- if (lockstruct->ls_ops->lm_owner) >- module_put(lockstruct->ls_ops->lm_owner); >- mutex_unlock(&lmh_lock); >-} >- >-EXPORT_SYMBOL_GPL(gfs2_register_lockproto); >-EXPORT_SYMBOL_GPL(gfs2_unregister_lockproto); >-EXPORT_SYMBOL_GPL(gfs2_withdraw_lockproto); >-EXPORT_SYMBOL_GPL(gfs2_mount_lockproto); >-EXPORT_SYMBOL_GPL(gfs2_unmount_lockproto); >diff -purN a/fs/gfs2/log.c b/fs/gfs2/log.c >--- a/fs/gfs2/log.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/log.c 2008-06-16 09:14:53.000000000 -0500 >@@ -14,7 +14,6 @@ > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > #include <linux/delay.h> > > #include "gfs2.h" >diff -purN a/fs/gfs2/lops.c b/fs/gfs2/lops.c >--- a/fs/gfs2/lops.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/lops.c 2008-06-16 09:14:53.000000000 -0500 >@@ -13,7 +13,6 @@ > #include <linux/completion.h> > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/main.c b/fs/gfs2/main.c >--- a/fs/gfs2/main.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/main.c 2008-06-16 09:50:32.000000000 -0500 >@@ -14,7 +14,6 @@ > #include <linux/module.h> > #include <linux/init.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <asm/atomic.h> > > #include "gfs2.h" >@@ -44,8 +43,6 @@ static void 
gfs2_init_glock_once(void *f > INIT_HLIST_NODE(&gl->gl_list); > spin_lock_init(&gl->gl_spin); > INIT_LIST_HEAD(&gl->gl_holders); >- gl->gl_lvb = NULL; >- atomic_set(&gl->gl_lvb_count, 0); > INIT_LIST_HEAD(&gl->gl_reclaim); > INIT_LIST_HEAD(&gl->gl_ail_list); > atomic_set(&gl->gl_ail_count, 0); >diff -purN a/fs/gfs2/Makefile b/fs/gfs2/Makefile >--- a/fs/gfs2/Makefile 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/Makefile 2008-06-16 12:45:47.000000000 -0500 >@@ -1,10 +1,8 @@ > obj-$(CONFIG_GFS2_FS) += gfs2.o > gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \ >- glops.o inode.o lm.o log.o lops.o locking.o main.o meta_io.o \ >+ glops.o inode.o log.o lops.o main.o meta_io.o \ > mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \ > ops_fstype.o ops_inode.o ops_super.o quota.o \ > recovery.o rgrp.o super.o sys.o trans.o util.o > >-obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += locking/nolock/ >-obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/ >- >+gfs2-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o >diff -purN a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c >--- a/fs/gfs2/meta_io.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/meta_io.c 2008-06-16 09:14:53.000000000 -0500 >@@ -19,7 +19,6 @@ > #include <linux/delay.h> > #include <linux/bio.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/mount.c b/fs/gfs2/mount.c >--- a/fs/gfs2/mount.c 2008-04-30 16:26:39.000000000 -0500 >+++ b/fs/gfs2/mount.c 2008-06-16 09:14:53.000000000 -0500 >@@ -12,7 +12,6 @@ > #include <linux/completion.h> > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <linux/parser.h> > > #include "gfs2.h" >diff -purN a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c >--- a/fs/gfs2/ops_address.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/ops_address.c 2008-06-16 09:51:40.000000000 -0500 >@@ -18,7 +18,6 @@ > #include <linux/fs.h> > #include 
<linux/writeback.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <linux/swap.h> > > #include "gfs2.h" >diff -purN a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c >--- a/fs/gfs2/ops_dentry.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/ops_dentry.c 2008-06-16 09:14:53.000000000 -0500 >@@ -14,7 +14,6 @@ > #include <linux/smp_lock.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c >--- a/fs/gfs2/ops_export.c 2008-04-30 16:26:39.000000000 -0500 >+++ b/fs/gfs2/ops_export.c 2008-06-16 09:14:53.000000000 -0500 >@@ -13,7 +13,6 @@ > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c >--- a/fs/gfs2/ops_file.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/ops_file.c 2008-06-16 16:02:23.000000000 -0500 >@@ -21,9 +21,10 @@ > #include <linux/gfs2_ondisk.h> > #include <linux/ext2_fs.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > #include <linux/writeback.h> > #include <asm/uaccess.h> >+#include <linux/dlm.h> >+#include <linux/dlm_plock.h> > > #include "gfs2.h" > #include "incore.h" >@@ -32,7 +33,6 @@ > #include "glock.h" > #include "glops.h" > #include "inode.h" >-#include "lm.h" > #include "log.h" > #include "meta_io.h" > #include "ops_vm.h" >@@ -368,7 +368,6 @@ static int gfs2_page_mkwrite(struct vm_a > ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required); > if (ret || !alloc_required) > goto out_unlock; >- > ret = -ENOMEM; > al = gfs2_alloc_get(ip); > if (al == NULL) >@@ -397,9 +396,9 @@ static int gfs2_page_mkwrite(struct vm_a > lock_page(page); > ret = -EINVAL; > last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT; >- > if (page->index > last_index) > goto out_unlock_page; >+ ret 
= 0; > if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping) > goto out_unlock_page; > if (gfs2_is_stuffed(ip)) { >@@ -578,6 +577,8 @@ static int gfs2_fsync(struct file *file, > return ret; > } > >+#ifdef CONFIG_GFS2_FS_LOCKING_DLM >+ > /** > * gfs2_lock - acquire/release a posix lock on a file > * @file: the file pointer >@@ -591,9 +592,7 @@ static int gfs2_lock(struct file *file, > { > struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); > struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host); >- struct lm_lockname name = >- { .ln_number = ip->i_no_addr, >- .ln_type = LM_TYPE_PLOCK }; >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; > > if (!(fl->fl_flags & FL_POSIX)) > return -ENOLCK; >@@ -614,12 +613,14 @@ static int gfs2_lock(struct file *file, > cmd = F_SETLK; > fl->fl_type = F_UNLCK; > } >+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) >+ return -EIO; > if (IS_GETLK(cmd)) >- return gfs2_lm_plock_get(sdp, &name, file, fl); >+ return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); > else if (fl->fl_type == F_UNLCK) >- return gfs2_lm_punlock(sdp, &name, file, fl); >+ return dlm_posix_unlock(ls, ip->i_no_addr, file, fl); > else >- return gfs2_lm_plock(sdp, &name, file, cmd, fl); >+ return dlm_posix_lock(ls, ip->i_no_addr, file, cmd, fl); > } > > static int do_flock(struct file *file, int cmd, struct file_lock *fl) >@@ -705,7 +706,7 @@ static int gfs2_flock(struct file *file, > return do_flock(file, cmd, fl); > } > >-const struct file_operations gfs2_file_fops = { >+const struct file_operations *gfs2_file_fops = &(const struct file_operations){ > .llseek = gfs2_llseek, > .read = generic_file_read, > .readv = generic_file_readv, >@@ -725,7 +726,7 @@ const struct file_operations gfs2_file_f > .splice_write = generic_file_splice_write, > }; > >-const struct file_operations gfs2_dir_fops = { >+const struct file_operations *gfs2_dir_fops = &(const struct file_operations){ > .readdir = gfs2_readdir, > .unlocked_ioctl = gfs2_ioctl, > .open = 
gfs2_open, >@@ -735,7 +736,9 @@ const struct file_operations gfs2_dir_fo > .flock = gfs2_flock, > }; > >-const struct file_operations gfs2_file_fops_nolock = { >+#endif /* CONFIG_GFS2_FS_LOCKING_DLM */ >+ >+const struct file_operations *gfs2_file_fops_nolock = &(const struct file_operations){ > .llseek = gfs2_llseek, > .read = generic_file_read, > .readv = generic_file_readv, >@@ -753,7 +756,7 @@ const struct file_operations gfs2_file_f > .splice_write = generic_file_splice_write, > }; > >-const struct file_operations gfs2_dir_fops_nolock = { >+const struct file_operations *gfs2_dir_fops_nolock = &(const struct file_operations){ > .readdir = gfs2_readdir, > .unlocked_ioctl = gfs2_ioctl, > .open = gfs2_open, >diff -purN a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c >--- a/fs/gfs2/ops_fstype.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/ops_fstype.c 2008-06-16 17:05:52.000000000 -0500 >@@ -17,7 +17,6 @@ > #include <linux/namei.h> > #include <linux/mount.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >@@ -26,9 +25,9 @@ > #include "glock.h" > #include "glops.h" > #include "inode.h" >-#include "lm.h" > #include "mount.h" > #include "ops_fstype.h" >+#include "ops_dentry.h" > #include "ops_super.h" > #include "recovery.h" > #include "rgrp.h" >@@ -363,6 +362,13 @@ int map_journal_extents(struct gfs2_sbd > return rc; > } > >+static void gfs2_others_may_mount(struct gfs2_sbd *sdp) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ ls->ls_first_done = 1; >+ kobject_uevent(&ls->ls_kobj, KOBJ_CHANGE); >+} >+ > static int init_journal(struct gfs2_sbd *sdp, int undo) > { > struct gfs2_holder ji_gh; >@@ -453,7 +459,7 @@ static int init_journal(struct gfs2_sbd > } > } > >- gfs2_lm_others_may_mount(sdp); >+ gfs2_others_may_mount(sdp); > } else if (!sdp->sd_args.ar_spectator) { > error = gfs2_recover_journal(sdp->sd_jdesc); > if (error) { >@@ -703,6 +709,124 @@ fail: > return error; > } > >+static 
match_table_t nolock_tokens = { >+ { Opt_jid, "jid=%d\n", }, >+ { Opt_err, NULL }, >+}; >+ >+static struct lm_lockops nolock_ops = { >+ .lm_proto_name = "lock_nolock", >+ .lm_put_lock = kmem_cache_free, >+ .lm_tokens = &nolock_tokens, >+}; >+ >+/** >+ * gfs2_lm_mount - mount a locking protocol >+ * @sdp: the filesystem >+ * @args: mount arguments >+ * @silent: if 1, don't complain if the FS isn't a GFS2 fs >+ * >+ * Returns: errno >+ */ >+ >+static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent) >+{ >+ struct lm_lockops *lm; >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ struct gfs2_args *args = &sdp->sd_args; >+ const char *proto = sdp->sd_proto_name; >+ const char *table = sdp->sd_table_name; >+ const char *fsname; >+ char *o, *options; >+ int ret; >+ >+ if (!strcmp("lock_nolock", proto)) { >+ lm = &nolock_ops; >+ sdp->sd_args.ar_localflocks = 1; >+ sdp->sd_args.ar_localcaching = 1; >+#ifdef CONFIG_GFS2_FS_LOCKING_DLM >+ } else if (!strcmp("lock_dlm", proto)) { >+ lm = &gfs2_dlm_ops; >+#endif >+ } else { >+ printk(KERN_INFO "GFS2: can't find protocol %s\n", proto); >+ return -ENOENT; >+ } >+ >+ fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table); >+ >+ ls->ls_ops = lm; >+ ls->ls_first = 1; >+ ls->ls_id = 0; >+ >+ for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) { >+ substring_t tmp[MAX_OPT_ARGS]; >+ int token, option; >+ >+ if (!o || !*o) >+ continue; >+ >+ token = match_token(o, *lm->lm_tokens, tmp); >+ switch (token) { >+ case Opt_jid: >+ ret = match_int(&tmp[0], &option); >+ if (ret || option < 0) >+ goto hostdata_error; >+ ls->ls_jid = option; >+ break; >+ case Opt_id: >+ ret = match_int(&tmp[0], &option); >+ if (ret || option < 0) >+ goto hostdata_error; >+ ls->ls_id = option; >+ break; >+ case Opt_first: >+ ret = match_int(&tmp[0], &option); >+ if (ret || (option != 0 && option != 1)) >+ goto hostdata_error; >+ ls->ls_first = option; >+ break; >+ case Opt_nodir: >+ ret = match_int(&tmp[0], &option); >+ if (ret || 
(option != 0 && option != 1)) >+ goto hostdata_error; >+ ls->ls_nodir = option; >+ break; >+ case Opt_err: >+ default: >+hostdata_error: >+ fs_info(sdp, "unknown hostdata (%s)\n", o); >+ return -EINVAL; >+ } >+ } >+ >+ if (sdp->sd_args.ar_spectator) >+ snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.s", table); >+ else >+ snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table, >+ sdp->sd_lockstruct.ls_jid); >+ >+ fsname = strchr(table, ':'); >+ if (fsname) >+ fsname++; >+ if (lm->lm_mount == NULL) { >+ fs_info(sdp, "Now mounting FS...\n"); >+ return 0; >+ } >+ ret = lm->lm_mount(sdp, fsname); >+ if (ret == 0) >+ fs_info(sdp, "Joined cluster. Now mounting FS...\n"); >+ return ret; >+} >+ >+void gfs2_lm_unmount(struct gfs2_sbd *sdp) >+{ >+ const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops; >+ if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) && >+ lm->lm_unmount) >+ lm->lm_unmount(sdp); >+} >+ > /** > * fill_super - Read in superblock > * @sb: The VFS superblock >@@ -803,7 +927,7 @@ fail_sb: > fail_locking: > init_locking(sdp, &mount_gh, UNDO); > fail_lm: >- gfs2_gl_hash_clear(sdp, WAIT); >+ gfs2_gl_hash_clear(sdp); > gfs2_lm_unmount(sdp); > while (invalidate_inodes(sb)) > yield(); >diff -purN a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c >--- a/fs/gfs2/ops_inode.c 2008-04-30 16:26:41.000000000 -0500 >+++ b/fs/gfs2/ops_inode.c 2008-06-16 11:10:42.000000000 -0500 >@@ -18,7 +18,6 @@ > #include <linux/posix_acl.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > #include <asm/uaccess.h> > > #include "gfs2.h" >@@ -69,7 +68,7 @@ static int gfs2_create(struct inode *dir > mark_inode_dirty(inode); > break; > } else if (PTR_ERR(inode) != -EEXIST || >- (nd->intent.open.flags & O_EXCL)) { >+ (nd && (nd->intent.open.flags & O_EXCL))) { > gfs2_holder_uninit(ghs); > return PTR_ERR(inode); > } >diff -purN a/fs/gfs2/ops_inode.h b/fs/gfs2/ops_inode.h >--- a/fs/gfs2/ops_inode.h 2008-06-13 14:04:43.000000000 -0500 >+++ 
b/fs/gfs2/ops_inode.h 2008-06-16 09:09:47.000000000 -0500 >@@ -11,16 +11,31 @@ > #define __OPS_INODE_DOT_H__ > > #include <linux/fs.h> >+#include "incore.h" > > extern const struct inode_operations gfs2_file_iops; > extern const struct inode_operations gfs2_dir_iops; > extern const struct inode_operations gfs2_symlink_iops; > extern const struct inode_operations gfs2_dev_iops; >-extern const struct file_operations gfs2_file_fops; >-extern const struct file_operations gfs2_dir_fops; >-extern const struct file_operations gfs2_file_fops_nolock; >-extern const struct file_operations gfs2_dir_fops_nolock; >+extern const struct file_operations *gfs2_file_fops_nolock; >+extern const struct file_operations *gfs2_dir_fops_nolock; > > extern void gfs2_set_inode_flags(struct inode *inode); > >+#ifdef CONFIG_GFS2_FS_LOCKING_DLM >+extern const struct file_operations *gfs2_file_fops; >+extern const struct file_operations *gfs2_dir_fops; >+static inline int gfs2_localflocks(const struct gfs2_sbd *sdp) >+{ >+ return sdp->sd_args.ar_localflocks; >+} >+#else /* Single node only */ >+#define gfs2_file_fops NULL >+#define gfs2_dir_fops NULL >+static inline int gfs2_localflocks(const struct gfs2_sbd *sdp) >+{ >+ return 1; >+} >+#endif /* CONFIG_GFS2_FS_LOCKING_DLM */ >+ > #endif /* __OPS_INODE_DOT_H__ */ >diff -purN a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c >--- a/fs/gfs2/ops_super.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/ops_super.c 2008-06-16 11:10:58.000000000 -0500 >@@ -19,13 +19,11 @@ > #include <linux/delay.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" > #include "glock.h" > #include "inode.h" >-#include "lm.h" > #include "log.h" > #include "mount.h" > #include "ops_super.h" >@@ -127,7 +125,7 @@ static void gfs2_put_super(struct super_ > gfs2_clear_rgrpd(sdp); > gfs2_jindex_free(sdp); > /* Take apart glock structures and buffer lists */ >- gfs2_gl_hash_clear(sdp, WAIT); >+ 
gfs2_gl_hash_clear(sdp); > /* Unmount the locking protocol */ > gfs2_lm_unmount(sdp); > >diff -purN a/fs/gfs2/quota.c b/fs/gfs2/quota.c >--- a/fs/gfs2/quota.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/quota.c 2008-06-16 09:14:53.000000000 -0500 >@@ -45,7 +45,6 @@ > #include <linux/fs.h> > #include <linux/bio.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >@@ -109,11 +108,6 @@ static int qd_alloc(struct gfs2_sbd *sdp > if (error) > goto fail; > >- error = gfs2_lvb_hold(qd->qd_gl); >- gfs2_glock_put(qd->qd_gl); >- if (error) >- goto fail; >- > *qdp = qd; > > return 0; >@@ -157,7 +151,7 @@ static int qd_get(struct gfs2_sbd *sdp, > > if (qd || !create) { > if (new_qd) { >- gfs2_lvb_unhold(new_qd->qd_gl); >+ gfs2_glock_put(new_qd->qd_gl); > kfree(new_qd); > } > *qdp = qd; >@@ -1222,7 +1216,7 @@ void gfs2_quota_scan(struct gfs2_sbd *sd > gfs2_assert_warn(sdp, !qd->qd_slot_count); > gfs2_assert_warn(sdp, !qd->qd_bh_count); > >- gfs2_lvb_unhold(qd->qd_gl); >+ gfs2_glock_put(qd->qd_gl); > kfree(qd); > } > } >@@ -1257,7 +1251,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd > gfs2_assert_warn(sdp, qd->qd_slot_count == 1); > gfs2_assert_warn(sdp, !qd->qd_bh_count); > >- gfs2_lvb_unhold(qd->qd_gl); >+ gfs2_glock_put(qd->qd_gl); > kfree(qd); > > spin_lock(&sdp->sd_quota_spin); >diff -purN a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c >--- a/fs/gfs2/recovery.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/recovery.c 2008-06-16 11:13:26.000000000 -0500 >@@ -13,14 +13,12 @@ > #include <linux/buffer_head.h> > #include <linux/gfs2_ondisk.h> > #include <linux/crc32.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" > #include "bmap.h" > #include "glock.h" > #include "glops.h" >-#include "lm.h" > #include "lops.h" > #include "meta_io.h" > #include "recovery.h" >@@ -425,8 +423,18 @@ static int clean_journal(struct gfs2_jde > return error; > } > >+void gfs2_recovery_done(struct 
gfs2_sbd *sdp, unsigned int jid, >+ unsigned int message) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ >+ ls->ls_recover_jid_done = jid; >+ ls->ls_recover_jid_status = message; >+ kobject_uevent(&ls->ls_kobj, KOBJ_CHANGE); >+} >+ > /** >- * gfs2_recover_journal - recovery a given journal >+ * gfs2_recover_journal - recover a given journal > * @jd: the struct gfs2_jdesc describing the journal > * > * Acquire the journal's lock, check to see if the journal is clean, and >@@ -551,7 +559,7 @@ int gfs2_recover_journal(struct gfs2_jde > if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) > gfs2_glock_dq_uninit(&ji_gh); > >- gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); >+ gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); > > if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) > gfs2_glock_dq_uninit(&j_gh); >@@ -571,7 +579,7 @@ fail_gunlock_j: > fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done"); > > fail: >- gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP); >+ gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP); > return error; > } > >diff -purN a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c >--- a/fs/gfs2/rgrp.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/rgrp.c 2008-06-16 09:14:53.000000000 -0500 >@@ -13,7 +13,6 @@ > #include <linux/buffer_head.h> > #include <linux/fs.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <linux/prefetch.h> > > #include "gfs2.h" >diff -purN a/fs/gfs2/super.c b/fs/gfs2/super.c >--- a/fs/gfs2/super.c 2008-06-13 14:04:43.000000000 -0500 >+++ b/fs/gfs2/super.c 2008-06-16 09:14:53.000000000 -0500 >@@ -15,7 +15,6 @@ > #include <linux/crc32.h> > #include <linux/gfs2_ondisk.h> > #include <linux/bio.h> >-#include <linux/lm_interface.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/super.h b/fs/gfs2/super.h >--- a/fs/gfs2/super.h 2008-04-30 16:26:35.000000000 -0500 >+++ b/fs/gfs2/super.h 2008-06-16 11:16:21.000000000 -0500 >@@ -17,6 +17,7 @@ void gfs2_tune_init(struct 
gfs2_tune *gt > int gfs2_check_sb(struct gfs2_sbd *sdp, struct gfs2_sb_host *sb, int silent); > int gfs2_read_sb(struct gfs2_sbd *sdp, struct gfs2_glock *gl, int silent); > int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector); >+void gfs2_lm_unmount(struct gfs2_sbd *sdp); > > static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp) > { >diff -purN a/fs/gfs2/sys.c b/fs/gfs2/sys.c >--- a/fs/gfs2/sys.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/sys.c 2008-06-16 16:42:33.000000000 -0500 >@@ -14,13 +14,11 @@ > #include <linux/buffer_head.h> > #include <linux/module.h> > #include <linux/kobject.h> >-#include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <asm/uaccess.h> >+#include <linux/gfs2_ondisk.h> > > #include "gfs2.h" > #include "incore.h" >-#include "lm.h" > #include "sys.h" > #include "super.h" > #include "glock.h" >@@ -111,18 +109,6 @@ static ssize_t statfs_sync_store(struct > return len; > } > >-static ssize_t shrink_store(struct gfs2_sbd *sdp, const char *buf, size_t len) >-{ >- if (!capable(CAP_SYS_ADMIN)) >- return -EACCES; >- >- if (simple_strtol(buf, NULL, 0) != 1) >- return -EINVAL; >- >- gfs2_gl_hash_clear(sdp, NO_WAIT); >- return len; >-} >- > static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, > size_t len) > { >@@ -176,7 +162,6 @@ static struct gfs2_attr gfs2_attr_##name > GFS2_ATTR(id, 0444, id_show, NULL); > GFS2_ATTR(fsname, 0444, fsname_show, NULL); > GFS2_ATTR(freeze, 0644, freeze_show, freeze_store); >-GFS2_ATTR(shrink, 0200, NULL, shrink_store); > GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store); > GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store); > GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store); >@@ -187,7 +172,6 @@ static struct attribute *gfs2_attrs[] = > &gfs2_attr_id.attr, > &gfs2_attr_fsname.attr, > &gfs2_attr_freeze.attr, >- &gfs2_attr_shrink.attr, > &gfs2_attr_withdraw.attr, > &gfs2_attr_statfs_sync.attr, > &gfs2_attr_quota_sync.attr, >@@ -246,14 +230,159 @@ 
static struct lockstruct_attr lockstruct > > LOCKSTRUCT_ATTR(jid, "%u\n"); > LOCKSTRUCT_ATTR(first, "%u\n"); >-LOCKSTRUCT_ATTR(lvb_size, "%u\n"); >-LOCKSTRUCT_ATTR(flags, "%d\n"); > > static struct attribute *lockstruct_attrs[] = { > &lockstruct_attr_jid.attr, > &lockstruct_attr_first.attr, >- &lockstruct_attr_lvb_size.attr, >- &lockstruct_attr_flags.attr, >+ NULL, >+}; >+ >+/* >+ * lock_module. Originally from lock_dlm >+ */ >+ >+static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops; >+ return sprintf(buf, "%s\n", ops->lm_proto_name); >+} >+ >+static ssize_t block_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ ssize_t ret; >+ int val = 0; >+ >+ if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags)) >+ val = 1; >+ ret = sprintf(buf, "%d\n", val); >+ return ret; >+} >+ >+static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ ssize_t ret = len; >+ int val; >+ >+ val = simple_strtol(buf, NULL, 0); >+ >+ if (val == 1) >+ set_bit(DFL_BLOCK_LOCKS, &ls->ls_flags); >+ else if (val == 0) { >+ clear_bit(DFL_BLOCK_LOCKS, &ls->ls_flags); >+ smp_mb__after_clear_bit(); >+ gfs2_glock_thaw(sdp); >+ } else { >+ ret = -EINVAL; >+ } >+ return ret; >+} >+ >+static ssize_t lkwithdraw_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ ssize_t ret; >+ int val = 0; >+ >+ if (test_bit(DFL_WITHDRAW, &ls->ls_flags)) >+ val = 1; >+ ret = sprintf(buf, "%d\n", val); >+ return ret; >+} >+ >+static ssize_t lkwithdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ ssize_t ret = len; >+ int val; >+ >+ val = simple_strtol(buf, NULL, 0); >+ >+ if (val == 1) >+ set_bit(DFL_WITHDRAW, &ls->ls_flags); >+ else >+ ret = -EINVAL; >+ /* wake_up(&ls->wait_control); */ >+ return ret; >+} >+ >+static ssize_t 
lkid_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ return sprintf(buf, "%u\n", ls->ls_id); >+} >+ >+static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ return sprintf(buf, "%d\n", ls->ls_first); >+} >+ >+static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ return sprintf(buf, "%d\n", ls->ls_first_done); >+} >+ >+static ssize_t recover_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ return sprintf(buf, "%d\n", ls->ls_recover_jid); >+} >+ >+static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ ls->ls_recover_jid = simple_strtol(buf, NULL, 0); >+ gfs2_jdesc_make_dirty(sdp, ls->ls_recover_jid); >+ if (sdp->sd_recoverd_process) >+ wake_up_process(sdp->sd_recoverd_process); >+ return len; >+} >+ >+static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ return sprintf(buf, "%d\n", ls->ls_recover_jid_done); >+} >+ >+static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf) >+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ return sprintf(buf, "%d\n", ls->ls_recover_jid_status); >+} >+ >+struct gdlm_attr { >+ struct attribute attr; >+ ssize_t (*show)(struct gfs2_sbd *sdp, char *); >+ ssize_t (*store)(struct gfs2_sbd *sdp, const char *, size_t); >+}; >+ >+#define GDLM_ATTR(_name,_mode,_show,_store) \ >+static struct gdlm_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store) >+ >+GDLM_ATTR(proto_name, 0444, proto_name_show, NULL); >+GDLM_ATTR(block, 0644, block_show, block_store); >+GDLM_ATTR(withdraw, 0644, lkwithdraw_show, lkwithdraw_store); >+GDLM_ATTR(id, 0444, lkid_show, NULL); >+GDLM_ATTR(first, 0444, lkfirst_show, NULL); >+GDLM_ATTR(first_done, 0444, first_done_show, 
NULL); >+GDLM_ATTR(recover, 0644, recover_show, recover_store); >+GDLM_ATTR(recover_done, 0444, recover_done_show, NULL); >+GDLM_ATTR(recover_status, 0444, recover_status_show, NULL); >+ >+static struct attribute *lock_module_attrs[] = { >+ &gdlm_attr_proto_name.attr, >+ &gdlm_attr_block.attr, >+ &gdlm_attr_withdraw.attr, >+ &gdlm_attr_id.attr, >+ &lockstruct_attr_jid.attr, >+ &gdlm_attr_first.attr, >+ &gdlm_attr_first_done.attr, >+ &gdlm_attr_recover.attr, >+ &gdlm_attr_recover_done.attr, >+ &gdlm_attr_recover_status.attr, > NULL, > }; > >@@ -488,8 +617,46 @@ static struct attribute_group tune_group > .attrs = tune_attrs, > }; > >+static struct kobj_type gdlm_ktype = { >+ .default_attrs = lock_module_attrs, >+ .sysfs_ops = &gfs2_attr_ops, >+}; >+ >+static struct kset gdlm_kset = { >+ .subsys = &kernel_subsys, >+ .kobj = {.name = "lock_dlm",}, >+ .ktype = &gdlm_ktype, >+}; >+ >+int gfs2_kobject_setup(struct lm_lockstruct *ls, struct kobject *fskobj) >+{ >+ int error; >+ >+ error = kobject_set_name(&ls->ls_kobj, "%s", "lock_module"); >+ if (error) { >+ printk(KERN_ERR "can't set kobj name %d", error); >+ return error; >+ } >+ >+ ls->ls_kobj.kset = &gdlm_kset; >+ ls->ls_kobj.ktype = &gdlm_ktype; >+ ls->ls_kobj.parent = fskobj; >+ >+ error = kobject_register(&ls->ls_kobj); >+ if (error) >+ printk(KERN_ERR "can't register kobj %d", error); >+ >+ return error; >+} >+ >+void gfs2_kobject_release(struct lm_lockstruct *ls) >+{ >+ kobject_unregister(&ls->ls_kobj); >+} >+ > int gfs2_sys_fs_add(struct gfs2_sbd *sdp) > { >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; > int error; > > sdp->sd_kobj.kset = &gfs2_kset; >@@ -519,14 +686,23 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp > if (error) > goto fail_args; > >+ error = kset_register(&gdlm_kset); >+ if (error) { >+ printk("lock_dlm: cannot register kset %d\n", error); >+ goto fail_tune; >+ } >+ >+ kobject_uevent(&sdp->sd_kobj, KOBJ_ADD); > return 0; > >+fail_tune: >+ sysfs_remove_group(&sdp->sd_kobj, &tune_group); > 
fail_args: > sysfs_remove_group(&sdp->sd_kobj, &args_group); > fail_counters: > sysfs_remove_group(&sdp->sd_kobj, &counters_group); > fail_lockstruct: >- sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); >+ kobject_put(&ls->ls_kobj); > fail_reg: > kobject_unregister(&sdp->sd_kobj); > fail: >@@ -536,10 +712,12 @@ fail: > > void gfs2_sys_fs_del(struct gfs2_sbd *sdp) > { >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; > sysfs_remove_group(&sdp->sd_kobj, &tune_group); > sysfs_remove_group(&sdp->sd_kobj, &args_group); > sysfs_remove_group(&sdp->sd_kobj, &counters_group); > sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group); >+ kobject_put(&ls->ls_kobj); > kobject_unregister(&sdp->sd_kobj); > } > >diff -purN a/fs/gfs2/trans.c b/fs/gfs2/trans.c >--- a/fs/gfs2/trans.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/trans.c 2008-06-16 09:14:53.000000000 -0500 >@@ -12,9 +12,8 @@ > #include <linux/spinlock.h> > #include <linux/completion.h> > #include <linux/buffer_head.h> >-#include <linux/gfs2_ondisk.h> > #include <linux/kallsyms.h> >-#include <linux/lm_interface.h> >+#include <linux/gfs2_ondisk.h> > > #include "gfs2.h" > #include "incore.h" >diff -purN a/fs/gfs2/util.c b/fs/gfs2/util.c >--- a/fs/gfs2/util.c 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/util.c 2008-06-16 11:18:47.000000000 -0500 >@@ -13,13 +13,11 @@ > #include <linux/buffer_head.h> > #include <linux/crc32.h> > #include <linux/gfs2_ondisk.h> >-#include <linux/lm_interface.h> > #include <asm/uaccess.h> > > #include "gfs2.h" > #include "incore.h" > #include "glock.h" >-#include "lm.h" > #include "util.h" > > struct kmem_cache *gfs2_glock_cachep __read_mostly; >@@ -33,6 +31,34 @@ void gfs2_assert_i(struct gfs2_sbd *sdp) > sdp->sd_fsname); > } > >+int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...) 
>+{ >+ struct lm_lockstruct *ls = &sdp->sd_lockstruct; >+ struct lm_lockops *lm = ls->ls_ops; >+ va_list args; >+ >+ if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags)) >+ return 0; >+ >+ va_start(args, fmt); >+ vprintk(fmt, args); >+ va_end(args); >+ >+ fs_err(sdp, "about to withdraw this file system\n"); >+ BUG_ON(sdp->sd_args.ar_debug); >+ >+ kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE); >+ >+ if (lm->lm_unmount) { >+ fs_err(sdp, "telling LM to unmount\n"); >+ lm->lm_unmount(sdp); >+ } >+ fs_err(sdp, "withdrawn\n"); >+ dump_stack(); >+ >+ return -1; >+} >+ > /** > * gfs2_assert_withdraw_i - Cause the machine to withdraw if @assertion is false > * Returns: -1 if this call withdrew the machine, >diff -purN a/fs/gfs2/util.h b/fs/gfs2/util.h >--- a/fs/gfs2/util.h 2008-04-30 16:26:44.000000000 -0500 >+++ b/fs/gfs2/util.h 2008-06-16 11:19:10.000000000 -0500 >@@ -164,6 +164,7 @@ gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)- > > void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap, > unsigned int bit, int new_value); >+int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...); > > #endif /* __UTIL_DOT_H__ */ > >diff -purN a/include/linux/lm_interface.h b/include/linux/lm_interface.h >--- a/include/linux/lm_interface.h 2008-06-13 14:59:43.000000000 -0500 >+++ b/include/linux/lm_interface.h 1969-12-31 18:00:00.000000000 -0600 >@@ -1,273 +0,0 @@ >-/* >- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. >- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. >- * >- * This copyrighted material is made available to anyone wishing to use, >- * modify, copy, or redistribute it subject to the terms and conditions >- * of the GNU General Public License version 2. 
>- */ >- >-#ifndef __LM_INTERFACE_DOT_H__ >-#define __LM_INTERFACE_DOT_H__ >- >- >-typedef void (*lm_callback_t) (void *ptr, unsigned int type, void *data); >- >-/* >- * lm_mount() flags >- * >- * LM_MFLAG_SPECTATOR >- * GFS is asking to join the filesystem's lockspace, but it doesn't want to >- * modify the filesystem. The lock module shouldn't assign a journal to the FS >- * mount. It shouldn't send recovery callbacks to the FS mount. If the node >- * dies or withdraws, all locks can be wiped immediately. >- */ >- >-#define LM_MFLAG_SPECTATOR 0x00000001 >- >-/* >- * lm_lockstruct flags >- * >- * LM_LSFLAG_LOCAL >- * The lock_nolock module returns LM_LSFLAG_LOCAL to GFS, indicating that GFS >- * can make single-node optimizations. >- */ >- >-#define LM_LSFLAG_LOCAL 0x00000001 >- >-/* >- * lm_lockname types >- */ >- >-#define LM_TYPE_RESERVED 0x00 >-#define LM_TYPE_NONDISK 0x01 >-#define LM_TYPE_INODE 0x02 >-#define LM_TYPE_RGRP 0x03 >-#define LM_TYPE_META 0x04 >-#define LM_TYPE_IOPEN 0x05 >-#define LM_TYPE_FLOCK 0x06 >-#define LM_TYPE_PLOCK 0x07 >-#define LM_TYPE_QUOTA 0x08 >-#define LM_TYPE_JOURNAL 0x09 >- >-/* >- * lm_lock() states >- * >- * SHARED is compatible with SHARED, not with DEFERRED or EX. >- * DEFERRED is compatible with DEFERRED, not with SHARED or EX. >- */ >- >-#define LM_ST_UNLOCKED 0 >-#define LM_ST_EXCLUSIVE 1 >-#define LM_ST_DEFERRED 2 >-#define LM_ST_SHARED 3 >- >-/* >- * lm_lock() flags >- * >- * LM_FLAG_TRY >- * Don't wait to acquire the lock if it can't be granted immediately. >- * >- * LM_FLAG_TRY_1CB >- * Send one blocking callback if TRY is set and the lock is not granted. >- * >- * LM_FLAG_NOEXP >- * GFS sets this flag on lock requests it makes while doing journal recovery. >- * These special requests should not be blocked due to the recovery like >- * ordinary locks would be. >- * >- * LM_FLAG_ANY >- * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may >- * also be granted in SHARED. 
The preferred state is whichever is compatible >- * with other granted locks, or the specified state if no other locks exist. >- * >- * LM_FLAG_PRIORITY >- * Override fairness considerations. Suppose a lock is held in a shared state >- * and there is a pending request for the deferred state. A shared lock >- * request with the priority flag would be allowed to bypass the deferred >- * request and directly join the other shared lock. A shared lock request >- * without the priority flag might be forced to wait until the deferred >- * requested had acquired and released the lock. >- */ >- >-#define LM_FLAG_TRY 0x00000001 >-#define LM_FLAG_TRY_1CB 0x00000002 >-#define LM_FLAG_NOEXP 0x00000004 >-#define LM_FLAG_ANY 0x00000008 >-#define LM_FLAG_PRIORITY 0x00000010 >- >-/* >- * lm_lock() and lm_async_cb return flags >- * >- * LM_OUT_ST_MASK >- * Masks the lower two bits of lock state in the returned value. >- * >- * LM_OUT_CACHEABLE >- * The lock hasn't been released so GFS can continue to cache data for it. >- * >- * LM_OUT_CANCELED >- * The lock request was canceled. >- * >- * LM_OUT_ASYNC >- * The result of the request will be returned in an LM_CB_ASYNC callback. >- */ >- >-#define LM_OUT_ST_MASK 0x00000003 >-#define LM_OUT_CACHEABLE 0x00000004 >-#define LM_OUT_CANCELED 0x00000008 >-#define LM_OUT_ASYNC 0x00000080 >-#define LM_OUT_ERROR 0x00000100 >- >-/* >- * lm_callback_t types >- * >- * LM_CB_NEED_E LM_CB_NEED_D LM_CB_NEED_S >- * Blocking callback, a remote node is requesting the given lock in >- * EXCLUSIVE, DEFERRED, or SHARED. >- * >- * LM_CB_NEED_RECOVERY >- * The given journal needs to be recovered. >- * >- * LM_CB_DROPLOCKS >- * Reduce the number of cached locks. >- * >- * LM_CB_ASYNC >- * The given lock has been granted. 
>- */ >- >-#define LM_CB_NEED_E 257 >-#define LM_CB_NEED_D 258 >-#define LM_CB_NEED_S 259 >-#define LM_CB_NEED_RECOVERY 260 >-#define LM_CB_DROPLOCKS 261 >-#define LM_CB_ASYNC 262 >- >-/* >- * lm_recovery_done() messages >- */ >- >-#define LM_RD_GAVEUP 308 >-#define LM_RD_SUCCESS 309 >- >- >-struct lm_lockname { >- u64 ln_number; >- unsigned int ln_type; >-}; >- >-#define lm_name_equal(name1, name2) \ >- (((name1)->ln_number == (name2)->ln_number) && \ >- ((name1)->ln_type == (name2)->ln_type)) \ >- >-struct lm_async_cb { >- struct lm_lockname lc_name; >- int lc_ret; >-}; >- >-struct lm_lockstruct; >- >-struct lm_lockops { >- const char *lm_proto_name; >- >- /* >- * Mount/Unmount >- */ >- >- int (*lm_mount) (char *table_name, char *host_data, >- lm_callback_t cb, void *cb_data, >- unsigned int min_lvb_size, int flags, >- struct lm_lockstruct *lockstruct, >- struct kobject *fskobj); >- >- void (*lm_others_may_mount) (void *lockspace); >- >- void (*lm_unmount) (void *lockspace); >- >- void (*lm_withdraw) (void *lockspace); >- >- /* >- * Lock oriented operations >- */ >- >- int (*lm_get_lock) (void *lockspace, struct lm_lockname *name, void **lockp); >- >- void (*lm_put_lock) (void *lock); >- >- unsigned int (*lm_lock) (void *lock, unsigned int cur_state, >- unsigned int req_state, unsigned int flags); >- >- unsigned int (*lm_unlock) (void *lock, unsigned int cur_state); >- >- void (*lm_cancel) (void *lock); >- >- int (*lm_hold_lvb) (void *lock, char **lvbp); >- void (*lm_unhold_lvb) (void *lock, char *lvb); >- >- /* >- * Posix Lock oriented operations >- */ >- >- int (*lm_plock_get) (void *lockspace, struct lm_lockname *name, >- struct file *file, struct file_lock *fl); >- >- int (*lm_plock) (void *lockspace, struct lm_lockname *name, >- struct file *file, int cmd, struct file_lock *fl); >- >- int (*lm_punlock) (void *lockspace, struct lm_lockname *name, >- struct file *file, struct file_lock *fl); >- >- /* >- * Client oriented operations >- */ >- >- void 
(*lm_recovery_done) (void *lockspace, unsigned int jid, >- unsigned int message); >- >- struct module *lm_owner; >-}; >- >-/* >- * lm_mount() return values >- * >- * ls_jid - the journal ID this node should use >- * ls_first - this node is the first to mount the file system >- * ls_lvb_size - size in bytes of lock value blocks >- * ls_lockspace - lock module's context for this file system >- * ls_ops - lock module's functions >- * ls_flags - lock module features >- */ >- >-struct lm_lockstruct { >- unsigned int ls_jid; >- unsigned int ls_first; >- unsigned int ls_lvb_size; >- void *ls_lockspace; >- const struct lm_lockops *ls_ops; >- int ls_flags; >-}; >- >-/* >- * Lock module bottom interface. A lock module makes itself available to GFS >- * with these functions. >- */ >- >-int gfs2_register_lockproto(const struct lm_lockops *proto); >-void gfs2_unregister_lockproto(const struct lm_lockops *proto); >- >-/* >- * Lock module top interface. GFS calls these functions when mounting or >- * unmounting a file system. >- */ >- >-int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data, >- lm_callback_t cb, void *cb_data, >- unsigned int min_lvb_size, int flags, >- struct lm_lockstruct *lockstruct, >- struct kobject *fskobj); >- >-void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct); >- >-void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct); >- >-#endif /* __LM_INTERFACE_DOT_H__ */ >-
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 447748
:
307210
|
309243
|
309244
|
309246
|
309248
|
309253
| 309545 |
310134
|
311171