path: root/fs/gfs2/main.c
author		Steven Whitehouse <swhiteho@redhat.com>	2011-01-19 10:30:01 +0100
committer	Steven Whitehouse <swhiteho@redhat.com>	2011-01-21 10:39:08 +0100
commit		bc015cb84129eb1451913cfebece270bf7a39e0f (patch)
tree		4f116a61b802d87ae80051e9ae05d8fcb73d9ae7 /fs/gfs2/main.c
parent		Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/ker... (diff)
GFS2: Use RCU for glock hash table
This has a number of advantages:

- Reduces contention on the hash table lock
- Makes the code smaller and simpler
- Should speed up glock dumps when under load
- Removes ref count changing in examine_bucket
- No longer need hash chain lock in glock_put() in common case

There are some further changes which this enables and which we may do in the future. One is to look at using SLAB_RCU, and another is to look at using a per-cpu counter for the per-sb glock counter, since that is touched twice in the lifetime of each glock (but only used at umount time).

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
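For context, the lookup pattern this change enables looks roughly like the sketch below: readers walk a bucket of the bit-locked RCU hash list under rcu_read_lock() and pin a match with atomic_inc_not_zero(). This is a minimal illustration only; the names (gl_hash_table, GL_HASH_SIZE, struct gfs2_glock_stub, glock_lookup) are placeholders for the sketch, not the actual code from fs/gfs2/glock.c in this patch.

/*
 * Minimal sketch of an RCU-based bucket lookup in the style this patch
 * introduces.  All identifiers below are placeholders, not glock.c code.
 */
#include <linux/rculist_bl.h>
#include <linux/rcupdate.h>
#include <linux/types.h>
#include <asm/atomic.h>

#define GL_HASH_SIZE 8192

static struct hlist_bl_head gl_hash_table[GL_HASH_SIZE];

struct gfs2_glock_stub {
	struct hlist_bl_node gl_list;	/* linked into one hash bucket */
	atomic_t gl_ref;		/* zero means the glock is going away */
	u64 gl_number;
};

static struct gfs2_glock_stub *glock_lookup(unsigned int hash, u64 number)
{
	struct gfs2_glock_stub *gl;
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(gl, pos, &gl_hash_table[hash], gl_list) {
		/* Only return a glock whose ref count is still non-zero. */
		if (gl->gl_number == number &&
		    atomic_inc_not_zero(&gl->gl_ref)) {
			rcu_read_unlock();
			return gl;
		}
	}
	rcu_read_unlock();
	return NULL;
}

Because readers never take the bucket lock in this pattern, writers only need the per-bucket bit lock for insert and remove, which is where the reduced contention on the hash table comes from.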
Diffstat (limited to 'fs/gfs2/main.c')
-rw-r--r--  fs/gfs2/main.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index ebef7ab6e17e..d850004f2080 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -14,6 +14,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfs2_ondisk.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
#include <asm/atomic.h>
#include "gfs2.h"
@@ -45,7 +47,7 @@ static void gfs2_init_glock_once(void *foo)
{
struct gfs2_glock *gl = foo;
- INIT_HLIST_NODE(&gl->gl_list);
+ INIT_HLIST_BL_NODE(&gl->gl_list);
spin_lock_init(&gl->gl_spin);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_lru);
@@ -198,6 +200,8 @@ static void __exit exit_gfs2_fs(void)
unregister_filesystem(&gfs2meta_fs_type);
destroy_workqueue(gfs_recovery_wq);
+ rcu_barrier();
+
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
kmem_cache_destroy(gfs2_bufdata_cachep);
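The rcu_barrier() added above matters because, with this change, glocks are freed from an RCU callback; the barrier makes the exit path wait for all outstanding callbacks before the slab caches they free into are destroyed. A rough sketch of the pattern follows, with placeholder names (glock_cachep, glock_dealloc_rcu, glock_free, teardown) rather than the actual glock.c identifiers:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative pattern only; these names are placeholders. */
static struct kmem_cache *glock_cachep;

struct glock_stub {
	struct rcu_head gl_rcu;
	/* ... other fields ... */
};

static void glock_dealloc_rcu(struct rcu_head *rcu)
{
	struct glock_stub *gl = container_of(rcu, struct glock_stub, gl_rcu);

	/* Runs only after every pre-existing RCU reader has finished. */
	kmem_cache_free(glock_cachep, gl);
}

static void glock_free(struct glock_stub *gl)
{
	/* Readers may still be walking the hash chain; defer the free. */
	call_rcu(&gl->gl_rcu, glock_dealloc_rcu);
}

static void teardown(void)
{
	/*
	 * Wait for all pending call_rcu() callbacks to complete, so that no
	 * kmem_cache_free() can race with the kmem_cache_destroy() below.
	 */
	rcu_barrier();
	kmem_cache_destroy(glock_cachep);
}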