summary | refs | log | tree | commit | diff | stats
path: root/fs/inotify.c
diff options
context:
space:
mode:
author: Eric Dumazet <dada1@cosmosbay.com> 2006-03-26 11:37:24 +0200
committer: Linus Torvalds <torvalds@g5.osdl.org> 2006-03-26 18:56:56 +0200
commitfa3536cc144c1298f2ed9416c33f3b77fa2cd37a (patch)
tree5484541319b86ae7dac0def4db7925f7cc7008e7 /fs/inotify.c
parent[PATCH] hpet header sanitization (diff)
downloadlinux-fa3536cc144c1298f2ed9416c33f3b77fa2cd37a.tar.xz
linux-fa3536cc144c1298f2ed9416c33f3b77fa2cd37a.zip
[PATCH] Use __read_mostly on some hot fs variables
While hunting with oprofile on an SMP platform, I discovered that dentry lookups were slowed down because d_hash_mask, d_hash_shift and dentry_hashtable were in a cache line that contained inodes_stat. So each time inodes_stat is changed by a cpu, other cpus have to refill their cache line. This patch moves some variables to the __read_mostly section, in order to avoid false sharing. RCU dentry lookups can go full speed. Signed-off-by: Eric Dumazet <dada1@cosmosbay.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/inotify.c')
-rw-r--r-- fs/inotify.c | 12
1 file changed, 6 insertions, 6 deletions
diff --git a/fs/inotify.c b/fs/inotify.c
index a61e93e17853..f48a3dae0712 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -39,15 +39,15 @@
static atomic_t inotify_cookie;
-static kmem_cache_t *watch_cachep;
-static kmem_cache_t *event_cachep;
+static kmem_cache_t *watch_cachep __read_mostly;
+static kmem_cache_t *event_cachep __read_mostly;
-static struct vfsmount *inotify_mnt;
+static struct vfsmount *inotify_mnt __read_mostly;
/* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances;
-int inotify_max_user_watches;
-int inotify_max_queued_events;
+int inotify_max_user_instances __read_mostly;
+int inotify_max_user_watches __read_mostly;
+int inotify_max_queued_events __read_mostly;
/*
* Lock ordering: