author	Alexander Duyck <alexander.h.duyck@intel.com>	2010-05-21 23:41:35 +0200
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2010-05-24 20:11:29 +0200
commit	73367bd8eef4f4eb311005886aaa916013073265
tree	d603667689cfea1d8de49fe3c7fada7f6b6eae53 /include
parent	Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/ide-2.6
slub: move kmem_cache_node into its own cacheline
This patch is meant to improve the performance of SLUB by moving the local kmem_cache_node lock into its own cacheline, separate from kmem_cache. This is accomplished by simply removing the local_node field when NUMA is enabled.

On my system with 2 nodes I saw around a 5% performance increase, with hackbench times dropping from 6.2 seconds to 5.9 seconds on average. I suspect the performance gain would increase as the number of nodes increases, but I do not currently have the data to back that up.

Bugzilla-Reference: http://bugzilla.kernel.org/show_bug.cgi?id=15713
Cc: <stable@kernel.org>
Reported-by: Alex Shi <alex.shi@intel.com>
Tested-by: Alex Shi <alex.shi@intel.com>
Acked-by: Yanmin Zhang <yanmin_zhang@linux.intel.com>
Acked-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
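The motivation is classic false sharing: a write-hot lock embedded in an otherwise read-mostly structure drags its neighboring fields into cache-line ping-pong between CPUs. The following is a minimal userspace C sketch of the before/after layouts; the type and field names are hypothetical stand-ins (not the kernel definitions), and a 64-byte cache line is assumed.

/* Illustration only: hypothetical types standing in for
 * kmem_cache and kmem_cache_node; assumes 64-byte cache lines. */
#include <stddef.h>

struct node_state {                    /* stands in for kmem_cache_node */
	unsigned long list_lock;       /* write-hot, contended under SMP */
	unsigned long nr_partial;
};

/* Before: the embedded per-node state can share a cache line with
 * read-mostly cache metadata, so contention on the lock also
 * invalidates those metadata fields on other CPUs. */
struct cache_embedded {
	size_t object_size;            /* read-mostly */
	struct node_state local_node;  /* write-hot, adjacent in memory */
};

/* After (NUMA case): only pointers live here; each node's state is
 * allocated separately and lands on its own cache line(s). */
struct cache_separated {
	size_t object_size;            /* read-mostly */
	struct node_state *node[4];    /* per-node, separately allocated */
};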
Diffstat (limited to 'include')
include/linux/slub_def.h | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 55695c8d2f8a..6ac37664e8fe 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -75,12 +75,6 @@ struct kmem_cache {
 	int offset;		/* Free pointer offset. */
 	struct kmem_cache_order_objects oo;
 
-	/*
-	 * Avoid an extra cache line for UP, SMP and for the node local to
-	 * struct kmem_cache.
-	 */
-	struct kmem_cache_node local_node;
-
 	/* Allocation and freeing of slabs */
 	struct kmem_cache_order_objects max;
 	struct kmem_cache_order_objects min;
@@ -102,6 +96,9 @@ struct kmem_cache {
 	 */
 	int remote_node_defrag_ratio;
 	struct kmem_cache_node *node[MAX_NUMNODES];
+#else
+	/* Avoid an extra cache line for UP */
+	struct kmem_cache_node local_node;
 #endif
 };
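For context on how the two layouts are consumed, the node-lookup accessor in mm/slub.c of this era (not part of this hunk; quoted from memory, so treat it as approximate) selects between the pointer array and the embedded node:

/* Sketch of the mm/slub.c accessor from this era (paraphrased,
 * not part of this diff). */
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
	/* NUMA: every node, including the local one, is now a
	 * separately allocated, cacheline-distinct structure. */
	return s->node[node];
#else
	/* UP: keep the single embedded node to save an allocation. */
	return &s->local_node;
#endif
}

The trade-off is deliberate: UP builds keep the embedded local_node to avoid an extra allocation and pointer chase, while NUMA builds pay one pointer dereference in exchange for keeping the contended list_lock off the cache lines that hold kmem_cache's read-mostly fields.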