author	Christoph Lameter <cl@linux.com>	2014-04-08 00:39:42 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 01:36:14 +0200
commit	88da03a67674bcd6e9ecf18a0a182cf1303056ba (patch)
tree	f822c90ff7a8e62e6d7c3c82854b510786e7222b /mm/slub.c
parent	net: replace __this_cpu_inc in route.c with raw_cpu_inc (diff)
slub: use raw_cpu_inc for incrementing statistics
Statistics are not critical to the operation of the allocator, but they should also not cause too much overhead. When __this_cpu_inc() is altered to check whether preemption is disabled, these stat updates trigger the check. Use raw_cpu_inc() to avoid the checks. Using the this_cpu_* ops instead may cause interrupt disable/enable sequences on various arches, which can significantly impact allocator performance.

[akpm@linux-foundation.org: add comment]
Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/slub.c b/mm/slub.c
index e7451861f95d..f620bbf4054a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -224,7 +224,11 @@ static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
- __this_cpu_inc(s->cpu_slab->stat[si]);
+ /*
+ * The rmw is racy on a preemptible kernel but this is acceptable, so
+ * avoid this_cpu_add()'s irq-disable overhead.
+ */
+ raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
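For context, below is a minimal standalone sketch (not part of this commit) of the same pattern: a per-cpu event counter bumped with raw_cpu_inc(). The counter name foo_events and the helper count_foo_event() are hypothetical, used only to illustrate the trade-off described in the commit message.

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, foo_events);

static inline void count_foo_event(void)
{
	/*
	 * raw_cpu_inc() performs the per-cpu read-modify-write without
	 * the preemption checks of __this_cpu_inc() and without the
	 * preempt/irq protection of this_cpu_inc(). A rare lost
	 * increment on a preemptible kernel is accepted in exchange
	 * for a cheaper fast path, which is fine for statistics.
	 */
	raw_cpu_inc(foo_events);
}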