path: root/mm/slub.c
author     Wei Yang <weiyang@linux.vnet.ibm.com>  2014-08-07 01:04:42 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-07 03:01:15 +0200
commit     54266640709a24c9844245d0d9f36b9cb1f31326 (patch)
tree       c3fc54c3c86b0ff0d06f244baadada4f7d0dd2a3 /mm/slub.c
parent     slab: change int to size_t for representing allocation size (diff)
slub: avoid duplicate creation on the first object
When a kmem_cache is created with a ctor, each object in the kmem_cache is initialized before it is ready for use. In the slub implementation, however, the first object is initialized twice. This patch removes the duplicate initialization of the first object.

Fixes commit 7656c72b ("SLUB: add macros for scanning objects in a slab").

Signed-off-by: Wei Yang <weiyang@linux.vnet.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
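The double initialization is easiest to see by tracing the old loop. Below is a minimal user-space sketch (not kernel code) that mimics the pre-patch loop shape: setup_object() and set_freepointer() are stand-ins for the real SLUB helpers, and NOBJ, init_count, and the integer "pointers" are invented purely for illustration.

#include <stdio.h>

#define NOBJ 4	/* pretend the slab holds four objects */

static int init_count[NOBJ];

/* Stand-in for the kernel's setup_object(): count each init. */
static void setup_object(int obj) { init_count[obj]++; }
/* Stand-in for set_freepointer(): the link itself is irrelevant here. */
static void set_freepointer(int obj, int next) { (void)obj; (void)next; }

int main(void)
{
	int p, last;

	/* Old scheme: trail one object behind the scanning cursor. */
	last = 0;
	for (p = 0; p < NOBJ; p++) {
		setup_object(last);	/* p == 0 and p == 1 both hit object 0 */
		set_freepointer(last, p);
		last = p;
	}
	setup_object(last);		/* final object handled after the loop */
	set_freepointer(last, -1);	/* -1 plays the role of NULL */

	for (p = 0; p < NOBJ; p++)
		printf("object %d initialized %d time(s)\n", p, init_count[p]);
	return 0;
}

Running this prints "object 0 initialized 2 time(s)" while every other object is initialized once: on the first iteration p and last both point at the first object, so the second iteration sets it up again.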
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 92d8139c556d..1f1f838326a0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -283,6 +283,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
 			__p += (__s)->size)
 
+#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
+	for (__p = (__addr), __idx = 1; __idx <= __objects;\
+			__p += (__s)->size, __idx++)
+
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
@@ -1379,9 +1383,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	void *start;
-	void *last;
 	void *p;
 	int order;
+	int idx;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
@@ -1402,14 +1406,13 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
-	last = start;
-	for_each_object(p, s, start, page->objects) {
-		setup_object(s, page, last);
-		set_freepointer(s, last, p);
-		last = p;
+	for_each_object_idx(p, idx, s, start, page->objects) {
+		setup_object(s, page, p);
+		if (likely(idx < page->objects))
+			set_freepointer(s, p, p + s->size);
+		else
+			set_freepointer(s, p, NULL);
 	}
-	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
 	page->inuse = page->objects;
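For contrast, here is the same user-space sketch reworked around the patched, index-based loop: each object is set up exactly once, and the last object is detected via the 1-based idx rather than a trailing pointer. As before, the harness (NOBJ, init_count, next_obj, integer stand-ins for pointers) is invented for illustration and is not the kernel code itself.

#include <stdio.h>

#define NOBJ 4	/* pretend the slab holds four objects */

static int init_count[NOBJ];
static int next_obj[NOBJ];

/* Stand-ins for the kernel's setup_object()/set_freepointer(). */
static void setup_object(int obj) { init_count[obj]++; }
static void set_freepointer(int obj, int next) { next_obj[obj] = next; }

int main(void)
{
	int p, idx;

	/* New scheme: a single pass, one setup per object. */
	for (p = 0, idx = 1; idx <= NOBJ; p++, idx++) {
		setup_object(p);
		if (idx < NOBJ)
			set_freepointer(p, p + 1);	/* link to the next object */
		else
			set_freepointer(p, -1);		/* terminate the freelist */
	}

	for (p = 0; p < NOBJ; p++)
		printf("object %d: inits=%d next=%d\n", p, init_count[p], next_obj[p]);
	return 0;
}

Here every init_count entry comes out as 1, and the freelist links are built in the same pass, which is exactly the property the patch restores for the first object.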