author | Joonsoo Kim <iamjoonsoo.kim@lge.com> | 2014-10-10 00:26:09 +0200
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-10 04:25:51 +0200
commit | 25c4f304be8cd6831105d3a2876028e4ecd254a1 (patch) |
tree | 53f8a895514bd821561463b0eb65850f869f3a6d /mm/slab.c |
parent | mm/slab: noinline __ac_put_obj() (diff) |
mm/slab: factor out unlikely part of cache_free_alien()
cache_free_alien() is a rarely used function, needed only when an object's
node does not match the current node. But it is defined with the inline
attribute, so it is inlined into __cache_free(), the core free function of
the slab allocator, and this needlessly makes kmem_cache_free()/kfree()
large. What we really need to inline is just the node-match check, so this
patch factors the other parts of cache_free_alien() out into a separate
function to reduce the code size of kmem_cache_free()/kfree(). (A
standalone sketch of the pattern follows the measurements below.)
<Before>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
00000000000011e0 0000000000000228 T kfree
0000000000000670 0000000000000216 T kmem_cache_free
<After>
nm -S mm/slab.o | grep -e "T kfree" -e "T kmem_cache_free"
0000000000001110 00000000000001b5 T kfree
0000000000000750 0000000000000181 T kmem_cache_free
You can see the slightly reduced text size (the second column of nm -S
output is the symbol size in bytes): kfree shrinks from 0x228 to 0x1b5 and
kmem_cache_free from 0x216 to 0x181.
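The fast-path/slow-path split used here can be shown in miniature. The
sketch below is illustrative only, not kernel code: free_fast(),
free_remote_slow(), struct obj, and current_node() are hypothetical
stand-ins for the inline check in cache_free_alien(), the out-of-line
__cache_free_alien(), the freed object, and numa_mem_id(). The point is
that only the likely() comparison gets inlined into callers, while the
cold remote-node handling stays behind a single function call:

#include <stdio.h>

/*
 * Hypothetical stand-ins: pretend the CPU always runs on node 0 and let
 * each object carry the node it was allocated on (the kernel derives
 * these with numa_mem_id() and page_to_nid() instead).
 */
struct obj { int node; };

static int current_node(void) { return 0; }

/* Cold path: noinline keeps its body out of every free_fast() call site. */
static __attribute__((noinline)) int free_remote_slow(struct obj *o, int node)
{
	printf("slow path: object from node %d freed on node %d\n",
	       o->node, node);
	return 1;
}

/* Hot path: only this compare-and-branch is inlined into callers. */
static inline int free_fast(struct obj *o)
{
	int node = current_node();

	if (__builtin_expect(o->node == node, 1))	/* likely(): local free */
		return 0;

	return free_remote_slow(o, node);
}

int main(void)
{
	struct obj local = { .node = 0 }, remote = { .node = 1 };

	free_fast(&local);	/* inline fast path, no function call */
	free_fast(&remote);	/* falls through to the out-of-line slow path */
	return 0;
}

Built with gcc -O2, each free_fast() call site collapses to a load, a
compare, and a branch; marking the slow path noinline mirrors what the
patch achieves by dropping the inline attribute from the factored-out
__cache_free_alien().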
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 38
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 7c9ca82f6be9..f989af87b72c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -984,46 +984,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+				int node, int page_node)
 {
-	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
 	struct alien_cache *alien = NULL;
 	struct array_cache *ac;
-	int node;
 	LIST_HEAD(list);
 
-	node = numa_mem_id();
-
-	/*
-	 * Make sure we are not freeing a object from another node to the array
-	 * cache on this cpu.
-	 */
-	if (likely(nodeid == node))
-		return 0;
-
 	n = get_node(cachep, node);
 	STATS_INC_NODEFREES(cachep);
-	if (n->alien && n->alien[nodeid]) {
-		alien = n->alien[nodeid];
+	if (n->alien && n->alien[page_node]) {
+		alien = n->alien[page_node];
 		ac = &alien->ac;
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid, &list);
+			__drain_alien_cache(cachep, ac, page_node, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
 		slabs_destroy(cachep, &list);
 	} else {
-		n = get_node(cachep, nodeid);
+		n = get_node(cachep, page_node);
 		spin_lock(&n->list_lock);
-		free_block(cachep, &objp, 1, nodeid, &list);
+		free_block(cachep, &objp, 1, page_node, &list);
 		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	int page_node = page_to_nid(virt_to_page(objp));
+	int node = numa_mem_id();
+	/*
+	 * Make sure we are not freeing a object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(node == page_node))
+		return 0;
+
+	return __cache_free_alien(cachep, objp, node, page_node);
+}
 #endif