author    Ezequiel Garcia <elezegarcia@gmail.com>	2012-09-08 22:47:57 +0200
committer Pekka Enberg <penberg@kernel.org>	2012-09-25 09:18:34 +0200
commit    48356303ff8cce7036f13a23df9119d3f47461ce (patch)
tree      3e3eeb7d4daf12e7d68037d6c80518e929221aa4 /mm/slab.c
parent    mm, slab: Match SLAB and SLUB kmem_cache_alloc_xxx_trace() prototype (diff)
mm, slab: Rename __cache_alloc() -> slab_alloc()
This patch does not fix anything and its only goal is to produce
common code between SLAB and SLUB.

Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Ezequiel Garcia <elezegarcia@gmail.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
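For quick reference, these are the renamed SLAB fast-path entry points as
they stand after this patch (prototypes transcribed from the diff below;
a sketch only, since the full definitions live in mm/slab.c). SLUB already
uses the slab_alloc() naming for its internal fast path, which is what
makes the common-code goal possible; the SLUB side is not part of this diff.

	static __always_inline void *
	slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller);

	static __always_inline void *
	slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
			unsigned long caller);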
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	14
1 file changed, 7 insertions, 7 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 4c54a2357937..d011030e9613 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3561,7 +3561,7 @@ done:
* Fallback to other node is possible if __GFP_THISNODE is not set.
*/
static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long caller)
{
unsigned long save_flags;
@@ -3648,7 +3648,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
#endif /* CONFIG_NUMA */
static __always_inline void *
-__cache_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
+slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
unsigned long save_flags;
void *objp;
@@ -3824,7 +3824,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- void *ret = __cache_alloc(cachep, flags, _RET_IP_);
+ void *ret = slab_alloc(cachep, flags, _RET_IP_);
trace_kmem_cache_alloc(_RET_IP_, ret,
cachep->object_size, cachep->size, flags);
@@ -3839,7 +3839,7 @@ kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
void *ret;
- ret = __cache_alloc(cachep, flags, _RET_IP_);
+ ret = slab_alloc(cachep, flags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret,
size, cachep->size, flags);
@@ -3851,7 +3851,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
- void *ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);
+ void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
trace_kmem_cache_alloc_node(_RET_IP_, ret,
cachep->object_size, cachep->size,
@@ -3869,7 +3869,7 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
{
void *ret;
- ret = __cache_alloc_node(cachep, flags, nodeid, _RET_IP_);
+ ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
trace_kmalloc_node(_RET_IP_, ret,
size, cachep->size,
@@ -3932,7 +3932,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
cachep = __find_general_cachep(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
- ret = __cache_alloc(cachep, flags, caller);
+ ret = slab_alloc(cachep, flags, caller);
trace_kmalloc(caller, ret,
size, cachep->size, flags);
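Since only static helpers inside mm/slab.c are renamed, the exported
allocation API is unchanged. A minimal usage sketch follows (hypothetical
"foo" cache in a kernel context; this example is not part of the patch):

	#include <linux/slab.h>

	struct foo {
		int value;
	};

	static struct kmem_cache *foo_cache;	/* hypothetical example cache */

	static int foo_example(void)
	{
		struct foo *obj;

		/* Create a cache of struct foo objects. */
		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
					      0, 0, NULL);
		if (!foo_cache)
			return -ENOMEM;

		/* kmem_cache_alloc() now reaches slab_alloc() internally;
		 * callers see no difference from before the rename. */
		obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
		if (!obj) {
			kmem_cache_destroy(foo_cache);
			return -ENOMEM;
		}
		obj->value = 42;

		kmem_cache_free(foo_cache, obj);
		kmem_cache_destroy(foo_cache);
		return 0;
	}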