author    Zhouping Liu <zliu@redhat.com>  2013-05-16 05:36:23 +0200
committer Pekka Enberg <penberg@kernel.org>  2013-06-08 13:30:42 +0200
commit    d0d04b78f403b0bcfe03315e16b50d196610720d (patch)
tree      e37ddc853888ebee6dcdc0dee75c10139171ad30 /mm
parent    mm, slab_common: Fix bootstrap creation of kmalloc caches (diff)
mm, slab: moved kmem_cache_alloc_node comment to correct place
After several fixes to kmem_cache_alloc_node(), its comment had become separated from the function. This patch moves it back on top of the kmem_cache_alloc_node() definition.
Signed-off-by: Zhouping Liu <zliu@redhat.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a98f8db93670..273a5ac2ade3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3340,18 +3340,6 @@ done:
return obj;
}
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
static __always_inline void *
slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long caller)
@@ -3645,6 +3633,17 @@ EXPORT_SYMBOL(kmem_cache_alloc_trace);
#endif
#ifdef CONFIG_NUMA
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_);
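For reference, here is a minimal usage sketch (not part of this commit) of the kmem_cache_alloc_node() API described by the relocated comment. The cache name "example_cache", struct example, and the surrounding functions are hypothetical; only the slab calls themselves come from the kernel API. As the comment notes, passing __GFP_THISNODE would suppress the fallback to other nodes.

#include <linux/init.h>
#include <linux/slab.h>

/* Hypothetical object type and cache, used only for illustration. */
struct example {
	int data;
};

static struct kmem_cache *example_cache;

static int __init example_init(void)
{
	example_cache = kmem_cache_create("example_cache",
					  sizeof(struct example), 0, 0, NULL);
	if (!example_cache)
		return -ENOMEM;
	return 0;
}

static struct example *example_alloc_on_node(int nid)
{
	/*
	 * Like kmem_cache_alloc(), but the object is preferably placed
	 * on node 'nid'; without __GFP_THISNODE in the flags the
	 * allocator may fall back to another node.
	 */
	return kmem_cache_alloc_node(example_cache, GFP_KERNEL, nid);
}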