author		Christoph Lameter <cl@linux-foundation.org>	2010-01-22 00:43:35 +0100
committer	Pekka Enberg <penberg@cs.helsinki.fi>	2010-01-22 17:33:38 +0100
commit		91efd773c74bb26b5409c85ad755d536448e229c
tree		b812dadb615ecff08e4d3ebe97483f192d0be27d /mm
parent		slub: remove impossible condition
dma kmalloc handling fixes
1. We need kmalloc_percpu for all of the now extended kmalloc caches array,
   not just for each shift value.

2. init_kmem_cache_nodes() must assume node 0 locality for statically
   allocated dma kmem_cache structures even after boot is complete.

Reported-and-tested-by: Alex Chiang <achiang@hp.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
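The first point is easiest to see with a toy model. The sketch below is plain userspace C, not kernel code: it only shows why a static backing array sized per shift value (SLUB_PAGE_SHIFT entries) is too small once kmalloc_caches[] is extended to KMALLOC_CACHES entries to make room for the DMA caches, and why an index derived from the cache pointer can then run past the old array. The numeric values and the names percpu_old/percpu_new are assumptions chosen for illustration; only the relation KMALLOC_CACHES > SLUB_PAGE_SHIFT reflects the situation the patch fixes.

/*
 * Toy model only (userspace, not the kernel): shows the sizing problem
 * behind the first hunk.  Values are made up for illustration.
 */
#include <assert.h>
#include <stddef.h>

#define SLUB_PAGE_SHIFT	14	/* hypothetical: old, per-shift array size      */
#define KMALLOC_CACHES	20	/* hypothetical: extended size incl. DMA caches */

struct kmem_cache	{ int dummy; };
struct kmem_cache_cpu	{ int dummy; };

static struct kmem_cache	kmalloc_caches[KMALLOC_CACHES];

/* Before the patch the static per-cpu stand-in was sized per shift value: */
static struct kmem_cache_cpu	percpu_old[SLUB_PAGE_SHIFT];
/* After the patch it covers every slot of the extended cache array: */
static struct kmem_cache_cpu	percpu_new[KMALLOC_CACHES];

int main(void)
{
	/* A DMA cache lives in one of the extra slots at the end of the array. */
	struct kmem_cache *dma = &kmalloc_caches[KMALLOC_CACHES - 1];
	ptrdiff_t idx = dma - kmalloc_caches;	/* index derived from the pointer */

	/* The extended backing array covers this index... */
	assert(idx < (ptrdiff_t)(sizeof(percpu_new) / sizeof(percpu_new[0])));
	/* ...while the old, shift-sized array would be overrun by it. */
	assert(idx >= (ptrdiff_t)(sizeof(percpu_old) / sizeof(percpu_old[0])));
	return 0;
}

That is the rationale for changing the dimension of kmalloc_percpu[] in the first hunk of the diff below.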
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 8fbb2fd70b64..bd4a9e942ace 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2062,7 +2062,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
@@ -2148,7 +2148,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	int node;
 	int local_node;
 
-	if (slab_state >= UP)
+	if (slab_state >= UP && (s < kmalloc_caches ||
+			s > kmalloc_caches + KMALLOC_CACHES))
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;
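For the second point, the condition added in the second hunk is a plain pointer-range test: if the kmem_cache sits inside the statically allocated kmalloc_caches[] array, its address cannot meaningfully be passed to virt_to_page()/page_to_nid(), so node 0 is assumed instead. The sketch below models that test in ordinary userspace C; the struct layout, the array size and the helper name is_static_cache() are illustrative assumptions, not kernel code, and the bounds are kept exactly as written in the patch.

/*
 * Userspace sketch of the range test added by the second hunk: a cache is
 * treated as "static" when its address falls within kmalloc_caches[].
 * The kernel can rely on a flat address space for this comparison; in
 * portable C, comparing pointers into unrelated objects is not strictly
 * defined, so this is illustration only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define KMALLOC_CACHES	20	/* hypothetical size, as in the sketch above */

struct kmem_cache { int dummy; };

static struct kmem_cache kmalloc_caches[KMALLOC_CACHES];

/* Mirrors the shape of the check in init_kmem_cache_nodes(). */
static bool is_static_cache(struct kmem_cache *s)
{
	/* Bounds copied from the patch (note the inclusive upper bound). */
	return !(s < kmalloc_caches || s > kmalloc_caches + KMALLOC_CACHES);
}

int main(void)
{
	struct kmem_cache *dynamic = malloc(sizeof(*dynamic));

	/* A statically allocated (e.g. DMA) cache: fall back to node 0. */
	printf("static  cache -> %d\n", is_static_cache(&kmalloc_caches[3]));
	/* A dynamically allocated cache: its node can be read from its page. */
	printf("dynamic cache -> %d\n", is_static_cache(dynamic));
	free(dynamic);
	return 0;
}

In the patched kernel function, a cache for which this test is true keeps local_node = 0 even after slab_state >= UP, which is the behaviour the bug report was about.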