 include/linux/slub_def.h |  5 +----
 mm/slub.c                | 39 +--------------------------------------
 2 files changed, 2 insertions(+), 42 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a6c43ec6a4a5..b33c0f2e61dc 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -96,11 +96,8 @@ struct kmem_cache {
 	 * Defragmentation by allocating from a remote node.
 	 */
 	int remote_node_defrag_ratio;
-	struct kmem_cache_node *node[MAX_NUMNODES];
-#else
-	/* Avoid an extra cache line for UP */
-	struct kmem_cache_node local_node;
 #endif
+	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
 /*
diff --git a/mm/slub.c b/mm/slub.c
index 7e1fe663795a..064bda294af2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -233,11 +233,7 @@ int slab_is_available(void)
 
 static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 {
-#ifdef CONFIG_NUMA
 	return s->node[node];
-#else
-	return &s->local_node;
-#endif
 }
 
 /* Verify that a pointer has an address that is valid within a slab page */
@@ -871,7 +867,7 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 	 * dilemma by deferring the increment of the count during
 	 * bootstrap (see early_kmem_cache_node_alloc).
 	 */
-	if (!NUMA_BUILD || n) {
+	if (n) {
 		atomic_long_inc(&n->nr_slabs);
 		atomic_long_add(objects, &n->total_objects);
 	}
@@ -2112,7 +2108,6 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 	return s->cpu_slab != NULL;
 }
 
-#ifdef CONFIG_NUMA
 static struct kmem_cache *kmem_cache_node;
 
 /*
@@ -2202,17 +2197,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 	}
 	return 1;
 }
-#else
-static void free_kmem_cache_nodes(struct kmem_cache *s)
-{
-}
-
-static int init_kmem_cache_nodes(struct kmem_cache *s)
-{
-	init_kmem_cache_node(&s->local_node, s);
-	return 1;
-}
-#endif
 
 static void set_min_partial(struct kmem_cache *s, unsigned long min)
 {
@@ -3023,8 +3007,6 @@ void __init kmem_cache_init(void)
 	int caches = 0;
 	struct kmem_cache *temp_kmem_cache;
 	int order;
-
-#ifdef CONFIG_NUMA
 	struct kmem_cache *temp_kmem_cache_node;
 	unsigned long kmalloc_size;
 
@@ -3048,12 +3030,6 @@ void __init kmem_cache_init(void)
 		0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
 	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
-#else
-	/* Allocate a single kmem_cache from the page allocator */
-	kmem_size = sizeof(struct kmem_cache);
-	order = get_order(kmem_size);
-	kmem_cache = (void *)__get_free_pages(GFP_NOWAIT, order);
-#endif
 
 	/* Able to allocate the per node structures */
 	slab_state = PARTIAL;
@@ -3064,7 +3040,6 @@ void __init kmem_cache_init(void)
 	kmem_cache = kmem_cache_alloc(kmem_cache, GFP_NOWAIT);
 	memcpy(kmem_cache, temp_kmem_cache, kmem_size);
 
-#ifdef CONFIG_NUMA
 	/*
 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
 	 * kmem_cache_node is separately allocated so no need to
@@ -3078,18 +3053,6 @@ void __init kmem_cache_init(void)
 	kmem_cache_bootstrap_fixup(kmem_cache_node);
 
 	caches++;
-#else
-	/*
-	 * kmem_cache has kmem_cache_node embedded and we moved it!
-	 * Update the list heads
-	 */
-	INIT_LIST_HEAD(&kmem_cache->local_node.partial);
-	list_splice(&temp_kmem_cache->local_node.partial, &kmem_cache->local_node.partial);
-#ifdef CONFIG_SLUB_DEBUG
-	INIT_LIST_HEAD(&kmem_cache->local_node.full);
-	list_splice(&temp_kmem_cache->local_node.full, &kmem_cache->local_node.full);
-#endif
-#endif
 	kmem_cache_bootstrap_fixup(kmem_cache);
 	caches++;
 	/* Free temporary boot structure */
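
Note: the core of the patch is visible in struct kmem_cache and get_node(): instead of embedding a single local_node on !CONFIG_NUMA builds, per-node metadata is always reached through a pointer array indexed by node id, so a UP kernel is simply the one-node case of the NUMA code path. The following is a minimal userspace sketch of that pattern, not the kernel code; toy_cache, toy_cache_node, MAX_NODES and the helpers are hypothetical stand-ins for kmem_cache, kmem_cache_node and MAX_NUMNODES.

/*
 * Userspace model of the unified layout: one pointer array, no
 * #ifdef'd local_node member and no CONFIG_NUMA branch in get_node().
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 4	/* stand-in for MAX_NUMNODES */

struct toy_cache_node {
	long nr_slabs;	/* per-node statistics, as in kmem_cache_node */
};

struct toy_cache {
	const char *name;
	/* Always an array of pointers, on UP as well as NUMA. */
	struct toy_cache_node *node[MAX_NODES];
};

/* Mirrors the simplified get_node(): a plain index, no special case. */
static struct toy_cache_node *get_node(struct toy_cache *s, int node)
{
	return s->node[node];
}

static int init_cache_nodes(struct toy_cache *s, int nr_nodes)
{
	for (int n = 0; n < nr_nodes; n++) {
		s->node[n] = calloc(1, sizeof(*s->node[n]));
		if (!s->node[n])
			return 0;
	}
	return 1;
}

static void free_cache_nodes(struct toy_cache *s, int nr_nodes)
{
	for (int n = 0; n < nr_nodes; n++)
		free(s->node[n]);
}

int main(void)
{
	struct toy_cache s = { .name = "demo" };
	int nr_nodes = 1;	/* a UP build is just the single-node case */

	if (!init_cache_nodes(&s, nr_nodes))
		return 1;
	get_node(&s, 0)->nr_slabs++;	/* same path the NUMA case takes */
	printf("%s: node 0 has %ld slab(s)\n", s.name,
	       get_node(&s, 0)->nr_slabs);
	free_cache_nodes(&s, nr_nodes);
	return 0;
}

The trade-off is one extra pointer dereference and allocation on UP in exchange for deleting every CONFIG_NUMA branch in this path; the simplified inc_slabs_node() check "if (n)" also works unchanged on UP, since node 0's structure may legitimately be NULL only during early bootstrap.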
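The removed !NUMA bootstrap branch also shows why relocating a kmem_cache that embeds its node structure needed manual INIT_LIST_HEAD() + list_splice(): after memcpy(), the copied circular list heads still point at the old structure's address. The sketch below reproduces that hazard and fix with a tiny userspace list; node_meta, tmp, final and the list helpers are hypothetical simplifications of the kernel's <linux/list.h>, not its implementation.

#include <stdio.h>
#include <string.h>

/* Tiny circular doubly-linked list, modeled on <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

/* Move every entry of @list onto @head (simplified list_splice()). */
static void list_splice(struct list_head *list, struct list_head *head)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	if (first == list)		/* source list is empty */
		return;
	first->prev = head;
	last->next = head->next;
	head->next->prev = last;
	head->next = first;
}

/* Stand-in for the kmem_cache_node that was embedded as local_node. */
struct node_meta {
	struct list_head partial;
};

int main(void)
{
	struct node_meta tmp, final;
	struct list_head slab;	/* one "slab" queued on tmp's partial list */

	INIT_LIST_HEAD(&tmp.partial);
	list_add(&slab, &tmp.partial);

	/* Relocate the structure, as the boot code did via memcpy(). */
	memcpy(&final, &tmp, sizeof(final));
	/* final.partial's links still refer into &tmp: the copy is stale. */

	/* The fixup the removed branch did by hand: re-init, then splice. */
	INIT_LIST_HEAD(&final.partial);
	list_splice(&tmp.partial, &final.partial);

	printf("slab correctly linked to relocated head: %s\n",
	       final.partial.next == &slab && slab.prev == &final.partial ?
	       "yes" : "no");
	return 0;
}

With kmem_cache_node allocated separately on every configuration, no list head ever moves with the kmem_cache descriptor, which is why the patch can delete this splicing entirely and rely on kmem_cache_bootstrap_fixup() alone.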