author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-18 05:24:12 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>     2009-06-18 22:12:57 +0200
commit    dcce284a259373f9e5570f2e33f79eca84fcf565 (patch)
tree      afc4b23208974f17c080ea3d2ecfbaca4254c010
parent    Merge branch 'for-linus' of git://neil.brown.name/md (diff)
mm: Extend gfp masking to the page allocator
The page allocator also needs the masking of gfp flags during boot, so this
moves it out of slab/slub and uses it with the page allocator as well.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/gfp.h |  9
-rw-r--r--  init/main.c         |  4
-rw-r--r--  mm/page_alloc.c     |  3
-rw-r--r--  mm/slab.c           | 15
-rw-r--r--  mm/slub.c           | 12
5 files changed, 18 insertions(+), 25 deletions(-)
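Before the per-file diff, here is a minimal userspace sketch of the mechanism the
patch centralizes: a global gfp_allowed_mask starts out as GFP_BOOT_MASK (blocking,
IO and FS flags stripped) and every allocation is ANDed with it, until start_kernel()
widens the mask once interrupts are on. Only the names gfp_allowed_mask, GFP_BOOT_MASK
and set_gfp_allowed_mask() mirror the diff; the flag values and the toy allocator
below are simplified stand-ins, not kernel code.

/* Standalone sketch -- compile with any C compiler. */
#include <stdio.h>

typedef unsigned int gfp_t;

/* Simplified flag bits; the real values live in include/linux/gfp.h. */
#define __GFP_WAIT      0x01u
#define __GFP_IO        0x02u
#define __GFP_FS        0x04u
#define __GFP_HIGHMEM   0x08u
#define __GFP_BITS_MASK 0x0fu

/* During early boot, blocking/IO/FS flags are masked off every allocation. */
#define GFP_BOOT_MASK   (__GFP_BITS_MASK & ~(__GFP_WAIT | __GFP_IO | __GFP_FS))

static gfp_t gfp_allowed_mask = GFP_BOOT_MASK;

static void set_gfp_allowed_mask(gfp_t mask)
{
	gfp_allowed_mask = mask;
}

/*
 * Stand-in for __alloc_pages_nodemask(): callers pass whatever they like,
 * and the allocator silently drops any bits that are not currently allowed.
 */
static void alloc_pages_sketch(const char *phase, gfp_t gfp_mask)
{
	gfp_mask &= gfp_allowed_mask;
	printf("%-10s effective gfp mask: 0x%02x\n", phase, gfp_mask);
}

int main(void)
{
	gfp_t gfp_kernel = __GFP_WAIT | __GFP_IO | __GFP_FS;

	/* Early boot: interrupts are still off, blocking flags get stripped. */
	alloc_pages_sketch("early", gfp_kernel);

	/* Equivalent of start_kernel() after local_irq_enable(). */
	set_gfp_allowed_mask(__GFP_BITS_MASK);

	/* The same request now keeps its blocking flags. */
	alloc_pages_sketch("late", gfp_kernel);
	return 0;
}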
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index cfdb35d71bca..7c777a0da17a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -99,7 +99,7 @@ struct vm_area_struct;
__GFP_NORETRY|__GFP_NOMEMALLOC)
/* Control slab gfp mask during early boot */
-#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -348,4 +348,11 @@ static inline void oom_killer_enable(void)
oom_killer_disabled = false;
}
+extern gfp_t gfp_allowed_mask;
+
+static inline void set_gfp_allowed_mask(gfp_t mask)
+{
+ gfp_allowed_mask = mask;
+}
+
#endif /* __LINUX_GFP_H */
diff --git a/init/main.c b/init/main.c
index 1a65fdd06318..09131ec090c1 100644
--- a/init/main.c
+++ b/init/main.c
@@ -642,6 +642,10 @@ asmlinkage void __init start_kernel(void)
"enabled early\n");
early_boot_irqs_on();
local_irq_enable();
+
+ /* Interrupts are enabled now so all GFP allocations are safe. */
+ set_gfp_allowed_mask(__GFP_BITS_MASK);
+
kmem_cache_init_late();
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a5f3c278c573..6f0753fe694c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
@@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct page *page;
int migratetype = allocflags_to_migratetype(gfp_mask);
+ gfp_mask &= gfp_allowed_mask;
+
lockdep_trace_alloc(gfp_mask);
might_sleep_if(gfp_mask & __GFP_WAIT);
diff --git a/mm/slab.c b/mm/slab.c
index d08692303f6e..e74a16e4ced6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -305,12 +305,6 @@ struct kmem_list3 {
};
/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
-/*
* Need this for bootstrapping a per node allocator.
*/
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
- /*
- * Interrupts are enabled now so all GFP allocations are safe.
- */
- slab_gfp_mask = __GFP_BITS_MASK;
-
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
unsigned long save_flags;
void *ptr;
- flags &= slab_gfp_mask;
+ flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
unsigned long save_flags;
void *objp;
- flags &= slab_gfp_mask;
+ flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
diff --git a/mm/slub.c b/mm/slub.c
index 4c6449310a0e..ce62b770e2fc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -179,12 +179,6 @@ static enum {
SYSFS /* Sysfs up */
} slab_state = DOWN;
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);
@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
unsigned long flags;
unsigned int objsize;
- gfpflags &= slab_gfp_mask;
+ gfpflags &= gfp_allowed_mask;
lockdep_trace_alloc(gfpflags);
might_sleep_if(gfpflags & __GFP_WAIT);
@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void)
void __init kmem_cache_init_late(void)
{
- /*
- * Interrupts are enabled now so all GFP allocations are safe.
- */
- slab_gfp_mask = __GFP_BITS_MASK;
}
/*