author	Pekka Enberg <penberg@cs.helsinki.fi>	2009-06-24 20:59:51 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-24 21:20:14 +0200
commit	ba52270d18fb17ce2cf176b35419dab1e43fe4a3 (patch)
tree	6023e8e0b5b722be7193902894934aaef21e5144 /mm
parent	Don't warn about order-1 allocations with __GFP_NOFAIL (diff)
SLUB: Don't pass __GFP_NOFAIL for the initial allocation
SLUB uses higher order allocations by default but falls back to small orders under memory pressure. Make sure the GFP mask used in the initial allocation doesn't include __GFP_NOFAIL.

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
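The pattern described above is: attempt the preferred higher order first, but let that attempt fail quietly so the minimum-order fallback can run; only the fallback keeps the caller's original mask. Below is a minimal userspace sketch of that idea, not kernel code: the MY_GFP_* constants, try_alloc_order(), and allocate_slab_like() are invented for illustration, and only the masking of the "no fail" bit mirrors the actual change.

#include <stdio.h>

/* Invented stand-ins for the kernel GFP bits used in the patch. */
#define MY_GFP_NOWARN  0x1u
#define MY_GFP_NORETRY 0x2u
#define MY_GFP_NOFAIL  0x4u

static char fake_page[4096];

/* Pretend allocator: only order-0 succeeds, to simulate memory pressure. */
static void *try_alloc_order(unsigned int gfp, unsigned int order)
{
        (void)gfp;
        return order > 0 ? NULL : (void *)fake_page;
}

static void *allocate_slab_like(unsigned int flags, unsigned int hi_order)
{
        /* First attempt: opportunistic, quiet, and allowed to fail. */
        unsigned int alloc_gfp =
                (flags | MY_GFP_NOWARN | MY_GFP_NORETRY) & ~MY_GFP_NOFAIL;
        void *page = try_alloc_order(alloc_gfp, hi_order);

        if (!page) {
                /* Fallback: minimum order with the caller's original flags. */
                page = try_alloc_order(flags, 0);
        }
        return page;
}

int main(void)
{
        void *p = allocate_slab_like(MY_GFP_NOFAIL, 3);
        printf("got page: %p\n", p);
        return 0;
}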
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c  10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ce62b770e2fc..819f056b39c6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1085,11 +1085,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	struct kmem_cache_order_objects oo = s->oo;
+	gfp_t alloc_gfp;
 
 	flags |= s->allocflags;
 
-	page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
-								oo);
+	/*
+	 * Let the initial higher-order allocation fail under memory pressure
+	 * so we fall-back to the minimum order allocation.
+	 */
+	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
+
+	page = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!page)) {
 		oo = s->min;
 		/*