author     Mel Gorman <mgorman@techsingularity.net>  2018-12-28 09:35:48 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 21:11:48 +0100
commit     0a79cdad5eb213b3a629e624565b1b3bf9192b7c
tree       06aee01f5342cc975b477a9dc9e2987840453f59  /mm/page_alloc.c
parent     mm: move zone watermark accesses behind an accessor

mm: use alloc_flags to record if kswapd can wake
This is a preparation patch that copies the GFP flag __GFP_KSWAPD_RECLAIM
into alloc_flags, which avoids having to pass gfp_mask through a long
callchain in a future patch.
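As a rough illustration (not kernel code), the following self-contained C
sketch shows the pattern: a caller-visible gfp bit is mirrored once into the
allocator-internal alloc_flags word, and everything downstream tests only
alloc_flags. The record_kswapd_flag() helper and the flag values are invented
for this sketch; the kernel's actual definitions differ.

/* Sketch only: mirror one gfp_mask bit into alloc_flags up front. */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_KSWAPD_RECLAIM	(1u << 11)	/* placeholder value, not the kernel's */
#define ALLOC_KSWAPD		(1u << 9)	/* placeholder value, not the kernel's */

/*
 * Hypothetical helper; the patch itself does this in
 * alloc_flags_nofragment() and gfp_to_alloc_flags().
 */
static unsigned int record_kswapd_flag(gfp_t gfp_mask, unsigned int alloc_flags)
{
	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		alloc_flags |= ALLOC_KSWAPD;
	return alloc_flags;
}

int main(void)
{
	unsigned int alloc_flags = record_kswapd_flag(__GFP_KSWAPD_RECLAIM, 0);

	/* Deep in the call chain, gfp_mask no longer has to be passed along. */
	if (alloc_flags & ALLOC_KSWAPD)
		printf("kswapd may be woken\n");
	return 0;
}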
Note that the setting in the fast path happens in alloc_flags_nofragment(),
and it may be claimed that this has nothing to do with ALLOC_NOFRAGMENT.
That is true in this patch but will not be true later, so the flag is
recorded now to make review easier by showing where it needs to be set.
No functional change.
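The bracketed fix note below is about placement: ALLOC_KSWAPD must be
recorded before the #ifdef CONFIG_ZONE_DMA32 block so that
!CONFIG_ZONE_DMA32 builds still get the flag. A condensed, compilable
sketch of that control-flow shape follows; the zone test is stubbed out and
the flag values are placeholders, not the kernel's definitions.

#include <stdio.h>
#include <stdbool.h>

typedef unsigned int gfp_t;

#define __GFP_KSWAPD_RECLAIM	(1u << 11)	/* placeholder value, not the kernel's */
#define ALLOC_KSWAPD		(1u << 9)	/* placeholder value, not the kernel's */
#define CONFIG_ZONE_DMA32			/* comment out to mimic !CONFIG_ZONE_DMA32 */

/* Hypothetical stand-in for the real ZONE_NORMAL/populated_zone() checks. */
static bool zone_fragmentation_check_applies(void)
{
	return true;
}

static unsigned int alloc_flags_nofragment_sketch(gfp_t gfp_mask)
{
	unsigned int alloc_flags = 0;

	/* Recorded before the #ifdef: set with or without ZONE_DMA32. */
	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		alloc_flags |= ALLOC_KSWAPD;

#ifdef CONFIG_ZONE_DMA32
	if (!zone_fragmentation_check_applies())
		goto out;
out:
#endif /* CONFIG_ZONE_DMA32 */
	return alloc_flags;
}

int main(void)
{
	unsigned int alloc_flags = alloc_flags_nofragment_sketch(__GFP_KSWAPD_RECLAIM);

	printf("ALLOC_KSWAPD set: %d\n", !!(alloc_flags & ALLOC_KSWAPD));
	return 0;
}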
[mgorman@techsingularity.net: ALLOC_KSWAPD flag needs to be applied in the !CONFIG_ZONE_DMA32 case]
Link: http://lkml.kernel.org/r/20181126143503.GO23260@techsingularity.net
Link: http://lkml.kernel.org/r/20181123114528.28802-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <zi.yan@cs.rutgers.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat

 mm/page_alloc.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2046e333ea8f..32b3e121a388 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3268,7 +3268,6 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 }
 #endif	/* CONFIG_NUMA */
 
-#ifdef CONFIG_ZONE_DMA32
 /*
  * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
  * fragmentation is subtle. If the preferred zone was HIGHMEM then
@@ -3278,10 +3277,16 @@ static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  * fragmentation between the Normal and DMA32 zones.
  */
 static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
+alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 {
+	unsigned int alloc_flags = 0;
+
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
+#ifdef CONFIG_ZONE_DMA32
 	if (zone_idx(zone) != ZONE_NORMAL)
-		return 0;
+		goto out;
 
 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3290,17 +3295,12 @@ alloc_flags_nofragment(struct zone *zone)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		return 0;
+		goto out;
 
-	return ALLOC_NOFRAGMENT;
-}
-#else
-static inline unsigned int
-alloc_flags_nofragment(struct zone *zone)
-{
-	return 0;
+out:
+#endif /* CONFIG_ZONE_DMA32 */
+	return alloc_flags;
 }
-#endif
 
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
@@ -3939,6 +3939,9 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 	} else if (unlikely(rt_task(current)) && !in_interrupt())
 		alloc_flags |= ALLOC_HARDER;
 
+	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+		alloc_flags |= ALLOC_KSWAPD;
+
 #ifdef CONFIG_CMA
 	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
 		alloc_flags |= ALLOC_CMA;
@@ -4170,7 +4173,7 @@ retry_cpuset:
 	if (!ac->preferred_zoneref->zone)
 		goto nopage;
 
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	/*
@@ -4228,7 +4231,7 @@ retry_cpuset:
 
 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
-	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
+	if (alloc_flags & ALLOC_KSWAPD)
 		wake_all_kswapds(order, gfp_mask, ac);
 
 	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
@@ -4451,7 +4454,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
 	 * Forbid the first pass from falling back to types that fragment
 	 * memory until all local zones are considered.
 	 */
-	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone);
+	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);
 
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);