author     Vlastimil Babka <vbabka@suse.cz>                2016-07-29 00:49:28 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-07-29 01:07:41 +0200
commit     a5508cd83f10f663e05d212cb81f600a3af46e40
tree       b52a09bd0be8e756a48b6673c7746b3b0e1b6726 /mm/page_alloc.c
parent     mm, thp: remove __GFP_NORETRY from khugepaged and madvised allocations
mm, compaction: introduce direct compaction priority
In the context of direct compaction, for some types of allocations we would
like compaction to either succeed or definitively fail while trying as hard
as possible. The current async/sync_light migration modes cannot express
this, because several heuristics can cut an attempt short: caching of
scanner positions, marking pageblocks as unsuitable, and deferring
compaction for a whole zone. At least the final compaction attempt should
be able to override these heuristics.
To communicate how hard compaction should try, we replace the migration
mode argument with a new enum compact_priority and change the relevant
function signatures. In compact_zone_order(), where struct compact_control
is constructed, the priority is mapped to suitable control flags. This
patch itself has no functional effect, as the current priority levels map
back to the same migration modes as before; expanding them is left to the
following patches.
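
The scale itself lives in include/linux/compaction.h, outside this file's
diff. From the substitutions below (DEF_COMPACT_PRIORITY standing in for
MIGRATE_SYNC_LIGHT, INIT_COMPACT_PRIORITY for MIGRATE_ASYNC, and
should_compact_retry() escalating by decrementing toward
MIN_COMPACT_PRIORITY) it can be sketched as follows; treat this as a
reconstruction from the diff, not a verbatim quote:

	enum compact_priority {
		COMPACT_PRIO_SYNC_LIGHT,
		MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,	/* tries hardest */
		DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
		COMPACT_PRIO_ASYNC,				/* cheapest attempt */
		INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
	};

In compact_zone_order() (mm/compaction.c, also outside this diff) the
priority is then folded into struct compact_control, roughly:

	.mode = (prio == COMPACT_PRIO_ASYNC) ?
				MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,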
Note that the !CONFIG_COMPACTION variant of try_to_compact_pages() is
removed, as its only caller exists under CONFIG_COMPACTION.
Link: http://lkml.kernel.org/r/20160721073614.24395-8-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')

 mm/page_alloc.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 09ba67487897..26c6fe74f5c5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3096,7 +3096,7 @@ out:
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
-		enum migrate_mode mode, enum compact_result *compact_result)
+		enum compact_priority prio, enum compact_result *compact_result)
 {
 	struct page *page;
 	int contended_compaction;
@@ -3106,7 +3106,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 	current->flags |= PF_MEMALLOC;
 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
-						mode, &contended_compaction);
+						prio, &contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 
 	if (*compact_result <= COMPACT_INACTIVE)
@@ -3160,7 +3160,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 static inline bool
 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
-		     enum compact_result compact_result, enum migrate_mode *migrate_mode,
+		     enum compact_result compact_result,
+		     enum compact_priority *compact_priority,
 		     int compaction_retries)
 {
 	int max_retries = MAX_COMPACT_RETRIES;
@@ -3171,11 +3172,11 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
 	/*
 	 * compaction considers all the zone as desperately out of memory
 	 * so it doesn't really make much sense to retry except when the
-	 * failure could be caused by weak migration mode.
+	 * failure could be caused by insufficient priority
 	 */
 	if (compaction_failed(compact_result)) {
-		if (*migrate_mode == MIGRATE_ASYNC) {
-			*migrate_mode = MIGRATE_SYNC_LIGHT;
+		if (*compact_priority > MIN_COMPACT_PRIORITY) {
+			(*compact_priority)--;
 			return true;
 		}
 		return false;
@@ -3209,7 +3210,7 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
-		enum migrate_mode mode, enum compact_result *compact_result)
+		enum compact_priority prio, enum compact_result *compact_result)
 {
 	*compact_result = COMPACT_SKIPPED;
 	return NULL;
@@ -3218,7 +3219,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 static inline bool
 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
 		     enum compact_result compact_result,
-		     enum migrate_mode *migrate_mode,
+		     enum compact_priority *compact_priority,
 		     int compaction_retries)
 {
 	struct zone *zone;
@@ -3473,7 +3474,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct page *page = NULL;
 	unsigned int alloc_flags;
 	unsigned long did_some_progress;
-	enum migrate_mode migration_mode = MIGRATE_SYNC_LIGHT;
+	enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
 	enum compact_result compact_result;
 	int compaction_retries = 0;
 	int no_progress_loops = 0;
@@ -3525,7 +3526,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	    !gfp_pfmemalloc_allowed(gfp_mask)) {
 		page = __alloc_pages_direct_compact(gfp_mask, order,
 						alloc_flags, ac,
-						MIGRATE_ASYNC,
+						INIT_COMPACT_PRIORITY,
 						&compact_result);
 		if (page)
 			goto got_pg;
@@ -3558,7 +3559,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 			 * sync compaction could be very expensive, so keep
 			 * using async compaction.
 			 */
-			migration_mode = MIGRATE_ASYNC;
+			compact_priority = INIT_COMPACT_PRIORITY;
 		}
 	}
 
@@ -3624,8 +3625,7 @@ retry:
 
 	/* Try direct compaction and then allocating */
 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
-					migration_mode,
-					&compact_result);
+					compact_priority, &compact_result);
 	if (page)
 		goto got_pg;
 
@@ -3665,7 +3665,7 @@ retry:
 	 */
 	if (did_some_progress > 0 &&
 			should_compact_retry(ac, order, alloc_flags,
-				compact_result, &migration_mode,
+				compact_result, &compact_priority,
 				compaction_retries))
 		goto retry;
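
Read together, the hunks make the escalation easy to model in isolation.
The toy below is plain userspace C, not kernel code; should_retry() is a
hypothetical stand-in for the compaction_failed() branch of
should_compact_retry(), and the enum mirrors the layout sketched above:

	#include <stdbool.h>
	#include <stdio.h>

	enum compact_priority {
		COMPACT_PRIO_SYNC_LIGHT,
		MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
		DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
		COMPACT_PRIO_ASYNC,
		INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
	};

	/* Stand-in for the compaction_failed() branch: escalate until the floor. */
	static bool should_retry(enum compact_priority *prio)
	{
		if (*prio > MIN_COMPACT_PRIORITY) {
			(*prio)--;	/* one step harder, as in the patch */
			return true;
		}
		return false;		/* already tried as hard as possible */
	}

	int main(void)
	{
		enum compact_priority prio = INIT_COMPACT_PRIORITY;

		do {
			printf("compacting at priority %d\n", prio);
		} while (should_retry(&prio));

		return 0;
	}

With only two levels at this point the walk is short (async, then
sync_light), but the same decrement keeps working unchanged when the
priority levels are expanded by the following patches.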