Diffstat (limited to 'mm/compaction.c')
-rw-r--r-- | mm/compaction.c | 29 ++++++++++++++---------------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 13cb7a961b31..dbcfdfce1b82 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(__ClearPageMovable);
  * allocation success. 1 << compact_defer_shift, compactions are skipped up
  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
  */
-void defer_compaction(struct zone *zone, int order)
+static void defer_compaction(struct zone *zone, int order)
 {
 	zone->compact_considered = 0;
 	zone->compact_defer_shift++;
@@ -172,7 +172,7 @@ void defer_compaction(struct zone *zone, int order)
 }
 
 /* Returns true if compaction should be skipped this time */
-bool compaction_deferred(struct zone *zone, int order)
+static bool compaction_deferred(struct zone *zone, int order)
 {
 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
 
@@ -209,7 +209,7 @@ void compaction_defer_reset(struct zone *zone, int order,
 }
 
 /* Returns true if restarting compaction after many failures */
-bool compaction_restarting(struct zone *zone, int order)
+static bool compaction_restarting(struct zone *zone, int order)
 {
 	if (order < zone->compact_order_failed)
 		return false;
@@ -237,7 +237,7 @@ static void reset_cached_positions(struct zone *zone)
 }
 
 /*
- * Compound pages of >= pageblock_order should consistenly be skipped until
+ * Compound pages of >= pageblock_order should consistently be skipped until
  * released. It is always pointless to compact pages of such order (if they are
  * migratable), and the pageblocks they occupy cannot contain any free pages.
  */
@@ -2070,13 +2070,6 @@ static enum compact_result compact_finished(struct compact_control *cc)
 	return ret;
 }
 
-/*
- * compaction_suitable: Is this suitable to run compaction on this zone now?
- * Returns
- *   COMPACT_SKIPPED  - If there are too few free pages for compaction
- *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
- *   COMPACT_CONTINUE - If compaction should run now
- */
 static enum compact_result __compaction_suitable(struct zone *zone, int order,
 					unsigned int alloc_flags,
 					int highest_zoneidx,
@@ -2120,6 +2113,13 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 	return COMPACT_CONTINUE;
 }
 
+/*
+ * compaction_suitable: Is this suitable to run compaction on this zone now?
+ * Returns
+ *   COMPACT_SKIPPED  - If there are too few free pages for compaction
+ *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
+ *   COMPACT_CONTINUE - If compaction should run now
+ */
 enum compact_result compaction_suitable(struct zone *zone, int order,
 					unsigned int alloc_flags,
 					int highest_zoneidx)
@@ -2275,7 +2275,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 
 	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
 		int err;
-		unsigned long start_pfn = cc->migrate_pfn;
+		unsigned long iteration_start_pfn = cc->migrate_pfn;
 
 		/*
 		 * Avoid multiple rescans which can happen if a page cannot be
@@ -2287,7 +2287,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 		 */
 		cc->rescan = false;
 		if (pageblock_start_pfn(last_migrated_pfn) ==
-		    pageblock_start_pfn(start_pfn)) {
+		    pageblock_start_pfn(iteration_start_pfn)) {
 			cc->rescan = true;
 		}
 
@@ -2311,8 +2311,7 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
 			goto check_drain;
 		case ISOLATE_SUCCESS:
 			update_cached = false;
-			last_migrated_pfn = start_pfn;
-			;
+			last_migrated_pfn = iteration_start_pfn;
 		}
 
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
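
As context for the first two hunks: defer_compaction() and compaction_deferred(), now static, implement an exponential backoff. Each compaction failure resets compact_considered and bumps compact_defer_shift, so up to 1 << compact_defer_shift subsequent attempts are skipped, capped at 1 << COMPACT_MAX_DEFER_SHIFT. The standalone C sketch below models only the behaviour visible in the diff context; struct zone_model, the simplified signatures (the kernel versions also take an allocation order and track zone->compact_order_failed), and main() are illustrative stand-ins, not kernel code.

/*
 * Minimal userspace model of the deferral logic in the hunks above.
 * Names and signatures are simplified stand-ins for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* cap: skip at most 1 << 6 attempts */

struct zone_model {
	unsigned long compact_considered;
	unsigned int compact_defer_shift;
};

/* After a failed compaction: restart the count and double the backoff. */
static void defer_compaction(struct zone_model *zone)
{
	zone->compact_considered = 0;
	if (++zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if this compaction attempt should be skipped. */
static bool compaction_deferred(struct zone_model *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;	/* avoid overflow */
		return false;	/* backoff expired: run compaction */
	}
	return true;
}

int main(void)
{
	struct zone_model zone = { 0, 0 };
	int attempt, ran = 0;

	defer_compaction(&zone);	/* first failure: shift = 1 */
	defer_compaction(&zone);	/* second failure: shift = 2, so the
					 * next 3 attempts are skipped and
					 * the 4th runs */

	for (attempt = 1; attempt <= 10; attempt++)
		if (!compaction_deferred(&zone))
			ran++;

	printf("ran %d of 10 attempts\n", ran);	/* prints "ran 7 of 10" */
	return 0;
}

For reference, compaction_defer_reset(), which appears only in a hunk header here and is unchanged by this patch, clears both counters again once an allocation at the previously failing order succeeds.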