Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c | 93 ++++++++++++--------------------
1 file changed, 35 insertions, 58 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index fe915db6149b..1f89b969c12b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -317,7 +317,6 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
                 }
 
                 page += (1 << PAGE_ALLOC_COSTLY_ORDER);
-                pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
         } while (page <= end_page);
 
         return false;
@@ -514,15 +513,12 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
  * very heavily contended. The lock should be periodically unlocked to avoid
  * having disabled IRQs for a long time, even when there is nobody waiting on
  * the lock. It might also be that allowing the IRQs will result in
- * need_resched() becoming true. If scheduling is needed, async compaction
- * aborts. Sync compaction schedules.
+ * need_resched() becoming true. If scheduling is needed, compaction schedules.
  * Either compaction type will also abort if a fatal signal is pending.
  * In either case if the lock was locked, it is dropped and not regained.
  *
- * Returns true if compaction should abort due to fatal signal pending, or
- * async compaction due to need_resched()
- * Returns false when compaction can continue (sync compaction might have
- * scheduled)
+ * Returns true if compaction should abort due to fatal signal pending.
+ * Returns false when compaction can continue.
  */
 static bool compact_unlock_should_abort(spinlock_t *lock,
                 unsigned long flags, bool *locked, struct compact_control *cc)
@@ -575,9 +571,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                 /*
                  * Periodically drop the lock (if held) regardless of its
                  * contention, to give chance to IRQs. Abort if fatal signal
-                 * pending or async compaction detects need_resched()
+                 * pending.
                  */
-                if (!(blockpfn % SWAP_CLUSTER_MAX)
+                if (!(blockpfn % COMPACT_CLUSTER_MAX)
                     && compact_unlock_should_abort(&cc->zone->lock, flags,
                                                                 &locked, cc))
                         break;
@@ -603,13 +599,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                 if (!PageBuddy(page))
                         goto isolate_fail;
 
-                /*
-                 * If we already hold the lock, we can skip some rechecking.
-                 * Note that if we hold the lock now, checked_pageblock was
-                 * already set in some previous iteration (or strict is true),
-                 * so it is correct to skip the suitable migration target
-                 * recheck as well.
-                 */
+                /* If we already hold the lock, we can skip some rechecking. */
                 if (!locked) {
                         locked = compact_lock_irqsave(&cc->zone->lock,
                                                                 &flags, cc);
@@ -872,7 +862,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                  * contention, to give chance to IRQs. Abort completely if
                  * a fatal signal is pending.
                  */
-                if (!(low_pfn % SWAP_CLUSTER_MAX)) {
+                if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
                         if (locked) {
                                 unlock_page_lruvec_irqrestore(locked, flags);
                                 locked = NULL;
@@ -899,7 +889,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                  * not falsely conclude that the block should be skipped.
                  */
                 if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
-                        if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
+                        if (!isolation_suitable(cc, page)) {
                                 low_pfn = end_pfn;
                                 page = NULL;
                                 goto isolate_abort;
@@ -918,7 +908,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                                 /* Do not report -EBUSY down the chain */
                                 if (ret == -EBUSY)
                                         ret = 0;
-                                low_pfn += (1UL << compound_order(page)) - 1;
+                                low_pfn += compound_nr(page) - 1;
                                 goto isolate_fail;
                         }
 
@@ -1542,7 +1532,7 @@ fast_isolate_freepages(struct compact_control *cc)
                  * not found, be pessimistic for direct compaction
                  * and use the min mark.
                  */
-                if (highest) {
+                if (highest >= min_pfn) {
                         page = pfn_to_page(highest);
                         cc->free_pfn = highest;
                 } else {
@@ -1587,7 +1577,7 @@ static void isolate_freepages(struct compact_control *cc)
         unsigned int stride;
 
         /* Try a small search of the free lists for a candidate */
-        isolate_start_pfn = fast_isolate_freepages(cc);
+        fast_isolate_freepages(cc);
         if (cc->nr_freepages)
                 goto splitmap;
 
@@ -1624,7 +1614,7 @@ static void isolate_freepages(struct compact_control *cc)
                  * This can iterate a massively long zone without finding any
                  * suitable migration targets, so periodically check resched.
                  */
-                if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+                if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
                         cond_resched();
 
                 page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
@@ -1858,6 +1848,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
 
                         update_fast_start_pfn(cc, free_pfn);
                         pfn = pageblock_start_pfn(free_pfn);
+                        if (pfn < cc->zone->zone_start_pfn)
+                                pfn = cc->zone->zone_start_pfn;
                         cc->fast_search_fail = 0;
                         found_block = true;
                         set_pageblock_skip(freepage);
@@ -1931,7 +1923,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
                  * many pageblocks unsuitable, so periodically check if we
                  * need to schedule.
                  */
-                if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+                if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
                         cond_resched();
 
                 page = pageblock_pfn_to_page(block_start_pfn,
@@ -1951,12 +1943,12 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
                         continue;
 
                 /*
-                 * For async compaction, also only scan in MOVABLE blocks
-                 * without huge pages. Async compaction is optimistic to see
-                 * if the minimum amount of work satisfies the allocation.
-                 * The cached PFN is updated as it's possible that all
-                 * remaining blocks between source and target are unsuitable
-                 * and the compaction scanners fail to meet.
+                 * For async direct compaction, only scan the pageblocks of the
+                 * same migratetype without huge pages. Async direct compaction
+                 * is optimistic to see if the minimum amount of work satisfies
+                 * the allocation. The cached PFN is updated as it's possible
+                 * that all remaining blocks between source and target are
+                 * unsuitable and the compaction scanners fail to meet.
                  */
                 if (!suitable_migration_source(cc, page)) {
                         update_cached_migrate(cc, block_end_pfn);
@@ -2144,29 +2136,16 @@ static enum compact_result __compact_finished(struct compact_control *cc)
                  * other migratetype buddy lists.
                  */
                 if (find_suitable_fallback(area, order, migratetype,
-                                                true, &can_steal) != -1) {
-
-                        /* movable pages are OK in any pageblock */
-                        if (migratetype == MIGRATE_MOVABLE)
-                                return COMPACT_SUCCESS;
-
+                                                true, &can_steal) != -1)
                         /*
-                         * We are stealing for a non-movable allocation. Make
-                         * sure we finish compacting the current pageblock
-                         * first so it is as free as possible and we won't
-                         * have to steal another one soon. This only applies
-                         * to sync compaction, as async compaction operates
-                         * on pageblocks of the same migratetype.
+                         * Movable pages are OK in any pageblock. If we are
+                         * stealing for a non-movable allocation, make sure
+                         * we finish compacting the current pageblock first
+                         * (which is assured by the above migrate_pfn align
+                         * check) so it is as free as possible and we won't
+                         * have to steal another one soon.
                          */
-                        if (cc->mode == MIGRATE_ASYNC ||
-                                        IS_ALIGNED(cc->migrate_pfn,
-                                                   pageblock_nr_pages)) {
-                                return COMPACT_SUCCESS;
-                        }
-
-                        ret = COMPACT_CONTINUE;
-                        break;
-                }
+                        return COMPACT_SUCCESS;
         }
 
 out:
@@ -2301,7 +2280,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
                 available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
                 compact_result = __compaction_suitable(zone, order, alloc_flags,
                                 ac->highest_zoneidx, available);
-                if (compact_result != COMPACT_SKIPPED)
+                if (compact_result == COMPACT_CONTINUE)
                         return true;
         }
 
@@ -2592,7 +2571,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
                 unsigned int alloc_flags, const struct alloc_context *ac,
                 enum compact_priority prio, struct page **capture)
 {
-        int may_perform_io = gfp_mask & __GFP_IO;
+        int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
         struct zoneref *z;
         struct zone *zone;
         enum compact_result rc = COMPACT_SKIPPED;
@@ -3016,21 +2995,18 @@ static int kcompactd(void *p)
  * This kcompactd start function will be called by init and node-hot-add.
  * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added.
  */
-int kcompactd_run(int nid)
+void kcompactd_run(int nid)
 {
         pg_data_t *pgdat = NODE_DATA(nid);
-        int ret = 0;
 
         if (pgdat->kcompactd)
-                return 0;
+                return;
 
         pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
         if (IS_ERR(pgdat->kcompactd)) {
                 pr_err("Failed to start kcompactd on node %d\n", nid);
-                ret = PTR_ERR(pgdat->kcompactd);
                 pgdat->kcompactd = NULL;
         }
-        return ret;
 }
 
 /*
@@ -3065,7 +3041,8 @@ static int kcompactd_cpu_online(unsigned int cpu)
 
                 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                         /* One of our CPUs online: restore mask */
-                        set_cpus_allowed_ptr(pgdat->kcompactd, mask);
+                        if (pgdat->kcompactd)
+                                set_cpus_allowed_ptr(pgdat->kcompactd, mask);
         }
         return 0;
 }
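
A note on the SWAP_CLUSTER_MAX -> COMPACT_CLUSTER_MAX hunks above: this is a
rename of the batching constant (in the kernel tree COMPACT_CLUSTER_MAX is
defined as an alias for SWAP_CLUSTER_MAX), not a behavior change. The sketch
below is a minimal userspace approximation of the periodic-work pattern those
loops use; the value 32 and the periodic_work() helper are assumptions for
illustration, not kernel API.

#include <stdio.h>

/* Assumption for the sketch: COMPACT_CLUSTER_MAX == SWAP_CLUSTER_MAX == 32. */
#define COMPACT_CLUSTER_MAX 32

/* Hypothetical stand-in for compact_unlock_should_abort()/cond_resched(). */
static void periodic_work(unsigned long pfn)
{
        printf("drop lock / reschedule at pfn %lu\n", pfn);
}

int main(void)
{
        /* Same shape as the isolate_*pages_block() loops: the expensive
         * check runs only once every COMPACT_CLUSTER_MAX iterations. */
        for (unsigned long pfn = 0; pfn < 128; pfn++) {
                if (!(pfn % COMPACT_CLUSTER_MAX))
                        periodic_work(pfn);
                /* ... scan one pfn ... */
        }
        return 0;
}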
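The compound_order -> compound_nr change in isolate_migratepages_block() is
likewise cosmetic: for a compound page, compound_nr(page) equals
1UL << compound_order(page). A trivial illustration of the identity, with a
made-up order value:

#include <stdio.h>

int main(void)
{
        unsigned int order = 4;          /* hypothetical compound page order */
        unsigned long nr = 1UL << order; /* what compound_nr() evaluates to */

        /* The scanner skips the tail pages by advancing low_pfn by nr - 1. */
        printf("order %u -> %lu pages, advance low_pfn by %lu\n",
               order, nr, nr - 1);
        return 0;
}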
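The two lines added in fast_find_migrateblock() clamp the returned pfn to the
target zone: pageblock_start_pfn() rounds down to a pageblock boundary, and
when the zone does not start on such a boundary the rounded pfn can land
below zone_start_pfn. A self-contained sketch of the arithmetic; the
pageblock order and all pfn values are made up for illustration:

#include <stdio.h>

/* Assumption for the sketch: order-9 pageblocks (2MB with 4K pages). */
#define pageblock_order          9
#define pageblock_nr_pages       (1UL << pageblock_order)
#define pageblock_start_pfn(pfn) ((pfn) & ~(pageblock_nr_pages - 1))

int main(void)
{
        unsigned long zone_start_pfn = 0x1e05; /* zone starts mid-pageblock */
        unsigned long free_pfn = 0x1e07;       /* candidate inside that block */
        unsigned long pfn = pageblock_start_pfn(free_pfn);

        printf("rounded pfn %#lx, zone start %#lx\n", pfn, zone_start_pfn);

        /* The added check: never hand back a pfn that precedes the zone. */
        if (pfn < zone_start_pfn)
                pfn = zone_start_pfn;

        printf("clamped pfn %#lx\n", pfn);
        return 0;
}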
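Finally, the (__force int) cast in try_to_compact_pages() addresses the
sparse checker rather than runtime behavior: gfp_t is a __bitwise type, so
narrowing it to a plain int without __force draws a restricted-type warning.
A compilable userspace approximation; the macro definitions mirror the
kernel's pattern, and the 0x40u value for __GFP_IO is an assumption here:

/* Without sparse (__CHECKER__ unset) the annotations compile away. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise gfp_t;

#define __GFP_IO ((__force gfp_t)0x40u) /* assumed value for the sketch */

int main(void)
{
        gfp_t gfp_mask = __GFP_IO;

        /* Plain "int x = gfp_mask & __GFP_IO;" makes sparse warn about a
         * cast from a restricted type; __force marks it as intentional. */
        int may_perform_io = (__force int)(gfp_mask & __GFP_IO);

        return may_perform_io ? 0 : 1;
}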