-rw-r--r--  include/linux/mmzone.h |  6 ++++++
-rw-r--r--  mm/page_alloc.c        | 13 +++++++++++++
2 files changed, 19 insertions(+), 0 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index bd791e452ad7..67ab5febabf7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -490,6 +490,12 @@ struct zone {
 	unsigned long		managed_pages;
 
 	/*
+	 * Number of MIGRATE_RESERVE page blocks. Maintained purely as an
+	 * optimization. Protected by zone->lock.
+	 */
+	int			nr_migrate_reserve_block;
+
+	/*
 	 * rarely used fields:
 	 */
 	const char		*name;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5248fe070aa4..89d81f4429ca 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3901,6 +3901,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	struct page *page;
 	unsigned long block_migratetype;
 	int reserve;
+	int old_reserve;
 
 	/*
 	 * Get the start pfn, end pfn and the number of blocks to reserve
@@ -3922,6 +3923,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	 * future allocation of hugepages at runtime.
 	 */
 	reserve = min(2, reserve);
+	old_reserve = zone->nr_migrate_reserve_block;
+
+	/* On memory hot-add, we almost always need to do nothing. */
+	if (reserve == old_reserve)
+		return;
+	zone->nr_migrate_reserve_block = reserve;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
 		if (!pfn_valid(pfn))
@@ -3959,6 +3966,12 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 				reserve--;
 				continue;
 			}
+		} else if (!old_reserve) {
+			/*
+			 * At boot time we don't need to scan the whole zone
+			 * to turn off MIGRATE_RESERVE.
+			 */
+			break;
 		}
 
 		/*
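For readers who want to see the control flow outside the kernel context, below is a minimal, userspace-buildable sketch of the two shortcuts this patch adds. The names toy_zone and toy_setup_migrate_reserve are hypothetical stand-ins, not kernel API, and the per-block work is reduced to a counter: the zone caches the reserve target computed on the previous pass so a repeat call (memory hot-add) returns before touching any page block, and a boot-time pass whose target is already met breaks out instead of walking the whole zone.

/*
 * Simplified sketch of the optimization in this patch (assumed names,
 * not kernel code). Build with: cc -std=c99 -Wall toy_reserve.c
 */
#include <stdio.h>

struct toy_zone {
	unsigned long nr_pageblocks;
	int nr_migrate_reserve_block;	/* cached target from the last pass */
};

static void toy_setup_migrate_reserve(struct toy_zone *zone, int reserve)
{
	int old_reserve = zone->nr_migrate_reserve_block;
	unsigned long scanned = 0;

	/* Hot-add rarely changes the target, so skip the whole-zone scan. */
	if (reserve == old_reserve) {
		printf("target unchanged (%d), no scan\n", reserve);
		return;
	}
	zone->nr_migrate_reserve_block = reserve;

	for (unsigned long pfn = 0; pfn < zone->nr_pageblocks; pfn++) {
		scanned++;
		if (reserve > 0) {
			/* pretend this block became MIGRATE_RESERVE */
			reserve--;
		} else if (!old_reserve) {
			/*
			 * Nothing was reserved on earlier passes, so there is
			 * nothing left to turn off: stop scanning (boot case).
			 */
			break;
		}
		/* with old_reserve != 0 we would keep scanning to clear
		 * stale MIGRATE_RESERVE blocks */
	}
	printf("scanned %lu of %lu page blocks\n", scanned, zone->nr_pageblocks);
}

int main(void)
{
	struct toy_zone zone = { .nr_pageblocks = 1024 };

	toy_setup_migrate_reserve(&zone, 2);	/* boot: stops after a few blocks */
	toy_setup_migrate_reserve(&zone, 2);	/* hot-add: returns immediately */
	return 0;
}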