author		Mel Gorman <mgorman@suse.de>		2015-06-30 23:57:09 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-01 04:44:56 +0200
commit		54608c3f3a448f1e042f5d9f3b873cc8dc022f27 (patch)
tree		410305a0ade6719f350a8c5f2456cec85c4e462a /mm/page_alloc.c
parent		mm: meminit: initialise remaining struct pages in parallel with kswapd (diff)
mm: meminit: minimise number of pfn->page lookups during initialisation
Deferred struct page initialisation is using pfn_to_page() on every PFN
unnecessarily. This patch minimises the number of lookups and scheduler
checks.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
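The key observation behind the patch: within a MAX_ORDER_NR_PAGES-aligned
block the memmap is contiguous, so a single pfn_to_page() lookup per block
plus plain pointer increments can replace one lookup per PFN. A minimal
sketch of that pattern, not the kernel code itself (init_one_page() is a
hypothetical stand-in for the real per-page initialisation calls):

	/*
	 * Sketch of the lookup-batching pattern, assuming struct pages
	 * are contiguous within a MAX_ORDER_NR_PAGES-aligned block.
	 * init_one_page() is a hypothetical per-page initialiser.
	 */
	struct page *page = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
			page++;				/* contiguous inside the block */
		} else {
			page = pfn_to_page(pfn);	/* one real lookup per block */
			cond_resched();			/* one scheduler check per block */
		}
		init_one_page(page, pfn);
	}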
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	29
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c30f5a0535fd..0f770cc13450 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1091,6 +1091,7 @@ void __defermem_init deferred_init_memmap(int nid)
 
 	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
 		unsigned long pfn, end_pfn;
+		struct page *page = NULL;
 
 		end_pfn = min(walk_end, zone_end_pfn(zone));
 		pfn = first_init_pfn;
@@ -1100,13 +1101,32 @@ void __defermem_init deferred_init_memmap(int nid)
 			pfn = zone->zone_start_pfn;
 
 		for (; pfn < end_pfn; pfn++) {
-			struct page *page;
-
-			if (!pfn_valid(pfn))
+			if (!pfn_valid_within(pfn))
 				continue;
 
-			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state))
+			/*
+			 * Ensure pfn_valid is checked every
+			 * MAX_ORDER_NR_PAGES for memory holes
+			 */
+			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+				if (!pfn_valid(pfn)) {
+					page = NULL;
+					continue;
+				}
+			}
+
+			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
+				page = NULL;
 				continue;
+			}
+
+			/* Minimise pfn page lookups and scheduler checks */
+			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
+				page++;
+			} else {
+				page = pfn_to_page(pfn);
+				cond_resched();
+			}
 
 			if (page->flags) {
 				VM_BUG_ON(page_zone(page) != zone);
@@ -1116,7 +1136,6 @@ void __defermem_init deferred_init_memmap(int nid)
 			__init_single_page(page, pfn, zid, nid);
 			__free_pages_boot_core(page, pfn, 0);
 			nr_pages++;
-			cond_resched();
 		}
 		first_init_pfn = max(end_pfn, first_init_pfn);
 	}
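The page++ shortcut in the middle hunk is safe because the memmap is
contiguous within a MAX_ORDER_NR_PAGES-aligned block (with SPARSEMEM, such
a block never crosses a section boundary), and page is reset to NULL
whenever a hole or foreign-node PFN is skipped, forcing a fresh
pfn_to_page() afterwards. A hypothetical debug check expressing that
invariant, not part of the patch:

	/*
	 * Hypothetical check (not in the patch): away from MAX_ORDER
	 * boundaries, consecutive valid pfns map to consecutive
	 * struct pages, which is what justifies page++ above.
	 */
	if ((pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
		VM_BUG_ON(pfn_to_page(pfn) != pfn_to_page(pfn - 1) + 1);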