author		Christoph Lameter <cl@linux-foundation.org>	2009-06-17 00:32:15 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-17 04:47:35 +0200
commit		62bc62a873116805774ffd37d7f86aa4faa832b1 (patch)
tree		7f3f5b94b2d484a13ca27b8d3f6f54cfe126d7c0 /mm/page_alloc.c
parent		page allocator: get the pageblock migratetype without disabling interrupts (diff)
page allocator: use a pre-calculated value instead of num_online_nodes() in fast paths
num_online_nodes() is called in a number of places but most often by the page
allocator when deciding whether the zonelist needs to be filtered based on
cpusets or the zonelist cache.  This is actually a heavy function and touches a
number of cache lines.  This patch stores the number of online nodes at boot
time and updates the value when nodes get onlined and offlined.  The value is
then used in a number of important paths in place of num_online_nodes().

[rientjes@google.com: do not override definition of node_set_online() with macro]
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
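The hunks shown below only switch readers over to the cached value; the update
side of the patch lives outside this diffstat (which is limited to
mm/page_alloc.c).  A minimal sketch of how the counter could be kept in sync,
assuming node_set_online()/node_set_offline() in include/linux/nodemask.h
recompute it from node_states[N_ONLINE]:

	/* Cached count of online nodes; a plain load replaces a nodemask weight. */
	extern int nr_online_nodes;

	static inline void node_set_online(int nid)
	{
		node_set_state(nid, N_ONLINE);
		nr_online_nodes = num_node_state(N_ONLINE);	/* refresh cached count */
	}

	static inline void node_set_offline(int nid)
	{
		node_clear_state(nid, N_ONLINE);
		nr_online_nodes = num_node_state(N_ONLINE);	/* refresh cached count */
	}

With this arrangement the fast paths read a single __read_mostly integer
instead of computing a bitmap weight over MAX_NUMNODES bits on every call.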
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e60e41474332..0c9f406e3c44 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -161,7 +161,9 @@ static unsigned long __meminitdata dma_reserve;
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
+int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
+EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 int page_group_by_mobility_disabled __read_mostly;
@@ -1466,7 +1468,7 @@ this_zone_full:
 		if (NUMA_BUILD)
 			zlc_mark_zone_full(zonelist, z);
 try_next_zone:
-		if (NUMA_BUILD && !did_zlc_setup && num_online_nodes() > 1) {
+		if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
 			/*
 			 * we do zlc_setup after the first zone is tried but only
 			 * if there are multiple nodes make it worthwhile
@@ -2265,7 +2267,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 }
 
 
-#define MAX_NODE_LOAD (num_online_nodes())
+#define MAX_NODE_LOAD (nr_online_nodes)
 static int node_load[MAX_NUMNODES];
 
 /**
@@ -2474,7 +2476,7 @@ static void build_zonelists(pg_data_t *pgdat)
 
 	/* NUMA-aware ordering of nodes */
 	local_node = pgdat->node_id;
-	load = num_online_nodes();
+	load = nr_online_nodes;
 	prev_node = local_node;
 	nodes_clear(used_mask);
 
@@ -2625,7 +2627,7 @@ void build_all_zonelists(void)
 
 	printk("Built %i zonelists in %s order, mobility grouping %s. "
 		"Total pages: %ld\n",
-			num_online_nodes(),
+			nr_online_nodes,
 			zonelist_order_name[current_zonelist_order],
 			page_group_by_mobility_disabled ? "off" : "on",
 			vm_total_pages);