| author | Christoph Lameter <clameter@sgi.com> | 2007-10-16 10:25:37 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 18:42:59 +0200 |
| commit | 523b945855a1427000ffc707c610abe5947ae607 (patch) | |
| tree | 2d84b5b6822a2a20bfd79146c08ce06ac8c80b9b /mm/page_alloc.c | |
| parent | Memoryless nodes: drop one memoryless node boot warning (diff) | |
| download | linux-523b945855a1427000ffc707c610abe5947ae607.tar.xz linux-523b945855a1427000ffc707c610abe5947ae607.zip | |
Memoryless nodes: Fix GFP_THISNODE behavior
GFP_THISNODE checks that the selected zone is within the pgdat (node) of the first zone of the zonelist. That only works if the node has memory. A memoryless node has the first zone of its zonelist on another pgdat (node), so the check passes and GFP_THISNODE simply returns memory from that first pgdat, i.e. memory from other nodes. GFP_THISNODE should instead fail if there is no local memory on the node.
Add a new set of zonelists for each node that contain only the zones belonging to that node itself, so that no fallback is possible.
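Conceptually, the per-node zonelist array then has two halves. The sketch below is only an illustration of that layout, assuming MAX_ZONELISTS is twice MAX_NR_ZONES on NUMA builds; the mmzone.h side of the change is not part of the diff shown here, and the helper name is made up for the example:

```c
/*
 * Sketch only: intended layout of pgdat->node_zonelists after this patch.
 *
 *   node_zonelists[0 .. MAX_NR_ZONES-1]                ordinary zonelists,
 *                                                      fallback to other nodes
 *   node_zonelists[MAX_NR_ZONES .. 2*MAX_NR_ZONES-1]   GFP_THISNODE zonelists,
 *                                                      local zones only
 */
#define MAX_ZONELISTS (2 * MAX_NR_ZONES)	/* assumed NUMA definition */

/* hypothetical helper: return the no-fallback zonelist for a zone type */
static struct zonelist *thisnode_zonelist(pg_data_t *pgdat, enum zone_type zt)
{
	return pgdat->node_zonelists + MAX_NR_ZONES + zt;
}
```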
Then modify gfp_type to pick up the right zone based on the presence of __GFP_THISNODE.
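The include/linux/gfp.h side of that change is not in the diff below; the following sketch only illustrates how the index could be biased by MAX_NR_ZONES when __GFP_THISNODE is set, so lookups land in the no-fallback half of node_zonelists. The function name and exact body are assumptions:

```c
/*
 * Sketch (not the verbatim gfp.h hunk): gfp_zonelist_index() is a
 * hypothetical name; the point is the MAX_NR_ZONES offset applied when
 * __GFP_THISNODE is present in the allocation flags.
 */
static inline int gfp_zonelist_index(gfp_t flags)
{
	int base = 0;

#ifdef CONFIG_NUMA
	if (flags & __GFP_THISNODE)
		base = MAX_NR_ZONES;	/* select the no-fallback zonelists */
#endif
#ifdef CONFIG_ZONE_DMA
	if (flags & __GFP_DMA)
		return base + ZONE_DMA;
#endif
#ifdef CONFIG_HIGHMEM
	if (flags & __GFP_HIGHMEM)
		return base + ZONE_HIGHMEM;
#endif
	return base + ZONE_NORMAL;
}
```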
Drop the existing GFP_THISNODE check from the page allocator's hot path.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to '')

| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | mm/page_alloc.c | 28 |
1 files changed, 23 insertions, 5 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d06f6e0f75aa..2f547f45de18 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1191,9 +1191,6 @@ zonelist_scan:
 			!zlc_zone_worth_trying(zonelist, z, allowednodes))
 				continue;
 		zone = *z;
-		if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
-			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
-				break;
 		if ((alloc_flags & ALLOC_CPUSET) &&
 			!cpuset_zone_allowed_softwall(zone, gfp_mask))
 				goto try_next_zone;
@@ -1262,7 +1259,10 @@ restart:
 	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
 
 	if (unlikely(*z == NULL)) {
-		/* Should this ever happen?? */
+		/*
+		 * Happens if we have an empty zonelist as a result of
+		 * GFP_THISNODE being used on a memoryless node
+		 */
 		return NULL;
 	}
 
@@ -1858,6 +1858,22 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
 }
 
 /*
+ * Build gfp_thisnode zonelists
+ */
+static void build_thisnode_zonelists(pg_data_t *pgdat)
+{
+	enum zone_type i;
+	int j;
+	struct zonelist *zonelist;
+
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
+		j = build_zonelists_node(pgdat, zonelist, 0, i);
+		zonelist->zones[j] = NULL;
+	}
+}
+
+/*
  * Build zonelists ordered by zone and nodes within zones.
  * This results in conserving DMA zone[s] until all Normal memory is
  * exhausted, but results in overflowing to remote node while memory
@@ -1961,7 +1977,7 @@ static void build_zonelists(pg_data_t *pgdat)
 	int order = current_zonelist_order;
 
 	/* initialize zonelists */
-	for (i = 0; i < MAX_NR_ZONES; i++) {
+	for (i = 0; i < MAX_ZONELISTS; i++) {
 		zonelist = pgdat->node_zonelists + i;
 		zonelist->zones[0] = NULL;
 	}
@@ -2006,6 +2022,8 @@ static void build_zonelists(pg_data_t *pgdat)
 		/* calculate node order -- i.e., DMA last! */
 		build_zonelists_in_zone_order(pgdat, j);
 	}
+
+	build_thisnode_zonelists(pgdat);
 }
 
 /* Construct the zonelist performance cache - see further mmzone.h */
```
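For illustration, a hedged sketch of what the fixed semantics mean for a caller. alloc_pages_node() and GFP_THISNODE are existing interfaces of this kernel era, but the wrapper function below is hypothetical and only demonstrates the expected behaviour:

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical caller: with this patch, a GFP_THISNODE allocation on a
 * memoryless node walks an empty thisnode zonelist and fails, instead of
 * silently handing back memory from another node.
 */
static struct page *alloc_local_page(int nid)
{
	struct page *page = alloc_pages_node(nid, GFP_THISNODE, 0);

	if (!page)
		return NULL;	/* nid has no local memory; caller decides fallback */
	return page;
}
```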