author	Christoph Lameter <clameter@sgi.com>	2006-09-26 08:31:51 +0200
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 17:48:51 +0200
commit	8417bba4b151346ed475fcc923693c9e3be89063 (patch)
tree	93d559e32bc76077c1f837aed09a5df56849c610
parent	[PATCH] Extract the allocpercpu functions from the slab allocator (diff)
[PATCH] Replace min_unmapped_ratio by min_unmapped_pages in struct zone
*_pages is a better description of the role of the variable.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
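For context only, a standalone sketch (ordinary userspace C, not kernel code) of the relationship the rename makes explicit: sysctl_min_unmapped_ratio remains a percentage, while the per-zone field caches the resulting page count, and zone_reclaim() bails out early when no more than that many unmapped file-backed pages remain. The helper names, example zone size and page counts below are invented for illustration.

/* Illustrative userspace sketch mirroring the arithmetic in this patch. */
#include <stdio.h>

/* assumed example value; in the kernel this comes from vm.min_unmapped_ratio */
static unsigned long sysctl_min_unmapped_ratio = 1;	/* percent of zone pages */

/* same computation as free_area_init_core() and the sysctl handler */
static unsigned long min_unmapped_pages(unsigned long present_pages)
{
	return (present_pages * sysctl_min_unmapped_ratio) / 100;
}

/* mirrors the early-exit check in zone_reclaim() */
static int skip_zone_reclaim(unsigned long file_pages, unsigned long file_mapped,
			     unsigned long present_pages)
{
	return file_pages - file_mapped <= min_unmapped_pages(present_pages);
}

int main(void)
{
	unsigned long present = 262144;	/* e.g. a 1GB zone of 4KB pages */

	printf("threshold: %lu pages\n", min_unmapped_pages(present));
	printf("skip reclaim: %d\n", skip_zone_reclaim(4000, 2000, present));
	return 0;
}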
Diffstat (limited to '')
-rw-r--r--	include/linux/mmzone.h	2
-rw-r--r--	mm/page_alloc.c	4
-rw-r--r--	mm/vmscan.c	2
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 7fe317164b73..a703527e2b45 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -169,7 +169,7 @@ struct zone {
 	/*
 	 * zone reclaim becomes active if more unmapped pages exist.
 	 */
-	unsigned long		min_unmapped_ratio;
+	unsigned long		min_unmapped_pages;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f7ea020c23ea..5da6bc4e0a6b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2002,7 +2002,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		zone->spanned_pages = size;
 		zone->present_pages = realsize;
 #ifdef CONFIG_NUMA
-		zone->min_unmapped_ratio = (realsize*sysctl_min_unmapped_ratio)
+		zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
 						/ 100;
 #endif
 		zone->name = zone_names[j];
@@ -2313,7 +2313,7 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 		return rc;
 
 	for_each_zone(zone)
-		zone->min_unmapped_ratio = (zone->present_pages *
+		zone->min_unmapped_pages = (zone->present_pages *
 				sysctl_min_unmapped_ratio) / 100;
 	return 0;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f35d7d585cb..5154c25e8440 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1618,7 +1618,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * unmapped file backed pages.
 	 */
 	if (zone_page_state(zone, NR_FILE_PAGES) -
-		zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio)
+		zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages)
 		return 0;
 
 	/*