author	Rik van Riel <riel@redhat.com>	2011-01-14 00:47:13 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-14 02:32:46 +0100
commit	9992af102974f3f8a02a1f2729c3461881539e26 (patch)
tree	40958e1a8bd7efc7c9a4d28e2b77d86bb8688734 /mm/vmscan.c
parent	thp: fix anon memory statistics with transparent hugepages (diff)
thp: scale nr_rotated to balance memory pressure
Make sure we scale up nr_rotated when we encounter a referenced transparent huge page.  This ensures pageout scanning balance is not distorted when there are huge pages on the LRU.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
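For reference, the scaling relies on hpage_nr_pages(), added earlier in the THP series, which reports how many base pages a page represents.  A simplified sketch of its behavior (the real helper lives in include/linux/huge_mm.h):

	/*
	 * Simplified sketch of hpage_nr_pages(): a transparent huge
	 * page counts as HPAGE_PMD_NR base pages (512 with 2MB huge
	 * pages on x86-64); any other page counts as 1.
	 */
	static inline int hpage_nr_pages(struct page *page)
	{
		if (unlikely(PageTransHuge(page)))
			return HPAGE_PMD_NR;
		return 1;
	}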
Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0882014d2ce5..47a50962ce81 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1276,7 +1276,8 @@ putback_lru_pages(struct zone *zone, struct scan_control *sc,
 		add_page_to_lru_list(zone, page, lru);
 		if (is_active_lru(lru)) {
 			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
+			int numpages = hpage_nr_pages(page);
+			reclaim_stat->recent_rotated[file] += numpages;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1552,7 +1553,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		}
 
 		if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-			nr_rotated++;
+			nr_rotated += hpage_nr_pages(page);
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
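A hypothetical worked example of the imbalance this removes, assuming x86-64 where one 2MB THP covers 512 base pages and the scanned-page counters are kept in base-page units (as other patches in this series arrange): before the patch a referenced THP bumped nr_rotated by only 1 against roughly 512 pages scanned; after, both sides are counted in the same units.  A standalone sketch (ordinary userspace C, not kernel code):

	#include <stdio.h>

	#define HPAGE_PMD_NR 512UL	/* base pages per 2MB THP on x86-64 */

	int main(void)
	{
		unsigned long scanned = HPAGE_PMD_NR;	/* one THP, in base pages  */
		unsigned long rotated_old = 1;		/* pre-patch: counted once */
		unsigned long rotated_new = HPAGE_PMD_NR;	/* post-patch: full size */

		printf("pre-patch  rotated/scanned: %lu/%lu\n", rotated_old, scanned);
		printf("post-patch rotated/scanned: %lu/%lu\n", rotated_new, scanned);
		return 0;
	}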