path: root/mm/page_alloc.c
author     Mel Gorman <mel@csn.ul.ie>  2009-06-17 00:32:08 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-17 04:47:34 +0200
commit     da456f14d2f2d7350f2b9440af79c85a34c7eed5 (patch)
tree       fb857a24a561153e25e91a2ad55fa4123f8b152c /mm/page_alloc.c
parent     page allocator: do not call get_pageblock_migratetype() more than necessary (diff)
download   linux-da456f14d2f2d7350f2b9440af79c85a34c7eed5.tar.xz
           linux-da456f14d2f2d7350f2b9440af79c85a34c7eed5.zip
page allocator: do not disable interrupts in free_page_mlock()
free_page_mlock() tests and clears PG_mlocked using locked versions of the bit operations. If set, it disables interrupts to update counters and this happens on every page free even though interrupts are disabled very shortly afterwards a second time. This is wasteful.

This patch splits what free_page_mlock() does. The bit check is still made. However, the update of counters is delayed until the interrupts are disabled and the non-lock version for clearing the bit is used. One potential weirdness with this split is that the counters do not get updated if the bad_page() check is triggered but a system showing bad pages is getting screwed already.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Pekka Enberg <penberg@cs.helsinki.fi>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
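For reference, the helper itself is defined in mm/internal.h and is outside this diffstat. A minimal sketch of the before/after shape of free_page_mlock() implied by the description above (illustrative only, assuming the 2.6.30-era definitions; not a verbatim hunk from this commit):

/*
 * Before: the helper performs the atomic test-and-clear and disables
 * interrupts itself on every mlocked page free. (Sketch, not verbatim.)
 */
static inline void free_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
		local_irq_restore(flags);
	}
}

/*
 * After: the caller tests PageMlocked() up front and only calls this
 * inside its existing local_irq_save() section, so the non-atomic
 * __ClearPageMlocked() and the __-prefixed counter updates suffice.
 * (Sketch, not verbatim.)
 */
static inline void free_page_mlock(struct page *page)
{
	__ClearPageMlocked(page);
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}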
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 8
1 file changed, 7 insertions, 1 deletion
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8f334d339b08..03a386d24ef2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -495,7 +495,6 @@ static inline void __free_one_page(struct page *page,
static inline int free_pages_check(struct page *page)
{
- free_page_mlock(page);
if (unlikely(page_mapcount(page) |
(page->mapping != NULL) |
(page_count(page) != 0) |
@@ -552,6 +551,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
unsigned long flags;
int i;
int bad = 0;
+ int clearMlocked = PageMlocked(page);
for (i = 0 ; i < (1 << order) ; ++i)
bad += free_pages_check(page + i);
@@ -567,6 +567,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
kernel_map_pages(page, 1 << order, 0);
local_irq_save(flags);
+ if (unlikely(clearMlocked))
+ free_page_mlock(page);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, order,
get_pageblock_migratetype(page));
@@ -1013,6 +1015,7 @@ static void free_hot_cold_page(struct page *page, int cold)
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
unsigned long flags;
+ int clearMlocked = PageMlocked(page);
if (PageAnon(page))
page->mapping = NULL;
@@ -1028,7 +1031,10 @@ static void free_hot_cold_page(struct page *page, int cold)
pcp = &zone_pcp(zone, get_cpu())->pcp;
local_irq_save(flags);
+ if (unlikely(clearMlocked))
+ free_page_mlock(page);
__count_vm_event(PGFREE);
+
if (cold)
list_add_tail(&page->lru, &pcp->list);
else