author     Nick Piggin <nickpiggin@yahoo.com.au>     2006-01-06 09:10:56 +0100
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-01-06 17:33:25 +0100
commit     c54ad30c784b84d0275152d0ca80985b21471811 (patch)
tree       7a40d6ddbe67360a1d9c577e3a2987d140056303 /mm/page_alloc.c
parent     [PATCH] mm: free_pages_and_swap_cache opt (diff)
[PATCH] mm: pagealloc opt
Slightly optimise some page allocation and freeing functions by taking advantage of knowing whether or not interrupts are disabled.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
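In pattern form: free_pages_bulk() and rmqueue_bulk() stop saving and restoring interrupt flags themselves (spin_lock_irqsave() becomes a plain spin_lock()), and the flags handling moves out to call sites that are not already running with interrupts disabled: __free_pages_ok() and __drain_pages() gain a local_irq_save()/local_irq_restore() pair, while the buffered_rmqueue() path needs none because its surrounding (unshown) context already runs under local_irq_save(). A minimal sketch of that pattern follows; the names (example_lock, leaf_before, leaf_after, caller_after) are illustrative only, not from the patch:

/*
 * Sketch of the locking refactor this patch applies, using the
 * real kernel spinlock/irq-flags APIs but hypothetical functions.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Before: callable from any context, but pays for a flags
 * save/restore on every call. */
static void leaf_before(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* After: must be called with interrupts already disabled. */
static void leaf_after(void)
{
	spin_lock(&example_lock);
	/* ... critical section ... */
	spin_unlock(&example_lock);
}

/* Callers that are not already IRQ-off pick up the flags handling
 * and can make several leaf calls under one save/restore. */
static void caller_after(void)
{
	unsigned long flags;

	local_irq_save(flags);
	leaf_after();
	local_irq_restore(flags);
}

The win is avoiding a redundant flags save/restore on paths that already run with interrupts disabled, at the cost of a stricter calling convention for the leaf functions.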
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07825c637a58..680cbe5b6ba2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -375,11 +375,10 @@ static int
free_pages_bulk(struct zone *zone, int count,
struct list_head *list, unsigned int order)
{
- unsigned long flags;
struct page *page = NULL;
int ret = 0;
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
zone->all_unreclaimable = 0;
zone->pages_scanned = 0;
while (!list_empty(list) && count--) {
@@ -389,12 +388,13 @@ free_pages_bulk(struct zone *zone, int count,
__free_pages_bulk(page, zone, order);
ret++;
}
- spin_unlock_irqrestore(&zone->lock, flags);
+ spin_unlock(&zone->lock);
return ret;
}
void __free_pages_ok(struct page *page, unsigned int order)
{
+ unsigned long flags;
LIST_HEAD(list);
int i;
int reserved = 0;
@@ -415,7 +415,9 @@ void __free_pages_ok(struct page *page, unsigned int order)
list_add(&page->lru, &list);
mod_page_state(pgfree, 1 << order);
kernel_map_pages(page, 1<<order, 0);
+ local_irq_save(flags);
free_pages_bulk(page_zone(page), 1, &list, order);
+ local_irq_restore(flags);
}
@@ -539,12 +541,11 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list)
{
- unsigned long flags;
int i;
int allocated = 0;
struct page *page;
- spin_lock_irqsave(&zone->lock, flags);
+ spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
page = __rmqueue(zone, order);
if (page == NULL)
@@ -552,7 +553,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
allocated++;
list_add_tail(&page->lru, list);
}
- spin_unlock_irqrestore(&zone->lock, flags);
+ spin_unlock(&zone->lock);
return allocated;
}
@@ -589,6 +590,7 @@ void drain_remote_pages(void)
#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
+ unsigned long flags;
struct zone *zone;
int i;
@@ -600,8 +602,10 @@ static void __drain_pages(unsigned int cpu)
struct per_cpu_pages *pcp;
pcp = &pset->pcp[i];
+ local_irq_save(flags);
pcp->count -= free_pages_bulk(zone, pcp->count,
&pcp->list, 0);
+ local_irq_restore(flags);
}
}
}
@@ -744,7 +748,7 @@ again:
if (pcp->count <= pcp->low)
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, &pcp->list);
- if (pcp->count) {
+ if (likely(pcp->count)) {
page = list_entry(pcp->list.next, struct page, lru);
list_del(&page->lru);
pcp->count--;
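The final hunk also adds a branch-prediction hint: the kernel's likely() macro wraps GCC's __builtin_expect() so the compiler lays out the non-empty-list case as the straight-line path, which is the common case right after the per-cpu list has been refilled by rmqueue_bulk(). A userspace sketch of the same hint (hypothetical function, GCC/Clang only):

/* Userspace equivalent of the kernel's likely() macro. */
#define likely(x)	__builtin_expect(!!(x), 1)

/* Hypothetical example: the hot path (count != 0) becomes the
 * fall-through, so the common case avoids a taken branch. */
static int take_one(int count)
{
	if (likely(count))
		return count - 1;	/* common case: list not empty */
	return -1;			/* rare case: nothing available */
}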