path: root/mm
author	Akinobu Mita <akinobu.mita@gmail.com>	2007-05-06 23:50:19 +0200
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 21:12:57 +0200
commit	4ab688c51226188f2d4ad4f789032c107944ef89 (patch)
tree	f0793bf529ada8da81a764766996db90073a020f /mm
parent	hugetlbfs: add NULL check in hugetlb_zero_setup() (diff)
download	linux-4ab688c51226188f2d4ad4f789032c107944ef89.tar.xz
	linux-4ab688c51226188f2d4ad4f789032c107944ef89.zip
slob: fix page order calculation for non-4KB page sizes
SLOB doesn't calculate the correct page order when the page size is not 4KB. Fix it by using get_order() instead of find_order(), which is SLOB's own version of get_order().

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Acked-by: Matt Mackall <mpm@selenic.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
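For context on what goes wrong: the removed find_order() helper compares the request against a hard-coded 4096 rather than the configured page size, so on a kernel built with larger pages it returns too high an order and the big-block path over-allocates. The userspace sketch below only illustrates the arithmetic; the PAGE_SHIFT value (a 16KB-page configuration) and the order_for() helper that mimics get_order()'s rounding are assumptions for the demo, not kernel code.

/*
 * Userspace sketch only -- PAGE_SHIFT is an assumed 16KB-page
 * configuration, and order_for() just mimics get_order()'s rounding.
 */
#include <stdio.h>

#define PAGE_SHIFT 14	/* assumption: 16KB pages for the demo */

/* The old SLOB helper: only correct when the page size is 4096. */
static int find_order(int size)
{
	int order = 0;
	for (; size > 4096; size >>= 1)
		order++;
	return order;
}

/* get_order()-style math: smallest order of PAGE_SIZE pages covering size. */
static int order_for(unsigned long size)
{
	int order = 0;
	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	unsigned long size = 16384;	/* exactly one 16KB page */

	/* Prints 2 (four pages, 64KB) vs. 0 (a single 16KB page). */
	printf("find_order(%lu) = %d\n", size, find_order(size));
	printf("order_for(%lu)  = %d\n", size, order_for(size));
	return 0;
}

With 16KB pages, find_order(16384) reports order 2 and the allocator hands back 64KB for a 16KB request, while get_order()-style math correctly reports order 0.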
Diffstat (limited to 'mm')
-rw-r--r--	mm/slob.c	15
1 file changed, 3 insertions(+), 12 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index c9401a7eaa5f..c6933bc19bcd 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -150,15 +150,6 @@ static void slob_free(void *block, int size)
spin_unlock_irqrestore(&slob_lock, flags);
}
-static int FASTCALL(find_order(int size));
-static int fastcall find_order(int size)
-{
- int order = 0;
- for ( ; size > 4096 ; size >>=1)
- order++;
- return order;
-}
-
void *__kmalloc(size_t size, gfp_t gfp)
{
slob_t *m;
@@ -174,7 +165,7 @@ void *__kmalloc(size_t size, gfp_t gfp)
if (!bb)
return 0;
- bb->order = find_order(size);
+ bb->order = get_order(size);
bb->pages = (void *)__get_free_pages(gfp, bb->order);
if (bb->pages) {
@@ -318,7 +309,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
if (c->size < PAGE_SIZE)
b = slob_alloc(c->size, flags, c->align);
else
- b = (void *)__get_free_pages(flags, find_order(c->size));
+ b = (void *)__get_free_pages(flags, get_order(c->size));
if (c->ctor)
c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
@@ -345,7 +336,7 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
if (c->size < PAGE_SIZE)
slob_free(b, c->size);
else
- free_pages((unsigned long)b, find_order(c->size));
+ free_pages((unsigned long)b, get_order(c->size));
}
EXPORT_SYMBOL(kmem_cache_free);