author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-03-07 22:49:32 +0100
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-03-07 22:49:32 +0100
commit    bb799dcadd60796ab13d32a698d92bfb92ce0d5c (patch)
tree      fb696ba0dccecfb5370856c3322e78f0c2cf5f48
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6 (diff)
parent    slub: fix typo in Documentation/vm/slub.txt (diff)
download  linux-bb799dcadd60796ab13d32a698d92bfb92ce0d5c.tar.xz
          linux-bb799dcadd60796ab13d32a698d92bfb92ce0d5c.zip
Merge branch 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm
* 'slab-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/christoph/vm:
  slub: fix typo in Documentation/vm/slub.txt
  slab: NUMA slab allocator migration bugfix
  slub: Do not cross cacheline boundaries for very small objects
  slab - use angle brackets for include of kmalloc_sizes.h
  slab numa fallback logic: Do not pass unfiltered flags to page allocator
  slub statistics: Fix check for DEACTIVATE_REMOTE_FREES
-rw-r--r--  Documentation/vm/slub.txt |  4 ++--
-rw-r--r--  include/linux/slab_def.h  |  4 ++--
-rw-r--r--  mm/slab.c                 |  9 ++++-----
-rw-r--r--  mm/slub.c                 | 13 ++++++++-----
4 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index dcf8bcf846d6..7c13f22a0c9e 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -50,14 +50,14 @@ F.e. in order to boot just with sanity checks and red zoning one would specify:
 
 Trying to find an issue in the dentry cache? Try
 
- slub_debug=,dentry_cache
+ slub_debug=,dentry
 
 to only enable debugging on the dentry cache.
 
 Red zoning and tracking may realign the slab. We can just apply sanity checks
 to the dentry cache with
 
- slub_debug=F,dentry_cache
+ slub_debug=F,dentry
 
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
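
The doc fix tracks the cache's registered name: slub_debug selects caches by the string they were created with, and the dentry cache is registered as "dentry", not "dentry_cache". A minimal sketch of wiring up a cache that can be targeted the same way (the "foo" names are illustrative, not part of this patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct foo {
	int a, b;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
	/* The registered name "foo" is what slub_debug=F,foo matches. */
	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
	return foo_cache ? 0 : -ENOMEM;
}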
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fcc48096ee64..39c3a5eb8ebe 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -41,7 +41,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		{
 			extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		{
 			extern void __you_cannot_kmalloc_that_much(void);
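
For context on why the include style changed (a general preprocessor point rather than anything specific to this patch): the quoted form searches the directory of the including file before the configured include path, so what it resolves to depends on where the includer lives; the angle form consults only the search path and is therefore the same from every file:

/*
 * Quoted form: the preprocessor first looks next to the file containing
 * the #include, then falls back to the search path.
 */
#include "kmalloc_sizes.h"

/*
 * Angle form: resolved purely against the search path (-Iinclude in the
 * kernel build), identical from every including file.
 */
#include <linux/kmalloc_sizes.h>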
diff --git a/mm/slab.c b/mm/slab.c
index 473e6c2eaefb..e6c698f55674 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -333,7 +333,7 @@ static __always_inline int index_of(const size_t size)
 		return i; \
 	else \
 		i++;
-#include "linux/kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		__bad_size();
 	} else
@@ -2964,11 +2964,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac;
 	int node;
 
-	node = numa_node_id();
-
+retry:
 	check_irq_off();
+	node = numa_node_id();
 	ac = cpu_cache_get(cachep);
-retry:
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
@@ -3280,7 +3279,7 @@ retry:
 	if (local_flags & __GFP_WAIT)
 		local_irq_enable();
 	kmem_flagcheck(cache, flags);
-	obj = kmem_getpages(cache, flags, -1);
+	obj = kmem_getpages(cache, local_flags, -1);
 	if (local_flags & __GFP_WAIT)
 		local_irq_disable();
 	if (obj) {
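
Two separate fixes land in mm/slab.c above. The cache_alloc_refill() hunk moves the retry label ahead of the node and per-cpu cache reads: once the allocation path can sleep and loop back, values derived from the current CPU may be stale, so they must be re-read on every pass. A runnable userspace analogue of that bug class (sched_getcpu() standing in for numa_node_id(); not slab code):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int cpu = sched_getcpu();	/* cached once, like the old code */

	sched_yield();			/* a point where we may migrate */

	/* Re-read instead of trusting the cached value; the fixed kernel
	 * code likewise re-runs numa_node_id() and cpu_cache_get() each
	 * time it passes the retry label. */
	if (cpu != sched_getcpu())
		printf("migrated: cached id %d was stale\n", cpu);
	return 0;
}

The kmem_getpages() hunk is the "unfiltered flags" fix from the merge summary: the fallback path hands the page allocator local_flags, the caller's copy with slab-internal bits already masked off, rather than the raw flags.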
diff --git a/mm/slub.c b/mm/slub.c
index 0863fd38a5ce..96d63eb3ab17 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1368,7 +1368,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * The hardware cache alignment cannot override the specified
 	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }
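
The first mm/slub.c hunk fixes the DEACTIVATE_REMOTE_FREES statistic: objects freed from another CPU accumulate on the slab page's freelist, not on the per-cpu freelist, so page->freelist is the right thing to test. The calculate_alignment() hunk stops padding very small objects out to a full cacheline: the alignment is halved while the object still fits in half of it, so several tiny objects can share one line without any of them straddling a boundary. A standalone sketch of that rule (the 64-byte line size is assumed for illustration):

#include <stdio.h>

static unsigned long hwcache_align(unsigned long size, unsigned long line)
{
	unsigned long ralign = line;

	/* Halve while the object still fits in half the alignment. */
	while (size <= ralign / 2)
		ralign /= 2;
	return ralign;
}

int main(void)
{
	printf("%lu\n", hwcache_align(12, 64));	/* 64 -> 32 -> 16 */
	printf("%lu\n", hwcache_align(40, 64));	/* stays 64 */
	return 0;
}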