summary | refs | log | tree | commit | diff | stats
path: root/mm/slub.c
diff options
context:
space:
mode:
author: Christoph Lameter <cl@linux.com> 2011-06-01 19:25:45 +0200
committer: Pekka Enberg <penberg@kernel.org> 2011-07-02 12:26:52 +0200
commit: 50d5c41cd151b21ac1dfc98f048210456ccacc20 (patch)
tree: 406983fe5aca70acad4df7c7f92286e4fc71d70b /mm/slub.c
parent: slub: Push irq disable into allocate_slab() (diff)
download: linux-50d5c41cd151b21ac1dfc98f048210456ccacc20.tar.xz
download: linux-50d5c41cd151b21ac1dfc98f048210456ccacc20.zip
slub: Do not use frozen page flag but a bit in the page counters
Do not use a page flag for the frozen bit. It needs to be part of the state that is handled with cmpxchg_double(). So use a bit in the counter struct in the page struct for that purpose.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- mm/slub.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index add2ae74046c..82b2d048a278 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -166,7 +166,7 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
+#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000UL /* Poison object */
@@ -1025,7 +1025,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
}
/* Special debug activities for freeing objects */
- if (!PageSlubFrozen(page) && !page->freelist)
+ if (!page->frozen && !page->freelist)
remove_full(s, page);
if (s->flags & SLAB_STORE_USER)
set_track(s, object, TRACK_FREE, addr);
@@ -1424,7 +1424,7 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
{
if (slab_trylock(page)) {
__remove_partial(n, page);
- __SetPageSlubFrozen(page);
+ page->frozen = 1;
return 1;
}
return 0;
@@ -1538,7 +1538,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
- __ClearPageSlubFrozen(page);
+ page->frozen = 0;
if (page->inuse) {
if (page->freelist) {
@@ -1868,7 +1868,7 @@ new_slab:
flush_slab(s, c);
slab_lock(page);
- __SetPageSlubFrozen(page);
+ page->frozen = 1;
c->node = page_to_nid(page);
c->page = page;
goto load_freelist;
@@ -2048,7 +2048,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
page->freelist = object;
page->inuse--;
- if (unlikely(PageSlubFrozen(page))) {
+ if (unlikely(page->frozen)) {
stat(s, FREE_FROZEN);
goto out_unlock;
}