author		Christoph Lameter <clameter@sgi.com>	2008-04-28 11:12:05 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 17:58:17 +0200
commit		488514d1798289f56f80ed018e246179fe500383 (patch)
tree		e68d9f703dcbceed0cc08b03014d109d7ec3acd4 /lib
parent		aio: io_getevents() should return if io_destroy() is invoked (diff)
Remove set_migrateflags()
Migrate flags must be set on slab creation, as agreed upon when the antifrag logic was reviewed. Otherwise some slabs of a slabcache will end up in the unmovable section and others in the reclaimable section, depending on which flag was active when a new slab page was allocated. This likely slid in somehow when antifrag was merged. Remove it.

The buffer_heads are always allocated with __GFP_RECLAIMABLE because the SLAB_RECLAIM_ACCOUNT option is set, so set_migrateflags() never had any effect there.

Radix tree allocations are not directly reclaimable, but they were allocated with __GFP_RECLAIMABLE set on each allocation. We now set SLAB_RECLAIM_ACCOUNT on radix tree slab creation, making sure that radix tree slabs are consistently placed in the reclaimable section. Radix tree slabs will also be accounted as such.

There is then no user left of set_migrateflags(), so remove it.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
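To make the pattern concrete: the helper being removed rewrote the mobility bits of the gfp mask for one allocation at a time, so only call sites that used it requested reclaimable slab pages, while any other allocation from the same cache could grow it with an unmovable page. Flagging the cache at creation covers every allocation. A minimal sketch follows, assuming a 2.6.25-era kernel; set_migrateflags() is shown roughly as it was defined in include/linux/gfp.h before this commit, and my_node, my_cache, old_way and new_way are hypothetical names for illustration:

        #include <linux/slab.h>
        #include <linux/gfp.h>
        #include <linux/bug.h>

        struct my_node {                /* hypothetical payload type */
                unsigned long data;
        };

        /*
         * The helper this commit removes, roughly as defined in
         * include/linux/gfp.h at the time: it replaces the mobility
         * bits (__GFP_RECLAIMABLE | __GFP_MOVABLE) of one gfp mask.
         */
        static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
        {
                BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
                return (gfp & ~GFP_MOVABLE_MASK) | migrate_flags;
        }

        /*
         * Old, inconsistent pattern: only allocations funneled through
         * this call site request a reclaimable slab page; other call
         * sites of the same cache may get unmovable pages instead.
         */
        static void *old_way(struct kmem_cache *my_cache, gfp_t gfp_mask)
        {
                return kmem_cache_alloc(my_cache,
                                set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
        }

        /*
         * New, consistent pattern: flag the cache once at creation, so
         * the slab allocator places every slab page of this cache in
         * the reclaimable section, whatever gfp mask callers pass.
         */
        static struct kmem_cache *new_way(void)
        {
                return kmem_cache_create("my_node", sizeof(struct my_node), 0,
                                         SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
                                         NULL);
        }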
Diffstat (limited to 'lib')
-rw-r--r--	lib/radix-tree.c	| 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 65f0e758ec38..bd521716ab1a 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
}
}
if (ret == NULL)
- ret = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);

BUG_ON(radix_tree_is_indirect_ptr(ret));
return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
rtp = &__get_cpu_var(radix_tree_preloads);
while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
preempt_enable();
- node = kmem_cache_alloc(radix_tree_node_cachep,
- set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+ node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
if (node == NULL)
goto out;
preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
{
radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
sizeof(struct radix_tree_node), 0,
- SLAB_PANIC, radix_tree_node_ctor);
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+ radix_tree_node_ctor);
radix_tree_init_maxindex();
hotcpu_notifier(radix_tree_callback, 0);
}
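Why SLAB_RECLAIM_ACCOUNT makes the per-allocation flag redundant: when a cache carries the flag, the slab allocator itself ORs __GFP_RECLAIMABLE into the mask it uses to allocate fresh slab pages, so the caller's gfp mask no longer decides page mobility. A sketch of that logic, paraphrasing the slab allocator's page-allocation path of that era (get_slab_pages is a hypothetical name, not the real function):

        /* Paraphrased from the slab allocator's page-allocation path. */
        static struct page *get_slab_pages(struct kmem_cache *cachep,
                                           gfp_t flags)
        {
                /*
                 * A cache created with SLAB_RECLAIM_ACCOUNT always asks
                 * the page allocator for reclaimable pages, independent
                 * of the gfp mask passed to kmem_cache_alloc().
                 */
                if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
                        flags |= __GFP_RECLAIMABLE;

                return alloc_pages(flags, cachep->gfporder);
        }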