author     Peter Zijlstra <a.p.zijlstra@chello.nl>                2007-05-06 23:49:55 +0200
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-07 21:12:55 +0200
commit     f9a14399aea13830d8af6798a53207bb0a900945 (patch)
tree       b2501f1ce1d2a4564cd9a29c55705e524f594ad1
parent     mm: remove destroy_dirty_buffers from invalidate_bdev() (diff)
mm: optimize kill_bdev()
Remove duplicate work in kill_bdev().

It currently invalidates and then truncates the bdev's mapping.
invalidate_mapping_pages() will opportunistically remove pages from the
mapping, and truncate_inode_pages() will forcefully remove all pages.

The only thing truncate doesn't do is flush the bh lrus, so do that
explicitly. This avoids (very unlikely, but possible) invalid lookup
results if the same bdev is quickly re-issued.

It also prevents the extreme kernel latencies observed when blockdevs
with a large amount of pagecache are unmounted, by avoiding
invalidate_mapping_pages() on that path: invalidate_mapping_pages() has
no cond_resched (it can be called under a spinlock), whereas
truncate_inode_pages() has one.

[akpm@linux-foundation.org: restore nrpages==0 optimisation]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
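For quick reference, the resulting kill_bdev() after this patch looks roughly like the sketch below, assembled from the fs/block_dev.c hunk further down (the comments are editorial, not from the tree):

/* Kill _all_ buffers and pagecache, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	/* nrpages == 0: nothing cached for this bdev, so skip the work */
	if (bdev->bd_inode->i_mapping->nrpages == 0)
		return;
	/* truncate doesn't touch the per-CPU bh LRUs, so flush them explicitly */
	invalidate_bh_lrus();
	/* forcefully drop every page in the bdev's mapping (may reschedule) */
	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
}

Compared with the old invalidate_bdev()-based version, the heavy lifting now happens in truncate_inode_pages(), which can cond_resched(), which is where the latency improvement described above comes from.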
-rw-r--r--   fs/block_dev.c                 6
-rw-r--r--   fs/buffer.c                    3
-rw-r--r--   include/linux/buffer_head.h    1
3 files changed, 6 insertions, 4 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 21e59acbcfdf..6fe49b9349ea 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -55,10 +55,12 @@ static sector_t max_block(struct block_device *bdev)
 	return retval;
 }
 
-/* Kill _all_ buffers, dirty or not.. */
+/* Kill _all_ buffers and pagecache , dirty or not.. */
 static void kill_bdev(struct block_device *bdev)
 {
-	invalidate_bdev(bdev);
+	if (bdev->bd_inode->i_mapping->nrpages == 0)
+		return;
+	invalidate_bh_lrus();
 	truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
 }
diff --git a/fs/buffer.c b/fs/buffer.c
index 630df3e6fe0c..80291aad6de6 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -44,7 +44,6 @@
 #include <linux/bit_spinlock.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static void invalidate_bh_lrus(void);
 
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
@@ -1403,7 +1402,7 @@ static void invalidate_bh_lru(void *arg)
 	put_cpu_var(bh_lrus);
 }
 
-static void invalidate_bh_lrus(void)
+void invalidate_bh_lrus(void)
 {
 	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 632c50b21386..5c6e12853a9b 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -182,6 +182,7 @@ void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, unsigned int size);
 struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size);
+void invalidate_bh_lrus(void);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));