author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2008-08-19 00:00:57 +0200
committer  Theodore Ts'o <tytso@mit.edu>                         2008-08-19 00:00:57 +0200
commit     5e745b041f2ccad63077118b40468521306f3962
tree       2a4d53c884f92899ee8e4f541c32861a4a577e1d /fs/ext4/inode.c
parent     ext4: Initialize writeback_index to 0 when allocating a new inode
ext4: Fix small file fragmentation
For small file block allocations, mballoc uses per-CPU prealloc
space. Use the goal block when searching for the right prealloc
space. Also make sure ext4_da_writepages tries to write
all the pages for small files in a single attempt.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r-- | fs/ext4/inode.c | 21 |
1 file changed, 15 insertions, 6 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d1906d9a22de..7e91913e325b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2282,13 +2282,12 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
 static int ext4_da_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
 {
-	struct inode *inode = mapping->host;
 	handle_t *handle = NULL;
-	int needed_blocks;
-	int ret = 0;
-	long to_write;
 	loff_t range_start = 0;
-	long pages_skipped = 0;
+	struct inode *inode = mapping->host;
+	int needed_blocks, ret = 0, nr_to_writebump = 0;
+	long to_write, pages_skipped = 0;
+	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);

 	/*
 	 * No pages to write? This is mainly a kludge to avoid starting
@@ -2297,6 +2296,16 @@ static int ext4_da_writepages(struct address_space *mapping,
 	 */
 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 		return 0;
+	/*
+	 * Make sure nr_to_write is >= sbi->s_mb_stream_request
+	 * This make sure small files blocks are allocated in
+	 * single attempt. This ensure that small files
+	 * get less fragmented.
+	 */
+	if (wbc->nr_to_write < sbi->s_mb_stream_request) {
+		nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
+		wbc->nr_to_write = sbi->s_mb_stream_request;
+	}

 	if (!wbc->range_cyclic)
 		/*
@@ -2377,7 +2386,7 @@ restart_loop:
 	}

 out_writepages:
-	wbc->nr_to_write = to_write;
+	wbc->nr_to_write = to_write - nr_to_writebump;
 	wbc->range_start = range_start;
 	return ret;
 }
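
The inode.c hunks above boil down to a bump-and-restore of wbc->nr_to_write around the writeback loop. A minimal standalone sketch of that pattern, using simplified stand-in structs rather than the real kernel definitions (da_writepages_sketch() is a made-up name for illustration):

#include <stdio.h>

/* Simplified stand-ins for the kernel structures touched by the patch. */
struct writeback_control {
    long nr_to_write;                   /* pages the caller asked us to write */
};

struct ext4_sb_info {
    unsigned int s_mb_stream_request;   /* small-file threshold, in blocks */
};

/*
 * Bump-and-restore pattern from ext4_da_writepages(): raise nr_to_write
 * to at least s_mb_stream_request so a small file's blocks are requested
 * in one writeback pass, then subtract the bump again before reporting
 * back how much work was left.
 */
static long da_writepages_sketch(struct ext4_sb_info *sbi,
                                 struct writeback_control *wbc)
{
    long nr_to_writebump = 0;
    long to_write;

    if (wbc->nr_to_write < (long)sbi->s_mb_stream_request) {
        nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
        wbc->nr_to_write = sbi->s_mb_stream_request;
    }

    to_write = wbc->nr_to_write;
    /* ... the real function writes pages here, decrementing to_write ... */

    /* Report only the work the caller originally asked for. */
    wbc->nr_to_write = to_write - nr_to_writebump;
    return wbc->nr_to_write;
}

int main(void)
{
    struct ext4_sb_info sbi = { .s_mb_stream_request = 16 };
    struct writeback_control wbc = { .nr_to_write = 4 };

    printf("remaining nr_to_write: %ld\n", da_writepages_sketch(&sbi, &wbc));
    return 0;
}

Bumping nr_to_write lets the delayed-allocation path hand the block allocator all of a small file's dirty pages at once, while subtracting nr_to_writebump at the end keeps the accounting the caller sees unchanged.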