From be1a12a0dfed06cf1e62e35bf91620dc610a451a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 13:05:22 -0400 Subject: Btrfs: deal with the case that we run out of space in the cache Currently we don't handle running out of space in the cache, so to fix this we keep track of how far in the cache we are. Then we only dirty the pages if we successfully modify all of them, otherwise if we have an error or run out of space we can just drop them and not worry about the vm writing them out. Thanks, Tested-by Johannes Hirte Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 5 ++ fs/btrfs/file.c | 21 ++++---- fs/btrfs/free-space-cache.c | 117 +++++++++++++++++++++----------------------- 3 files changed, 69 insertions(+), 74 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3458b5725540..0d00a07b5b29 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2576,6 +2576,11 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, struct inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); +void btrfs_drop_pages(struct page **pages, size_t num_pages); +int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, + struct page **pages, size_t num_pages, + loff_t pos, size_t write_bytes, + struct extent_state **cached); /* tree-defrag.c */ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e621ea54a3fd..75899a01dded 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -104,7 +104,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, /* * unlocks pages after btrfs_file_write is done with them */ -static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) +void btrfs_drop_pages(struct page **pages, size_t num_pages) { size_t i; for (i = 0; i < num_pages; i++) { @@ -127,16 +127,13 @@ static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages) * this also makes the decision about creating an inline extent vs * doing real data extents, marking pages dirty and delalloc as required. 
*/ -static noinline int dirty_and_release_pages(struct btrfs_root *root, - struct file *file, - struct page **pages, - size_t num_pages, - loff_t pos, - size_t write_bytes) +int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode, + struct page **pages, size_t num_pages, + loff_t pos, size_t write_bytes, + struct extent_state **cached) { int err = 0; int i; - struct inode *inode = fdentry(file)->d_inode; u64 num_bytes; u64 start_pos; u64 end_of_last_block; @@ -149,7 +146,7 @@ static noinline int dirty_and_release_pages(struct btrfs_root *root, end_of_last_block = start_pos + num_bytes - 1; err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block, - NULL); + cached); if (err) return err; @@ -992,9 +989,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, } if (copied > 0) { - ret = dirty_and_release_pages(root, file, pages, - dirty_pages, pos, - copied); + ret = btrfs_dirty_pages(root, inode, pages, + dirty_pages, pos, copied, + NULL); if (ret) { btrfs_delalloc_release_space(inode, dirty_pages << PAGE_CACHE_SHIFT); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f561c953205b..a3f420def0e9 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -508,6 +508,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode; struct rb_node *node; struct list_head *pos, *n; + struct page **pages; struct page *page; struct extent_state *cached_state = NULL; struct btrfs_free_cluster *cluster = NULL; @@ -517,13 +518,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, u64 start, end, len; u64 bytes = 0; u32 *crc, *checksums; - pgoff_t index = 0, last_index = 0; unsigned long first_page_offset; - int num_checksums; + int index = 0, num_pages = 0; int entries = 0; int bitmaps = 0; int ret = 0; bool next_page = false; + bool out_of_space = false; root = root->fs_info->tree_root; @@ -551,24 +552,31 @@ int btrfs_write_out_cache(struct btrfs_root *root, return 0; } - last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & ~(root->sectorsize - 1), (u64)-1); /* We need a checksum per page. */ - num_checksums = i_size_read(inode) / PAGE_CACHE_SIZE; - crc = checksums = kzalloc(sizeof(u32) * num_checksums, GFP_NOFS); + crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); if (!crc) { iput(inode); return 0; } + pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); + if (!pages) { + kfree(crc); + iput(inode); + return 0; + } + /* Since the first page has all of our checksums and our generation we * need to calculate the offset into the page that we can start writing * our entries. */ - first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64); + first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); /* Get the cluster for this block_group if it exists */ if (!list_empty(&block_group->cluster_list)) @@ -590,20 +598,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, * after find_get_page at this point. Just putting this here so people * know and don't freak out. 
*/ - while (index <= last_index) { + while (index < num_pages) { page = grab_cache_page(inode->i_mapping, index); if (!page) { - pgoff_t i = 0; + int i; - while (i < index) { - page = find_get_page(inode->i_mapping, i); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); - i++; + for (i = 0; i < num_pages; i++) { + unlock_page(pages[i]); + page_cache_release(pages[i]); } goto out_free; } + pages[index] = page; index++; } @@ -631,7 +637,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, offset = start_offset; } - page = find_get_page(inode->i_mapping, index); + if (index >= num_pages) { + out_of_space = true; + break; + } + + page = pages[index]; addr = kmap(page); entry = addr + start_offset; @@ -708,23 +719,6 @@ int btrfs_write_out_cache(struct btrfs_root *root, bytes += PAGE_CACHE_SIZE; - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - - /* - * We need to release our reference we got for grab_cache_page, - * except for the first page which will hold our checksums, we - * do that below. - */ - if (index != 0) { - unlock_page(page); - page_cache_release(page); - } - - page_cache_release(page); - index++; } while (node || next_page); @@ -734,6 +728,10 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_free_space *entry = list_entry(pos, struct btrfs_free_space, list); + if (index >= num_pages) { + out_of_space = true; + break; + } page = find_get_page(inode->i_mapping, index); addr = kmap(page); @@ -745,64 +743,58 @@ int btrfs_write_out_cache(struct btrfs_root *root, crc++; bytes += PAGE_CACHE_SIZE; - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); list_del_init(&entry->list); index++; } + if (out_of_space) { + btrfs_drop_pages(pages, num_pages); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, + i_size_read(inode) - 1, &cached_state, + GFP_NOFS); + ret = 0; + goto out_free; + } + /* Zero out the rest of the pages just to make sure */ - while (index <= last_index) { + while (index < num_pages) { void *addr; - page = find_get_page(inode->i_mapping, index); - + page = pages[index]; addr = kmap(page); memset(addr, 0, PAGE_CACHE_SIZE); kunmap(page); - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); bytes += PAGE_CACHE_SIZE; index++; } - btrfs_set_extent_delalloc(inode, 0, bytes - 1, &cached_state); - /* Write the checksums and trans id to the first page */ { void *addr; u64 *gen; - page = find_get_page(inode->i_mapping, 0); + page = pages[0]; addr = kmap(page); - memcpy(addr, checksums, sizeof(u32) * num_checksums); - gen = addr + (sizeof(u32) * num_checksums); + memcpy(addr, checksums, sizeof(u32) * num_pages); + gen = addr + (sizeof(u32) * num_pages); *gen = trans->transid; kunmap(page); - ClearPageChecked(page); - set_page_extent_mapped(page); - SetPageUptodate(page); - set_page_dirty(page); - unlock_page(page); - page_cache_release(page); - page_cache_release(page); } - BTRFS_I(inode)->generation = trans->transid; + ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0, + bytes, &cached_state); + btrfs_drop_pages(pages, num_pages); unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, &cached_state, GFP_NOFS); + if (ret) { + ret = 0; + goto out_free; + } + + BTRFS_I(inode)->generation = 
trans->transid; + filemap_write_and_wait(inode->i_mapping); key.objectid = BTRFS_FREE_SPACE_OBJECTID; @@ -853,6 +845,7 @@ out_free: BTRFS_I(inode)->generation = 0; } kfree(checksums); + kfree(pages); btrfs_update_inode(trans, root, inode); iput(inode); return ret; -- cgit v1.2.3 From 06d5a5899d6d3ac401d2359b5eac6d2a3a0fe331 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 5 Apr 2011 11:57:27 -0400 Subject: Btrfs: only retry transaction reservation once I saw a lockup where we kept getting into this start transaction->commit transaction loop because of enospc. The fact is if we fail to make our reservation, we've tried _everything_ several times, so we only need to try and commit the transaction once, and if that doesn't work then we really are out of space and need to just exit. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 5b158da7e0bb..4583008217e6 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -181,6 +181,7 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, { struct btrfs_trans_handle *h; struct btrfs_transaction *cur_trans; + int retries = 0; int ret; if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) @@ -224,10 +225,18 @@ again: if (num_items > 0) { ret = btrfs_trans_reserve_metadata(h, root, num_items); - if (ret == -EAGAIN) { + if (ret == -EAGAIN && !retries) { + retries++; btrfs_commit_transaction(h, root); goto again; + } else if (ret == -EAGAIN) { + /* + * We have already retried and got EAGAIN, so really we + * don't have space, so set ret to -ENOSPC. + */ + ret = -ENOSPC; } + if (ret < 0) { btrfs_end_transaction(h, root); return ERR_PTR(ret); -- cgit v1.2.3 From 12ddb96cb6752218d8a1aeb696ec9b0ca7adb42f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 5 Apr 2011 13:02:27 -0400 Subject: Btrfs: map the inode item when doing fill_inode_item Instead of calling kmap_atomic for everything we set in the inode item, map the entire inode item at the start and unmap it at the end. This makes a sequential dd of 400mb O_DIRECT something like 1% faster. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cc6022842e0c..da2680263d9f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2589,6 +2589,13 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, struct btrfs_inode_item *item, struct inode *inode) { + if (!leaf->map_token) + map_private_extent_buffer(leaf, (unsigned long)item, + sizeof(struct btrfs_inode_item), + &leaf->map_token, &leaf->kaddr, + &leaf->map_start, &leaf->map_len, + KM_USER1); + btrfs_set_inode_uid(leaf, item, inode->i_uid); btrfs_set_inode_gid(leaf, item, inode->i_gid); btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size); @@ -2617,6 +2624,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_inode_rdev(leaf, item, inode->i_rdev); btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); + + if (leaf->map_token) { + unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); + leaf->map_token = NULL; + } } /* -- cgit v1.2.3 From 1ef30be142d2cc60e2687ef267de864cf31be995 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 5 Apr 2011 19:25:36 -0400 Subject: Btrfs: do not call btrfs_update_inode in endio if nothing changed In the DIO code we often don't update the i_disk_size because the i_size isn't updated until after the DIO is completed, so basically we are allocating a path, doing a search, and updating the inode item for no reason since nothing changed. btrfs_ordered_update_i_size will return 1 if it didn't update i_disk_size, so only run btrfs_update_inode if btrfs_ordered_update_i_size returns 0. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index da2680263d9f..4a238d676e5b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1769,9 +1769,12 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) add_pending_csums(trans, inode, ordered_extent->file_offset, &ordered_extent->list); - btrfs_ordered_update_i_size(inode, 0, ordered_extent); - ret = btrfs_update_inode(trans, root, inode); - BUG_ON(ret); + ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); + if (!ret) { + ret = btrfs_update_inode(trans, root, inode); + BUG_ON(ret); + } + ret = 0; out: if (nolock) { if (trans) @@ -5865,8 +5868,10 @@ again: } add_pending_csums(trans, inode, ordered->file_offset, &ordered->list); - btrfs_ordered_update_i_size(inode, 0, ordered); - btrfs_update_inode(trans, root, inode); + ret = btrfs_ordered_update_i_size(inode, 0, ordered); + if (!ret) + btrfs_update_inode(trans, root, inode); + ret = 0; out_unlock: unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset, ordered->file_offset + ordered->len - 1, -- cgit v1.2.3 From 02f57c7aedef1a537f4b16db7061cdd8efa3bb4e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 14:25:44 -0400 Subject: Btrfs: don't split dio bios if we don't have to We have been unconditionally allocating a new bio and re-adding all pages from our original bio to the new bio. This is needed if our original bio is larger than our stripe size, but if it is smaller than the stripe size then there is no need to do this. So check the map length and if we are under that then go ahead and submit the original bio. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4a238d676e5b..149c77fd1eb5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6006,13 +6006,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, int ret = 0; int write = rw & REQ_WRITE; - bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); - if (!bio) - return -ENOMEM; - bio->bi_private = dip; - bio->bi_end_io = btrfs_end_dio_bio; - atomic_inc(&dip->pending_bios); - map_length = orig_bio->bi_size; ret = btrfs_map_block(map_tree, READ, start_sector << 9, &map_length, NULL, 0); @@ -6021,6 +6014,18 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, return -EIO; } + if (map_length >= orig_bio->bi_size) { + bio = orig_bio; + goto submit; + } + + bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); + if (!bio) + return -ENOMEM; + bio->bi_private = dip; + bio->bi_end_io = btrfs_end_dio_bio; + atomic_inc(&dip->pending_bios); + while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) { if (unlikely(map_length < submit_len + bvec->bv_len || bio_add_page(bio, bvec->bv_page, bvec->bv_len, @@ -6071,6 +6076,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, } } +submit: ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, csums); if (!ret) -- cgit v1.2.3 From 1ae399382512b3e4d6c923e53da9e45935577040 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 14:41:34 -0400 Subject: Btrfs: do not use async submit for small DIO io's When looking at our DIO performance Chris said that for small IO's doing the async submit stuff tends to be more overhead than it's worth. With this on top of my other fixes I get about a 17-20% speedup doing a sequential dd with 4k IO's. Basically if we don't have to split the bio for the map length it's small enough to be directly submitted, otherwise go back to the async submit. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 149c77fd1eb5..2bb76c6157a4 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5957,7 +5957,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, int rw, u64 file_offset, int skip_sum, - u32 *csums) + u32 *csums, int async_submit) { int write = rw & REQ_WRITE; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -5968,13 +5968,24 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, if (ret) goto err; - if (write && !skip_sum) { + if (skip_sum) + goto map; + + if (write && async_submit) { ret = btrfs_wq_submit_bio(root->fs_info, inode, rw, bio, 0, 0, file_offset, __btrfs_submit_bio_start_direct_io, __btrfs_submit_bio_done); goto err; + } else if (write) { + /* + * If we aren't doing async submit, calculate the csum of the + * bio now. 
+ */ + ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1); + if (ret) + goto err; } else if (!skip_sum) { ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset, csums); @@ -5982,7 +5993,8 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, goto err; } - ret = btrfs_map_bio(root, rw, bio, 0, 1); +map: + ret = btrfs_map_bio(root, rw, bio, 0, async_submit); err: bio_put(bio); return ret; @@ -6004,6 +6016,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, int nr_pages = 0; u32 *csums = dip->csums; int ret = 0; + int async_submit = 0; int write = rw & REQ_WRITE; map_length = orig_bio->bi_size; @@ -6019,6 +6032,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, goto submit; } + async_submit = 1; bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS); if (!bio) return -ENOMEM; @@ -6039,7 +6053,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, atomic_inc(&dip->pending_bios); ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums); + csums, async_submit); if (ret) { bio_put(bio); atomic_dec(&dip->pending_bios); @@ -6078,7 +6092,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, submit: ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums); + csums, async_submit); if (!ret) return 0; -- cgit v1.2.3 From 16d299ac7446b5a75c5683a9ae11d7907d444c86 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 14:53:07 -0400 Subject: Btrfs: reuse the extent_map we found when calling btrfs_get_extent In btrfs_get_block_direct we call btrfs_get_extent to lookup the extent for the range that we are looking for. If we don't find an extent, btrfs_get_extent will insert a extent_map for that area and mark it as a hole. So it does the job of allocating a new extent map and inserting it into the io tree. But if we're creating a new extent we free it up and redo all of that work. So instead pass the em to btrfs_new_extent_direct(), and if it will work just allocate the disk space and set it up properly and bypass the freeing/allocating of a new extent map and the expensive operation of inserting the thing into the io_tree. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2bb76c6157a4..24310c9cb14f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5445,17 +5445,30 @@ out: } static struct extent_map *btrfs_new_extent_direct(struct inode *inode, + struct extent_map *em, u64 start, u64 len) { struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; - struct extent_map *em; struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct btrfs_key ins; u64 alloc_hint; int ret; + bool insert = false; - btrfs_drop_extent_cache(inode, start, start + len - 1, 0); + /* + * Ok if the extent map we looked up is a hole and is for the exact + * range we want, there is no reason to allocate a new one, however if + * it is not right then we need to free this one and drop the cache for + * our range. 
+ */ + if (em->block_start != EXTENT_MAP_HOLE || em->start != start || + em->len != len) { + free_extent_map(em); + em = NULL; + insert = true; + btrfs_drop_extent_cache(inode, start, start + len - 1, 0); + } trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) @@ -5471,10 +5484,12 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, goto out; } - em = alloc_extent_map(GFP_NOFS); if (!em) { - em = ERR_PTR(-ENOMEM); - goto out; + em = alloc_extent_map(GFP_NOFS); + if (!em) { + em = ERR_PTR(-ENOMEM); + goto out; + } } em->start = start; @@ -5484,9 +5499,15 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; + + /* + * We need to do this because if we're using the original em we searched + * for, we could have EXTENT_FLAG_VACANCY set, and we don't want that. + */ + em->flags = 0; set_bit(EXTENT_FLAG_PINNED, &em->flags); - while (1) { + while (insert) { write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em); write_unlock(&em_tree->lock); @@ -5704,8 +5725,7 @@ must_cow: * it above */ len = bh_result->b_size; - free_extent_map(em); - em = btrfs_new_extent_direct(inode, start, len); + em = btrfs_new_extent_direct(inode, em, start, len); if (IS_ERR(em)) return PTR_ERR(em); len = min(len, em->len - (start - em->start)); -- cgit v1.2.3 From 93a54bc4c28a125978cddbe2db9e347391e3522d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 6 Apr 2011 15:11:44 -0400 Subject: Btrfs: check for duplicate iov_base's when doing dio reads Apparently it is ok to submit a read to an IDE device with the same target page for different offsets. This is what Windows does under qemu. The problem is under DIO we expect them to be different buffers for checksumming reasons, and so this sort of thing will result in checksum errors, when in reality the file is fine. So when reading, check to make sure that all iov bases are different, and if they aren't fall back to buffered mode, since that will work out right. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 24310c9cb14f..00d59c6a9769 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6207,6 +6207,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io unsigned long nr_segs) { int seg; + int i; size_t size; unsigned long addr; unsigned blocksize_mask = root->sectorsize - 1; @@ -6221,8 +6222,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io addr = (unsigned long)iov[seg].iov_base; size = iov[seg].iov_len; end += size; - if ((addr & blocksize_mask) || (size & blocksize_mask)) + if ((addr & blocksize_mask) || (size & blocksize_mask)) goto out; + + /* If this is a write we don't need to check anymore */ + if (rw & WRITE) + continue; + + /* + * Check to make sure we don't have duplicate iov_base's in this + * iovec, if so return EINVAL, otherwise we'll get csum errors + * when reading back. + */ + for (i = seg + 1; i < nr_segs; i++) { + if (iov[seg].iov_base == iov[i].iov_base) + goto out; + } } retval = 0; out: -- cgit v1.2.3 From 8fb27640d0e2b43c5584bf0087431b7b8d3c319a Mon Sep 17 00:00:00 2001 From: Yoshinori Sano Date: Sat, 9 Apr 2011 02:30:07 +0000 Subject: Btrfs: fix memory leaks in btrfs_new_inode() This patch fixes memory leaks in btrfs_new_inode(). 
Signed-off-by: Yoshinori Sano Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cc6022842e0c..2d1208f964eb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4526,14 +4526,17 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, BUG_ON(!path); inode = new_inode(root->fs_info->sb); - if (!inode) + if (!inode) { + btrfs_free_path(path); return ERR_PTR(-ENOMEM); + } if (dir) { trace_btrfs_inode_request(dir); ret = btrfs_set_inode_index(dir, index); if (ret) { + btrfs_free_path(path); iput(inode); return ERR_PTR(ret); } -- cgit v1.2.3 From 3387206f26e1b48703e810175b98611a4fd8e8ea Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Mon, 11 Apr 2011 21:52:52 +0000 Subject: btrfs: properly handle overlapping areas in memmove_extent_buffer Fix data corruption caused by memcpy() usage on overlapping data. I've observed it first when found out usermode linux crash on btrfs. Call chain is the following: ------------[ cut here ]------------ WARNING: at /home/slyfox/linux-2.6/fs/btrfs/extent_io.c:3900 memcpy_extent_buffer+0x1a5/0x219() Call Trace: 6fa39a58: [<601b495e>] _raw_spin_unlock_irqrestore+0x18/0x1c 6fa39a68: [<60029ad9>] warn_slowpath_common+0x59/0x70 6fa39aa8: [<60029b05>] warn_slowpath_null+0x15/0x17 6fa39ab8: [<600efc97>] memcpy_extent_buffer+0x1a5/0x219 6fa39b48: [<600efd9f>] memmove_extent_buffer+0x94/0x208 6fa39bc8: [<600becbf>] btrfs_del_items+0x214/0x473 6fa39c78: [<600ce1b0>] btrfs_delete_one_dir_name+0x7c/0xda 6fa39cc8: [<600dad6b>] __btrfs_unlink_inode+0xad/0x25d 6fa39d08: [<600d7864>] btrfs_start_transaction+0xe/0x10 6fa39d48: [<600dc9ff>] btrfs_unlink_inode+0x1b/0x3b 6fa39d78: [<600e04bc>] btrfs_unlink+0x70/0xef 6fa39dc8: [<6007f0d0>] vfs_unlink+0x58/0xa3 6fa39df8: [<60080278>] do_unlinkat+0xd4/0x162 6fa39e48: [<600517db>] call_rcu_sched+0xe/0x10 6fa39e58: [<600452a8>] __put_cred+0x58/0x5a 6fa39e78: [<6007446c>] sys_faccessat+0x154/0x166 6fa39ed8: [<60080317>] sys_unlink+0x11/0x13 6fa39ee8: [<60016b80>] handle_syscall+0x58/0x70 6fa39f08: [<60021377>] userspace+0x2d4/0x381 6fa39fc8: [<60014507>] fork_handler+0x62/0x69 ---[ end trace 70b0ca2ef0266b93 ]--- http://www.mail-archive.com/linux-btrfs@vger.kernel.org/msg09302.html Signed-off-by: Sergei Trofimovich Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 77c65a0bea34..864e0496cc1c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3885,6 +3885,12 @@ static void move_pages(struct page *dst_page, struct page *src_page, kunmap_atomic(dst_kaddr, KM_USER0); } +static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len) +{ + unsigned long distance = (src > dst) ? 
src - dst : dst - src; + return distance < len; +} + static void copy_pages(struct page *dst_page, struct page *src_page, unsigned long dst_off, unsigned long src_off, unsigned long len) @@ -3892,10 +3898,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page, char *dst_kaddr = kmap_atomic(dst_page, KM_USER0); char *src_kaddr; - if (dst_page != src_page) + if (dst_page != src_page) { src_kaddr = kmap_atomic(src_page, KM_USER1); - else + } else { src_kaddr = dst_kaddr; + BUG_ON(areas_overlap(src_off, dst_off, len)); + } memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len); kunmap_atomic(dst_kaddr, KM_USER0); @@ -3970,7 +3978,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, "len %lu len %lu\n", dst_offset, len, dst->len); BUG_ON(1); } - if (dst_offset < src_offset) { + if (!areas_overlap(src_offset, dst_offset, len)) { memcpy_extent_buffer(dst, dst_offset, src_offset, len); return; } -- cgit v1.2.3 From a1b75f7d961955e697ec377f90115e3517df98f9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 8 Apr 2011 15:51:18 +0000 Subject: Btrfs: check for duplicate iov_base's when doing dio reads Apparently it is ok to submit a read to an IDE device with the same target page for different offsets. This is what Windows does under qemu. The problem is under DIO we expect them to be different buffers for checksumming reasons, and so this sort of thing will result in checksum errors, when in reality the file is fine. So when reading, check to make sure that all iov bases are different, and if they aren't fall back to buffered mode, since that will work out right. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2d1208f964eb..edafc28883af 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6153,6 +6153,7 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io unsigned long nr_segs) { int seg; + int i; size_t size; unsigned long addr; unsigned blocksize_mask = root->sectorsize - 1; @@ -6167,8 +6168,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io addr = (unsigned long)iov[seg].iov_base; size = iov[seg].iov_len; end += size; - if ((addr & blocksize_mask) || (size & blocksize_mask)) + if ((addr & blocksize_mask) || (size & blocksize_mask)) goto out; + + /* If this is a write we don't need to check anymore */ + if (rw & WRITE) + continue; + + /* + * Check to make sure we don't have duplicate iov_base's in this + * iovec, if so return EINVAL, otherwise we'll get csum errors + * when reading back. + */ + for (i = seg + 1; i < nr_segs; i++) { + if (iov[seg].iov_base == iov[i].iov_base) + goto out; + } } retval = 0; out: -- cgit v1.2.3 From 13f2696f1da9700d401db0ac2bc27ebc17068b22 Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Mon, 11 Apr 2011 15:56:31 +0000 Subject: fix user annotation in ioctl.c Fix address space annotation correct in ioctl.c. 
Signed-off-by: Daniel J Blueman BTRFS_BLOCK_GROUP_SYSTEM, @@ -2387,7 +2387,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) up_read(&info->groups_sem); } - user_dest = (struct btrfs_ioctl_space_info *) + user_dest = (struct btrfs_ioctl_space_info __user *) (arg + sizeof(struct btrfs_ioctl_space_args)); if (copy_to_user(user_dest, dest_orig, alloc_size)) Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f9c93a9ed4a7..f580a3a5d2fc 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2287,7 +2287,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) struct btrfs_ioctl_space_info space; struct btrfs_ioctl_space_info *dest; struct btrfs_ioctl_space_info *dest_orig; - struct btrfs_ioctl_space_info *user_dest; + struct btrfs_ioctl_space_info __user *user_dest; struct btrfs_space_info *info; u64 types[] = {BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_SYSTEM, -- cgit v1.2.3 From e15d0542426f063dc53b4c51bdfc11e0bbe4d298 Mon Sep 17 00:00:00 2001 From: Xin Zhong Date: Wed, 6 Apr 2011 07:33:51 +0000 Subject: Btrfs: fix subvolume mount by name problem when default mount subvolume is set We create two subvolumes (meego_root and meego_home) in btrfs root directory. And set meego_root as default mount subvolume. After we remount btrfs, meego_root is mounted to top directory by default. Then when we try to mount meego_home (subvol=meego_home) to a subdirectory, it failed. The problem is when default mount subvolume is set to meego_root, we search meego_home in meego_root but can not find it. So the solution is to add a new mount option (subvolrootid) to specify subvol id of root and search subvol name in it. For our case, now we can use "-o subvolrootid=0,subvol=meego_home) to mount meego_home. 
Detail information can be found in meego bugzilla: https://bugs.meego.com/show_bug.cgi?id=15055 Signed-off-by: Zhong, Xin Signed-off-by: Chris Mason --- fs/btrfs/super.c | 42 +++++++++++++++++++++++++++++++++--------- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 58e7de9cc90c..0ac712efcdf2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -159,7 +159,7 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_err, }; static match_table_t tokens = { @@ -189,6 +189,7 @@ static match_table_t tokens = { {Opt_clear_cache, "clear_cache"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, {Opt_enospc_debug, "enospc_debug"}, + {Opt_subvolrootid, "subvolrootid=%d"}, {Opt_err, NULL}, }; @@ -232,6 +233,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) break; case Opt_subvol: case Opt_subvolid: + case Opt_subvolrootid: case Opt_device: /* * These are parsed by btrfs_parse_early_options @@ -388,7 +390,7 @@ out: */ static int btrfs_parse_early_options(const char *options, fmode_t flags, void *holder, char **subvol_name, u64 *subvol_objectid, - struct btrfs_fs_devices **fs_devices) + u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; char *opts, *orig, *p; @@ -429,6 +431,18 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, *subvol_objectid = intarg; } break; + case Opt_subvolrootid: + intarg = 0; + error = match_int(&args[0], &intarg); + if (!error) { + /* we want the original fs_tree */ + if (!intarg) + *subvol_rootid = + BTRFS_FS_TREE_OBJECTID; + else + *subvol_rootid = intarg; + } + break; case Opt_device: error = btrfs_scan_one_device(match_strdup(&args[0]), flags, holder, fs_devices); @@ -736,6 +750,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, fmode_t mode = FMODE_READ; char *subvol_name = NULL; u64 subvol_objectid = 0; + u64 subvol_rootid = 0; int error = 0; if (!(flags & MS_RDONLY)) @@ -743,7 +758,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, error = btrfs_parse_early_options(data, mode, fs_type, &subvol_name, &subvol_objectid, - &fs_devices); + &subvol_rootid, &fs_devices); if (error) return ERR_PTR(error); @@ -807,15 +822,17 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, s->s_flags |= MS_ACTIVE; } - root = get_default_root(s, subvol_objectid); - if (IS_ERR(root)) { - error = PTR_ERR(root); - deactivate_locked_super(s); - goto error_free_subvol_name; - } /* if they gave us a subvolume name bind mount into that */ if (strcmp(subvol_name, ".")) { struct dentry *new_root; + + root = get_default_root(s, subvol_rootid); + if (IS_ERR(root)) { + error = PTR_ERR(root); + deactivate_locked_super(s); + goto error_free_subvol_name; + } + mutex_lock(&root->d_inode->i_mutex); new_root = lookup_one_len(subvol_name, root, strlen(subvol_name)); @@ -836,6 +853,13 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } dput(root); root = new_root; + } else { + root = get_default_root(s, subvol_objectid); + if (IS_ERR(root)) { + error = PTR_ERR(root); + deactivate_locked_super(s); + goto error_free_subvol_name; + } } kfree(subvol_name); -- cgit v1.2.3 From 13c5a93e7005d7dae0b6d070d25203593e692d13 Mon Sep 17 00:00:00 2001 
From: Josef Bacik Date: Mon, 11 Apr 2011 15:45:29 -0400 Subject: Btrfs: avoid taking the trans_mutex in btrfs_end_transaction I've been working on making our O_DIRECT latency not suck and I noticed we were taking the trans_mutex in btrfs_end_transaction. So to do this we convert num_writers and use_count to atomic_t's and just decrement them in btrfs_end_transaction. Instead of deleting the transaction from the trans list in put_transaction we do that in btrfs_commit_transaction() since that's the only time it actually needs to be removed from the list. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/transaction.c | 37 ++++++++++++++++--------------------- fs/btrfs/transaction.h | 4 ++-- 3 files changed, 19 insertions(+), 24 deletions(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a272bfd74ea0..ef6865c17cd6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3136,7 +3136,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) btrfs_destroy_pinned_extent(root, root->fs_info->pinned_extents); - t->use_count = 0; + atomic_set(&t->use_count, 0); list_del_init(&t->list); memset(t, 0, sizeof(*t)); kmem_cache_free(btrfs_transaction_cachep, t); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 4583008217e6..c571734d5e5a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -32,10 +32,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) { - WARN_ON(transaction->use_count == 0); - transaction->use_count--; - if (transaction->use_count == 0) { - list_del_init(&transaction->list); + WARN_ON(atomic_read(&transaction->use_count) == 0); + if (atomic_dec_and_test(&transaction->use_count)) { memset(transaction, 0, sizeof(*transaction)); kmem_cache_free(btrfs_transaction_cachep, transaction); } @@ -60,14 +58,14 @@ static noinline int join_transaction(struct btrfs_root *root) if (!cur_trans) return -ENOMEM; root->fs_info->generation++; - cur_trans->num_writers = 1; + atomic_set(&cur_trans->num_writers, 1); cur_trans->num_joined = 0; cur_trans->transid = root->fs_info->generation; init_waitqueue_head(&cur_trans->writer_wait); init_waitqueue_head(&cur_trans->commit_wait); cur_trans->in_commit = 0; cur_trans->blocked = 0; - cur_trans->use_count = 1; + atomic_set(&cur_trans->use_count, 1); cur_trans->commit_done = 0; cur_trans->start_time = get_seconds(); @@ -88,7 +86,7 @@ static noinline int join_transaction(struct btrfs_root *root) root->fs_info->running_transaction = cur_trans; spin_unlock(&root->fs_info->new_trans_lock); } else { - cur_trans->num_writers++; + atomic_inc(&cur_trans->num_writers); cur_trans->num_joined++; } @@ -145,7 +143,7 @@ static void wait_current_trans(struct btrfs_root *root) cur_trans = root->fs_info->running_transaction; if (cur_trans && cur_trans->blocked) { DEFINE_WAIT(wait); - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); while (1) { prepare_to_wait(&root->fs_info->transaction_wait, &wait, TASK_UNINTERRUPTIBLE); @@ -205,7 +203,7 @@ again: } cur_trans = root->fs_info->running_transaction; - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); if (type != TRANS_JOIN_NOLOCK) mutex_unlock(&root->fs_info->trans_mutex); @@ -336,7 +334,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) goto out_unlock; /* nothing committing|committed */ } - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); wait_for_commit(root, cur_trans); @@ -466,18 +464,14 @@ static int 
__btrfs_end_transaction(struct btrfs_trans_handle *trans, wake_up_process(info->transaction_kthread); } - if (lock) - mutex_lock(&info->trans_mutex); WARN_ON(cur_trans != info->running_transaction); - WARN_ON(cur_trans->num_writers < 1); - cur_trans->num_writers--; + WARN_ON(atomic_read(&cur_trans->num_writers) < 1); + atomic_dec(&cur_trans->num_writers); smp_mb(); if (waitqueue_active(&cur_trans->writer_wait)) wake_up(&cur_trans->writer_wait); put_transaction(cur_trans); - if (lock) - mutex_unlock(&info->trans_mutex); if (current->journal_info == trans) current->journal_info = NULL; @@ -1187,7 +1181,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, /* take transaction reference */ mutex_lock(&root->fs_info->trans_mutex); cur_trans = trans->transaction; - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); @@ -1246,7 +1240,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, mutex_lock(&root->fs_info->trans_mutex); if (cur_trans->in_commit) { - cur_trans->use_count++; + atomic_inc(&cur_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); @@ -1268,7 +1262,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); if (!prev_trans->commit_done) { - prev_trans->use_count++; + atomic_inc(&prev_trans->use_count); mutex_unlock(&root->fs_info->trans_mutex); wait_for_commit(root, prev_trans); @@ -1309,14 +1303,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, TASK_UNINTERRUPTIBLE); smp_mb(); - if (cur_trans->num_writers > 1) + if (atomic_read(&cur_trans->num_writers) > 1) schedule_timeout(MAX_SCHEDULE_TIMEOUT); else if (should_grow) schedule_timeout(1); mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); - } while (cur_trans->num_writers > 1 || + } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); ret = create_pending_snapshots(trans, root->fs_info); @@ -1403,6 +1397,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, wake_up(&cur_trans->commit_wait); + list_del_init(&cur_trans->list); put_transaction(cur_trans); put_transaction(cur_trans); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 229a594cacd5..e441acc6c584 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -27,11 +27,11 @@ struct btrfs_transaction { * total writers in this transaction, it must be zero before the * transaction can end */ - unsigned long num_writers; + atomic_t num_writers; unsigned long num_joined; int in_commit; - int use_count; + atomic_t use_count; int commit_done; int blocked; struct list_head list; -- cgit v1.2.3 From 507903b81840a70cc6a179d4eb03584ad50e8c5b Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Wed, 6 Apr 2011 10:02:20 +0000 Subject: btrfs: using cached extent_state in set/unlock combinations In several places the sequence (set_extent_uptodate, unlock_extent) is used. This leads to a duplicate lookup of the extent state. This patch lets set_extent_uptodate return a cached extent_state which can be passed to unlock_extent_cached. The occurences of the above sequences are updated to use the cache. Only end_bio_extent_readpage is updated that it first gets a cached state to pass it to the readpage_end_io_hook as the prototype requested and is later on being used for set/unlock. 
Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 70 +++++++++++++++++++++++++++++++++++++++------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 2 +- 3 files changed, 55 insertions(+), 19 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 864e0496cc1c..8dcfb77678de 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -690,6 +690,17 @@ static void cache_state(struct extent_state *state, } } +static void uncache_state(struct extent_state **cached_ptr) +{ + if (cached_ptr && (*cached_ptr)) { + struct extent_state *state = *cached_ptr; + if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) { + *cached_ptr = NULL; + free_extent_state(state); + } + } +} + /* * set some bits on a range in the tree. This may require allocations or * sleeping, so the gfp mask is used to indicate what is allowed. @@ -940,10 +951,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, } int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) + struct extent_state **cached_state, gfp_t mask) { - return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL, - NULL, mask); + return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, + NULL, cached_state, mask); } static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, @@ -1012,8 +1023,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, mask); } -int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask) +int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) { return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL, mask); @@ -1735,6 +1745,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err) do { struct page *page = bvec->bv_page; + struct extent_state *cached = NULL; + struct extent_state *state; + tree = &BTRFS_I(page->mapping->host)->io_tree; start = ((u64)page->index << PAGE_CACHE_SHIFT) + @@ -1749,9 +1762,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (++bvec <= bvec_end) prefetchw(&bvec->bv_page->flags); + spin_lock(&tree->lock); + state = find_first_extent_bit_state(tree, start, 0); + if (state) { + /* + * take a reference on the state, unlock will drop + * the ref + */ + cache_state(state, &cached); + } + spin_unlock(&tree->lock); + if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, - NULL); + state); if (ret) uptodate = 0; } @@ -1764,15 +1788,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err) test_bit(BIO_UPTODATE, &bio->bi_flags); if (err) uptodate = 0; + uncache_state(&cached); continue; } } if (uptodate) { - set_extent_uptodate(tree, start, end, + set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); } - unlock_extent(tree, start, end, GFP_ATOMIC); + unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); if (whole_page) { if (uptodate) { @@ -1811,6 +1836,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) do { struct page *page = bvec->bv_page; + struct extent_state *cached = NULL; tree = &BTRFS_I(page->mapping->host)->io_tree; start = ((u64)page->index << PAGE_CACHE_SHIFT) + @@ -1821,13 +1847,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err) prefetchw(&bvec->bv_page->flags); if (uptodate) { - set_extent_uptodate(tree, start, end, GFP_ATOMIC); + set_extent_uptodate(tree, start, end, &cached, + GFP_ATOMIC); } else { ClearPageUptodate(page); 
SetPageError(page); } - unlock_extent(tree, start, end, GFP_ATOMIC); + unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); } while (bvec >= bio->bi_io_vec); @@ -2016,14 +2043,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree, while (cur <= end) { if (cur >= last_byte) { char *userpage; + struct extent_state *cached = NULL; + iosize = PAGE_CACHE_SIZE - page_offset; userpage = kmap_atomic(page, KM_USER0); memset(userpage + page_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, - GFP_NOFS); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); break; } em = get_extent(inode, page, page_offset, cur, @@ -2063,14 +2093,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree, /* we've found a hole, just zero and go on */ if (block_start == EXTENT_MAP_HOLE) { char *userpage; + struct extent_state *cached = NULL; + userpage = kmap_atomic(page, KM_USER0); memset(userpage + page_offset, 0, iosize); flush_dcache_page(page); kunmap_atomic(userpage, KM_USER0); set_extent_uptodate(tree, cur, cur + iosize - 1, - GFP_NOFS); - unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); + &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, cur + iosize - 1, + &cached, GFP_NOFS); cur = cur + iosize; page_offset += iosize; continue; @@ -2789,9 +2822,12 @@ int extent_prepare_write(struct extent_io_tree *tree, iocount++; block_start = block_start + iosize; } else { - set_extent_uptodate(tree, block_start, cur_end, + struct extent_state *cached = NULL; + + set_extent_uptodate(tree, block_start, cur_end, &cached, GFP_NOFS); - unlock_extent(tree, block_start, cur_end, GFP_NOFS); + unlock_extent_cached(tree, block_start, cur_end, + &cached, GFP_NOFS); block_start = cur_end + 1; } page_offset = block_start & (PAGE_CACHE_SIZE - 1); @@ -3457,7 +3493,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree, num_pages = num_extent_pages(eb->start, eb->len); set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1, - GFP_NOFS); + NULL, GFP_NOFS); for (i = 0; i < num_pages; i++) { page = extent_buffer_page(eb, i); if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) || diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f62c5442835d..af2d7179c372 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits, int exclusive_bits, u64 *failed_start, struct extent_state **cached_state, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, - gfp_t mask); + struct extent_state **cached_state, gfp_t mask); int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index edafc28883af..5a993e0ec865 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5226,7 +5226,7 @@ again: btrfs_mark_buffer_dirty(leaf); } set_extent_uptodate(io_tree, em->start, - extent_map_end(em) - 1, GFP_NOFS); + extent_map_end(em) - 1, NULL, GFP_NOFS); goto insert; } else { printk(KERN_ERR "btrfs unknown found_type %d\n", found_type); -- cgit v1.2.3 From 109b36a2bb3eebf5c9994980e724958a5b2b62b6 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 12 Apr 2011 13:57:39 -0400 Subject: Btrfs: make uncache_state unconditional The extent_io code can take cached 
pointers into the extent state trees, and these can make lookups much faster in common operations. The caching only happens when specific bits are set that prevent merging and splitting of the extent state. A helper function was added to uncache the state, and it was testing the same set of conditionals. This can leak in very strange corner cases where the lock bit goes away unexpectedly. The uncaching should be unconditional. Once we have a ref on the extent we should always give it up. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8dcfb77678de..1c462f895c98 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -694,10 +694,8 @@ static void uncache_state(struct extent_state **cached_ptr) { if (cached_ptr && (*cached_ptr)) { struct extent_state *state = *cached_ptr; - if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) { - *cached_ptr = NULL; - free_extent_state(state); - } + *cached_ptr = NULL; + free_extent_state(state); } } @@ -1764,7 +1762,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) spin_lock(&tree->lock); state = find_first_extent_bit_state(tree, start, 0); - if (state) { + if (state && state->start == start) { /* * take a reference on the state, unlock will drop * the ref -- cgit v1.2.3 From 2e6a00356a066d34cd00872b067589549169ad48 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Thu, 17 Mar 2011 15:17:59 +0800 Subject: Btrfs: Check if btrfs_next_leaf() returns error in btrfs_listxattr() btrfs_next_leaf() can return -errno, and we should propagate it to userspace. This also simplifies how we walk the btree path. Signed-off-by: Li Zefan --- fs/btrfs/xattr.c | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index e5d22f280956..07b9bc350d5d 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -180,11 +180,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_dir_item *di; - int ret = 0, slot, advance; + int ret = 0, slot; size_t total_size = 0, size_left = size; unsigned long name_ptr; size_t name_len; - u32 nritems; /* * ok we want all objects associated with this id. 
@@ -204,34 +203,24 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; - advance = 0; + while (1) { leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; /* this is where we start walking through the path */ - if (advance || slot >= nritems) { + if (slot >= btrfs_header_nritems(leaf)) { /* * if we've reached the last slot in this leaf we need * to go to the next leaf and reset everything */ - if (slot >= nritems-1) { - ret = btrfs_next_leaf(root, path); - if (ret) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - slot = path->slots[0]; - } else { - /* - * just walking through the slots on this leaf - */ - slot++; - path->slots[0]++; - } + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto err; + else if (ret > 0) + break; + continue; } - advance = 1; btrfs_item_key_to_cpu(leaf, &found_key, slot); @@ -250,7 +239,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) /* we are just looking for how big our buffer needs to be */ if (!size) - continue; + goto next; if (!buffer || (name_len + 1) > size_left) { ret = -ERANGE; @@ -263,6 +252,8 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) size_left -= name_len + 1; buffer += name_len + 1; +next: + path->slots[0]++; } ret = total_size; -- cgit v1.2.3 From b9e03af0bcc11310f6be4a3951c9ee2c26465011 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 23 Mar 2011 10:43:58 +0800 Subject: Btrfs: Check if btrfs_next_leaf() returns error in btrfs_real_readdir() btrfs_next_leaf() can return -errno, and we should propagate it to userspace. This also simplifies how we walk the btree path. Signed-off-by: Li Zefan --- fs/btrfs/inode.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 55a6a0b416d7..b9f7f5258343 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4221,10 +4221,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_key found_key; struct btrfs_path *path; int ret; - u32 nritems; struct extent_buffer *leaf; int slot; - int advance; unsigned char d_type; int over = 0; u32 di_cur; @@ -4267,27 +4265,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto err; - advance = 0; while (1) { leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); slot = path->slots[0]; - if (advance || slot >= nritems) { - if (slot >= nritems - 1) { - ret = btrfs_next_leaf(root, path); - if (ret) - break; - leaf = path->nodes[0]; - nritems = btrfs_header_nritems(leaf); - slot = path->slots[0]; - } else { - slot++; - path->slots[0]++; - } + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto err; + else if (ret > 0) + break; + continue; } - advance = 1; item = btrfs_item_nr(leaf, slot); btrfs_item_key_to_cpu(leaf, &found_key, slot); @@ -4296,7 +4286,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, if (btrfs_key_type(&found_key) != key_type) break; if (found_key.offset < filp->f_pos) - continue; + goto next; filp->f_pos = found_key.offset; @@ -4349,6 +4339,8 @@ skip: di_cur += di_len; di = (struct btrfs_dir_item *)((char *)di + di_len); } +next: + path->slots[0]++; } /* Reached end of directory/root. Bump pos past the last item. 
*/ -- cgit v1.2.3 From 3153495d8ed6a9bb9f00aea42c18dc488a885dd6 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 13 Apr 2011 13:19:21 +0800 Subject: Btrfs: Fix incorrect inode nlink in btrfs_link() Link count of the inode is not decreased if btrfs_set_inode_index() fails. Signed-off-by: Miao Xie Singed-off-by: Li Zefan --- fs/btrfs/inode.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b9f7f5258343..a4157cfdd533 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4846,9 +4846,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink == ~0U) return -EMLINK; - btrfs_inc_nlink(inode); - inode->i_ctime = CURRENT_TIME; - err = btrfs_set_inode_index(dir, &index); if (err) goto fail; @@ -4864,6 +4861,9 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, goto fail; } + btrfs_inc_nlink(inode); + inode->i_ctime = CURRENT_TIME; + btrfs_set_trans_block_group(trans, dir); ihold(inode); -- cgit v1.2.3 From 329c5056be8774255db04b01242a9ff4f02eb8ea Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 13 Apr 2011 14:07:59 +0800 Subject: Btrfs: Check validity before setting an acl Call posix_acl_valid() to check if an acl is valid or not. Signed-off-by: Miao Xie Signed-off-by: Li Zefan --- fs/btrfs/acl.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 9c949348510b..a892bc27f13a 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -178,16 +178,17 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, if (value) { acl = posix_acl_from_xattr(value, size); - if (acl == NULL) { - value = NULL; - size = 0; + if (acl) { + ret = posix_acl_valid(acl); + if (ret) + goto out; } else if (IS_ERR(acl)) { return PTR_ERR(acl); } } ret = btrfs_set_acl(NULL, dentry->d_inode, acl, type); - +out: posix_acl_release(acl); return ret; -- cgit v1.2.3 From 0e4f8f888845f9dca540ad175884244e5db5eea2 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 15 Apr 2011 16:05:44 -0400 Subject: Btrfs: don't force chunk allocation in find_free_extent find_free_extent likes to allocate in contiguous clusters, which makes writeback faster, especially on SSD storage. As the FS fragments, these clusters become harder to find and we have to decide between allocating a new chunk to make more clusters or giving up on the cluster to allocate from the free space we have. Right now it creates too many chunks, and you can end up with a whole FS that is mostly empty metadata chunks. This commit changes the allocation code to be more strict and only allocate new chunks when we've made good use of the chunks we already have. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 95 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 73 insertions(+), 22 deletions(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f619c3cb13b7..26479484180d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -33,6 +33,25 @@ #include "locking.h" #include "free-space-cache.h" +/* control flags for do_chunk_alloc's force field + * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk + * if we really need one. + * + * CHUNK_ALLOC_FORCE means it must try to allocate one + * + * CHUNK_ALLOC_LIMITED means to only try and allocate one + * if we have very few chunks already allocated. 
This is + * used as part of the clustering code to help make sure + * we have a good pool of storage to cluster in, without + * filling the FS with empty chunks + * + */ +enum { + CHUNK_ALLOC_NO_FORCE = 0, + CHUNK_ALLOC_FORCE = 1, + CHUNK_ALLOC_LIMITED = 2, +}; + static int update_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, int alloc); @@ -3019,7 +3038,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->bytes_readonly = 0; found->bytes_may_use = 0; found->full = 0; - found->force_alloc = 0; + found->force_alloc = CHUNK_ALLOC_NO_FORCE; *space_info = found; list_add_rcu(&found->list, &info->space_info); atomic_set(&found->caching_threads, 0); @@ -3150,7 +3169,7 @@ again: if (!data_sinfo->full && alloc_chunk) { u64 alloc_target; - data_sinfo->force_alloc = 1; + data_sinfo->force_alloc = CHUNK_ALLOC_FORCE; spin_unlock(&data_sinfo->lock); alloc: alloc_target = btrfs_get_alloc_profile(root, 1); @@ -3160,7 +3179,8 @@ alloc: ret = do_chunk_alloc(trans, root->fs_info->extent_root, bytes + 2 * 1024 * 1024, - alloc_target, 0); + alloc_target, + CHUNK_ALLOC_NO_FORCE); btrfs_end_transaction(trans, root); if (ret < 0) { if (ret != -ENOSPC) @@ -3239,31 +3259,56 @@ static void force_metadata_allocation(struct btrfs_fs_info *info) rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { if (found->flags & BTRFS_BLOCK_GROUP_METADATA) - found->force_alloc = 1; + found->force_alloc = CHUNK_ALLOC_FORCE; } rcu_read_unlock(); } static int should_alloc_chunk(struct btrfs_root *root, - struct btrfs_space_info *sinfo, u64 alloc_bytes) + struct btrfs_space_info *sinfo, u64 alloc_bytes, + int force) { u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly; + u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved; u64 thresh; - if (sinfo->bytes_used + sinfo->bytes_reserved + - alloc_bytes + 256 * 1024 * 1024 < num_bytes) + if (force == CHUNK_ALLOC_FORCE) + return 1; + + /* + * in limited mode, we want to have some free space up to + * about 1% of the FS size. + */ + if (force == CHUNK_ALLOC_LIMITED) { + thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); + thresh = max_t(u64, 64 * 1024 * 1024, + div_factor_fine(thresh, 1)); + + if (num_bytes - num_allocated < thresh) + return 1; + } + + /* + * we have two similar checks here, one based on percentage + * and once based on a hard number of 256MB. The idea + * is that if we have a good amount of free + * room, don't allocate a chunk. 
A good mount is + * less than 80% utilized of the chunks we have allocated, + * or more than 256MB free + */ + if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes) return 0; - if (sinfo->bytes_used + sinfo->bytes_reserved + - alloc_bytes < div_factor(num_bytes, 8)) + if (num_allocated + alloc_bytes < div_factor(num_bytes, 8)) return 0; thresh = btrfs_super_total_bytes(&root->fs_info->super_copy); + + /* 256MB or 5% of the FS */ thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5)); if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3)) return 0; - return 1; } @@ -3289,17 +3334,17 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, spin_lock(&space_info->lock); if (space_info->force_alloc) - force = 1; + force = space_info->force_alloc; if (space_info->full) { spin_unlock(&space_info->lock); goto out; } - if (!force && !should_alloc_chunk(extent_root, space_info, - alloc_bytes)) { + if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { spin_unlock(&space_info->lock); goto out; } + spin_unlock(&space_info->lock); /* @@ -3327,7 +3372,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, space_info->full = 1; else ret = 1; - space_info->force_alloc = 0; + space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; spin_unlock(&space_info->lock); out: mutex_unlock(&extent_root->fs_info->chunk_mutex); @@ -5303,11 +5348,13 @@ loop: if (allowed_chunk_alloc) { ret = do_chunk_alloc(trans, root, num_bytes + - 2 * 1024 * 1024, data, 1); + 2 * 1024 * 1024, data, + CHUNK_ALLOC_LIMITED); allowed_chunk_alloc = 0; done_chunk_alloc = 1; - } else if (!done_chunk_alloc) { - space_info->force_alloc = 1; + } else if (!done_chunk_alloc && + space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) { + space_info->force_alloc = CHUNK_ALLOC_LIMITED; } if (loop < LOOP_NO_EMPTY_SIZE) { @@ -5393,7 +5440,8 @@ again: */ if (empty_size || root->ref_cows) ret = do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes + 2 * 1024 * 1024, data, 0); + num_bytes + 2 * 1024 * 1024, data, + CHUNK_ALLOC_NO_FORCE); WARN_ON(num_bytes < root->sectorsize); ret = find_free_extent(trans, root, num_bytes, empty_size, @@ -5405,7 +5453,7 @@ again: num_bytes = num_bytes & ~(root->sectorsize - 1); num_bytes = max(num_bytes, min_alloc_size); do_chunk_alloc(trans, root->fs_info->extent_root, - num_bytes, data, 1); + num_bytes, data, CHUNK_ALLOC_FORCE); goto again; } if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { @@ -8109,13 +8157,15 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, alloc_flags = update_block_group_flags(root, cache->flags); if (alloc_flags != cache->flags) - do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); ret = set_block_group_ro(cache); if (!ret) goto out; alloc_flags = get_alloc_profile(root, cache->space_info->flags); - ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); if (ret < 0) goto out; ret = set_block_group_ro(cache); @@ -8128,7 +8178,8 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 type) { u64 alloc_flags = get_alloc_profile(root, type); - return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); + return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, + CHUNK_ALLOC_FORCE); } /* -- cgit v1.2.3 From 0d399205edf3a4c290e76ebb36e541593af4a1b4 Mon Sep 17 00:00:00 2001 
From: Chris Mason Date: Sat, 16 Apr 2011 06:55:39 -0400 Subject: Btrfs end_bio_extent_readpage should look for locked bits A recent commit caches the extent state in end_bio_extent_readpage, but the search it does should look for locked extents. This fixes things to make it more effective. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1c462f895c98..5ae0bffaa4d8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1761,7 +1761,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) prefetchw(&bvec->bv_page->flags); spin_lock(&tree->lock); - state = find_first_extent_bit_state(tree, start, 0); + state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED); if (state && state->start == start) { /* * take a reference on the state, unlock will drop -- cgit v1.2.3 From 6d74119f1a3efad9dc7f79a16c201242324b731f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 11 Apr 2011 20:20:11 -0400 Subject: Btrfs: avoid taking the chunk_mutex in do_chunk_alloc Every time we try to allocate disk space we try and see if we can pre-emptively allocate a chunk, but in the common case we don't allocate anything, so there is no sense in taking the chunk_mutex at all. So instead if we are allocating a chunk, mark it in the space_info so we don't get two people trying to allocate at the same time. Thanks, Signed-off-by: Josef Bacik Reviewed-by: Liu Bo --- fs/btrfs/ctree.h | 4 +++- fs/btrfs/extent-tree.c | 30 +++++++++++++++++++++++++----- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0d00a07b5b29..2e61fe1b6b8c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -740,8 +740,10 @@ struct btrfs_space_info { */ unsigned long reservation_progress; - int full; /* indicates that we cannot allocate any more + int full:1; /* indicates that we cannot allocate any more chunks for this space */ + int chunk_alloc:1; /* set if we are allocating a chunk */ + int force_alloc; /* set if we need to force a chunk alloc for this space */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 26479484180d..31f33ba56fe8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3039,6 +3039,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags, found->bytes_may_use = 0; found->full = 0; found->force_alloc = CHUNK_ALLOC_NO_FORCE; + found->chunk_alloc = 0; *space_info = found; list_add_rcu(&found->list, &info->space_info); atomic_set(&found->caching_threads, 0); @@ -3318,10 +3319,9 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, { struct btrfs_space_info *space_info; struct btrfs_fs_info *fs_info = extent_root->fs_info; + int wait_for_alloc = 0; int ret = 0; - mutex_lock(&fs_info->chunk_mutex); - flags = btrfs_reduce_alloc_profile(extent_root, flags); space_info = __find_space_info(extent_root->fs_info, flags); @@ -3332,21 +3332,40 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, } BUG_ON(!space_info); +again: spin_lock(&space_info->lock); if (space_info->force_alloc) force = space_info->force_alloc; if (space_info->full) { spin_unlock(&space_info->lock); - goto out; + return 0; } if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) { spin_unlock(&space_info->lock); - goto out; + return 0; + } else if (space_info->chunk_alloc) { + wait_for_alloc = 1; + } else { + space_info->chunk_alloc = 1; } spin_unlock(&space_info->lock); +
mutex_lock(&fs_info->chunk_mutex); + + /* + * The chunk_mutex is held throughout the entirety of a chunk + * allocation, so once we've acquired the chunk_mutex we know that the + * other guy is done and we need to recheck and see if we should + * allocate. + */ + if (wait_for_alloc) { + mutex_unlock(&fs_info->chunk_mutex); + wait_for_alloc = 0; + goto again; + } + /* * If we have mixed data/metadata chunks we want to make sure we keep * allocating mixed chunks instead of individual chunks. @@ -3372,9 +3391,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, space_info->full = 1; else ret = 1; + space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; + space_info->chunk_alloc = 0; spin_unlock(&space_info->lock); -out: mutex_unlock(&extent_root->fs_info->chunk_mutex); return ret; } -- cgit v1.2.3 From f65647c29b14f5a32ff6f3237b0ef3b375ed5a79 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 18 Apr 2011 08:55:34 -0400 Subject: Btrfs: fix free space cache leak The free space caching code was recently reworked to cache all the pages it needed instead of using find_get_page everywhere. One loop was missed though, so it ended up leaking pages. This fixes it to use our page array instead of find_get_page. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index a3f420def0e9..11d2e9cea09e 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -732,7 +732,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, out_of_space = true; break; } - page = find_get_page(inode->i_mapping, index); + page = pages[index]; addr = kmap(page); memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE); -- cgit v1.2.3
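The "don't force chunk allocation in find_free_extent" patch above gates chunk creation on how well the existing chunks are already being used: the CHUNK_ALLOC_LIMITED rule of roughly 1% of the FS (with a 64MB floor), the 256MB head-room check, and the 80% utilization check in should_alloc_chunk(). The following is a small stand-alone C sketch of that heuristic, not the kernel code: the helper names, the simplified signature, and the sample numbers are invented for illustration, and the final 5%/metadata-specific check is left out.

#include <stdint.h>
#include <stdio.h>

#define MB (1024ULL * 1024ULL)

/* stand-ins for btrfs's div_factor_fine() (percent) and div_factor() (tenths) */
static uint64_t pct(uint64_t num, int p)    { return num * p / 100; }
static uint64_t tenths(uint64_t num, int t) { return num * t / 10; }

/*
 * total:     bytes already committed to chunks of this type
 * allocated: how much of that is used or reserved
 * fs_bytes:  size of the whole filesystem
 * limited:   CHUNK_ALLOC_LIMITED-style behaviour
 */
static int should_alloc(uint64_t total, uint64_t allocated,
                        uint64_t alloc_bytes, uint64_t fs_bytes, int limited)
{
        if (limited) {
                /* keep roughly 1% of the FS (at least 64MB) worth of slack */
                uint64_t thresh = pct(fs_bytes, 1);
                if (thresh < 64 * MB)
                        thresh = 64 * MB;
                if (total - allocated < thresh)
                        return 1;
        }

        /* more than 256MB of head room left in existing chunks: no new chunk */
        if (allocated + alloc_bytes + 256 * MB < total)
                return 0;

        /* existing chunks less than 80% utilized: no new chunk */
        if (allocated + alloc_bytes < tenths(total, 8))
                return 0;

        return 1;
}

int main(void)
{
        uint64_t fs = 100 * 1024 * MB;   /* a 100GB filesystem */

        printf("10%% of a 1GB chunk used -> %d\n",
               should_alloc(1024 * MB, 102 * MB, 4 * MB, fs, 0));
        printf("90%% of a 1GB chunk used -> %d\n",
               should_alloc(1024 * MB, 922 * MB, 4 * MB, fs, 0));
        printf("limited mode, low slack  -> %d\n",
               should_alloc(1024 * MB, 700 * MB, 4 * MB, fs, 1));
        return 0;
}

Under these rules a mostly empty chunk never triggers another allocation, which is what keeps the filesystem from filling up with empty metadata chunks, while limited mode still tops up a nearly exhausted pool so the clustering code has room to work with.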
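The "avoid taking the chunk_mutex in do_chunk_alloc" patch above boils down to a small locking pattern: publish an allocation-in-progress flag under the cheap spinlock, let latecomers fall through to the mutex, and have them recheck the situation once they acquire it instead of allocating a second chunk. Below is a minimal user-space sketch of that pattern with POSIX threads; the names and the toy "space is now full" bookkeeping are invented for this illustration and it is not btrfs code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* stand-ins for space_info->lock, space_info->chunk_alloc and space_info->full */
static pthread_spinlock_t info_lock;
static pthread_mutex_t chunk_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool chunk_alloc_in_progress;
static bool space_full;

static void *alloc_chunk(void *arg)
{
        long id = (long)arg;
        bool wait_for_alloc = false;

again:
        pthread_spin_lock(&info_lock);
        if (space_full) {
                /* someone else already filled the space; nothing to do */
                pthread_spin_unlock(&info_lock);
                return NULL;
        }
        if (chunk_alloc_in_progress)
                wait_for_alloc = true;          /* another thread is allocating */
        else
                chunk_alloc_in_progress = true; /* we will do the allocation */
        pthread_spin_unlock(&info_lock);

        pthread_mutex_lock(&chunk_mutex);
        if (wait_for_alloc) {
                /* the allocator holds chunk_mutex for the whole allocation, so
                 * by the time we get it they are done; recheck from the top */
                pthread_mutex_unlock(&chunk_mutex);
                wait_for_alloc = false;
                goto again;
        }

        printf("thread %ld: allocating a chunk\n", id);

        pthread_spin_lock(&info_lock);
        space_full = true;                      /* pretend the space is now full */
        chunk_alloc_in_progress = false;
        pthread_spin_unlock(&info_lock);
        pthread_mutex_unlock(&chunk_mutex);
        return NULL;
}

int main(void)
{
        pthread_t threads[4];

        pthread_spin_init(&info_lock, PTHREAD_PROCESS_PRIVATE);
        for (long i = 0; i < 4; i++)
                pthread_create(&threads[i], NULL, alloc_chunk, (void *)i);
        for (int i = 0; i < 4; i++)
                pthread_join(threads[i], NULL);
        return 0;
}

Built with gcc -pthread, only one of the four threads ends up printing; the others either see chunk_alloc_in_progress and wait on the mutex, or see space_full and bail out, which is the behaviour the patch wants from do_chunk_alloc() without making every caller serialize on chunk_mutex up front.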