From 74b2107543da4ed9607ec484f63c42362dc9fca6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 13 Apr 2011 12:02:53 -0400 Subject: Btrfs: make sure to use the delalloc reserve when filling delalloc In the prealloc filling code and compressed code we don't set trans->block_rsv to the delalloc block reserve properly, which is going to make us use metadata from the wrong pool, this patch fixes that. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..3b9f1643aa57 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -619,6 +619,7 @@ retry: trans = btrfs_join_transaction(root, 1); BUG_ON(IS_ERR(trans)); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -1060,6 +1061,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, trans = btrfs_join_transaction(root, 1); } BUG_ON(IS_ERR(trans)); + trans->block_rsv = &root->fs_info->delalloc_block_rsv; cow_start = (u64)-1; cur_offset = start; -- cgit v1.2.3 From 7a7eaa40a39bde4eefc91aadeb1ce3dc4e6a1252 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 13 Apr 2011 12:54:33 -0400 Subject: Btrfs: take away the num_items argument from btrfs_join_transaction I keep forgetting that btrfs_join_transaction() just ignores the num_items argument, which leads me to sending pointless patches and looking stupid :). So just kill the num_items argument from btrfs_join_transaction and btrfs_start_ioctl_transaction, since neither of them use it. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/extent-tree.c | 12 ++++++------ fs/btrfs/inode.c | 34 +++++++++++++++++----------------- fs/btrfs/ioctl.c | 4 ++-- fs/btrfs/relocation.c | 12 ++++++------ fs/btrfs/transaction.c | 13 +++++-------- fs/btrfs/transaction.h | 9 +++------ 7 files changed, 42 insertions(+), 48 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..9d6c9e332ca3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1568,7 +1568,7 @@ static int transaction_kthread(void *arg) transid = cur->transid; spin_unlock(&root->fs_info->new_trans_lock); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); @@ -2495,13 +2495,13 @@ int btrfs_commit_super(struct btrfs_root *root) down_write(&root->fs_info->cleanup_work_sem); up_write(&root->fs_info->cleanup_work_sem); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); btrfs_commit_transaction(trans, root); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16c..941b28e78931 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3174,7 +3174,7 @@ again: spin_unlock(&data_sinfo->lock); alloc: alloc_target = btrfs_get_alloc_profile(root, 1); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); @@ -3202,7 +3202,7 @@ alloc: commit_trans: if (!committed && !root->fs_info->open_ioctl_trans) { 
committed = 1; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); @@ -3589,7 +3589,7 @@ again: goto out; ret = -ENOSPC; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) goto out; ret = btrfs_commit_transaction(trans, root); @@ -3816,7 +3816,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, if (trans) return -EAGAIN; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); ret = btrfs_commit_transaction(trans, root); return 0; @@ -7649,7 +7649,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) BUG_ON(reloc_root->commit_root != NULL); while (1) { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); mutex_lock(&root->fs_info->drop_mutex); @@ -8176,7 +8176,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, BUG_ON(cache->ro); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); alloc_flags = update_block_group_flags(root, cache->flags); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3b9f1643aa57..e47bdf0fb75a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -420,7 +420,7 @@ again: } } if (start == 0) { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -617,7 +617,7 @@ retry: async_extent->start + async_extent->ram_size - 1, GFP_NOFS); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_reserve_extent(trans, root, @@ -779,7 +779,7 @@ static noinline int cow_file_range(struct inode *inode, int ret = 0; BUG_ON(root == root->fs_info->tree_root); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1056,9 +1056,9 @@ static noinline int run_delalloc_nocow(struct inode *inode, BUG_ON(!path); if (root == root->fs_info->tree_root) { nolock = true; - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); } else { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); } BUG_ON(IS_ERR(trans)); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1718,9 +1718,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); if (!ret) { if (nolock) - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); else - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1735,9 +1735,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) 0, &cached_state, GFP_NOFS); if (nolock) - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); else - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = 
&root->fs_info->delalloc_block_rsv; @@ -2415,7 +2415,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) (u64)-1); if (root->orphan_block_rsv || root->orphan_item_inserted) { - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (!IS_ERR(trans)) btrfs_end_transaction(trans, root); } @@ -4378,9 +4378,9 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (wbc->sync_mode == WB_SYNC_ALL) { if (nolock) - trans = btrfs_join_transaction_nolock(root, 1); + trans = btrfs_join_transaction_nolock(root); else - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); @@ -4407,7 +4407,7 @@ void btrfs_dirty_inode(struct inode *inode) if (BTRFS_I(inode)->dummy_inode) return; - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); @@ -5226,7 +5226,7 @@ again: free_extent_map(em); em = NULL; btrfs_release_path(root, path); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return ERR_CAST(trans); goto again; @@ -5470,7 +5470,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); } - trans = btrfs_join_transaction(root, 0); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return ERR_CAST(trans); @@ -5703,7 +5703,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * to make sure the current transaction stays open * while we look for nocow cross refs */ - trans = btrfs_join_transaction(root, 0); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) goto must_cow; @@ -5841,7 +5841,7 @@ again: BUG_ON(!ordered); - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { err = -ENOMEM; goto out; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed4799..908c3d4b48c6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -242,7 +242,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } - trans = btrfs_join_transaction(root, 1); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); ret = btrfs_update_inode(trans, root, inode); @@ -2182,7 +2182,7 @@ static long btrfs_ioctl_trans_start(struct file *file) mutex_unlock(&root->fs_info->trans_mutex); ret = -ENOMEM; - trans = btrfs_start_ioctl_transaction(root, 0); + trans = btrfs_start_ioctl_transaction(root); if (IS_ERR(trans)) goto out_drop; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 199a80134312..8bb256667f2d 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2149,7 +2149,7 @@ again: err = ret; } - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) { if (!err) btrfs_block_rsv_release(rc->extent_root, @@ -3233,7 +3233,7 @@ truncate: goto out; } - trans = btrfs_join_transaction(root, 0); + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { btrfs_free_path(path); ret = PTR_ERR(trans); @@ -3642,7 +3642,7 @@ int prepare_to_relocate(struct reloc_control *rc) rc->create_reloc_tree = 1; set_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); BUG_ON(IS_ERR(trans)); btrfs_commit_transaction(trans, rc->extent_root); 
return 0; @@ -3831,7 +3831,7 @@ restart: btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); /* get rid of pinned extents */ - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) err = PTR_ERR(trans); else @@ -4156,7 +4156,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) set_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) { unset_reloc_control(rc); err = PTR_ERR(trans); @@ -4190,7 +4190,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) unset_reloc_control(rc); - trans = btrfs_join_transaction(rc->extent_root, 1); + trans = btrfs_join_transaction(rc->extent_root); if (IS_ERR(trans)) err = PTR_ERR(trans); else diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..70bfb26df967 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -257,22 +257,19 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, { return start_transaction(root, num_items, TRANS_START); } -struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, - int num_blocks) +struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN); } -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, - int num_blocks) +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root) { return start_transaction(root, 0, TRANS_JOIN_NOLOCK); } -struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, - int num_blocks) +struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root) { - return start_transaction(r, 0, TRANS_USERSPACE); + return start_transaction(root, 0, TRANS_USERSPACE); } /* wait for a transaction commit to be fully complete */ @@ -1171,7 +1168,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; - ac->newtrans = btrfs_join_transaction(root, 0); + ac->newtrans = btrfs_join_transaction(root); if (IS_ERR(ac->newtrans)) { int err = PTR_ERR(ac->newtrans); kfree(ac); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c584..1f573f09dba2 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -92,12 +92,9 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, struct btrfs_root *root); struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, int num_items); -struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, - int num_blocks); -struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, - int num_blocks); -struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, - int num_blocks); +struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root); +struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root); int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root); -- cgit v1.2.3 From 2a1eb4614d984d5cd4c928784e9afcf5c07f93be Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 13 Apr 2011 15:15:59 -0400 Subject: Btrfs: if we've already started a trans handle, use that one We 
currently track trans handles in current->journal_info, but we don't actually use it. This patch fixes it. This will cover the case where we have multiple people starting transactions down the call chain. This keeps us from having to allocate a new handle and all of that, we just increase the use count of the current handle, save the old block_rsv, and return. I tested this with xfstests and it worked out fine. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 17 +++++++++++++++++ fs/btrfs/transaction.h | 2 ++ 2 files changed, 19 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 70bfb26df967..46f40564c168 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -184,6 +184,15 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) return ERR_PTR(-EROFS); + + if (current->journal_info) { + WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK); + h = current->journal_info; + h->use_count++; + h->orig_rsv = h->block_rsv; + h->block_rsv = NULL; + goto got_it; + } again: h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); if (!h) @@ -213,7 +222,9 @@ again: h->block_group = 0; h->bytes_reserved = 0; h->delayed_ref_updates = 0; + h->use_count = 1; h->block_rsv = NULL; + h->orig_rsv = NULL; smp_mb(); if (cur_trans->blocked && may_wait_transaction(root, type)) { @@ -241,6 +252,7 @@ again: } } +got_it: if (type != TRANS_JOIN_NOLOCK) mutex_lock(&root->fs_info->trans_mutex); record_root_in_trans(h, root); @@ -428,6 +440,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_fs_info *info = root->fs_info; int count = 0; + if (--trans->use_count) { + trans->block_rsv = trans->orig_rsv; + return 0; + } + while (count < 4) { unsigned long cur = trans->delayed_ref_updates; trans->delayed_ref_updates = 0; diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 1f573f09dba2..154314f80f8d 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -47,11 +47,13 @@ struct btrfs_trans_handle { u64 transid; u64 block_group; u64 bytes_reserved; + unsigned long use_count; unsigned long blocks_reserved; unsigned long blocks_used; unsigned long delayed_ref_updates; struct btrfs_transaction *transaction; struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *orig_rsv; }; struct btrfs_pending_snapshot { -- cgit v1.2.3 From a4abeea41adfa3c143c289045f4625dfaeba2212 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 11 Apr 2011 17:25:13 -0400 Subject: Btrfs: kill trans_mutex We use trans_mutex for lots of things, here's a basic list 1) To serialize trans_handles joining the currently running transaction 2) To make sure that no new trans handles are started while we are committing 3) To protect the dead_roots list and the transaction lists Really the serializing trans_handles joining is not too hard, and can really get bogged down in acquiring a reference to the transaction. So replace the trans_mutex with a trans_lock spinlock and use it to do the following 1) Protect fs_info->running_transaction. All trans handles have to do is check this, and then take a reference of the transaction and keep on going. 2) Protect the fs_info->trans_list. This doesn't get used too much, basically it just holds the current transactions, which will usually just be the currently committing transaction and the currently running transaction at most. 3) Protect the dead roots list. 
This is only ever processed by splicing the list so this is relatively simple. 4) Protect the fs_info->reloc_ctl stuff. This is very lightweight and was using the trans_mutex before, so this is a pretty straightforward change. 5) Protect fs_info->no_trans_join. Because we don't hold the trans_lock over the entirety of the commit we need to have a way to block new people from creating a new transaction while we're doing our work. So we set no_trans_join and in join_transaction we test to see if that is set, and if it is we do a wait_on_commit. 6) Make the transaction use count atomic so we don't need to take locks to modify it when we're dropping references. 7) Add a commit_lock to the transaction to make sure multiple people trying to commit the same transaction don't race and commit at the same time. 8) Make open_ioctl_trans an atomic so we don't have to take any locks for ioctl trans. I have tested this with xfstests, but obviously it is a pretty hairy change so lots of testing is greatly appreciated. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 6 +- fs/btrfs/disk-io.c | 30 +++--- fs/btrfs/extent-tree.c | 3 +- fs/btrfs/file.c | 4 +- fs/btrfs/ioctl.c | 12 +-- fs/btrfs/relocation.c | 16 +-- fs/btrfs/transaction.c | 271 ++++++++++++++++++++++++++----------------------- fs/btrfs/transaction.h | 4 +- 8 files changed, 177 insertions(+), 169 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..522a39b0033d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -919,7 +919,6 @@ struct btrfs_fs_info { * is required instead of the faster short fsync log commits */ u64 last_trans_log_full_commit; - u64 open_ioctl_trans; unsigned long mount_opt:20; unsigned long compress_type:4; u64 max_inline; @@ -936,7 +935,6 @@ struct btrfs_fs_info { struct super_block *sb; struct inode *btree_inode; struct backing_dev_info bdi; - struct mutex trans_mutex; struct mutex tree_log_mutex; struct mutex transaction_kthread_mutex; struct mutex cleaner_mutex; @@ -957,6 +955,7 @@ struct btrfs_fs_info { struct rw_semaphore subvol_sem; struct srcu_struct subvol_srcu; + spinlock_t trans_lock; struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; @@ -969,6 +968,7 @@ struct btrfs_fs_info { atomic_t async_submit_draining; atomic_t nr_async_bios; atomic_t async_delalloc_pages; + atomic_t open_ioctl_trans; /* * this is used by the balancing code to wait for all the pending @@ -1032,6 +1032,7 @@ struct btrfs_fs_info { int closing; int log_root_recovering; int enospc_unlink; + int trans_no_join; u64 total_pinned; @@ -1053,7 +1054,6 @@ struct btrfs_fs_info { struct reloc_control *reloc_ctl; spinlock_t delalloc_lock; - spinlock_t new_trans_lock; u64 delalloc_bytes; /* data_alloc_cluster is only used in ssd mode */ diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9d6c9e332ca3..93ef254ec432 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1551,22 +1551,22 @@ static int transaction_kthread(void *arg) vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); mutex_lock(&root->fs_info->transaction_kthread_mutex); - spin_lock(&root->fs_info->new_trans_lock); + spin_lock(&root->fs_info->trans_lock); cur = root->fs_info->running_transaction; if (!cur) { - spin_unlock(&root->fs_info->new_trans_lock); + spin_unlock(&root->fs_info->trans_lock); goto sleep; } now = get_seconds(); if (!cur->blocked && (now < cur->start_time || now - cur->start_time < 30)) { - spin_unlock(&root->fs_info->new_trans_lock); + 
spin_unlock(&root->fs_info->trans_lock); delay = HZ * 5; goto sleep; } transid = cur->transid; - spin_unlock(&root->fs_info->new_trans_lock); + spin_unlock(&root->fs_info->trans_lock); trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); @@ -1658,7 +1658,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->ordered_operations); INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_lock); - spin_lock_init(&fs_info->new_trans_lock); + spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->ref_cache_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); @@ -1687,6 +1687,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->sb = sb; fs_info->max_inline = 8192 * 1024; fs_info->metadata_ratio = 0; + fs_info->trans_no_join = 0; fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); @@ -1735,7 +1736,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info->do_barriers = 1; - mutex_init(&fs_info->trans_mutex); mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->chunk_mutex); @@ -3006,10 +3006,13 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) WARN_ON(1); - mutex_lock(&root->fs_info->trans_mutex); mutex_lock(&root->fs_info->transaction_kthread_mutex); + spin_lock(&root->fs_info->trans_lock); list_splice_init(&root->fs_info->trans_list, &list); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); + while (!list_empty(&list)) { t = list_entry(list.next, struct btrfs_transaction, list); if (!t) @@ -3034,23 +3037,18 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) t->blocked = 0; if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); - mutex_unlock(&root->fs_info->trans_mutex); - mutex_lock(&root->fs_info->trans_mutex); t->commit_done = 1; if (waitqueue_active(&t->commit_wait)) wake_up(&t->commit_wait); - mutex_unlock(&root->fs_info->trans_mutex); - - mutex_lock(&root->fs_info->trans_mutex); btrfs_destroy_pending_snapshots(t); btrfs_destroy_delalloc_inodes(root); - spin_lock(&root->fs_info->new_trans_lock); + spin_lock(&root->fs_info->trans_lock); root->fs_info->running_transaction = NULL; - spin_unlock(&root->fs_info->new_trans_lock); + spin_unlock(&root->fs_info->trans_lock); btrfs_destroy_marked_extents(root, &t->dirty_pages, EXTENT_DIRTY); @@ -3064,8 +3062,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root) kmem_cache_free(btrfs_transaction_cachep, t); } + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 0; + spin_unlock(&root->fs_info->trans_lock); mutex_unlock(&root->fs_info->transaction_kthread_mutex); - mutex_unlock(&root->fs_info->trans_mutex); return 0; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 941b28e78931..ca599654ce19 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3200,7 +3200,8 @@ alloc: /* commit the current transaction and try again */ commit_trans: - if (!committed && !root->fs_info->open_ioctl_trans) { + if (!committed && + !atomic_read(&root->fs_info->open_ioctl_trans)) { committed = 1; trans = btrfs_join_transaction(root); if (IS_ERR(trans)) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 75899a01dded..cd5e82e500cf 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1222,14 +1222,12 @@ int btrfs_sync_file(struct file *file, int datasync) * the current transaction, we can 
bail out now without any * syncing */ - mutex_lock(&root->fs_info->trans_mutex); + smp_mb(); if (BTRFS_I(inode)->last_trans <= root->fs_info->last_trans_committed) { BTRFS_I(inode)->last_trans = 0; - mutex_unlock(&root->fs_info->trans_mutex); goto out; } - mutex_unlock(&root->fs_info->trans_mutex); /* * ok we haven't committed the transaction yet, lets do a commit diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 908c3d4b48c6..a578620e06a8 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2177,9 +2177,7 @@ static long btrfs_ioctl_trans_start(struct file *file) if (ret) goto out; - mutex_lock(&root->fs_info->trans_mutex); - root->fs_info->open_ioctl_trans++; - mutex_unlock(&root->fs_info->trans_mutex); + atomic_inc(&root->fs_info->open_ioctl_trans); ret = -ENOMEM; trans = btrfs_start_ioctl_transaction(root); @@ -2190,9 +2188,7 @@ static long btrfs_ioctl_trans_start(struct file *file) return 0; out_drop: - mutex_lock(&root->fs_info->trans_mutex); - root->fs_info->open_ioctl_trans--; - mutex_unlock(&root->fs_info->trans_mutex); + atomic_dec(&root->fs_info->open_ioctl_trans); mnt_drop_write(file->f_path.mnt); out: return ret; @@ -2426,9 +2422,7 @@ long btrfs_ioctl_trans_end(struct file *file) btrfs_end_transaction(trans, root); - mutex_lock(&root->fs_info->trans_mutex); - root->fs_info->open_ioctl_trans--; - mutex_unlock(&root->fs_info->trans_mutex); + atomic_dec(&root->fs_info->open_ioctl_trans); mnt_drop_write(file->f_path.mnt); return 0; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 8bb256667f2d..09c30d37d43e 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2136,10 +2136,10 @@ int prepare_to_merge(struct reloc_control *rc, int err) u64 num_bytes = 0; int ret; - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&root->fs_info->trans_lock); rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; rc->merging_rsv_size += rc->nodes_relocated * 2; - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); again: if (!err) { num_bytes = rc->merging_rsv_size; @@ -2208,9 +2208,9 @@ int merge_reloc_roots(struct reloc_control *rc) int ret; again: root = rc->extent_root; - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&root->fs_info->trans_lock); list_splice_init(&rc->reloc_roots, &reloc_roots); - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); while (!list_empty(&reloc_roots)) { found = 1; @@ -3583,17 +3583,17 @@ next: static void set_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - mutex_lock(&fs_info->trans_mutex); + spin_lock(&fs_info->trans_lock); fs_info->reloc_ctl = rc; - mutex_unlock(&fs_info->trans_mutex); + spin_unlock(&fs_info->trans_lock); } static void unset_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - mutex_lock(&fs_info->trans_mutex); + spin_lock(&fs_info->trans_lock); fs_info->reloc_ctl = NULL; - mutex_unlock(&fs_info->trans_mutex); + spin_unlock(&fs_info->trans_lock); } static int check_extent_flags(u64 flags) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 46f40564c168..43816f8b23e7 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -34,6 +34,7 @@ static noinline void put_transaction(struct btrfs_transaction *transaction) { WARN_ON(atomic_read(&transaction->use_count) == 0); if (atomic_dec_and_test(&transaction->use_count)) { + BUG_ON(!list_empty(&transaction->list)); memset(transaction, 0, 
sizeof(*transaction)); kmem_cache_free(btrfs_transaction_cachep, transaction); } @@ -48,47 +49,73 @@ static noinline void switch_commit_root(struct btrfs_root *root) /* * either allocate a new transaction or hop into the existing one */ -static noinline int join_transaction(struct btrfs_root *root) +static noinline int join_transaction(struct btrfs_root *root, int nofail) { struct btrfs_transaction *cur_trans; + + spin_lock(&root->fs_info->trans_lock); + if (root->fs_info->trans_no_join) { + if (!nofail) { + spin_unlock(&root->fs_info->trans_lock); + return -EBUSY; + } + } + cur_trans = root->fs_info->running_transaction; - if (!cur_trans) { - cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, - GFP_NOFS); - if (!cur_trans) - return -ENOMEM; - root->fs_info->generation++; - atomic_set(&cur_trans->num_writers, 1); - cur_trans->num_joined = 0; - cur_trans->transid = root->fs_info->generation; - init_waitqueue_head(&cur_trans->writer_wait); - init_waitqueue_head(&cur_trans->commit_wait); - cur_trans->in_commit = 0; - cur_trans->blocked = 0; - atomic_set(&cur_trans->use_count, 1); - cur_trans->commit_done = 0; - cur_trans->start_time = get_seconds(); - - cur_trans->delayed_refs.root = RB_ROOT; - cur_trans->delayed_refs.num_entries = 0; - cur_trans->delayed_refs.num_heads_ready = 0; - cur_trans->delayed_refs.num_heads = 0; - cur_trans->delayed_refs.flushing = 0; - cur_trans->delayed_refs.run_delayed_start = 0; - spin_lock_init(&cur_trans->delayed_refs.lock); - - INIT_LIST_HEAD(&cur_trans->pending_snapshots); - list_add_tail(&cur_trans->list, &root->fs_info->trans_list); - extent_io_tree_init(&cur_trans->dirty_pages, - root->fs_info->btree_inode->i_mapping, - GFP_NOFS); - spin_lock(&root->fs_info->new_trans_lock); - root->fs_info->running_transaction = cur_trans; - spin_unlock(&root->fs_info->new_trans_lock); - } else { + if (cur_trans) { + atomic_inc(&cur_trans->use_count); + atomic_inc(&cur_trans->num_writers); + cur_trans->num_joined++; + spin_unlock(&root->fs_info->trans_lock); + return 0; + } + spin_unlock(&root->fs_info->trans_lock); + + cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS); + if (!cur_trans) + return -ENOMEM; + spin_lock(&root->fs_info->trans_lock); + if (root->fs_info->running_transaction) { + kmem_cache_free(btrfs_transaction_cachep, cur_trans); + cur_trans = root->fs_info->running_transaction; + atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->num_writers); cur_trans->num_joined++; + spin_unlock(&root->fs_info->trans_lock); + return 0; } + atomic_set(&cur_trans->num_writers, 1); + cur_trans->num_joined = 0; + init_waitqueue_head(&cur_trans->writer_wait); + init_waitqueue_head(&cur_trans->commit_wait); + cur_trans->in_commit = 0; + cur_trans->blocked = 0; + /* + * One for this trans handle, one so it will live on until we + * commit the transaction. 
+ */ + atomic_set(&cur_trans->use_count, 2); + cur_trans->commit_done = 0; + cur_trans->start_time = get_seconds(); + + cur_trans->delayed_refs.root = RB_ROOT; + cur_trans->delayed_refs.num_entries = 0; + cur_trans->delayed_refs.num_heads_ready = 0; + cur_trans->delayed_refs.num_heads = 0; + cur_trans->delayed_refs.flushing = 0; + cur_trans->delayed_refs.run_delayed_start = 0; + spin_lock_init(&cur_trans->commit_lock); + spin_lock_init(&cur_trans->delayed_refs.lock); + + INIT_LIST_HEAD(&cur_trans->pending_snapshots); + list_add_tail(&cur_trans->list, &root->fs_info->trans_list); + extent_io_tree_init(&cur_trans->dirty_pages, + root->fs_info->btree_inode->i_mapping, + GFP_NOFS); + root->fs_info->generation++; + cur_trans->transid = root->fs_info->generation; + root->fs_info->running_transaction = cur_trans; + spin_unlock(&root->fs_info->trans_lock); return 0; } @@ -99,39 +126,28 @@ static noinline int join_transaction(struct btrfs_root *root) * to make sure the old root from before we joined the transaction is deleted * when the transaction commits */ -static noinline int record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) +int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, + struct btrfs_root *root) { if (root->ref_cows && root->last_trans < trans->transid) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); + spin_lock(&root->fs_info->fs_roots_radix_lock); + if (root->last_trans == trans->transid) { + spin_unlock(&root->fs_info->fs_roots_radix_lock); + return 0; + } + root->last_trans = trans->transid; radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); - root->last_trans = trans->transid; + spin_unlock(&root->fs_info->fs_roots_radix_lock); btrfs_init_reloc_root(trans, root); } return 0; } -int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) -{ - if (!root->ref_cows) - return 0; - - mutex_lock(&root->fs_info->trans_mutex); - if (root->last_trans == trans->transid) { - mutex_unlock(&root->fs_info->trans_mutex); - return 0; - } - - record_root_in_trans(trans, root); - mutex_unlock(&root->fs_info->trans_mutex); - return 0; -} - /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. 
@@ -140,21 +156,23 @@ static void wait_current_trans(struct btrfs_root *root) { struct btrfs_transaction *cur_trans; + spin_lock(&root->fs_info->trans_lock); cur_trans = root->fs_info->running_transaction; if (cur_trans && cur_trans->blocked) { DEFINE_WAIT(wait); atomic_inc(&cur_trans->use_count); + spin_unlock(&root->fs_info->trans_lock); while (1) { prepare_to_wait(&root->fs_info->transaction_wait, &wait, TASK_UNINTERRUPTIBLE); if (!cur_trans->blocked) break; - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); } finish_wait(&root->fs_info->transaction_wait, &wait); put_transaction(cur_trans); + } else { + spin_unlock(&root->fs_info->trans_lock); } } @@ -167,10 +185,16 @@ enum btrfs_trans_type { static int may_wait_transaction(struct btrfs_root *root, int type) { - if (!root->fs_info->log_root_recovering && - ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || - type == TRANS_USERSPACE)) + if (root->fs_info->log_root_recovering) + return 0; + + if (type == TRANS_USERSPACE) + return 1; + + if (type == TRANS_START && + !atomic_read(&root->fs_info->open_ioctl_trans)) return 1; + return 0; } @@ -198,23 +222,21 @@ again: if (!h) return ERR_PTR(-ENOMEM); - if (type != TRANS_JOIN_NOLOCK) - mutex_lock(&root->fs_info->trans_mutex); if (may_wait_transaction(root, type)) wait_current_trans(root); - ret = join_transaction(root); + do { + ret = join_transaction(root, type == TRANS_JOIN_NOLOCK); + if (ret == -EBUSY) + wait_current_trans(root); + } while (ret == -EBUSY); + if (ret < 0) { kmem_cache_free(btrfs_trans_handle_cachep, h); - if (type != TRANS_JOIN_NOLOCK) - mutex_unlock(&root->fs_info->trans_mutex); return ERR_PTR(ret); } cur_trans = root->fs_info->running_transaction; - atomic_inc(&cur_trans->use_count); - if (type != TRANS_JOIN_NOLOCK) - mutex_unlock(&root->fs_info->trans_mutex); h->transid = cur_trans->transid; h->transaction = cur_trans; @@ -253,11 +275,7 @@ again: } got_it: - if (type != TRANS_JOIN_NOLOCK) - mutex_lock(&root->fs_info->trans_mutex); - record_root_in_trans(h, root); - if (type != TRANS_JOIN_NOLOCK) - mutex_unlock(&root->fs_info->trans_mutex); + btrfs_record_root_in_trans(h, root); if (!current->journal_info && type != TRANS_USERSPACE) current->journal_info = h; @@ -289,17 +307,13 @@ static noinline int wait_for_commit(struct btrfs_root *root, struct btrfs_transaction *commit) { DEFINE_WAIT(wait); - mutex_lock(&root->fs_info->trans_mutex); while (!commit->commit_done) { prepare_to_wait(&commit->commit_wait, &wait, TASK_UNINTERRUPTIBLE); if (commit->commit_done) break; - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); } - mutex_unlock(&root->fs_info->trans_mutex); finish_wait(&commit->commit_wait, &wait); return 0; } @@ -309,50 +323,49 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) struct btrfs_transaction *cur_trans = NULL, *t; int ret; - mutex_lock(&root->fs_info->trans_mutex); - ret = 0; if (transid) { if (transid <= root->fs_info->last_trans_committed) - goto out_unlock; + goto out; /* find specified transaction */ + spin_lock(&root->fs_info->trans_lock); list_for_each_entry(t, &root->fs_info->trans_list, list) { if (t->transid == transid) { cur_trans = t; + atomic_inc(&cur_trans->use_count); break; } if (t->transid > transid) break; } + spin_unlock(&root->fs_info->trans_lock); ret = -EINVAL; if (!cur_trans) - goto out_unlock; /* bad transid */ + goto out; /* bad transid */ } else { /* find newest transaction that is committing | committed */ + 
spin_lock(&root->fs_info->trans_lock); list_for_each_entry_reverse(t, &root->fs_info->trans_list, list) { if (t->in_commit) { if (t->commit_done) - goto out_unlock; + goto out; cur_trans = t; + atomic_inc(&cur_trans->use_count); break; } } + spin_unlock(&root->fs_info->trans_lock); if (!cur_trans) - goto out_unlock; /* nothing committing|committed */ + goto out; /* nothing committing|committed */ } - atomic_inc(&cur_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); - wait_for_commit(root, cur_trans); - mutex_lock(&root->fs_info->trans_mutex); put_transaction(cur_trans); ret = 0; -out_unlock: - mutex_unlock(&root->fs_info->trans_mutex); +out: return ret; } @@ -401,10 +414,8 @@ harder: void btrfs_throttle(struct btrfs_root *root) { - mutex_lock(&root->fs_info->trans_mutex); - if (!root->fs_info->open_ioctl_trans) + if (!atomic_read(&root->fs_info->open_ioctl_trans)) wait_current_trans(root); - mutex_unlock(&root->fs_info->trans_mutex); } static int should_end_transaction(struct btrfs_trans_handle *trans, @@ -422,6 +433,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, struct btrfs_transaction *cur_trans = trans->transaction; int updates; + smp_mb(); if (cur_trans->blocked || cur_trans->delayed_refs.flushing) return 1; @@ -467,9 +479,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, btrfs_trans_release_metadata(trans, root); - if (lock && !root->fs_info->open_ioctl_trans && - should_end_transaction(trans, root)) + if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) && + should_end_transaction(trans, root)) { trans->transaction->blocked = 1; + smp_wmb(); + } if (lock && cur_trans->blocked && !cur_trans->in_commit) { if (throttle) @@ -739,9 +753,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, */ int btrfs_add_dead_root(struct btrfs_root *root) { - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&root->fs_info->trans_lock); list_add(&root->root_list, &root->fs_info->dead_roots); - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); return 0; } @@ -757,6 +771,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, int ret; int err = 0; + spin_lock(&fs_info->fs_roots_radix_lock); while (1) { ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, (void **)gang, 0, @@ -769,6 +784,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); + spin_unlock(&fs_info->fs_roots_radix_lock); btrfs_free_log(trans, root); btrfs_update_reloc_root(trans, root); @@ -783,10 +799,12 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, err = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); + spin_lock(&fs_info->fs_roots_radix_lock); if (err) break; } } + spin_unlock(&fs_info->fs_roots_radix_lock); return err; } @@ -972,7 +990,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent = dget_parent(dentry); parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; - record_root_in_trans(trans, parent_root); + btrfs_record_root_in_trans(trans, parent_root); /* * insert the directory item @@ -990,7 +1008,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); - record_root_in_trans(trans, root); + btrfs_record_root_in_trans(trans, root); 
btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -1080,20 +1098,20 @@ static void update_super_roots(struct btrfs_root *root) int btrfs_transaction_in_commit(struct btrfs_fs_info *info) { int ret = 0; - spin_lock(&info->new_trans_lock); + spin_lock(&info->trans_lock); if (info->running_transaction) ret = info->running_transaction->in_commit; - spin_unlock(&info->new_trans_lock); + spin_unlock(&info->trans_lock); return ret; } int btrfs_transaction_blocked(struct btrfs_fs_info *info) { int ret = 0; - spin_lock(&info->new_trans_lock); + spin_lock(&info->trans_lock); if (info->running_transaction) ret = info->running_transaction->blocked; - spin_unlock(&info->new_trans_lock); + spin_unlock(&info->trans_lock); return ret; } @@ -1117,9 +1135,7 @@ static void wait_current_trans_commit_start(struct btrfs_root *root, &wait); break; } - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); finish_wait(&root->fs_info->transaction_blocked_wait, &wait); } } @@ -1145,9 +1161,7 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root, &wait); break; } - mutex_unlock(&root->fs_info->trans_mutex); schedule(); - mutex_lock(&root->fs_info->trans_mutex); finish_wait(&root->fs_info->transaction_wait, &wait); } @@ -1193,22 +1207,18 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, } /* take transaction reference */ - mutex_lock(&root->fs_info->trans_mutex); cur_trans = trans->transaction; atomic_inc(&cur_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); schedule_delayed_work(&ac->work, 0); /* wait for transaction to start and unblock */ - mutex_lock(&root->fs_info->trans_mutex); if (wait_for_unblock) wait_current_trans_commit_start_and_unblock(root, cur_trans); else wait_current_trans_commit_start(root, cur_trans); put_transaction(cur_trans); - mutex_unlock(&root->fs_info->trans_mutex); return 0; } @@ -1252,38 +1262,41 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = btrfs_run_delayed_refs(trans, root, 0); BUG_ON(ret); - mutex_lock(&root->fs_info->trans_mutex); + spin_lock(&cur_trans->commit_lock); if (cur_trans->in_commit) { + spin_unlock(&cur_trans->commit_lock); atomic_inc(&cur_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); btrfs_end_transaction(trans, root); ret = wait_for_commit(root, cur_trans); BUG_ON(ret); - mutex_lock(&root->fs_info->trans_mutex); put_transaction(cur_trans); - mutex_unlock(&root->fs_info->trans_mutex); return 0; } trans->transaction->in_commit = 1; trans->transaction->blocked = 1; + spin_unlock(&cur_trans->commit_lock); wake_up(&root->fs_info->transaction_blocked_wait); + spin_lock(&root->fs_info->trans_lock); if (cur_trans->list.prev != &root->fs_info->trans_list) { prev_trans = list_entry(cur_trans->list.prev, struct btrfs_transaction, list); if (!prev_trans->commit_done) { atomic_inc(&prev_trans->use_count); - mutex_unlock(&root->fs_info->trans_mutex); + spin_unlock(&root->fs_info->trans_lock); wait_for_commit(root, prev_trans); - mutex_lock(&root->fs_info->trans_mutex); put_transaction(prev_trans); + } else { + spin_unlock(&root->fs_info->trans_lock); } + } else { + spin_unlock(&root->fs_info->trans_lock); } if (now < cur_trans->start_time || now - cur_trans->start_time < 1) @@ -1291,12 +1304,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, do { int 
snap_pending = 0; + joined = cur_trans->num_joined; if (!list_empty(&trans->transaction->pending_snapshots)) snap_pending = 1; WARN_ON(cur_trans != trans->transaction); - mutex_unlock(&root->fs_info->trans_mutex); if (flush_on_commit || snap_pending) { btrfs_start_delalloc_inodes(root, 1); @@ -1316,14 +1329,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, prepare_to_wait(&cur_trans->writer_wait, &wait, TASK_UNINTERRUPTIBLE); - smp_mb(); if (atomic_read(&cur_trans->num_writers) > 1) schedule_timeout(MAX_SCHEDULE_TIMEOUT); else if (should_grow) schedule_timeout(1); - mutex_lock(&root->fs_info->trans_mutex); finish_wait(&cur_trans->writer_wait, &wait); + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); @@ -1364,9 +1378,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_prepare_extent_commit(trans, root); cur_trans = root->fs_info->running_transaction; - spin_lock(&root->fs_info->new_trans_lock); - root->fs_info->running_transaction = NULL; - spin_unlock(&root->fs_info->new_trans_lock); btrfs_set_root_node(&root->fs_info->tree_root->root_item, root->fs_info->tree_root->node); @@ -1387,10 +1398,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, sizeof(root->fs_info->super_copy)); trans->transaction->blocked = 0; + spin_lock(&root->fs_info->trans_lock); + root->fs_info->running_transaction = NULL; + root->fs_info->trans_no_join = 0; + spin_unlock(&root->fs_info->trans_lock); wake_up(&root->fs_info->transaction_wait); - mutex_unlock(&root->fs_info->trans_mutex); ret = btrfs_write_and_wait_transaction(trans, root); BUG_ON(ret); write_ctree_super(trans, root, 0); @@ -1403,22 +1417,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_finish_extent_commit(trans, root); - mutex_lock(&root->fs_info->trans_mutex); - cur_trans->commit_done = 1; root->fs_info->last_trans_committed = cur_trans->transid; wake_up(&cur_trans->commit_wait); + spin_lock(&root->fs_info->trans_lock); list_del_init(&cur_trans->list); + spin_unlock(&root->fs_info->trans_lock); + put_transaction(cur_trans); put_transaction(cur_trans); trace_btrfs_transaction_commit(root); - mutex_unlock(&root->fs_info->trans_mutex); - if (current->journal_info == trans) current->journal_info = NULL; @@ -1438,9 +1451,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) LIST_HEAD(list); struct btrfs_fs_info *fs_info = root->fs_info; - mutex_lock(&fs_info->trans_mutex); + spin_lock(&fs_info->trans_lock); list_splice_init(&fs_info->dead_roots, &list); - mutex_unlock(&fs_info->trans_mutex); + spin_unlock(&fs_info->trans_lock); while (!list_empty(&list)) { root = list_entry(list.next, struct btrfs_root, root_list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 154314f80f8d..11c6efcd4ed2 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -28,10 +28,12 @@ struct btrfs_transaction { * transaction can end */ atomic_t num_writers; + atomic_t use_count; unsigned long num_joined; + + spinlock_t commit_lock; int in_commit; - atomic_t use_count; int commit_done; int blocked; struct list_head list; -- cgit v1.2.3 From fcb80c2affd63237cff5b34cba5756be7c976a5a Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 3 May 2011 10:40:22 -0400 Subject: Btrfs: fix how we do space reservation for truncate The ceph guys keep running into problems where we have space reserved in 
our orphan block rsv when freeing it up. This is because they tend to do snapshots a lot, so their truncates tend to use a bunch of space, so when we go to do things like update the inode we have to steal reservation space in order to make the reservation happen. This happens because truncate can use as much space as it freaking feels like, but we still have to hold space for removing the orphan item and updating the inode, which will definitely always happen. So in order to fix this we need to split all of the reservation stuff up. So with this patch we have 1) The orphan block reserve which only holds the space for deleting our orphan item when everything is over. 2) The truncate block reserve which gets allocated and used specifically for the space that the truncate will use on a per-truncate basis. 3) The transaction will always have 1 item's worth of data reserved so we can update the inode normally. Hopefully this will make the ceph problem go away. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.h | 3 ++ fs/btrfs/extent-tree.c | 46 +++++++++++++++----- fs/btrfs/inode.c | 111 +++++++++++++++++++++++++++++++++++++------------ 3 files changed, 123 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 522a39b0033d..f31aed7fedd9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2224,6 +2224,9 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv, void btrfs_block_rsv_release(struct btrfs_root *root, struct btrfs_block_rsv *block_rsv, u64 num_bytes); +int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *rsv); int btrfs_set_block_group_ro(struct btrfs_root *root, struct btrfs_block_group_cache *cache); int btrfs_set_block_group_rw(struct btrfs_root *root, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ca599654ce19..a2ca561c70f0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3980,6 +3980,37 @@ static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) 3 * num_items; } +int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_block_rsv *rsv) +{ + struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv; + u64 num_bytes; + int ret; + + /* + * Truncate should be freeing data, but give us 2 items just in case it + * needs to use some space. We may want to be smarter about this in the + * future. + */ + num_bytes = calc_trans_metadata_size(root, 2); + + /* We already have enough bytes, just return */ + if (rsv->reserved >= num_bytes) + return 0; + + num_bytes -= rsv->reserved; + + /* + * You should have reserved enough space before hand to do this, so this + * should not fail. + */ + ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes); + BUG_ON(ret); + + return 0; +} + int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_root *root, int num_items) @@ -4020,23 +4051,18 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; /* - * one for deleting orphan item, one for updating inode and - * two for calling btrfs_truncate_inode_items. - * - * btrfs_truncate_inode_items is a delete operation, it frees - * more space than it uses in most cases. So two units of - * metadata space should be enough for calling it many times. - * If all of the metadata space is used, we can commit - * transaction and use space it freed.
+ * We need to hold space in order to delete our orphan item once we've + * added it, so this takes the reservation so we can release it later + * when we are truly done with the orphan item. */ - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = calc_trans_metadata_size(root, 1); return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); } void btrfs_orphan_release_metadata(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; - u64 num_bytes = calc_trans_metadata_size(root, 4); + u64 num_bytes = calc_trans_metadata_size(root, 1); btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e47bdf0fb75a..bc12ba23db5f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6591,6 +6591,7 @@ out: static int btrfs_truncate(struct inode *inode) { struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_block_rsv *rsv; int ret; int err = 0; struct btrfs_trans_handle *trans; @@ -6604,28 +6605,83 @@ static int btrfs_truncate(struct inode *inode) btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); btrfs_ordered_update_i_size(inode, inode->i_size, NULL); - trans = btrfs_start_transaction(root, 5); - if (IS_ERR(trans)) - return PTR_ERR(trans); + /* + * Yes ladies and gentelment, this is indeed ugly. The fact is we have + * 3 things going on here + * + * 1) We need to reserve space for our orphan item and the space to + * delete our orphan item. Lord knows we don't want to have a dangling + * orphan item because we didn't reserve space to remove it. + * + * 2) We need to reserve space to update our inode. + * + * 3) We need to have something to cache all the space that is going to + * be free'd up by the truncate operation, but also have some slack + * space reserved in case it uses space during the truncate (thank you + * very much snapshotting). + * + * And we need these to all be seperate. The fact is we can use alot of + * space doing the truncate, and we have no earthly idea how much space + * we will use, so we need the truncate reservation to be seperate so it + * doesn't end up using space reserved for updating the inode or + * removing the orphan item. We also need to be able to stop the + * transaction and start a new one, which means we need to be able to + * update the inode several times, and we have no idea of knowing how + * many times that will be, so we can't just reserve 1 item for the + * entirety of the opration, so that has to be done seperately as well. + * Then there is the orphan item, which does indeed need to be held on + * to for the whole operation, and we need nobody to touch this reserved + * space except the orphan code. + * + * So that leaves us with + * + * 1) root->orphan_block_rsv - for the orphan deletion. + * 2) rsv - for the truncate reservation, which we will steal from the + * transaction reservation. + * 3) fs_info->trans_block_rsv - this will have 1 items worth left for + * updating the inode. + */ + rsv = btrfs_alloc_block_rsv(root); + if (!rsv) + return -ENOMEM; + btrfs_add_durable_block_rsv(root->fs_info, rsv); + + trans = btrfs_start_transaction(root, 4); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto out; + } btrfs_set_trans_block_group(trans, inode); + /* + * Reserve space for the truncate process. Truncate should be adding + * space, but if there are snapshots it may end up using space. 
+ */ + ret = btrfs_truncate_reserve_metadata(trans, root, rsv); + BUG_ON(ret); + ret = btrfs_orphan_add(trans, inode); if (ret) { btrfs_end_transaction(trans, root); - return ret; + goto out; } nr = trans->blocks_used; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root, nr); - /* Now start a transaction for the truncate */ - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return PTR_ERR(trans); + /* + * Ok so we've already migrated our bytes over for the truncate, so here + * just reserve the one slot we need for updating the inode. + */ + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto out; + } btrfs_set_trans_block_group(trans, inode); - trans->block_rsv = root->orphan_block_rsv; + trans->block_rsv = rsv; /* * setattr is responsible for setting the ordered_data_close flag, @@ -6649,24 +6705,18 @@ static int btrfs_truncate(struct inode *inode) while (1) { if (!trans) { - trans = btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, inode); - trans->block_rsv = root->orphan_block_rsv; - } + trans = btrfs_start_transaction(root, 3); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + goto out; + } - ret = btrfs_block_rsv_check(trans, root, - root->orphan_block_rsv, 0, 5); - if (ret == -EAGAIN) { - ret = btrfs_commit_transaction(trans, root); - if (ret) - return ret; - trans = NULL; - continue; - } else if (ret) { - err = ret; - break; + ret = btrfs_truncate_reserve_metadata(trans, root, + rsv); + BUG_ON(ret); + + btrfs_set_trans_block_group(trans, inode); + trans->block_rsv = rsv; } ret = btrfs_truncate_inode_items(trans, root, inode, @@ -6677,6 +6727,7 @@ static int btrfs_truncate(struct inode *inode) break; } + trans->block_rsv = &root->fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); if (ret) { err = ret; @@ -6690,6 +6741,7 @@ static int btrfs_truncate(struct inode *inode) } if (ret == 0 && inode->i_nlink > 0) { + trans->block_rsv = root->orphan_block_rsv; ret = btrfs_orphan_del(trans, inode); if (ret) err = ret; @@ -6701,15 +6753,20 @@ static int btrfs_truncate(struct inode *inode) ret = btrfs_orphan_del(NULL, inode); } + trans->block_rsv = &root->fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); if (ret && !err) err = ret; nr = trans->blocks_used; ret = btrfs_end_transaction_throttle(trans, root); + btrfs_btree_balance_dirty(root, nr); + +out: + btrfs_free_block_rsv(root, rsv); + if (ret && !err) err = ret; - btrfs_btree_balance_dirty(root, nr); return err; } -- cgit v1.2.3 From af60bed24eb0e3b6d93eaa6bb395a5721e6c09a8 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 4 May 2011 11:11:17 -0400 Subject: Btrfs: set range_start to the right start in count_range_bits In count_range_bits we are adjusting total_bytes based on the range we are searching for, but we don't adjust the range start according to the range we are searching for, which makes for weird results. For example, if the range [0-8192] is set DELALLOC, but I search for 4096-8192, I will get back 4096 for the number of bytes found, but the range_start will be 0, which makes it look like the range is [0-4096]. So instead set range_start = max(cur_start, state->start). This makes everything come out right. 
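For illustration only (not part of the patch, and the helper below is a hypothetical stand-in rather than a btrfs function): a minimal sketch of the clamping that the one-line fix performs, using the numbers from the example above.

/* Kernel-style sketch; u64 as in <linux/types.h>. The reported start is
 * clamped to the caller's search start instead of the extent state's start. */
static inline u64 reported_range_start(u64 cur_start, u64 state_start)
{
	return state_start > cur_start ? state_start : cur_start; /* max() */
}

/* With a delalloc state covering [0, 8192) and a search starting at 4096:
 * reported_range_start(4096, 0) == 4096, so the result reads as [4096, 8192)
 * rather than appearing to be [0, 4096). */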
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ba41da59e31b..b5f6f227a97c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1480,7 +1480,7 @@ u64 count_range_bits(struct extent_io_tree *tree, if (total_bytes >= max_bytes) break; if (!found) { - *start = state->start; + *start = max(cur_start, state->start); found = 1; } last = state->end; -- cgit v1.2.3 From cb25c2ea6a79702ab7895b873c6c43e0d3bc3c72 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 12:17:34 -0400 Subject: Btrfs: map the node block when looking for readahead targets If we have particularly full nodes, we could call btrfs_node_blockptr up to 32 times, which is 32 pairs of kmap/kunmap, which _sucks_. So go ahead and map the extent buffer while we look for readahead targets. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..009bcf7f1e4b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1229,6 +1229,7 @@ static void reada_for_search(struct btrfs_root *root, u64 search; u64 target; u64 nread = 0; + u64 gen; int direction = path->reada; struct extent_buffer *eb; u32 nr; @@ -1256,6 +1257,15 @@ static void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; while (1) { + if (!node->map_token) { + unsigned long offset = btrfs_node_key_ptr_offset(nr); + map_private_extent_buffer(node, offset, + sizeof(struct btrfs_key_ptr), + &node->map_token, + &node->kaddr, + &node->map_start, + &node->map_len, KM_USER1); + } if (direction < 0) { if (nr == 0) break; @@ -1273,14 +1283,23 @@ static void reada_for_search(struct btrfs_root *root, search = btrfs_node_blockptr(node, nr); if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { - readahead_tree_block(root, search, blocksize, - btrfs_node_ptr_generation(node, nr)); + gen = btrfs_node_ptr_generation(node, nr); + if (node->map_token) { + unmap_extent_buffer(node, node->map_token, + KM_USER1); + node->map_token = NULL; + } + readahead_tree_block(root, search, blocksize, gen); nread += blocksize; } nscan++; if ((nread > 65536 || nscan > 32)) break; } + if (node->map_token) { + unmap_extent_buffer(node, node->map_token, KM_USER1); + node->map_token = NULL; + } } /* -- cgit v1.2.3 From 7e2355ba1a11649f0b212a29fdb9f47476f1248e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 12:25:37 -0400 Subject: Btrfs: don't look at the extent buffer level 3 times in a row We have a bit of debugging in btrfs_search_slot to make sure the level of the cow block is the same as the original block we were cow'ing. I don't think I've ever seen this tripped, so kill it. This saves us 2 kmap's per level in our search. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 009bcf7f1e4b..f7a0a64b868f 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1672,9 +1672,6 @@ again: } cow_done: BUG_ON(!cow && ins_len); - if (level != btrfs_header_level(b)) - WARN_ON(1); - level = btrfs_header_level(b); p->nodes[level] = b; if (!p->skip_locking) -- cgit v1.2.3
From d82a6f1d7e8b61ed5996334d0db66651bb43641d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 15:26:06 -0400 Subject: Btrfs: kill BTRFS_I(inode)->block_group Originally this was going to be used as a way to give hints to the allocator, but frankly we can get much better hints elsewhere and it's not even used at all for anything useful. In addition to being completely useless, when we initialize an inode we try and find a freeish block group to set as the inode's block group, and with a completely full 40gb fs this takes _forever_, so I imagine with, say, a 1tb fs this is just unbearable. So just axe the thing altogether, we don't need it and it saves us 8 bytes in the inode and saves us 500 microseconds per inode lookup in my testcase. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/btrfs_inode.h | 3 -- fs/btrfs/ctree.h | 3 +- fs/btrfs/extent-tree.c | 10 ++---- fs/btrfs/inode.c | 87 ++++++-------------------------------------------- fs/btrfs/ioctl.c | 3 +- fs/btrfs/transaction.c | 1 - fs/btrfs/transaction.h | 14 -------- fs/btrfs/xattr.c | 2 -- 8 files changed, 13 insertions(+), 110 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..4bc852d3b83d 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -120,9 +120,6 @@ struct btrfs_inode { */ u64 index_cnt; - /* the start of block group preferred for allocations. */ - u64 block_group; - /* the fsync log has some corner cases that mean we have to check * directories to see if any unlinks have been done before * the directory was logged.
See tree-log.c for all the diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f31aed7fedd9..0f8c489bcc02 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2512,8 +2512,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc); int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, - u64 new_dirid, u64 alloc_hint); + struct btrfs_root *new_root, u64 new_dirid); int btrfs_merge_bio_hook(struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a2ca561c70f0..9f0a4e3bd8a9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5319,6 +5319,7 @@ checks: btrfs_add_free_space(block_group, offset, search_start - offset); BUG_ON(offset > search_start); + btrfs_put_block_group(block_group); break; loop: failed_cluster_refill = false; @@ -5411,14 +5412,7 @@ loop: ret = -ENOSPC; } else if (!ins->objectid) { ret = -ENOSPC; - } - - /* we found what we needed */ - if (ins->objectid) { - if (!(data & BTRFS_BLOCK_GROUP_DATA)) - trans->block_group = block_group->key.objectid; - - btrfs_put_block_group(block_group); + } else if (ins->objectid) { ret = 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bc12ba23db5f..dd5938a7de21 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -136,7 +136,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; - btrfs_set_trans_block_group(trans, inode); key.objectid = inode->i_ino; key.offset = start; @@ -422,7 +421,6 @@ again: if (start == 0) { trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; /* lets try to make an inline extent */ @@ -781,7 +779,6 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; num_bytes = (end - start + blocksize) & ~(blocksize - 1); @@ -1502,8 +1499,6 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, { struct btrfs_ordered_sum *sum; - btrfs_set_trans_block_group(trans, inode); - list_for_each_entry(sum, list, list) { btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root->fs_info->csum_root, sum); @@ -1722,7 +1717,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) else trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); @@ -1739,7 +1733,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) else trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) @@ -2495,7 +2488,6 @@ static void btrfs_read_locked_inode(struct inode *inode) struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key location; int maybe_acls; - u64 alloc_group_block; u32 rdev; int ret; @@ -2539,8 +2531,6 @@ static void btrfs_read_locked_inode(struct inode *inode) BTRFS_I(inode)->index_cnt = (u64)-1; 
BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); - alloc_group_block = btrfs_inode_block_group(leaf, inode_item); - /* * try to precache a NULL acl entry for files that don't have * any xattrs or acls @@ -2549,8 +2539,6 @@ static void btrfs_read_locked_inode(struct inode *inode) if (!maybe_acls) cache_no_acl(inode); - BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, - alloc_group_block, 0); btrfs_free_path(path); inode_item = NULL; @@ -2630,7 +2618,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_inode_transid(leaf, item, trans->transid); btrfs_set_inode_rdev(leaf, item, inode->i_rdev); btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); - btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); + btrfs_set_inode_block_group(leaf, item, 0); if (leaf->map_token) { unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); @@ -2971,8 +2959,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, @@ -3068,8 +3054,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { err = btrfs_unlink_subvol(trans, root, dir, BTRFS_I(inode)->location.objectid, @@ -3649,7 +3633,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) err = PTR_ERR(trans); break; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_drop_extents(trans, inode, cur_offset, cur_offset + hole_size, @@ -3785,7 +3768,6 @@ void btrfs_evict_inode(struct inode *inode) while (1) { trans = btrfs_start_transaction(root, 0); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = root->orphan_block_rsv; ret = btrfs_block_rsv_check(trans, root, @@ -4383,7 +4365,6 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); else @@ -4409,7 +4390,6 @@ void btrfs_dirty_inode(struct inode *inode) trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); - btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret && ret == -ENOSPC) { @@ -4424,7 +4404,6 @@ void btrfs_dirty_inode(struct inode *inode) } return; } - btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); if (ret) { @@ -4519,8 +4498,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *dir, const char *name, int name_len, - u64 ref_objectid, u64 objectid, - u64 alloc_hint, int mode, u64 *index) + u64 ref_objectid, u64 objectid, int mode, + u64 *index) { struct inode *inode; struct btrfs_inode_item *inode_item; @@ -4567,8 +4546,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, owner = 0; else owner = 1; - BTRFS_I(inode)->block_group = - btrfs_find_block_group(root, 0, alloc_hint, owner); key[0].objectid = objectid; btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); @@ -4729,11 +4706,9 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, 
dir); - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, mode, &index); + mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -4745,7 +4720,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, goto out_unlock; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -4754,8 +4728,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, init_special_inode(inode, inode->i_mode, rdev); btrfs_update_inode(trans, root, inode); } - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); @@ -4791,11 +4763,9 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, mode, &index); + mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -4807,7 +4777,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, goto out_unlock; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -4818,8 +4787,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_file_inode_operations; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); out_unlock: nr = trans->blocks_used; btrfs_end_transaction_throttle(trans, root); @@ -4866,8 +4833,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, btrfs_inc_nlink(inode); inode->i_ctime = CURRENT_TIME; - - btrfs_set_trans_block_group(trans, dir); ihold(inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); @@ -4876,7 +4841,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, drop_inode = 1; } else { struct dentry *parent = dget_parent(dentry); - btrfs_update_inode_block_group(trans, dir); err = btrfs_update_inode(trans, root, inode); BUG_ON(err); btrfs_log_new_name(trans, inode, NULL, parent); @@ -4917,12 +4881,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) trans = btrfs_start_transaction(root, 5); if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, S_IFDIR | mode, - &index); + S_IFDIR | mode, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_fail; @@ -4936,7 +4898,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) inode->i_op = &btrfs_dir_inode_operations; inode->i_fop = &btrfs_dir_file_operations; - btrfs_set_trans_block_group(trans, inode); btrfs_i_size_write(inode, 0); err = btrfs_update_inode(trans, root, inode); @@ -4950,8 +4911,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) d_instantiate(dentry, inode); drop_on_err = 0; - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); out_fail: nr = trans->blocks_used; @@ -6652,8 +6611,6 @@ static int btrfs_truncate(struct inode *inode) goto out; } - 
btrfs_set_trans_block_group(trans, inode); - /* * Reserve space for the truncate process. Truncate should be adding * space, but if there are snapshots it may end up using space. @@ -6680,7 +6637,6 @@ static int btrfs_truncate(struct inode *inode) err = PTR_ERR(trans); goto out; } - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = rsv; /* @@ -6715,7 +6671,6 @@ static int btrfs_truncate(struct inode *inode) rsv); BUG_ON(ret); - btrfs_set_trans_block_group(trans, inode); trans->block_rsv = rsv; } @@ -6775,15 +6730,14 @@ out: * create a new subvolume directory/inode (helper for the ioctl). */ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, - struct btrfs_root *new_root, - u64 new_dirid, u64 alloc_hint) + struct btrfs_root *new_root, u64 new_dirid) { struct inode *inode; int err; u64 index = 0; inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, - new_dirid, alloc_hint, S_IFDIR | 0700, &index); + new_dirid, S_IFDIR | 0700, &index); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &btrfs_dir_inode_operations; @@ -6893,21 +6847,6 @@ void btrfs_destroy_inode(struct inode *inode) spin_unlock(&root->fs_info->ordered_extent_lock); } - if (root == root->fs_info->tree_root) { - struct btrfs_block_group_cache *block_group; - - block_group = btrfs_lookup_block_group(root->fs_info, - BTRFS_I(inode)->block_group); - if (block_group && block_group->inode == inode) { - spin_lock(&block_group->lock); - block_group->inode = NULL; - spin_unlock(&block_group->lock); - btrfs_put_block_group(block_group); - } else if (block_group) { - btrfs_put_block_group(block_group); - } - } - spin_lock(&root->orphan_lock); if (!list_empty(&BTRFS_I(inode)->i_orphan)) { printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", @@ -7091,8 +7030,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, goto out_notrans; } - btrfs_set_trans_block_group(trans, new_dir); - if (dest != root) btrfs_record_root_in_trans(trans, dest); @@ -7331,12 +7268,9 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, dir); - inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, dentry->d_name.len, dir->i_ino, objectid, - BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, - &index); + S_IFLNK|S_IRWXUGO, &index); if (IS_ERR(inode)) { err = PTR_ERR(inode); goto out_unlock; @@ -7348,7 +7282,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, goto out_unlock; } - btrfs_set_trans_block_group(trans, inode); err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); if (err) drop_inode = 1; @@ -7359,8 +7292,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_file_inode_operations; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; } - btrfs_update_inode_block_group(trans, inode); - btrfs_update_inode_block_group(trans, dir); if (drop_inode) goto out_unlock; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a578620e06a8..8e90ccf4b76a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -413,8 +413,7 @@ static noinline int create_subvol(struct btrfs_root *root, btrfs_record_root_in_trans(trans, new_root); - ret = btrfs_create_subvol_root(trans, new_root, new_dirid, - BTRFS_I(dir)->block_group); + ret = btrfs_create_subvol_root(trans, new_root, new_dirid); /* * insert the directory item */ diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 43816f8b23e7..f4ea695325b2 100644 --- 
a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -241,7 +241,6 @@ again: h->transid = cur_trans->transid; h->transaction = cur_trans; h->blocks_used = 0; - h->block_group = 0; h->bytes_reserved = 0; h->delayed_ref_updates = 0; h->use_count = 1; diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 11c6efcd4ed2..da7289e06a82 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -47,7 +47,6 @@ struct btrfs_transaction { struct btrfs_trans_handle { u64 transid; - u64 block_group; u64 bytes_reserved; unsigned long use_count; unsigned long blocks_reserved; @@ -70,19 +69,6 @@ struct btrfs_pending_snapshot { struct list_head list; }; -static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans, - struct inode *inode) -{ - trans->block_group = BTRFS_I(inode)->block_group; -} - -static inline void btrfs_update_inode_block_group( - struct btrfs_trans_handle *trans, - struct inode *inode) -{ - BTRFS_I(inode)->block_group = trans->block_group; -} - static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, struct inode *inode) { diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index cfd660550ded..72ab0295ca74 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -158,8 +158,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans, if (IS_ERR(trans)) return PTR_ERR(trans); - btrfs_set_trans_block_group(trans, inode); - ret = do_setxattr(trans, inode, name, value, size, flags); if (ret) goto out; -- cgit v1.2.3 From 589d8ade83f07c0f11c8191c0ca309f34d7a2c14 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 May 2011 17:30:53 -0400 Subject: Btrfs: try not to sleep as much when doing slow caching When the fs is super full and we unmount the fs, we could get stuck in this thing where unmount is waiting for the caching kthread to make progress and the caching kthread keeps scheduling because we're in the middle of a commit. So instead just let the caching kthread keep going and only yeild if need_resched(). This makes my horrible umount case go from taking up to 10 minutes to taking less than 20 seconds. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9f0a4e3bd8a9..96be62450318 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -378,15 +378,18 @@ again: if (ret) break; - caching_ctl->progress = last; - btrfs_release_path(extent_root, path); - up_read(&fs_info->extent_commit_sem); - mutex_unlock(&caching_ctl->mutex); - if (btrfs_transaction_in_commit(fs_info)) - schedule_timeout(1); - else + if (need_resched() || + btrfs_next_leaf(extent_root, path)) { + caching_ctl->progress = last; + btrfs_release_path(extent_root, path); + up_read(&fs_info->extent_commit_sem); + mutex_unlock(&caching_ctl->mutex); cond_resched(); - goto again; + goto again; + } + leaf = path->nodes[0]; + nritems = btrfs_header_nritems(leaf); + continue; } if (key.objectid < block_group->key.objectid) { -- cgit v1.2.3 From 026fd317828500524cdc7e5ff9e8e7923abb2868 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 13 May 2011 10:32:11 -0400 Subject: Btrfs: don't always do readahead Our readahead is sort of sloppy, and really isn't always needed. 
For example if ls is doing a stating ls (which is the default) it's going to stat in non-disk order, so if say you have a directory with a stupid amount of files, readahead is going to do nothing but waste time in the case of doing the stat. Taking the unconditional readahead out made my test go from 57 minutes to 36 minutes. This means that everywhere we do loop through the tree we want to make sure we do set path->reada properly, so I went through and found all of the places where we loop through the path and set reada to 1. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 2 -- fs/btrfs/extent-tree.c | 3 ++- fs/btrfs/inode.c | 14 ++++++++++++-- fs/btrfs/relocation.c | 6 ++++++ 4 files changed, 20 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f7a0a64b868f..f61c16c1481a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -48,8 +48,6 @@ struct btrfs_path *btrfs_alloc_path(void) { struct btrfs_path *path; path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); - if (path) - path->reada = 1; return path; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 96be62450318..1ba2cc58eab5 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -347,7 +347,7 @@ static int caching_kthread(void *data) */ path->skip_locking = 1; path->search_commit_root = 1; - path->reada = 2; + path->reada = 1; key.objectid = last; key.offset = 0; @@ -8556,6 +8556,7 @@ int btrfs_read_block_groups(struct btrfs_root *root) path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); if (cache_gen != 0 && diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index dd5938a7de21..6228a304b547 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4242,7 +4242,9 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, filp->f_pos = 2; } path = btrfs_alloc_path(); - path->reada = 2; + if (!path) + return -ENOMEM; + path->reada = 1; btrfs_set_key_type(&key, key_type); key.offset = filp->f_pos; @@ -5043,7 +5045,15 @@ again: if (!path) { path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) { + err = -ENOMEM; + goto out; + } + /* + * Chances are we'll be called again, so go ahead and do + * readahead + */ + path->reada = 1; } ret = btrfs_lookup_file_extent(trans, root, path, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 09c30d37d43e..5872b41581f4 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -676,6 +676,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc, err = -ENOMEM; goto out; } + path1->reada = 1; + path2->reada = 2; node = alloc_backref_node(cache); if (!node) { @@ -1996,6 +1998,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; reloc_root = root->reloc_root; root_item = &reloc_root->root_item; @@ -3297,6 +3300,7 @@ static int find_data_references(struct reloc_control *rc, path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; root = read_fs_root(rc->extent_root->fs_info, ref_root); if (IS_ERR(root)) { @@ -3665,6 +3669,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = 1; ret = prepare_to_relocate(rc); if (ret) { @@ -4090,6 +4095,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) path = btrfs_alloc_path(); if (!path) return -ENOMEM; + path->reada = -1; 
key.objectid = BTRFS_TREE_RELOC_OBJECTID; key.type = BTRFS_ROOT_ITEM_KEY; -- cgit v1.2.3 From cca1c81f43e26ab60c0d1090fb90992358d69bdf Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 13 May 2011 11:07:12 -0400 Subject: Btrfs: don't try to allocate from a block group that doesn't have enough space If we have a very large filesystem, we can spend a lot of time in find_free_extent just trying to allocate from empty block groups. So instead check to see if the block group even has enough space for the allocation, and if not go on to the next block group. Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1ba2cc58eab5..c8c318494dee 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5159,6 +5159,14 @@ have_block_group: if (unlikely(block_group->ro)) goto loop; + spin_lock(&block_group->tree_lock); + if (cached && + block_group->free_space < num_bytes + empty_size) { + spin_unlock(&block_group->tree_lock); + goto loop; + } + spin_unlock(&block_group->tree_lock); + /* * Ok we want to try and use the cluster allocator, so lets look * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will -- cgit v1.2.3 From 207dde8289d9b005b665cb9d8d2bb9464256101d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 13 May 2011 14:49:23 -0400 Subject: Btrfs: check for duplicate entries in the free space cache If there are duplicate entries in the free space cache, discard the entire cache and load it the old fashioned way. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 63731a1fb0a1..d634a7e42207 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -420,7 +420,14 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, spin_lock(&block_group->tree_lock); ret = link_free_space(block_group, e); spin_unlock(&block_group->tree_lock); - BUG_ON(ret); + if (ret) { + printk(KERN_ERR "Duplicate entries in " + "free space cache, dumping\n"); + kunmap(page); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } } else { e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); if (!e->bitmap) { @@ -437,6 +444,14 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, recalculate_thresholds(block_group); spin_unlock(&block_group->tree_lock); list_add_tail(&e->list, &bitmaps); + if (ret) { + printk(KERN_ERR "Duplicate entries in " + "free space cache, dumping\n"); + kunmap(page); + unlock_page(page); + page_cache_release(page); + goto free_cache; + } } num_entries--; @@ -909,10 +924,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, * logically. */ if (bitmap) { - WARN_ON(info->bitmap); + if (info->bitmap) { + WARN_ON_ONCE(1); + return -EEXIST; + } p = &(*p)->rb_right; } else { - WARN_ON(!info->bitmap); + if (!info->bitmap) { + WARN_ON_ONCE(1); + return -EEXIST; + } p = &(*p)->rb_left; } } -- cgit v1.2.3 From d90c732122a1f6d0efe388a8a204f67f144b2eb3 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 17 May 2011 09:50:54 -0400 Subject: Btrfs: leave spinning on lookup and map the leaf On lookup we only want to read the inode item, so leave the path spinning. Also we're just wholesale reading the leaf off, so map the leaf so we don't do a bunch of kmap/kunmaps. 
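The pattern being applied is simply to batch the mapping: map the region once, read every field, then unmap once, instead of paying a map/unmap pair for each field. A toy user-space sketch of that idea (map_item() and unmap_item() below are stand-ins, not the real extent-buffer API):

	#include <stdio.h>

	struct inode_item { int mode; int nlink; long size; };

	static int map_calls;

	/* Stand-ins for the mapping helpers; in the kernel a mapping is a
	 * kmap()-style operation and is comparatively expensive. */
	static const struct inode_item *map_item(const struct inode_item *it)
	{
		map_calls++;
		return it;
	}

	static void unmap_item(const struct inode_item *it)
	{
		(void)it;
	}

	int main(void)
	{
		struct inode_item on_disk = { 0644, 1, 4096 };
		const struct inode_item *p;

		/* Batched pattern: one map, read every field, one unmap. */
		p = map_item(&on_disk);
		printf("mode=%o nlink=%d size=%ld\n", p->mode, p->nlink, p->size);
		unmap_item(p);

		printf("map calls: %d (one per field without batching)\n", map_calls);
		return 0;
	}
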
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6228a304b547..dc8fb2b3a145 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2493,6 +2493,7 @@ static void btrfs_read_locked_inode(struct inode *inode) path = btrfs_alloc_path(); BUG_ON(!path); + path->leave_spinning = 1; memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); ret = btrfs_lookup_inode(NULL, root, path, &location, 0); @@ -2502,6 +2503,12 @@ static void btrfs_read_locked_inode(struct inode *inode) leaf = path->nodes[0]; inode_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); + if (!leaf->map_token) + map_private_extent_buffer(leaf, (unsigned long)inode_item, + sizeof(struct btrfs_inode_item), + &leaf->map_token, &leaf->kaddr, + &leaf->map_start, &leaf->map_len, + KM_USER1); inode->i_mode = btrfs_inode_mode(leaf, inode_item); inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); @@ -2539,6 +2546,11 @@ static void btrfs_read_locked_inode(struct inode *inode) if (!maybe_acls) cache_no_acl(inode); + if (leaf->map_token) { + unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); + leaf->map_token = NULL; + } + btrfs_free_path(path); inode_item = NULL; -- cgit v1.2.3 From f90e5b5b136ede1f0fd15999e95f13124d6b0dbd Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Tue, 24 May 2011 10:44:42 -0400 Subject: GFS2: Processes waiting on inode glock that no processes are holding This patch fixes a race in the GFS2 glock state machine that may result in lockups. The symptom is that all nodes but one will hang, waiting for a particular glock. All the holder records will have the "W" (Waiting) bit set. The other node will typically have the glock stuck in Exclusive mode (EX) with no holder records, but the dinode will be cached. In other words, an entry with "I:" will appear in the glock dump for that glock, but nothing else. The race has to do with the glock "Pending Demote" bit, which can be set, then immediately reset, thus losing the fact that another node needs the glock. The sequence of events is: 1. Something schedules the glock workqueue (e.g. glock request from fs) 2. The glock workqueue gets to the point between the test of the reply pending bit and the spin lock: if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { finish_xmote(gl, gl->gl_reply); drop_ref = 1; } down_read(&gfs2_umount_flush_sem); <---- i.e. here spin_lock(&gl->gl_spin); 3. In comes (a) the reply to our EX lock request setting GLF_REPLY_PENDING and (b) the demote request which sets GLF_PENDING_DEMOTE 4. The following test is executed: if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && gl->gl_state != LM_ST_UNLOCKED && gl->gl_demote_state != LM_ST_EXCLUSIVE) { This resets the pending demote flag, and gl->gl_demote_state is not equal to exclusive, however because the reply from the dlm arrived after we checked for the GLF_REPLY_PENDING flag, gl->gl_state is still equal to unlocked, so although we reset the GLF_PENDING_DEMOTE flag, we didn't then set the GLF_DEMOTE flag or reinstate the GLF_PENDING_DEMOTE_FLAG. The patch closes the timing window by only transitioning the "Pending demote" bit to the "demote" flag once we know the other conditions (not unlocked and not exclusive) are met. 
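Reduced to a toy single-threaded sketch (plain C, purely illustrative, not the glock code), the difference between the racy and the fixed handling is whether the flag is consumed before the other conditions are known:

	#include <stdbool.h>
	#include <stdio.h>

	static bool pending_demote, demote;

	/* Racy shape: the flag is consumed even when we cannot act on it yet,
	 * so the demote request is silently lost. */
	static void handle_racy(bool conditions_met)
	{
		bool was_pending = pending_demote;

		pending_demote = false;		/* test_and_clear */
		if (was_pending && conditions_met)
			demote = true;
	}

	/* Fixed shape: only transition pending -> demote once the other
	 * conditions are known to hold. */
	static void handle_fixed(bool conditions_met)
	{
		if (pending_demote && conditions_met) {
			pending_demote = false;
			demote = true;
		}
	}

	int main(void)
	{
		pending_demote = true;
		handle_racy(false);
		printf("racy:  pending=%d demote=%d (request lost)\n",
		       pending_demote, demote);

		pending_demote = true;
		demote = false;
		handle_fixed(false);
		printf("fixed: pending=%d demote=%d (request preserved)\n",
		       pending_demote, demote);
		return 0;
	}
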
Signed-off-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index a2a6abbccc07..7137750f17f0 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -663,14 +663,19 @@ static void glock_work_func(struct work_struct *work) drop_ref = 1; } spin_lock(&gl->gl_spin); - if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && + if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && gl->gl_state != LM_ST_UNLOCKED && gl->gl_demote_state != LM_ST_EXCLUSIVE) { unsigned long holdtime, now = jiffies; + holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; if (time_before(now, holdtime)) delay = holdtime - now; - set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags); + + if (!delay) { + clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); + set_bit(GLF_DEMOTE, &gl->gl_flags); + } } run_queue(gl, 0); spin_unlock(&gl->gl_spin); -- cgit v1.2.3 From 6df87e65dbe4528ef07b917af89913abb8caaaba Mon Sep 17 00:00:00 2001 From: Andreas Gruenbacher Date: Sat, 28 May 2011 14:44:46 +0200 Subject: block: improve the bio_add_page() and bio_add_pc_page() descriptions The descriptions of bio_add_page() and bio_add_pc_page() are slightly inconsistent; improve them. Signed-off-by: Andreas Gruenbacher Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Jens Axboe --- fs/bio.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/bio.c b/fs/bio.c index 840a0d755248..9bfade8a609b 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -638,10 +638,11 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page * @offset: vec entry offset * * Attempt to add a page to the bio_vec maplist. This can fail for a - * number of reasons, such as the bio being full or target block - * device limitations. The target block device must allow bio's - * smaller than PAGE_SIZE, so it is always possible to add a single - * page to an empty bio. This should only be used by REQ_PC bios. + * number of reasons, such as the bio being full or target block device + * limitations. The target block device must allow bio's up to PAGE_SIZE, + * so it is always possible to add a single page to an empty bio. + * + * This should only be used by REQ_PC bios. */ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, unsigned int len, unsigned int offset) @@ -659,10 +660,9 @@ EXPORT_SYMBOL(bio_add_pc_page); * @offset: vec entry offset * * Attempt to add a page to the bio_vec maplist. This can fail for a - * number of reasons, such as the bio being full or target block - * device limitations. The target block device must allow bio's - * smaller than PAGE_SIZE, so it is always possible to add a single - * page to an empty bio. + * number of reasons, such as the bio being full or target block device + * limitations. The target block device must allow bio's up to PAGE_SIZE, + * so it is always possible to add a single page to an empty bio. */ int bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset) -- cgit v1.2.3 From a1706ac4c0201ea0143dc0db0659001b26ceeabb Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 30 May 2011 07:42:51 +0200 Subject: Revert "block: Remove extra discard_alignment from hd_struct." It was not a good idea to start dereferencing disk->queue from the fs sysfs strategy for displaying discard alignment. 
We ran into first a NULL pointer deref, and after fixing that we sometimes see unvalid disk->queue pointer values. Since discard is the only one of the bunch actually looking into the queue, just revert the change. This reverts commit 23ceb5b7719e9276d4fa72a3ecf94dd396755276. Conflicts: fs/partitions/check.c --- fs/partitions/check.c | 10 +++------- include/linux/genhd.h | 1 + 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/partitions/check.c b/fs/partitions/check.c index f82e762eeca2..d545e97d99c3 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -255,13 +255,7 @@ ssize_t part_discard_alignment_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hd_struct *p = dev_to_part(dev); - struct gendisk *disk = dev_to_disk(dev); - unsigned int alignment = 0; - - if (disk->queue) - alignment = queue_limit_discard_alignment(&disk->queue->limits, - p->start_sect); - return sprintf(buf, "%u\n", alignment); + return sprintf(buf, "%u\n", p->discard_alignment); } ssize_t part_stat_show(struct device *dev, @@ -455,6 +449,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, p->start_sect = start; p->alignment_offset = queue_limit_alignment_offset(&disk->queue->limits, start); + p->discard_alignment = + queue_limit_discard_alignment(&disk->queue->limits, start); p->nr_sects = len; p->partno = partno; p->policy = get_disk_ro(disk); diff --git a/include/linux/genhd.h b/include/linux/genhd.h index b78956b3c2e7..300d7582006e 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -100,6 +100,7 @@ struct hd_struct { sector_t start_sect; sector_t nr_sects; sector_t alignment_offset; + unsigned int discard_alignment; struct device __dev; struct kobject *holder_dir; int policy, partno; -- cgit v1.2.3 From 1adffbae22332bb558c2a29de19d9aca391869f6 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Tue, 31 May 2011 19:38:07 +0900 Subject: fat: Fix corrupt inode flags when remove ATTR_SYS flag We are clearly missing '~' in fat_ioctl_set_attributes(). Cc: Reported-by: Dmitry Dmitriev Signed-off-by: OGAWA Hirofumi --- fs/fat/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fat/file.c b/fs/fat/file.c index 7257752b6d5d..7018e1d8902d 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -102,7 +102,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr) if (attr & ATTR_SYS) inode->i_flags |= S_IMMUTABLE; else - inode->i_flags &= S_IMMUTABLE; + inode->i_flags &= ~S_IMMUTABLE; } fat_save_attrs(inode, attr); -- cgit v1.2.3 From fe47ae7f53e179d2ef6771024feb000cbb86640f Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Tue, 31 May 2011 12:35:41 +0200 Subject: oprofile, dcookies: Fix possible circular locking dependency The lockdep warning below detects a possible A->B/B->A locking dependency of mm->mmap_sem and dcookie_mutex. The order in sync_buffer() is mm->mmap_sem/dcookie_mutex, while in sys_lookup_dcookie() it is vice versa. Fixing it in sys_lookup_dcookie() by unlocking dcookie_mutex before copy_to_user(). oprofiled/4432 is trying to acquire lock: (&mm->mmap_sem){++++++}, at: [] might_fault+0x53/0xa3 but task is already holding lock: (dcookie_mutex){+.+.+.}, at: [] sys_lookup_dcookie+0x45/0x149 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (dcookie_mutex){+.+.+.}: [] lock_acquire+0xf8/0x11e [] mutex_lock_nested+0x63/0x309 [] get_dcookie+0x30/0x144 [] sync_buffer+0x196/0x3ec [oprofile] [] task_exit_notify+0x16/0x1a [oprofile] [] notifier_call_chain+0x37/0x63 [] __blocking_notifier_call_chain+0x50/0x67 [] blocking_notifier_call_chain+0x14/0x16 [] profile_task_exit+0x1a/0x1c [] do_exit+0x2a/0x6fc [] do_group_exit+0x83/0xae [] sys_exit_group+0x17/0x1b [] system_call_fastpath+0x16/0x1b -> #0 (&mm->mmap_sem){++++++}: [] __lock_acquire+0x1085/0x1711 [] lock_acquire+0xf8/0x11e [] might_fault+0x80/0xa3 [] sys_lookup_dcookie+0x104/0x149 [] system_call_fastpath+0x16/0x1b other info that might help us debug this: 1 lock held by oprofiled/4432: #0: (dcookie_mutex){+.+.+.}, at: [] sys_lookup_dcookie+0x45/0x149 stack backtrace: Pid: 4432, comm: oprofiled Not tainted 2.6.39-00008-ge5a450d #9 Call Trace: [] print_circular_bug+0xae/0xbc [] __lock_acquire+0x1085/0x1711 [] ? get_parent_ip+0x11/0x42 [] ? might_fault+0x53/0xa3 [] lock_acquire+0xf8/0x11e [] ? might_fault+0x53/0xa3 [] ? path_put+0x22/0x27 [] might_fault+0x80/0xa3 [] ? might_fault+0x53/0xa3 [] sys_lookup_dcookie+0x104/0x149 [] system_call_fastpath+0x16/0x1b References: https://bugzilla.kernel.org/show_bug.cgi?id=13809 Cc: # .27+ Signed-off-by: Robert Richter --- fs/dcookies.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/dcookies.c b/fs/dcookies.c index a21cabdbd87b..dda0dc702d1b 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c @@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len) /* FIXME: (deleted) ? */ path = d_path(&dcs->path, kbuf, PAGE_SIZE); + mutex_unlock(&dcookie_mutex); + if (IS_ERR(path)) { err = PTR_ERR(path); goto out_free; @@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len) out_free: kfree(kbuf); + return err; out: mutex_unlock(&dcookie_mutex); return err; -- cgit v1.2.3 From 4c49ff3fe128ca68dabd07537415c419ad7f82f9 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 1 Jun 2011 08:27:41 +0200 Subject: block: blkdev_get() should access ->bd_disk only after success d4dc210f69 (block: don't block events on excl write for non-optical devices) added dereferencing of bdev->bd_disk to test GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; however, bdev->bd_disk can be %NULL if open failed which can lead to an oops. Test the flag after testing open was successful, not before. Signed-off-by: Tejun Heo Reported-by: David Miller Tested-by: David Miller Cc: stable@kernel.org Signed-off-by: Jens Axboe --- fs/block_dev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 1f2b19978333..1a2421f908f0 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) * individual writeable reference is too fragile given the * way @mode is used in blkdev_get/put(). 
*/ - if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) && - !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { + if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder && + (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { bdev->bd_write_holder = true; disk_block_events(disk); } -- cgit v1.2.3 From ab75950b11e74145ffe61376ac073d56645aab8a Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 26 May 2011 06:51:48 +0300 Subject: UBIFS: supress false error messages Commit ab51afe05273741f72383529ef488aa1ea598ec6 was a good clean-up, but it introduced a regression - now UBIFS prints scary error messages during recovery on all corrupted nodes, even though the corruptions are expected (due to a power cut). This patch fixes the issue. Additionally fix a typo in a commentary introduced by the same commit. Signed-off-by: Artem Bityutskiy --- fs/ubifs/recovery.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 731d9e2e7b50..95e24183b710 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -635,7 +635,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, * Scan quietly until there is an error from which we cannot * recover */ - ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0); + ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1); if (ret == SCANNED_A_NODE) { /* A valid node, and not a padding node */ struct ubifs_ch *ch = buf; @@ -701,7 +701,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, * While we are in the middle of the same min. I/O unit keep dropping * nodes. So basically, what we want is to make sure that the last min. * I/O unit where we saw the corruption is dropped completely with all - * the uncorrupted node which may possibly sit there. + * the uncorrupted nodes which may possibly sit there. * * In other words, let's name the min. I/O unit where the corruption * starts B, and the previous min. I/O unit A. The below code tries to -- cgit v1.2.3 From 1a0b06997ceca96db9259e537eb935f9fe59a3de Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 26 May 2011 08:26:05 +0300 Subject: UBIFS: introduce a "grouped" journal head flag Journal heads are different in a way how UBIFS writes nodes there. All normal journal heads receive grouped nodes, while the GC journal heads receives ungrouped nodes. This patch adds a 'grouped' flag to 'struct ubifs_jhead' which describes this property. This patch is a preparation to a further recovery fix. Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 5 ++++- fs/ubifs/ubifs.h | 2 ++ 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 1ab0d22e4c94..1e40db740da9 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c) c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; c->jheads[i].wbuf.jhead = i; + c->jheads[i].grouped = 1; } c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM; /* * Garbage Collector head likely contains long-term data and - * does not need to be synchronized by timer. + * does not need to be synchronized by timer. Also GC head nodes are + * not grouped. 
*/ c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM; c->jheads[GCHD].wbuf.no_timer = 1; + c->jheads[GCHD].grouped = 0; return 0; } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index a70d7b4ffb25..adeca14c0e9f 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -722,12 +722,14 @@ struct ubifs_bud { * struct ubifs_jhead - journal head. * @wbuf: head's write-buffer * @buds_list: list of bud LEBs belonging to this journal head + * @grouped: non-zero if UBIFS groups nodes when writing to this journal head * * Note, the @buds list is protected by the @c->buds_lock. */ struct ubifs_jhead { struct ubifs_wbuf wbuf; struct list_head buds_list; + unsigned int grouped:1; }; /** -- cgit v1.2.3 From efcfde54ca68091b164f9aec544c7233a9760aff Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 26 May 2011 08:36:52 +0300 Subject: UBIFS: amend ubifs_recover_leb interface Instead of passing "grouped" parameter to 'ubifs_recover_leb()' which tells whether the nodes are grouped in the LEB to recover, pass the journal head number and let 'ubifs_recover_leb()' look at the journal head's 'grouped' flag. This patch is a preparation to a further fix where we'll need to know the journal head number for other purposes. Signed-off-by: Artem Bityutskiy --- fs/ubifs/orphan.c | 2 +- fs/ubifs/recovery.c | 10 ++++++---- fs/ubifs/replay.c | 3 +-- fs/ubifs/ubifs.h | 2 +- 4 files changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index bd644bf587a8..a5422fffbd69 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c) if (IS_ERR(sleb)) { if (PTR_ERR(sleb) == -EUCLEAN) sleb = ubifs_recover_leb(c, lnum, 0, - c->sbuf, 0); + c->sbuf, -1); if (IS_ERR(sleb)) { err = PTR_ERR(sleb); break; diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 95e24183b710..6adb5328a016 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -604,7 +604,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped) * @lnum: LEB number * @offs: offset * @sbuf: LEB-sized buffer to use - * @grouped: nodes may be grouped for recovery + * @jhead: journal head number this LEB belongs to (%-1 if the LEB does not + * belong to any journal head) * * This function does a scan of a LEB, but caters for errors that might have * been caused by the unclean unmount from which we are attempting to recover. @@ -612,13 +613,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped) * found, and a negative error code in case of failure. */ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, - int offs, void *sbuf, int grouped) + int offs, void *sbuf, int jhead) { int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit; + int grouped = jhead == -1 ? 
0 : c->jheads[jhead].grouped; struct ubifs_scan_leb *sleb; void *buf = sbuf + offs; - dbg_rcvry("%d:%d", lnum, offs); + dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped); sleb = ubifs_start_scan(c, lnum, offs, sbuf); if (IS_ERR(sleb)) @@ -881,7 +883,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, } ubifs_scan_destroy(sleb); } - return ubifs_recover_leb(c, lnum, offs, sbuf, 0); + return ubifs_recover_leb(c, lnum, offs, sbuf, -1); } /** diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 6617280d1679..5e97161ce4d3 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b) * these LEBs could possibly be written to at the power cut * time. */ - sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, - b->bud->jhead != GCHD); + sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead); else sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0); if (IS_ERR(sleb)) diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index adeca14c0e9f..f79983d6f860 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1744,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum); int ubifs_recover_master_node(struct ubifs_info *c); int ubifs_write_rcvrd_mst_node(struct ubifs_info *c); struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, - int offs, void *sbuf, int grouped); + int offs, void *sbuf, int jhead); struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf); int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf); -- cgit v1.2.3 From da8b94ea61c5d80aae0cc7b7541f1e0fa7459391 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 26 May 2011 08:58:19 +0300 Subject: UBIFS: fix recovery broken by the previous recovery fix Unfortunately, the recovery fix d1606a59b6be4ea392eabd40d1250aa1eeb19efb (UBIFS: fix extremely rare mount failure) broke recovery. This commit make UBIFS drop the last min. I/O unit in all journal heads, but this is needed only for the GC head. And this does not work for non-GC heads. For example, if suppose we have min. I/O units A and B, and A contains a valid node X, which was fsynced, and then a group of nodes Y which spans the rest of A and B. In this case we'll drop not only Y, but also X, which is obviously incorrect. This patch fixes the issue and additionally makes recovery to drop last min. I/O unit only for the GC head, and leave things as they have been for ages for the other heads - this is safer. Signed-off-by: Artem Bityutskiy --- fs/ubifs/recovery.c | 152 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 87 insertions(+), 65 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 6adb5328a016..783d8e0beb76 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, } /** - * drop_last_node - drop the last node or group of nodes. + * drop_last_group - drop the last group of nodes. * @sleb: scanned LEB information * @offs: offset of dropped nodes is returned here - * @grouped: non-zero if whole group of nodes have to be dropped * * This is a helper function for 'ubifs_recover_leb()' which drops the last - * node of the scanned LEB or the last group of nodes if @grouped is not zero. - * This function returns %1 if a node was dropped and %0 otherwise. + * group of nodes of the scanned LEB. 
*/ -static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped) +static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs) { - int dropped = 0; - while (!list_empty(&sleb->nodes)) { struct ubifs_scan_node *snod; struct ubifs_ch *ch; @@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped) list); ch = snod->node; if (ch->group_type != UBIFS_IN_NODE_GROUP) - return dropped; - dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs); + break; + + dbg_rcvry("dropping grouped node at %d:%d", + sleb->lnum, snod->offs); + *offs = snod->offs; + list_del(&snod->list); + kfree(snod); + sleb->nodes_cnt -= 1; + } +} + +/** + * drop_last_node - drop the last node. + * @sleb: scanned LEB information + * @offs: offset of dropped nodes is returned here + * @grouped: non-zero if whole group of nodes have to be dropped + * + * This is a helper function for 'ubifs_recover_leb()' which drops the last + * node of the scanned LEB. + */ +static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs) +{ + struct ubifs_scan_node *snod; + + if (!list_empty(&sleb->nodes)) { + snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, + list); + + dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs); *offs = snod->offs; list_del(&snod->list); kfree(snod); sleb->nodes_cnt -= 1; - dropped = 1; - if (!grouped) - break; } - return dropped; } /** @@ -697,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, * If nodes are grouped, always drop the incomplete group at * the end. */ - drop_last_node(sleb, &offs, 1); + drop_last_group(sleb, &offs); - /* - * While we are in the middle of the same min. I/O unit keep dropping - * nodes. So basically, what we want is to make sure that the last min. - * I/O unit where we saw the corruption is dropped completely with all - * the uncorrupted nodes which may possibly sit there. - * - * In other words, let's name the min. I/O unit where the corruption - * starts B, and the previous min. I/O unit A. The below code tries to - * deal with a situation when half of B contains valid nodes or the end - * of a valid node, and the second half of B contains corrupted data or - * garbage. This means that UBIFS had been writing to B just before the - * power cut happened. I do not know how realistic is this scenario - * that half of the min. I/O unit had been written successfully and the - * other half not, but this is possible in our 'failure mode emulation' - * infrastructure at least. - * - * So what is the problem, why we need to drop those nodes? Whey can't - * we just clean-up the second half of B by putting a padding node - * there? We can, and this works fine with one exception which was - * reproduced with power cut emulation testing and happens extremely - * rarely. The description follows, but it is worth noting that that is - * only about the GC head, so we could do this trick only if the bud - * belongs to the GC head, but it does not seem to be worth an - * additional "if" statement. - * - * So, imagine the file-system is full, we run GC which is moving valid - * nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head - * LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X - * and will try to continue. Imagine that LEB X is currently the - * dirtiest LEB, and the amount of used space in LEB Y is exactly the - * same as amount of free space in LEB X. - * - * And a power cut happens when nodes are moved from LEB X to LEB Y. 
We - * are here trying to recover LEB Y which is the GC head LEB. We find - * the min. I/O unit B as described above. Then we clean-up LEB Y by - * padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function - * fails, because it cannot find a dirty LEB which could be GC'd into - * LEB Y! Even LEB X does not match because the amount of valid nodes - * there does not fit the free space in LEB Y any more! And this is - * because of the padding node which we added to LEB Y. The - * user-visible effect of this which I once observed and analysed is - * that we cannot mount the file-system with -ENOSPC error. - * - * So obviously, to make sure that situation does not happen we should - * free min. I/O unit B in LEB Y completely and the last used min. I/O - * unit in LEB Y should be A. This is basically what the below code - * tries to do. - */ - while (min_io_unit == round_down(offs, c->min_io_size) && - min_io_unit != offs && - drop_last_node(sleb, &offs, grouped)); + if (jhead == GCHD) { + /* + * If this LEB belongs to the GC head then while we are in the + * middle of the same min. I/O unit keep dropping nodes. So + * basically, what we want is to make sure that the last min. + * I/O unit where we saw the corruption is dropped completely + * with all the uncorrupted nodes which may possibly sit there. + * + * In other words, let's name the min. I/O unit where the + * corruption starts B, and the previous min. I/O unit A. The + * below code tries to deal with a situation when half of B + * contains valid nodes or the end of a valid node, and the + * second half of B contains corrupted data or garbage. This + * means that UBIFS had been writing to B just before the power + * cut happened. I do not know how realistic is this scenario + * that half of the min. I/O unit had been written successfully + * and the other half not, but this is possible in our 'failure + * mode emulation' infrastructure at least. + * + * So what is the problem, why we need to drop those nodes? Why + * can't we just clean-up the second half of B by putting a + * padding node there? We can, and this works fine with one + * exception which was reproduced with power cut emulation + * testing and happens extremely rarely. + * + * Imagine the file-system is full, we run GC which starts + * moving valid nodes from LEB X to LEB Y (obviously, LEB Y is + * the current GC head LEB). The @c->gc_lnum is -1, which means + * that GC will retain LEB X and will try to continue. Imagine + * that LEB X is currently the dirtiest LEB, and the amount of + * used space in LEB Y is exactly the same as amount of free + * space in LEB X. + * + * And a power cut happens when nodes are moved from LEB X to + * LEB Y. We are here trying to recover LEB Y which is the GC + * head LEB. We find the min. I/O unit B as described above. + * Then we clean-up LEB Y by padding min. I/O unit. And later + * 'ubifs_rcvry_gc_commit()' function fails, because it cannot + * find a dirty LEB which could be GC'd into LEB Y! Even LEB X + * does not match because the amount of valid nodes there does + * not fit the free space in LEB Y any more! And this is + * because of the padding node which we added to LEB Y. The + * user-visible effect of this which I once observed and + * analysed is that we cannot mount the file-system with + * -ENOSPC error. + * + * So obviously, to make sure that situation does not happen we + * should free min. I/O unit B in LEB Y completely and the last + * used min. I/O unit in LEB Y should be A. 
This is basically + * what the below code tries to do. + */ + while (offs > min_io_unit) + drop_last_node(sleb, &offs); + } buf = sbuf + offs; len = c->leb_size - offs; -- cgit v1.2.3 From cf610bf4199770420629d3bc273494bd27ad6c1d Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 31 May 2011 07:03:21 +0300 Subject: UBIFS: fix shrinker object count reports Sometimes VM asks the shrinker to return amount of objects it can shrink, and we return the ubifs_clean_zn_cnt in that case. However, it is possible that this counter is negative for a short period of time, due to the way UBIFS TNC code updates it. And I can observe the following warnings sometimes: shrink_slab: ubifs_shrinker+0x0/0x2b7 [ubifs] negative objects to delete nr=-8541616642706119788 This patch makes sure UBIFS never returns negative count of objects. Signed-off-by: Artem Bityutskiy Cc: stable@kernel.org --- fs/ubifs/shrinker.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/shrinker.c b/fs/ubifs/shrinker.c index ca953a945029..9e1d05666fed 100644 --- a/fs/ubifs/shrinker.c +++ b/fs/ubifs/shrinker.c @@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc) long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt); if (nr == 0) - return clean_zn_cnt; + /* + * Due to the way UBIFS updates the clean znode counter it may + * temporarily be negative. + */ + return clean_zn_cnt >= 0 ? clean_zn_cnt : 1; if (!clean_zn_cnt) { /* -- cgit v1.2.3 From 812eb258311f89bcd664a34a620f249d54a2cd83 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 31 May 2011 08:40:40 +0300 Subject: UBIFS: fix memory leak on error path UBIFS leaks memory on error path in 'ubifs_jnl_update()' in case of write failure because it forgets to free the 'struct ubifs_dent_node *dent' object. Although the object is small, the alignment can make it large - e.g., 2KiB if the min. I/O unit is 2KiB. Signed-off-by: Artem Bityutskiy Cc: stable@kernel.org --- fs/ubifs/journal.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index 34b1679e6e3a..cef0460f4c54 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -669,6 +669,7 @@ out_free: out_release: release_head(c, BASEHD); + kfree(dent); out_ro: ubifs_ro_mode(c, err); if (last_reference) -- cgit v1.2.3 From 837072377034d0a0b18b851d1ab95676b245cc0a Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 31 May 2011 14:26:07 +0300 Subject: UBIFS: fix clean znode counter corruption in error cases UBIFS maintains per-filesystem and global clean znode counters ('c->clean_zn_cnt' and 'ubifs_clean_zn_cnt'). It is important to maintain correct values there since the shrinker relies on 'ubifs_clean_zn_cnt'. However, in case of failures during commit the counters were corrupted. E.g., if a failure happens in the middle of 'write_index()', then some nodes in the commit list ('c->cnext') are marked as clean, and some are marked as dirty. And the 'ubifs_destroy_tnc_subtree()' frees does not retrun correct count, and we end up with non-zero 'c->clean_zn_cnt' when unmounting. This means that if we have 2 file-sytem and one of them fails, and we unmount it, 'ubifs_clean_zn_cnt' stays incorrect and confuses the shrinker. 
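To make the counter reconciliation concrete, here is a minimal userspace sketch of the pattern the fix adopts: on teardown, subtract whatever the per-filesystem clean counter still holds from the global one, instead of trusting a count recomputed while the tree is being destroyed. All names below (fs_ctx, mark_clean, fs_close) are illustrative stand-ins, not UBIFS symbols.

#include <stdatomic.h>
#include <stdio.h>

static atomic_long global_clean_cnt;        /* plays the role of ubifs_clean_zn_cnt */

struct fs_ctx {                             /* plays the role of one mounted FS */
        atomic_long clean_cnt;              /* plays the role of c->clean_zn_cnt */
};

static void mark_clean(struct fs_ctx *c, long n)
{
        atomic_fetch_add(&c->clean_cnt, n);
        atomic_fetch_add(&global_clean_cnt, n);
}

static void fs_close(struct fs_ctx *c)
{
        /*
         * Subtract what the per-FS counter still holds rather than a value
         * recomputed during teardown, so the global counter cannot drift
         * even if a failed commit left the tree half clean, half dirty.
         */
        long n = atomic_load(&c->clean_cnt);

        atomic_fetch_sub(&global_clean_cnt, n);
        atomic_store(&c->clean_cnt, 0);
}

int main(void)
{
        struct fs_ctx a;

        atomic_init(&a.clean_cnt, 0);
        mark_clean(&a, 100);
        fs_close(&a);
        printf("global clean count after unmount: %ld\n",
               atomic_load(&global_clean_cnt));
        return 0;
}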
Signed-off-by: Artem Bityutskiy --- fs/ubifs/tnc.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 8119b1fd8d94..91b4213dde84 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c) */ void ubifs_tnc_close(struct ubifs_info *c) { - long clean_freed; - tnc_destroy_cnext(c); if (c->zroot.znode) { - clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode); - atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt); + long n; + + ubifs_destroy_tnc_subtree(c->zroot.znode); + n = atomic_long_read(&c->clean_zn_cnt); + atomic_long_sub(n, &ubifs_clean_zn_cnt); } kfree(c->gap_lebs); kfree(c->ilebs); -- cgit v1.2.3 From 4f1ab9b01d34eac9fc958f7150d3bf266dcc1685 Mon Sep 17 00:00:00 2001 From: Ben Gardiner Date: Mon, 30 May 2011 14:56:14 -0400 Subject: UBIFS: assert no fixup when writing a node The current free space fixup can result in some writing to the UBI volume when the space_fixup flag is set. To catch instances where UBIFS is writing to the NAND while the space_fixup flag is set, add an assert to ubifs_write_node(). Artem: tweaked the patch, added similar assertion to the write buffer write path. Signed-off-by: Ben Gardiner Reviewed-by: Matthew L. Creech Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 166951e0dcd3..3be645e012c9 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) ubifs_assert(wbuf->size % c->min_io_size == 0); ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); ubifs_assert(!c->ro_media && !c->ro_mount); + ubifs_assert(!c->space_fixup); if (c->leb_size - wbuf->offs >= c->max_write_size) ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); @@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); ubifs_assert(!c->ro_media && !c->ro_mount); + ubifs_assert(!c->space_fixup); if (c->ro_error) return -EROFS; -- cgit v1.2.3 From 781c5717a95a74b294beb38b8276943b0f8b5bb4 Mon Sep 17 00:00:00 2001 From: Ben Gardiner Date: Mon, 30 May 2011 14:56:15 -0400 Subject: UBIFS: intialize LPT earlier The current 'mount_ubifs()' implementation does not initialize the LPT until the the master node is marked dirty. Move the LPT initialization to before marking the master node dirty. This is a preparation for the next patch which will move the free-space-fixup check to before marking the master node dirty, because we have to fix-up the free space before doing any writes. Artem: massaged the patch and commit message. Signed-off-by: Ben Gardiner Reviewed-by: Matthew L. 
Creech Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 1e40db740da9..6d357fd9c289 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1282,17 +1282,24 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_master; - init_constants_master(c); - if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { ubifs_msg("recovery needed"); c->need_recovery = 1; - if (!c->ro_mount) { - err = ubifs_recover_inl_heads(c, c->sbuf); - if (err) - goto out_master; - } - } else if (!c->ro_mount) { + } + + init_constants_master(c); + + if (c->need_recovery && !c->ro_mount) { + err = ubifs_recover_inl_heads(c, c->sbuf); + if (err) + goto out_master; + } + + err = ubifs_lpt_init(c, 1, !c->ro_mount); + if (err) + goto out_master; + + if (!c->ro_mount) { /* * Set the "dirty" flag so that if we reboot uncleanly we * will notice this immediately on the next mount. @@ -1300,13 +1307,9 @@ static int mount_ubifs(struct ubifs_info *c) c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); err = ubifs_write_master(c); if (err) - goto out_master; + goto out_lpt; } - err = ubifs_lpt_init(c, 1, !c->ro_mount); - if (err) - goto out_lpt; - err = dbg_check_idx_size(c, c->bi.old_idx_sz); if (err) goto out_lpt; -- cgit v1.2.3 From 098011940a2549ae7182db4bf101c3e3d2b4e6df Mon Sep 17 00:00:00 2001 From: Ben Gardiner Date: Mon, 30 May 2011 14:56:16 -0400 Subject: UBIFS: fix-up free space earlier The free space fixup is currently initiated during mount after the call to ubifs_write_master() which results in a write to PEBs; this has been observed with the patch 'assert no fixup when writing a node' applied: Move the free space fixup on mount to before the calls to ubifs_recover_inl_heads() and ubifs_write_master(). This results in no assertions with the previously mentioned patch applied. Artem: tweaked the patch a bit Signed-off-by: Ben Gardiner Reviewed-by: Matthew L. 
Creech Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 6d357fd9c289..b5aeb5a8ebed 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1282,13 +1282,13 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_master; + init_constants_master(c); + if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) { ubifs_msg("recovery needed"); c->need_recovery = 1; } - init_constants_master(c); - if (c->need_recovery && !c->ro_mount) { err = ubifs_recover_inl_heads(c, c->sbuf); if (err) @@ -1299,6 +1299,12 @@ static int mount_ubifs(struct ubifs_info *c) if (err) goto out_master; + if (!c->ro_mount && c->space_fixup) { + err = ubifs_fixup_free_space(c); + if (err) + goto out_master; + } + if (!c->ro_mount) { /* * Set the "dirty" flag so that if we reboot uncleanly we @@ -1402,12 +1408,6 @@ static int mount_ubifs(struct ubifs_info *c) } else ubifs_assert(c->lst.taken_empty_lebs > 0); - if (!c->ro_mount && c->space_fixup) { - err = ubifs_fixup_free_space(c); - if (err) - goto out_infos; - } - err = dbg_check_filesystem(c); if (err) goto out_infos; -- cgit v1.2.3 From c592a7073796de1cd221f957ab693c19d383423f Mon Sep 17 00:00:00 2001 From: Suresh Jayaraman Date: Fri, 3 Jun 2011 12:06:19 +0530 Subject: cifs: fix the kernel release version in the default security warning message When ntlm security mechanim is used, the message that warns about the upgrade to ntlmv2 got the kernel release version wrong (Blame it on Linus :). Fix it. Signed-off-by: Suresh Jayaraman Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6d88b82537c3..84c730701c09 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1976,7 +1976,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) warned_on_ntlm = true; cERROR(1, "default security mechanism requested. The default " "security mechanism will be upgraded from ntlm to " - "ntlmv2 in kernel release 2.6.41"); + "ntlmv2 in kernel release 3.1"); } ses->overrideSecFlg = volume_info->secFlg; -- cgit v1.2.3 From 5f0b23eeba2d9105944148e5a85b0bfb34a8ecf5 Mon Sep 17 00:00:00 2001 From: Suresh Jayaraman Date: Fri, 3 Jun 2011 14:19:01 +0530 Subject: cifs: make CIFS depend on CRYPTO_ECB When CONFIG_CRYPTO_ECB is not set, trying to mount a CIFS share with NTLM security resulted in mount failure with the following error: "CIFS VFS: could not allocate des crypto API" Seems like a leftover from commit 43988d7. Signed-off-by: Suresh Jayaraman CC: Shirish Pargaonkar Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 1cd4c3a1862d..66c68ab9e7df 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -7,6 +7,7 @@ config CIFS select CRYPTO_MD5 select CRYPTO_HMAC select CRYPTO_ARC4 + select CRYPTO_ECB select CRYPTO_DES help This is the client VFS module for the Common Internet File System -- cgit v1.2.3 From 9e1f1de02c2275d7172e18dc4e7c2065777611bf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 3 Jun 2011 18:24:58 -0400 Subject: more conservative S_NOSEC handling Caching "we have already removed suid/caps" was overenthusiastic as merged. 
On network filesystems we might have had suid/caps set on another client, silently picked by this client on revalidate, all of that *without* clearing the S_NOSEC flag. AFAICS, the only reasonably sane way to deal with that is * new superblock flag; unless set, S_NOSEC is not going to be set. * local block filesystems set it in their ->mount() (more accurately, mount_bdev() does, so does btrfs ->mount(), users of mount_bdev() other than local block ones clear it) * if any network filesystem (or a cluster one) wants to use S_NOSEC, it'll need to set MS_NOSEC in sb->s_flags *AND* take care to clear S_NOSEC when inode attribute changes are picked from other clients. It's not an earth-shattering hole (anybody that can set suid on another client will almost certainly be able to write to the file before doing that anyway), but it's a bug that needs fixing. Signed-off-by: Al Viro --- fs/btrfs/super.c | 2 +- fs/fuse/inode.c | 2 ++ fs/ocfs2/super.c | 2 +- fs/super.c | 2 +- include/linux/fs.h | 3 ++- mm/filemap.c | 2 +- 6 files changed, 8 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 9b2e7e5bc3ef..d158b672a2d2 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -819,7 +819,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, } else { char b[BDEVNAME_SIZE]; - s->s_flags = flags; + s->s_flags = flags | MS_NOSEC; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); error = btrfs_fill_super(s, fs_devices, data, flags & MS_SILENT ? 1 : 0); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index cc6ec4b2f0ff..38f84cd48b67 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -921,6 +921,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) if (sb->s_flags & MS_MANDLOCK) goto err; + sb->s_flags &= ~MS_NOSEC; + if (!parse_fuse_opt((char *) data, &d, is_bdev)) goto err; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index cdbaf5e97308..56f61027236b 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1072,7 +1072,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) sb->s_magic = OCFS2_SUPER_MAGIC; - sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | + sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) | ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? 
MS_POSIXACL : 0); /* Hard readonly mode only if: bdev_read_only, MS_RDONLY, diff --git a/fs/super.c b/fs/super.c index c75593953c52..ab3d672db0de 100644 --- a/fs/super.c +++ b/fs/super.c @@ -822,7 +822,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, } else { char b[BDEVNAME_SIZE]; - s->s_flags = flags; + s->s_flags = flags | MS_NOSEC; s->s_mode = mode; strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(bdev)); diff --git a/include/linux/fs.h b/include/linux/fs.h index c55d6b7cd5d6..646a1836152a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -208,6 +208,7 @@ struct inodes_stat_t { #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define MS_I_VERSION (1<<23) /* Update inode I_version field */ #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_NOSEC (1<<28) #define MS_BORN (1<<29) #define MS_ACTIVE (1<<30) #define MS_NOUSER (1<<31) @@ -2591,7 +2592,7 @@ static inline int is_sxid(mode_t mode) static inline void inode_has_no_xattr(struct inode *inode) { - if (!is_sxid(inode->i_mode)) + if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) inode->i_flags |= S_NOSEC; } diff --git a/mm/filemap.c b/mm/filemap.c index d7b10578a64b..a8251a8d3457 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2000,7 +2000,7 @@ int file_remove_suid(struct file *file) error = security_inode_killpriv(dentry); if (!error && killsuid) error = __remove_suid(dentry, killsuid); - if (!error) + if (!error && (inode->i_sb->s_flags & MS_NOSEC)) inode->i_flags |= S_NOSEC; return error; -- cgit v1.2.3 From 1bc8779349d6278e2713a1ff94418c2a6746a791 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Sat, 28 May 2011 21:57:55 +0200 Subject: btrfs: scrub: don't reuse bios and pages The current scrub implementation reuses bios and pages as often as possible, allocating them only on start and releasing them when finished. This leads to more problems with the block layer than it's worth. The elevator gets confused when there are more pages added to the bio than bi_size suggests. This patch completely rips out the reuse of bios and pages and allocates them freshly for each submit. 
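The shape of the rework is easier to see in isolation: build an I/O descriptor sized exactly for the pages attached to it, and release everything through one helper that both the completion path and the allocation-failure path can call. The sketch below is a hedged userspace analogue; io_desc, io_build() and io_free() are made-up names, not block-layer APIs.

#include <stdio.h>
#include <stdlib.h>

struct io_desc {                        /* stands in for one freshly built bio */
        size_t nr_pages;
        void **pages;
};

static void io_free(struct io_desc *io)
{
        size_t i;

        if (!io)
                return;
        for (i = 0; i < io->nr_pages; i++)
                free(io->pages[i]);
        free(io->pages);
        free(io);
}

/*
 * Size the descriptor for exactly this submission so its length always
 * matches the pages actually attached; on allocation failure release
 * everything built so far, the way the scrub nomem path does.
 */
static struct io_desc *io_build(size_t nr_pages, size_t page_size)
{
        struct io_desc *io = calloc(1, sizeof(*io));
        size_t i;

        if (!io)
                return NULL;
        io->pages = calloc(nr_pages, sizeof(*io->pages));
        if (!io->pages)
                goto nomem;
        for (i = 0; i < nr_pages; i++) {
                io->pages[i] = malloc(page_size);
                if (!io->pages[i])
                        goto nomem;
                io->nr_pages++;
        }
        return io;
nomem:
        io_free(io);
        return NULL;
}

int main(void)
{
        struct io_desc *io = io_build(16, 4096);

        if (!io)
                return 1;
        printf("submitting %zu pages\n", io->nr_pages);
        io_free(io);                    /* completion path owns the cleanup */
        return 0;
}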
Signed-off-by: Arne Jansen Signed-off-by: Chris Maosn --- fs/btrfs/scrub.c | 114 +++++++++++++++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 6dfed0c27ac3..2d1f8909a8e1 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -117,33 +117,37 @@ static void scrub_free_csums(struct scrub_dev *sdev) } } +static void scrub_free_bio(struct bio *bio) +{ + int i; + struct page *last_page = NULL; + + if (!bio) + return; + + for (i = 0; i < bio->bi_vcnt; ++i) { + if (bio->bi_io_vec[i].bv_page == last_page) + continue; + last_page = bio->bi_io_vec[i].bv_page; + __free_page(last_page); + } + bio_put(bio); +} + static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) { int i; - int j; - struct page *last_page; if (!sdev) return; for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { struct scrub_bio *sbio = sdev->bios[i]; - struct bio *bio; if (!sbio) break; - bio = sbio->bio; - if (bio) { - last_page = NULL; - for (j = 0; j < bio->bi_vcnt; ++j) { - if (bio->bi_io_vec[j].bv_page == last_page) - continue; - last_page = bio->bi_io_vec[j].bv_page; - __free_page(last_page); - } - bio_put(bio); - } + scrub_free_bio(sbio->bio); kfree(sbio); } @@ -156,8 +160,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) { struct scrub_dev *sdev; int i; - int j; - int ret; struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; sdev = kzalloc(sizeof(*sdev), GFP_NOFS); @@ -165,7 +167,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) goto nomem; sdev->dev = dev; for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { - struct bio *bio; struct scrub_bio *sbio; sbio = kzalloc(sizeof(*sbio), GFP_NOFS); @@ -173,32 +174,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) goto nomem; sdev->bios[i] = sbio; - bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); - if (!bio) - goto nomem; - sbio->index = i; sbio->sdev = sdev; - sbio->bio = bio; sbio->count = 0; sbio->work.func = scrub_checksum; - bio->bi_private = sdev->bios[i]; - bio->bi_end_io = scrub_bio_end_io; - bio->bi_sector = 0; - bio->bi_bdev = dev->bdev; - bio->bi_size = 0; - - for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) { - struct page *page; - page = alloc_page(GFP_NOFS); - if (!page) - goto nomem; - - ret = bio_add_page(bio, page, PAGE_SIZE, 0); - if (!ret) - goto nomem; - } - WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO); if (i != SCRUB_BIOS_PER_DEV-1) sdev->bios[i]->next_free = i + 1; @@ -394,6 +373,7 @@ static void scrub_bio_end_io(struct bio *bio, int err) struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; sbio->err = err; + sbio->bio = bio; btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); } @@ -453,6 +433,8 @@ static void scrub_checksum(struct btrfs_work *work) } out: + scrub_free_bio(sbio->bio); + sbio->bio = NULL; spin_lock(&sdev->list_lock); sbio->next_free = sdev->first_free; sdev->first_free = sbio->index; @@ -583,25 +565,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) static int scrub_submit(struct scrub_dev *sdev) { struct scrub_bio *sbio; + struct bio *bio; + int i; if (sdev->curr == -1) return 0; sbio = sdev->bios[sdev->curr]; - sbio->bio->bi_sector = sbio->physical >> 9; - sbio->bio->bi_size = sbio->count * PAGE_SIZE; - sbio->bio->bi_next = NULL; - sbio->bio->bi_flags |= 1 << BIO_UPTODATE; - sbio->bio->bi_comp_cpu = -1; - sbio->bio->bi_bdev = sdev->dev->bdev; + bio = bio_alloc(GFP_NOFS, sbio->count); + if (!bio) + goto nomem; + + bio->bi_private = sbio; + 
bio->bi_end_io = scrub_bio_end_io; + bio->bi_bdev = sdev->dev->bdev; + bio->bi_sector = sbio->physical >> 9; + + for (i = 0; i < sbio->count; ++i) { + struct page *page; + int ret; + + page = alloc_page(GFP_NOFS); + if (!page) + goto nomem; + + ret = bio_add_page(bio, page, PAGE_SIZE, 0); + if (!ret) { + __free_page(page); + goto nomem; + } + } + sbio->err = 0; sdev->curr = -1; atomic_inc(&sdev->in_flight); - submit_bio(0, sbio->bio); + submit_bio(READ, bio); return 0; + +nomem: + scrub_free_bio(bio); + + return -ENOMEM; } static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, @@ -633,7 +640,11 @@ again: sbio->logical = logical; } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || sbio->logical + sbio->count * PAGE_SIZE != logical) { - scrub_submit(sdev); + int ret; + + ret = scrub_submit(sdev); + if (ret) + return ret; goto again; } sbio->spag[sbio->count].flags = flags; @@ -645,8 +656,13 @@ again: memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); } ++sbio->count; - if (sbio->count == SCRUB_PAGES_PER_BIO || force) - scrub_submit(sdev); + if (sbio->count == SCRUB_PAGES_PER_BIO || force) { + int ret; + + ret = scrub_submit(sdev); + if (ret) + return ret; + } return 0; } -- cgit v1.2.3 From 17aca1c987cff89dc4279371857035da902c8854 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Jun 2011 01:13:45 -0400 Subject: Btrfs: fix uninit variable in the delayed inode code The nitems counter needs to start at zero Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index b46d94d1dea8..c61c32cf0f71 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -678,6 +678,7 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans, INIT_LIST_HEAD(&head); next = item; + nitems = 0; /* * count the number of the continuous items that we can insert in batch -- cgit v1.2.3 From 211f96c24f117fcc6e9e2431e40d92f4de22625e Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Jun 2011 01:26:53 -0400 Subject: Btrfs: make sure we don't overflow the free space cache crc page The free space cache uses only one page for crcs right now, which means we can't have a cache file bigger than the crcs we can fit in the first page. This adds a check to enforce that restriction. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index dd38d4c3a599..1cb72394498c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -590,10 +590,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + + /* Since the first page has all of our checksums and our generation we + * need to calculate the offset into the page that we can start writing + * our entries. 
+ */ + first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); + filemap_write_and_wait(inode->i_mapping); btrfs_wait_ordered_range(inode, inode->i_size & ~(root->sectorsize - 1), (u64)-1); + /* make sure we don't overflow that first page */ + if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) { + /* this is really the same as running out of space, where we also return 0 */ + printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n"); + ret = 0; + goto out_update; + } + /* We need a checksum per page. */ crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); if (!crc) @@ -605,12 +620,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, return -1; } - /* Since the first page has all of our checksums and our generation we - * need to calculate the offset into the page that we can start writing - * our entries. - */ - first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); - /* Get the cluster for this block_group if it exists */ if (block_group && !list_empty(&block_group->cluster_list)) cluster = list_entry(block_group->cluster_list.next, @@ -872,12 +881,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, ret = 1; out_free: + kfree(checksums); + kfree(pages); + +out_update: if (ret != 1) { invalidate_inode_pages2_range(inode->i_mapping, 0, index); BTRFS_I(inode)->generation = 0; } - kfree(checksums); - kfree(pages); btrfs_update_inode(trans, root, inode); return ret; } -- cgit v1.2.3 From ca456ae280c0646e1e571c3b9a3834c55e90adfe Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 1 Jun 2011 09:42:49 +0000 Subject: Btrfs: don't save the inode cache in non-FS roots This adds extra checks to make sure the inode map we are caching really belongs to a FS root instead of a special relocation tree. It prevents crashes during balancing operations. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/inode-map.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 3262cd17a12f..04f7199facb4 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -388,6 +388,12 @@ int btrfs_save_ino_cache(struct btrfs_root *root, int prealloc; bool retry = false; + /* only fs tree and subvol/snap needs ino cache */ + if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID && + (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID || + root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID)) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; -- cgit v1.2.3 From 5f3f302a6f4cb74906c05fad1d03fc5e95c7e5af Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Mon, 30 May 2011 08:36:16 +0000 Subject: btrfs: false BUG_ON when degraded In degraded mode the struct btrfs_device of missing devs don't have device->name set. A kstrdup of NULL correctly returns NULL. Don't BUG in this case. 
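The corrected assertion only fires when a real name failed to duplicate. A small userspace sketch of the same NULL-in/NULL-out convention follows; dup_name() is an illustrative stand-in for kstrdup(), not a kernel API.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

static char *dup_name(const char *name)
{
        size_t len;
        char *copy;

        if (!name)                      /* NULL in, NULL out, like kstrdup() */
                return NULL;
        len = strlen(name) + 1;
        copy = malloc(len);
        if (copy)
                memcpy(copy, name, len);
        return copy;
}

int main(void)
{
        const char *missing_dev_name = NULL;    /* degraded mode: no name */
        char *copy = dup_name(missing_dev_name);

        /* Only a NULL copy of a non-NULL source is an error. */
        assert(!(missing_dev_name && !copy));
        free(copy);
        return 0;
}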
Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c48214ef5c09..da541dfca2e3 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) BUG_ON(!new_device); memcpy(new_device, device, sizeof(*new_device)); new_device->name = kstrdup(device->name, GFP_NOFS); - BUG_ON(!new_device->name); + BUG_ON(device->name && !new_device->name); new_device->bdev = NULL; new_device->writeable = 0; new_device->in_fs_metadata = 0; -- cgit v1.2.3 From d132a538d258f8f52fd0cd8b5017755f4e915386 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 31 May 2011 19:33:33 +0000 Subject: Btrfs: don't save the inode cache if we are deleting this root With xfstest 254 I can panic the box every time with the inode number caching stuff on. This is because we clean the inodes out when we delete the subvolume, but then we write out the inode cache which adds an inode to the subvolume inode tree, and then when it gets evicted again the root gets added back on the dead roots list and is deleted again, so we have a double free. To stop this from happening just return 0 if refs is 0 (and we're not the tree root since tree root always has refs of 0). With this fix 254 no longer panics. Thanks, Signed-off-by: Josef Bacik Tested-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/inode-map.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 04f7199facb4..2d0d50067a7b 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -394,6 +394,11 @@ int btrfs_save_ino_cache(struct btrfs_root *root, root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID)) return 0; + /* Don't save inode cache if we are deleting this root */ + if (btrfs_root_refs(&root->root_item) == 0 && + root != root->fs_info->tree_root) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; -- cgit v1.2.3 From a4689d2bd3b00dcf5c4320f06e0ab88810fbff9c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 31 May 2011 17:08:14 +0000 Subject: btrfs: use btrfs_ino to access inode number commit 4cb5300bc ("Btrfs: add mount -o auto_defrag") accesses inode number directly while it should use the helper with the new inode number allocator. 
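The underlying point is that once a filesystem keys its items by something other than the VFS inode number, every lookup has to go through the accessor. A rough userspace illustration follows; my_inode and my_ino() are invented names that only mirror the idea behind btrfs_ino(), not its actual implementation.

#include <stdint.h>
#include <stdio.h>

struct my_inode {
        uint64_t vfs_ino;               /* what inode->i_ino would hold */
        uint64_t fs_objectid;           /* what the FS actually keys items by */
        int has_objectid;
};

/* Mirrors the idea behind btrfs_ino(): prefer the FS-specific key. */
static uint64_t my_ino(const struct my_inode *inode)
{
        return inode->has_objectid ? inode->fs_objectid : inode->vfs_ino;
}

int main(void)
{
        struct my_inode i = {
                .vfs_ino = 42,
                .fs_objectid = 257,
                .has_objectid = 1,
        };

        printf("key used for defrag tracking: %llu\n",
               (unsigned long long)my_ino(&i));
        return 0;
}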
Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/file.c | 2 +- fs/btrfs/ioctl.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e3a1b0c2394c..982b5ea9762f 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (!defrag) return -ENOMEM; - defrag->ino = inode->i_ino; + defrag->ino = btrfs_ino(inode); defrag->transid = transid; defrag->root = root->root_key.objectid; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 74c80595d707..ac37040e426a 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -706,16 +706,17 @@ static int find_new_extents(struct btrfs_root *root, struct btrfs_file_extent_item *extent; int type; int ret; + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) return -ENOMEM; - min_key.objectid = inode->i_ino; + min_key.objectid = ino; min_key.type = BTRFS_EXTENT_DATA_KEY; min_key.offset = *off; - max_key.objectid = inode->i_ino; + max_key.objectid = ino; max_key.type = (u8)-1; max_key.offset = (u64)-1; @@ -726,7 +727,7 @@ static int find_new_extents(struct btrfs_root *root, path, 0, newer_than); if (ret != 0) goto none; - if (min_key.objectid != inode->i_ino) + if (min_key.objectid != ino) goto none; if (min_key.type != BTRFS_EXTENT_DATA_KEY) goto none; -- cgit v1.2.3 From e7786c3ae517b2c433edc91714e86be770e9f1ce Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Sat, 28 May 2011 20:58:38 +0000 Subject: btrfs: scrub: add explicit plugging With the removal of the implicit plugging scrub ends up doing more and smaller I/O than necessary. This patch adds explicit plugging per chunk. Signed-off-by: Arne Jansen Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 2d1f8909a8e1..1204eab94028 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -348,9 +348,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, int ret; DECLARE_COMPLETION_ONSTACK(complete); - /* we are going to wait on this IO */ - rw |= REQ_SYNC; - bio = bio_alloc(GFP_NOFS, 1); bio->bi_bdev = bdev; bio->bi_sector = sector; @@ -359,6 +356,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, bio->bi_private = &complete; submit_bio(rw, bio); + /* this will also unplug the queue */ wait_for_completion(&complete); ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); @@ -743,6 +741,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; struct btrfs_extent_item *extent; + struct blk_plug plug; u64 flags; int ret; int slot; @@ -847,6 +846,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, * the scrub. This might currently (crc32) end up to be about 1MB */ start_stripe = 0; + blk_start_plug(&plug); again: logical = base + offset + start_stripe * increment; for (i = start_stripe; i < nstripes; ++i) { @@ -988,6 +988,7 @@ next: scrub_submit(sdev); out: + blk_finish_plug(&plug); btrfs_free_path(path); return ret < 0 ? 
ret : 0; } -- cgit v1.2.3 From 4b9465cb9e3859186eefa1ca3b990a5849386320 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 3 Jun 2011 09:36:29 -0400 Subject: Btrfs: add mount -o inode_cache This makes the inode map cache default to off until we fix the overflow problem when the free space crcs don't fit inside a single page. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/free-space-cache.c | 6 ++++++ fs/btrfs/inode-map.c | 20 ++++++++++++++++++++ fs/btrfs/super.c | 8 +++++++- 4 files changed, 34 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f98c2005715..4958ef5417d6 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1340,6 +1340,7 @@ struct btrfs_ioctl_defrag_range_args { #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) +#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 1cb72394498c..bffa5c4a633b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2536,6 +2536,9 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) int ret = 0; u64 root_gen = btrfs_root_generation(&root->root_item); + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + /* * If we're unmounting then just return, since this does a search on the * normal root and not the commit root and we could deadlock. @@ -2575,6 +2578,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, struct inode *inode; int ret; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + inode = lookup_free_ino_inode(root, path); if (IS_ERR(inode)) return 0; diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 2d0d50067a7b..cb79b8975c9f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -38,6 +38,9 @@ static int caching_kthread(void *data) int slot; int ret; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; @@ -141,6 +144,9 @@ static void start_caching(struct btrfs_root *root) int ret; u64 objectid; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return; + spin_lock(&root->cache_lock); if (root->cached != BTRFS_CACHE_NO) { spin_unlock(&root->cache_lock); @@ -178,6 +184,9 @@ static void start_caching(struct btrfs_root *root) int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) { + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return btrfs_find_free_objectid(root, objectid); + again: *objectid = btrfs_find_ino_for_alloc(root); @@ -201,6 +210,10 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid) { struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; + + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return; + again: if (root->cached == BTRFS_CACHE_FINISHED) { __btrfs_add_free_space(ctl, objectid, 1); @@ -250,6 +263,9 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) struct rb_node *n; u64 count; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return; + while (1) { n = rb_first(rbroot); if (!n) @@ -399,9 +415,13 @@ int btrfs_save_ino_cache(struct btrfs_root *root, root != root->fs_info->tree_root) return 0; + if (!btrfs_test_opt(root, INODE_MAP_CACHE)) + return 0; + path = btrfs_alloc_path(); if (!path) return -ENOMEM; + again: inode = 
lookup_free_ino_inode(root, path); if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 28e3cb2607ff..3559d0b3518a 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -160,7 +160,8 @@ enum { Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, - Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, + Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, + Opt_inode_cache, Opt_err, }; static match_table_t tokens = { @@ -192,6 +193,7 @@ static match_table_t tokens = { {Opt_enospc_debug, "enospc_debug"}, {Opt_subvolrootid, "subvolrootid=%d"}, {Opt_defrag, "autodefrag"}, + {Opt_inode_cache, "inode_cache"}, {Opt_err, NULL}, }; @@ -360,6 +362,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) printk(KERN_INFO "btrfs: enabling disk space caching\n"); btrfs_set_opt(info->mount_opt, SPACE_CACHE); break; + case Opt_inode_cache: + printk(KERN_INFO "btrfs: enabling inode map caching\n"); + btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE); + break; case Opt_clear_cache: printk(KERN_INFO "btrfs: force clearing of disk cache\n"); btrfs_set_opt(info->mount_opt, CLEAR_CACHE); -- cgit v1.2.3 From 7841cb2898f66a73062c64d0ef5733dde7279e46 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 31 May 2011 18:07:27 +0200 Subject: btrfs: add helper for fs_info->closing wrap checking of filesystem 'closing' flag and fix a few missing memory barriers. Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 9 +++++++++ fs/btrfs/extent-tree.c | 3 +-- fs/btrfs/file.c | 4 ++-- fs/btrfs/free-space-cache.c | 10 ++++------ fs/btrfs/inode-map.c | 3 +-- fs/btrfs/inode.c | 3 +-- fs/btrfs/scrub.c | 2 +- fs/btrfs/transaction.c | 2 +- 8 files changed, 20 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4958ef5417d6..8490ee063709 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2354,6 +2354,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *node, struct extent_buffer *parent); +static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info) +{ + /* + * Get synced with close_ctree() + */ + smp_mb(); + return fs_info->closing; +} + /* root-item.c */ int btrfs_find_root_ref(struct btrfs_root *tree_root, struct btrfs_path *path, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c9173a7827b0..5b9b6b6df242 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -366,8 +366,7 @@ again: nritems = btrfs_header_nritems(leaf); while (1) { - smp_mb(); - if (fs_info->closing > 1) { + if (btrfs_fs_closing(fs_info) > 1) { last = (u64)-1; break; } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 982b5ea9762f..fa4ef18b66b1 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (!btrfs_test_opt(root, AUTO_DEFRAG)) return 0; - if (root->fs_info->closing) + if (btrfs_fs_closing(root->fs_info)) return 0; if (BTRFS_I(inode)->in_defrag) @@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) first_ino = defrag->ino + 1; rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) goto next_free; spin_unlock(&fs_info->defrag_inodes_lock); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index bffa5c4a633b..ad144736a5fd 100644 --- 
a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, return inode; spin_lock(&block_group->lock); - if (!root->fs_info->closing) { + if (!btrfs_fs_closing(root->fs_info)) { block_group->inode = igrab(inode); block_group->iref = 1; } @@ -493,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, * If we're unmounting then just return, since this does a search on the * normal root and not the commit root and we could deadlock. */ - smp_mb(); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) return 0; /* @@ -2513,7 +2512,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root, return inode; spin_lock(&root->cache_lock); - if (!root->fs_info->closing) + if (!btrfs_fs_closing(root->fs_info)) root->cache_inode = igrab(inode); spin_unlock(&root->cache_lock); @@ -2543,8 +2542,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) * If we're unmounting then just return, since this does a search on the * normal root and not the commit root and we could deadlock. */ - smp_mb(); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) return 0; path = btrfs_alloc_path(); diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index cb79b8975c9f..b4087e0fa871 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -62,8 +62,7 @@ again: goto out; while (1) { - smp_mb(); - if (fs_info->closing) + if (btrfs_fs_closing(fs_info)) goto out; leaf = path->nodes[0]; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a83e44bf3206..02ff4a1b968b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4266,8 +4266,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (BTRFS_I(inode)->dummy_inode) return 0; - smp_mb(); - if (root->fs_info->closing && is_free_space_inode(root, inode)) + if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode)) nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 1204eab94028..df50fd1eca8f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1183,7 +1183,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, int ret; struct btrfs_device *dev; - if (root->fs_info->closing) + if (btrfs_fs_closing(root->fs_info)) return -EINVAL; /* diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2d5c6d2aa4e4..dd719662340e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -817,7 +817,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) btrfs_btree_balance_dirty(info->tree_root, nr); cond_resched(); - if (root->fs_info->closing || ret != -EAGAIN) + if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN) break; } root->defrag_running = 0; -- cgit v1.2.3 From aa0467d8d2a00e75b2bb6a56a4ee6d70c5d1928f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 3 Jun 2011 16:29:08 +0200 Subject: btrfs: fix uninitialized variable warning With Linus' tree, today's linux-next build (powercp ppc64_defconfig) produced this warning: fs/btrfs/delayed-inode.c: In function 'btrfs_delayed_update_inode': fs/btrfs/delayed-inode.c:1598:6: warning: 'ret' may be used uninitialized in this function Introduced by commit 16cdcec736cd ("btrfs: implement delayed inode items operation"). This fixes a bug in btrfs_update_inode(): if the returned value from btrfs_delayed_update_inode is a nonzero garbage, inode stat data are not updated and several call paths may hit a BUG_ON or fail with strange code. 
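A stripped-down illustration of the failure mode and the one-line cure: when only some branches assign the status variable, the untouched path returns stack garbage, so the declaration itself has to supply the default. The names below are illustrative, not the btrfs functions.

#include <stdio.h>

/*
 * Only the failure branch assigns the status, so without the initialiser
 * the success path would return whatever happened to be on the stack.
 */
static int update_inode(int fast_path_ok)
{
        int ret = 0;                    /* the one-line fix */

        if (!fast_path_ok)
                ret = -12;              /* e.g. -ENOMEM from a fallback path */

        return ret;                     /* success path relies on the initialiser */
}

int main(void)
{
        printf("fast path status: %d\n", update_inode(1));
        printf("slow path status: %d\n", update_inode(0));
        return 0;
}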
Reported-by: Stephen Rothwell Signed-off-by: David Sterba --- fs/btrfs/delayed-inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index c61c32cf0f71..6462c29d2d37 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1595,7 +1595,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { struct btrfs_delayed_node *delayed_node; - int ret; + int ret = 0; delayed_node = btrfs_get_or_create_delayed_node(inode); if (IS_ERR(delayed_node)) -- cgit v1.2.3 From a8f072c1d624a627b67f2ace2f0c25d856ef4e54 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 2 Jun 2011 11:13:59 +0200 Subject: job control: rename signal->group_stop and flags to jobctl and update them signal->group_stop currently hosts mostly group stop related flags; however, it's gonna be used for wider purposes and the GROUP_STOP_ flag prefix becomes confusing. Rename signal->group_stop to signal->jobctl and rename all GROUP_STOP_* flags to JOBCTL_*. Bit position macros JOBCTL_*_BIT are defined and JOBCTL_* flags are defined in terms of them to allow using bitops later. While at it, reassign JOBCTL_TRAPPING to bit 22 to better accomodate future additions. This doesn't cause any functional change. -v2: JOBCTL_*_BIT macros added as suggested by Linus. Signed-off-by: Tejun Heo Cc: Linus Torvalds Signed-off-by: Oleg Nesterov --- fs/exec.c | 2 +- include/linux/sched.h | 22 ++++++++----- kernel/ptrace.c | 12 +++---- kernel/signal.c | 91 ++++++++++++++++++++++++++------------------------- 4 files changed, 67 insertions(+), 60 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index ea5f748906a8..8986bb0f9dc2 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1772,7 +1772,7 @@ static int zap_process(struct task_struct *start, int exit_code) t = start; do { - task_clear_group_stop_pending(t); + task_clear_jobctl_stop_pending(t); if (t != current && t->mm) { sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); diff --git a/include/linux/sched.h b/include/linux/sched.h index 2a8621c4be1e..b0dd064eb4fc 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1282,7 +1282,7 @@ struct task_struct { int exit_state; int exit_code, exit_signal; int pdeath_signal; /* The signal sent when the parent dies */ - unsigned int group_stop; /* GROUP_STOP_*, siglock protected */ + unsigned int jobctl; /* JOBCTL_*, siglock protected */ /* ??? 
*/ unsigned int personality; unsigned did_exec:1; @@ -1803,15 +1803,21 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define used_math() tsk_used_math(current) /* - * task->group_stop flags + * task->jobctl flags */ -#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */ -#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */ -#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */ -#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */ -#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */ +#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */ -extern void task_clear_group_stop_pending(struct task_struct *task); +#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */ +#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */ +#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */ +#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ + +#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT) +#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT) +#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) +#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) + +extern void task_clear_jobctl_stop_pending(struct task_struct *task); #ifdef CONFIG_PREEMPT_RCU diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 4f689cb739a3..134f34cb142b 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -77,13 +77,13 @@ void __ptrace_unlink(struct task_struct *child) spin_lock(&child->sighand->siglock); /* - * Reinstate GROUP_STOP_PENDING if group stop is in effect and + * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and * @child isn't dead. */ if (!(child->flags & PF_EXITING) && (child->signal->flags & SIGNAL_STOP_STOPPED || child->signal->group_stop_count)) - child->group_stop |= GROUP_STOP_PENDING; + child->jobctl |= JOBCTL_STOP_PENDING; /* * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick @@ -91,7 +91,7 @@ void __ptrace_unlink(struct task_struct *child) * is in TASK_TRACED; otherwise, we might unduly disrupt * TASK_KILLABLE sleeps. */ - if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child)) + if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child)) signal_wake_up(child, task_is_traced(child)); spin_unlock(&child->sighand->siglock); @@ -226,7 +226,7 @@ static int ptrace_attach(struct task_struct *task) spin_lock(&task->sighand->siglock); /* - * If the task is already STOPPED, set GROUP_STOP_PENDING and + * If the task is already STOPPED, set JOBCTL_STOP_PENDING and * TRAPPING, and kick it so that it transits to TRACED. TRAPPING * will be cleared if the child completes the transition or any * event which clears the group stop states happens. We'll wait @@ -243,7 +243,7 @@ static int ptrace_attach(struct task_struct *task) * in and out of STOPPED are protected by siglock. 
*/ if (task_is_stopped(task)) { - task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING; + task->jobctl |= JOBCTL_STOP_PENDING | JOBCTL_TRAPPING; signal_wake_up(task, 1); } @@ -257,7 +257,7 @@ unlock_creds: out: if (!retval) wait_event(current->signal->wait_chldexit, - !(task->group_stop & GROUP_STOP_TRAPPING)); + !(task->jobctl & JOBCTL_TRAPPING)); return retval; } diff --git a/kernel/signal.c b/kernel/signal.c index 86c32b884f8e..ab6851c06461 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) static int recalc_sigpending_tsk(struct task_struct *t) { - if ((t->group_stop & GROUP_STOP_PENDING) || + if ((t->jobctl & JOBCTL_STOP_PENDING) || PENDING(&t->pending, &t->blocked) || PENDING(&t->signal->shared_pending, &t->blocked)) { set_tsk_thread_flag(t, TIF_SIGPENDING); @@ -224,27 +224,28 @@ static inline void print_dropped_signal(int sig) } /** - * task_clear_group_stop_trapping - clear group stop trapping bit + * task_clear_jobctl_trapping - clear jobctl trapping bit * @task: target task * - * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it - * and wake up the ptracer. Note that we don't need any further locking. - * @task->siglock guarantees that @task->parent points to the ptracer. + * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. + * Clear it and wake up the ptracer. Note that we don't need any further + * locking. @task->siglock guarantees that @task->parent points to the + * ptracer. * * CONTEXT: * Must be called with @task->sighand->siglock held. */ -static void task_clear_group_stop_trapping(struct task_struct *task) +static void task_clear_jobctl_trapping(struct task_struct *task) { - if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) { - task->group_stop &= ~GROUP_STOP_TRAPPING; + if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { + task->jobctl &= ~JOBCTL_TRAPPING; __wake_up_sync_key(&task->parent->signal->wait_chldexit, TASK_UNINTERRUPTIBLE, 1, task); } } /** - * task_clear_group_stop_pending - clear pending group stop + * task_clear_jobctl_stop_pending - clear pending group stop * @task: target task * * Clear group stop states for @task. @@ -252,19 +253,19 @@ static void task_clear_group_stop_trapping(struct task_struct *task) * CONTEXT: * Must be called with @task->sighand->siglock held. */ -void task_clear_group_stop_pending(struct task_struct *task) +void task_clear_jobctl_stop_pending(struct task_struct *task) { - task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME | - GROUP_STOP_DEQUEUED); + task->jobctl &= ~(JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME | + JOBCTL_STOP_DEQUEUED); } /** * task_participate_group_stop - participate in a group stop * @task: task participating in a group stop * - * @task has GROUP_STOP_PENDING set and is participating in a group stop. + * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. * Group stop states are cleared and the group stop count is consumed if - * %GROUP_STOP_CONSUME was set. If the consumption completes the group + * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group * stop, the appropriate %SIGNAL_* flags are set. 
* * CONTEXT: @@ -277,11 +278,11 @@ void task_clear_group_stop_pending(struct task_struct *task) static bool task_participate_group_stop(struct task_struct *task) { struct signal_struct *sig = task->signal; - bool consume = task->group_stop & GROUP_STOP_CONSUME; + bool consume = task->jobctl & JOBCTL_STOP_CONSUME; - WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING)); + WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); - task_clear_group_stop_pending(task); + task_clear_jobctl_stop_pending(task); if (!consume) return false; @@ -604,7 +605,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * is to alert stop-signal processing code when another * processor has come along and cleared the flag. */ - current->group_stop |= GROUP_STOP_DEQUEUED; + current->jobctl |= JOBCTL_STOP_DEQUEUED; } if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { /* @@ -809,7 +810,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); t = p; do { - task_clear_group_stop_pending(t); + task_clear_jobctl_stop_pending(t); rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); wake_up_state(t, __TASK_STOPPED); } while_each_thread(p, t); @@ -925,7 +926,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) signal->group_stop_count = 0; t = p; do { - task_clear_group_stop_pending(t); + task_clear_jobctl_stop_pending(t); sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); } while_each_thread(p, t); @@ -1160,7 +1161,7 @@ int zap_other_threads(struct task_struct *p) p->signal->group_stop_count = 0; while_each_thread(p, t) { - task_clear_group_stop_pending(t); + task_clear_jobctl_stop_pending(t); count++; /* Don't bother with already dead threads */ @@ -1738,7 +1739,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) * clear now. We act as if SIGCONT is received after TASK_TRACED * is entered - ignore it. */ - if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) + if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) gstop_done = task_participate_group_stop(current); current->last_siginfo = info; @@ -1751,12 +1752,12 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) set_current_state(TASK_TRACED); /* - * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and + * We're committing to trapping. Clearing JOBCTL_TRAPPING and * transition to TASK_TRACED should be atomic with respect to - * siglock. This hsould be done after the arch hook as siglock is + * siglock. This should be done after the arch hook as siglock is * released and regrabbed across it. */ - task_clear_group_stop_trapping(current); + task_clear_jobctl_trapping(current); spin_unlock_irq(¤t->sighand->siglock); read_lock(&tasklist_lock); @@ -1792,9 +1793,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) * * If @gstop_done, the ptracer went away between group stop * completion and here. During detach, it would have set - * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED - * in do_signal_stop() on return, so notifying the real - * parent of the group stop completion is enough. + * JOBCTL_STOP_PENDING on us and we'll re-enter + * TASK_STOPPED in do_signal_stop() on return, so notifying + * the real parent of the group stop completion is enough. 
*/ if (gstop_done) do_notify_parent_cldstop(current, false, why); @@ -1856,14 +1857,14 @@ static int do_signal_stop(int signr) { struct signal_struct *sig = current->signal; - if (!(current->group_stop & GROUP_STOP_PENDING)) { - unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; + if (!(current->jobctl & JOBCTL_STOP_PENDING)) { + unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; struct task_struct *t; - /* signr will be recorded in task->group_stop for retries */ - WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); + /* signr will be recorded in task->jobctl for retries */ + WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); - if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) || + if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || unlikely(signal_group_exit(sig))) return 0; /* @@ -1890,19 +1891,19 @@ static int do_signal_stop(int signr) else WARN_ON_ONCE(!task_ptrace(current)); - current->group_stop &= ~GROUP_STOP_SIGMASK; - current->group_stop |= signr | gstop; + current->jobctl &= ~JOBCTL_STOP_SIGMASK; + current->jobctl |= signr | gstop; sig->group_stop_count = 1; for (t = next_thread(current); t != current; t = next_thread(t)) { - t->group_stop &= ~GROUP_STOP_SIGMASK; + t->jobctl &= ~JOBCTL_STOP_SIGMASK; /* * Setting state to TASK_STOPPED for a group * stop is always done with the siglock held, * so this check has no races. */ if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { - t->group_stop |= signr | gstop; + t->jobctl |= signr | gstop; sig->group_stop_count++; signal_wake_up(t, 0); } @@ -1943,23 +1944,23 @@ retry: spin_lock_irq(¤t->sighand->siglock); } else { - ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, + ptrace_stop(current->jobctl & JOBCTL_STOP_SIGMASK, CLD_STOPPED, 0, NULL); current->exit_code = 0; } /* - * GROUP_STOP_PENDING could be set if another group stop has + * JOBCTL_STOP_PENDING could be set if another group stop has * started since being woken up or ptrace wants us to transit * between TASK_STOPPED and TRACED. Retry group stop. */ - if (current->group_stop & GROUP_STOP_PENDING) { - WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); + if (current->jobctl & JOBCTL_STOP_PENDING) { + WARN_ON_ONCE(!(current->jobctl & JOBCTL_STOP_SIGMASK)); goto retry; } /* PTRACE_ATTACH might have raced with task killing, clear trapping */ - task_clear_group_stop_trapping(current); + task_clear_jobctl_trapping(current); spin_unlock_irq(¤t->sighand->siglock); @@ -2078,8 +2079,8 @@ relock: if (unlikely(signr != 0)) ka = return_ka; else { - if (unlikely(current->group_stop & - GROUP_STOP_PENDING) && do_signal_stop(0)) + if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && + do_signal_stop(0)) goto relock; signr = dequeue_signal(current, ¤t->blocked, @@ -2253,7 +2254,7 @@ void exit_signals(struct task_struct *tsk) signotset(&unblocked); retarget_shared_pending(tsk, &unblocked); - if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && + if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && task_participate_group_stop(tsk)) group_stop = CLD_STOPPED; out: -- cgit v1.2.3 From 3759a0d94c18764247b66511d1038f2b93aa95de Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 2 Jun 2011 11:14:00 +0200 Subject: job control: introduce JOBCTL_PENDING_MASK and task_clear_jobctl_pending() This patch introduces JOBCTL_PENDING_MASK and replaces task_clear_jobctl_stop_pending() with task_clear_jobctl_pending() which takes an extra @mask argument. JOBCTL_PENDING_MASK is currently equal to JOBCTL_STOP_PENDING but future patches will add more bits. 
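The bit layout and the mask-widening behaviour read roughly as follows in a self-contained sketch; the constants mirror the patches above, but clear_pending() is an illustrative userspace stand-in for task_clear_jobctl_pending(), which in the kernel also takes the task pointer under siglock.

#include <stdio.h>

#define JOBCTL_STOP_SIGMASK      0xffff

#define JOBCTL_STOP_DEQUEUED_BIT 16
#define JOBCTL_STOP_PENDING_BIT  17
#define JOBCTL_STOP_CONSUME_BIT  18
#define JOBCTL_TRAPPING_BIT      21

#define JOBCTL_STOP_DEQUEUED     (1u << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING      (1u << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME      (1u << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAPPING          (1u << JOBCTL_TRAPPING_BIT)

#define JOBCTL_PENDING_MASK      JOBCTL_STOP_PENDING

/* Clearing STOP_PENDING also drops the related CONSUME/DEQUEUED state. */
static unsigned int clear_pending(unsigned int jobctl, unsigned int mask)
{
        if (mask & JOBCTL_STOP_PENDING)
                mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
        return jobctl & ~mask;
}

int main(void)
{
        unsigned int jobctl = 5 /* last stop signr */ | JOBCTL_STOP_PENDING |
                              JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

        jobctl = clear_pending(jobctl, JOBCTL_PENDING_MASK);
        printf("jobctl after clear: %#x (signr %u preserved)\n",
               jobctl, jobctl & JOBCTL_STOP_SIGMASK);
        return 0;
}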
recalc_sigpending_tsk() is updated to use JOBCTL_PENDING_MASK instead. task_clear_jobctl_pending() takes @mask which in subset of JOBCTL_PENDING_MASK and clears the relevant jobctl bits. If JOBCTL_STOP_PENDING is set, other STOP bits are cleared together. All task_clear_jobctl_stop_pending() users are updated to call task_clear_jobctl_pending() with JOBCTL_STOP_PENDING which is functionally identical to task_clear_jobctl_stop_pending(). This patch doesn't cause any functional change. Signed-off-by: Tejun Heo Signed-off-by: Oleg Nesterov --- fs/exec.c | 2 +- include/linux/sched.h | 5 ++++- kernel/signal.c | 27 +++++++++++++++++---------- 3 files changed, 22 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 8986bb0f9dc2..4402105287cb 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1772,7 +1772,7 @@ static int zap_process(struct task_struct *start, int exit_code) t = start; do { - task_clear_jobctl_stop_pending(t); + task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); if (t != current && t->mm) { sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); diff --git a/include/linux/sched.h b/include/linux/sched.h index b0dd064eb4fc..5a958b17f9fe 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1817,7 +1817,10 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT) #define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT) -extern void task_clear_jobctl_stop_pending(struct task_struct *task); +#define JOBCTL_PENDING_MASK JOBCTL_STOP_PENDING + +extern void task_clear_jobctl_pending(struct task_struct *task, + unsigned int mask); #ifdef CONFIG_PREEMPT_RCU diff --git a/kernel/signal.c b/kernel/signal.c index 62a6c3bb9f0d..288d952fa3b8 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) static int recalc_sigpending_tsk(struct task_struct *t) { - if ((t->jobctl & JOBCTL_STOP_PENDING) || + if ((t->jobctl & JOBCTL_PENDING_MASK) || PENDING(&t->pending, &t->blocked) || PENDING(&t->signal->shared_pending, &t->blocked)) { set_tsk_thread_flag(t, TIF_SIGPENDING); @@ -245,18 +245,25 @@ static void task_clear_jobctl_trapping(struct task_struct *task) } /** - * task_clear_jobctl_stop_pending - clear pending group stop + * task_clear_jobctl_pending - clear jobctl pending bits * @task: target task + * @mask: pending bits to clear * - * Clear group stop states for @task. + * Clear @mask from @task->jobctl. @mask must be subset of + * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other + * STOP bits are cleared together. * * CONTEXT: * Must be called with @task->sighand->siglock held. 
*/ -void task_clear_jobctl_stop_pending(struct task_struct *task) +void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) { - task->jobctl &= ~(JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME | - JOBCTL_STOP_DEQUEUED); + BUG_ON(mask & ~JOBCTL_PENDING_MASK); + + if (mask & JOBCTL_STOP_PENDING) + mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; + + task->jobctl &= ~mask; } /** @@ -282,7 +289,7 @@ static bool task_participate_group_stop(struct task_struct *task) WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); - task_clear_jobctl_stop_pending(task); + task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); if (!consume) return false; @@ -810,7 +817,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending); t = p; do { - task_clear_jobctl_stop_pending(t); + task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); wake_up_state(t, __TASK_STOPPED); } while_each_thread(p, t); @@ -926,7 +933,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) signal->group_stop_count = 0; t = p; do { - task_clear_jobctl_stop_pending(t); + task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); } while_each_thread(p, t); @@ -1161,7 +1168,7 @@ int zap_other_threads(struct task_struct *p) p->signal->group_stop_count = 0; while_each_thread(p, t) { - task_clear_jobctl_stop_pending(t); + task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); count++; /* Don't bother with already dead threads */ -- cgit v1.2.3 From 6dfca32984237a8a011b5bf367e53341a265b2a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 2 Jun 2011 11:14:00 +0200 Subject: job control: make task_clear_jobctl_pending() clear TRAPPING automatically JOBCTL_TRAPPING indicates that the ptracer is waiting for the tracee to (re)transit into TRACED. task_clear_jobctl_trapping() must be called when either the tracee enters TRACED or the transition is cancelled for some reason. The former is achieved by explicitly calling task_clear_jobctl_trapping() in ptrace_stop() and the latter by calling it at the end of do_signal_stop(). Calling task_clear_jobctl_trapping() at the end of do_signal_stop() limits the scope in which TRAPPING can be used and is fragile in that seemingly unrelated changes to the tracee's control flow can lead to a stuck TRAPPING flag. We already have task_clear_jobctl_pending() calls on those cancelling events to clear JOBCTL_STOP_PENDING. Cancellations can be handled by making those call sites use JOBCTL_PENDING_MASK instead and updating task_clear_jobctl_pending() such that task_clear_jobctl_trapping() is called automatically if no stop/trap is pending. This patch makes the above changes and removes the fallback task_clear_jobctl_trapping() call from do_signal_stop().
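As an illustrative aside (a sketch, not text from the patch): after this change a cancelling call site such as the fatal-signal path in complete_signal() takes the following shape, and passing JOBCTL_PENDING_MASK lets task_clear_jobctl_pending() drop JOBCTL_TRAPPING as a side effect once no stop or trap remains pending.

    /* sketch of a cancellation site; siglock is held by the caller */
    task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
    sigaddset(&t->pending.signal, SIGKILL);
    signal_wake_up(t, 1);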
Signed-off-by: Tejun Heo Signed-off-by: Oleg Nesterov --- fs/exec.c | 2 +- kernel/signal.c | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 4402105287cb..a9f2b3631bdb 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1772,7 +1772,7 @@ static int zap_process(struct task_struct *start, int exit_code) t = start; do { - task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); + task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); if (t != current && t->mm) { sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); diff --git a/kernel/signal.c b/kernel/signal.c index 288d952fa3b8..637a171b65b6 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -253,6 +253,9 @@ static void task_clear_jobctl_trapping(struct task_struct *task) * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other * STOP bits are cleared together. * + * If clearing of @mask leaves no stop or trap pending, this function calls + * task_clear_jobctl_trapping(). + * * CONTEXT: * Must be called with @task->sighand->siglock held. */ @@ -264,6 +267,9 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask) mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; task->jobctl &= ~mask; + + if (!(task->jobctl & JOBCTL_PENDING_MASK)) + task_clear_jobctl_trapping(task); } /** @@ -933,7 +939,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) signal->group_stop_count = 0; t = p; do { - task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); + task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); } while_each_thread(p, t); @@ -1168,7 +1174,7 @@ int zap_other_threads(struct task_struct *p) p->signal->group_stop_count = 0; while_each_thread(p, t) { - task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); + task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); count++; /* Don't bother with already dead threads */ @@ -1964,9 +1970,6 @@ retry: goto retry; } - /* PTRACE_ATTACH might have raced with task killing, clear trapping */ - task_clear_jobctl_trapping(current); - spin_unlock_irq(&current->sighand->siglock); tracehook_finish_jctl(); -- cgit v1.2.3 From 5def1360252b974faeb438775c19c14338bc1903 Mon Sep 17 00:00:00 2001 From: Yongqiang Yang Date: Sun, 5 Jun 2011 23:26:40 -0400 Subject: ext4: correct comments for ext4_free_blocks() metadata is no longer a parameter of ext4_free_blocks(). Signed-off-by: Yongqiang Yang Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 859f2ae8864e..eb71dd59f2e4 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4448,7 +4448,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, * @inode: inode * @block: start physical block to free * @count: number of blocks to count - * @metadata: Are these metadata blocks + * @flags: flags used by ext4_free_blocks */ void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, -- cgit v1.2.3 From f17722f917b2f21497deb6edc62fb1683daa08e6 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 6 Jun 2011 00:05:17 -0400 Subject: ext4: Fix max file size and logical block counting of extent format file Kazuya Mio reported that he was able to hit BUG_ON(next == lblock) in ext4_ext_put_gap_in_cache() while creating a sparse file in extent format and filling the tail of the file up to its end.
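For concreteness, the limit change justified in the discussion below can be worked through with a hypothetical 4 KiB (2^12 byte) block size:

    old: s_maxbytes = 2^32 * 2^12 - 1   = 0xFFFFFFFFFFF  (last byte sits in block 2^32 - 1, so an extent
                                                          covering the whole file needs a length of 2^32
                                                          blocks, which does not fit)
    new: s_maxbytes = (2^32 - 1) * 2^12 = 0xFFFFFFFF000  (last byte sits in block 2^32 - 2, so a length
                                                          of 2^32 - 1 blocks suffices and fits)

i.e. the limit is lowered by one filesystem block, as described below.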
We will hit the BUG_ON when we write the last block (2^32-1) into the sparse file. The root cause of the problem lies in the fact that we specifically set s_maxbytes so that block at s_maxbytes fit into on-disk extent format, which is 32 bit long. However, we are not storing start and end block number, but rather start block number and length in blocks. It means that in order to cover extent from 0 to EXT_MAX_BLOCK we need EXT_MAX_BLOCK+1 to fit into len (because we counting block 0 as well) - and it does not. The only way to fix it without changing the meaning of the struct ext4_extent members is, as Kazuya Mio suggested, to lower s_maxbytes by one fs block so we can cover the whole extent we can get by the on-disk extent format. Also in many places EXT_MAX_BLOCK is used as length instead of maximum logical block number as the name suggests, it is all a bit messy. So this commit renames it to EXT_MAX_BLOCKS and change its usage in some places to actually be maximum number of blocks in the extent. The bug which this commit fixes can be reproduced as follows: dd if=/dev/zero of=/mnt/mp1/file bs= count=1 seek=$((2**32-2)) sync dd if=/dev/zero of=/mnt/mp1/file bs= count=1 seek=$((2**32-1)) Reported-by: Kazuya Mio Signed-off-by: Lukas Czerner Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4_extents.h | 7 +++++-- fs/ext4/extents.c | 34 +++++++++++++++++----------------- fs/ext4/move_extent.c | 10 +++++----- fs/ext4/super.c | 15 ++++++++++++--- 4 files changed, 39 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 2e29abb30f76..476414644387 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -133,8 +133,11 @@ typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *, #define EXT_BREAK 1 #define EXT_REPEAT 2 -/* Maximum logical block in a file; ext4_extent's ee_block is __le32 */ -#define EXT_MAX_BLOCK 0xffffffff +/* + * Maximum number of logical blocks in a file; ext4_extent's ee_block is + * __le32. + */ +#define EXT_MAX_BLOCKS 0xffffffff /* * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 5199bac7fc62..41575704600a 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1408,7 +1408,7 @@ got_index: /* * ext4_ext_next_allocated_block: - * returns allocated block in subsequent extent or EXT_MAX_BLOCK. + * returns allocated block in subsequent extent or EXT_MAX_BLOCKS. * NOTE: it considers block number from index entry as * allocated block. Thus, index entries have to be consistent * with leaves. 
@@ -1422,7 +1422,7 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path) depth = path->p_depth; if (depth == 0 && path->p_ext == NULL) - return EXT_MAX_BLOCK; + return EXT_MAX_BLOCKS; while (depth >= 0) { if (depth == path->p_depth) { @@ -1439,12 +1439,12 @@ ext4_ext_next_allocated_block(struct ext4_ext_path *path) depth--; } - return EXT_MAX_BLOCK; + return EXT_MAX_BLOCKS; } /* * ext4_ext_next_leaf_block: - * returns first allocated block from next leaf or EXT_MAX_BLOCK + * returns first allocated block from next leaf or EXT_MAX_BLOCKS */ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode, struct ext4_ext_path *path) @@ -1456,7 +1456,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode, /* zero-tree has no leaf blocks at all */ if (depth == 0) - return EXT_MAX_BLOCK; + return EXT_MAX_BLOCKS; /* go to index block */ depth--; @@ -1469,7 +1469,7 @@ static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode, depth--; } - return EXT_MAX_BLOCK; + return EXT_MAX_BLOCKS; } /* @@ -1677,13 +1677,13 @@ static unsigned int ext4_ext_check_overlap(struct inode *inode, */ if (b2 < b1) { b2 = ext4_ext_next_allocated_block(path); - if (b2 == EXT_MAX_BLOCK) + if (b2 == EXT_MAX_BLOCKS) goto out; } /* check for wrap through zero on extent logical start block*/ if (b1 + len1 < b1) { - len1 = EXT_MAX_BLOCK - b1; + len1 = EXT_MAX_BLOCKS - b1; newext->ee_len = cpu_to_le16(len1); ret = 1; } @@ -1767,7 +1767,7 @@ repeat: fex = EXT_LAST_EXTENT(eh); next = ext4_ext_next_leaf_block(inode, path); if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block) - && next != EXT_MAX_BLOCK) { + && next != EXT_MAX_BLOCKS) { ext_debug("next leaf block - %d\n", next); BUG_ON(npath != NULL); npath = ext4_ext_find_extent(inode, next, NULL); @@ -1887,7 +1887,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, BUG_ON(func == NULL); BUG_ON(inode == NULL); - while (block < last && block != EXT_MAX_BLOCK) { + while (block < last && block != EXT_MAX_BLOCKS) { num = last - block; /* find extent for this block */ down_read(&EXT4_I(inode)->i_data_sem); @@ -2020,7 +2020,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, if (ex == NULL) { /* there is no extent yet, so gap is [0;-] */ lblock = 0; - len = EXT_MAX_BLOCK; + len = EXT_MAX_BLOCKS; ext_debug("cache gap(whole file):"); } else if (block < le32_to_cpu(ex->ee_block)) { lblock = block; @@ -2350,7 +2350,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, * never happen because at least one of the end points * needs to be on the edge of the extent. 
*/ - if (end == EXT_MAX_BLOCK) { + if (end == EXT_MAX_BLOCKS - 1) { ext_debug(" bad truncate %u:%u\n", start, end); block = 0; @@ -2398,7 +2398,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, * If this is a truncate, this condition * should never happen */ - if (end == EXT_MAX_BLOCK) { + if (end == EXT_MAX_BLOCKS - 1) { ext_debug(" bad truncate %u:%u\n", start, end); err = -EIO; @@ -2478,7 +2478,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, * we need to remove it from the leaf */ if (num == 0) { - if (end != EXT_MAX_BLOCK) { + if (end != EXT_MAX_BLOCKS - 1) { /* * For hole punching, we need to scoot all the * extents up when an extent is removed so that @@ -3699,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode) last_block = (inode->i_size + sb->s_blocksize - 1) >> EXT4_BLOCK_SIZE_BITS(sb); - err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK); + err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1); /* In a multi-transaction truncate, we only make the final * transaction synchronous. @@ -4347,8 +4347,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, start_blk = start >> inode->i_sb->s_blocksize_bits; last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits; - if (last_blk >= EXT_MAX_BLOCK) - last_blk = EXT_MAX_BLOCK-1; + if (last_blk >= EXT_MAX_BLOCKS) + last_blk = EXT_MAX_BLOCKS-1; len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1; /* diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 2b8304bf3c50..f57455a1b1b2 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -1002,12 +1002,12 @@ mext_check_arguments(struct inode *orig_inode, return -EINVAL; } - if ((orig_start > EXT_MAX_BLOCK) || - (donor_start > EXT_MAX_BLOCK) || - (*len > EXT_MAX_BLOCK) || - (orig_start + *len > EXT_MAX_BLOCK)) { + if ((orig_start >= EXT_MAX_BLOCKS) || + (donor_start >= EXT_MAX_BLOCKS) || + (*len > EXT_MAX_BLOCKS) || + (orig_start + *len >= EXT_MAX_BLOCKS)) { ext4_debug("ext4 move extent: Can't handle over [%u] blocks " - "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCK, + "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS, orig_inode->i_ino, donor_inode->i_ino); return -EINVAL; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index cc5c157aa11d..9ea71aa864b3 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2243,6 +2243,12 @@ static void ext4_orphan_cleanup(struct super_block *sb, * in the vfs. ext4 inode has 48 bits of i_block in fsblock units, * so that won't be a limiting factor. * + * However there is other limiting factor. We do store extents in the form + * of starting block and length, hence the resulting length of the extent + * covering maximum file size must fit into on-disk format containers as + * well. Given that length is always by 1 unit bigger than max unit (because + * we count 0 as well) we have to lower the s_maxbytes by one fs block. + * * Note, this does *not* consider any metadata overhead for vfs i_blocks. */ static loff_t ext4_max_size(int blkbits, int has_huge_files) @@ -2264,10 +2270,13 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files) upper_limit <<= blkbits; } - /* 32-bit extent-start container, ee_block */ - res = 1LL << 32; + /* + * 32-bit extent-start container, ee_block. 
We lower the maxbytes + * by one fs block, so ee_len can cover the extent of maximum file + * size + */ + res = (1LL << 32) - 1; res <<= blkbits; - res -= 1; /* Sanity check against vm- & vfs- imposed limits */ if (res > upper_limit) -- cgit v1.2.3 From c03f8aa9abdd517477c2021ea1251939b4da49e6 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 6 Jun 2011 00:06:52 -0400 Subject: ext4: use FIEMAP_EXTENT_LAST flag for last extent in fiemap Currently we are not marking the extent as the last one (FIEMAP_EXTENT_LAST) if there is a hole at the end of the file. This is because we just do not check for it right now and continue searching for next extent. But at the point we hit the hole at the end of the file, it is too late. This commit adds check for the allocated block in subsequent extent and if there is no more extents (block = EXT_MAX_BLOCKS) just flag the current one as the last one. This behaviour has been spotted unintentionally by 252 xfstest, when the test hangs out, because of wrong loop condition. However on other filesystems (like xfs) it will exit anyway, because we notice the last extent flag and exit. With this patch xfstest 252 does not hang anymore, ext4 fiemap implementation still reports bad extent type in some cases, however this seems to be different issue. Signed-off-by: Lukas Czerner Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4_extents.h | 2 +- fs/ext4/extents.c | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 476414644387..095c36f3b612 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -125,7 +125,7 @@ struct ext4_ext_path { * positive retcode - signal for ext4_ext_walk_space(), see below * callback must return valid extent (passed or newly created) */ -typedef int (*ext_prepare_callback)(struct inode *, struct ext4_ext_path *, +typedef int (*ext_prepare_callback)(struct inode *, ext4_lblk_t, struct ext4_ext_cache *, struct ext4_extent *, void *); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 41575704600a..f815cc81e7a2 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1958,7 +1958,7 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, err = -EIO; break; } - err = func(inode, path, &cbex, ex, cbdata); + err = func(inode, next, &cbex, ex, cbdata); ext4_ext_drop_refs(path); if (err < 0) @@ -3914,14 +3914,13 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, /* * Callback function called for each extent to gather FIEMAP information. 
*/ -static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, +static int ext4_ext_fiemap_cb(struct inode *inode, ext4_lblk_t next, struct ext4_ext_cache *newex, struct ext4_extent *ex, void *data) { __u64 logical; __u64 physical; __u64 length; - loff_t size; __u32 flags = 0; int ret = 0; struct fiemap_extent_info *fieinfo = data; @@ -4103,8 +4102,7 @@ found_delayed_extent: if (ex && ext4_ext_is_uninitialized(ex)) flags |= FIEMAP_EXTENT_UNWRITTEN; - size = i_size_read(inode); - if (logical + length >= size) + if (next == EXT_MAX_BLOCKS) flags |= FIEMAP_EXTENT_LAST; ret = fiemap_fill_next_extent(fieinfo, logical, physical, -- cgit v1.2.3 From a9c667f8f0656631ee5438baaf21bf30d5f67375 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 6 Jun 2011 09:51:52 -0400 Subject: ext4: fixed tracepoints cleanup While creating fixed tracepoints for ext3, basically by porting them from ext4, I found a lot of useless retyping, wrong type usage, useless variable passing and other inconsistencies in the ext4 fixed tracepoint code. This patch cleans the fixed tracepoint code for ext4 and also simplify some of them. Signed-off-by: Lukas Czerner Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 2 +- fs/ext4/mballoc.c | 6 +- include/trace/events/ext4.h | 179 +++++++++++++++++++------------------------- 3 files changed, 80 insertions(+), 107 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index a5763e3505ba..e3126c051006 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -2634,7 +2634,7 @@ static int ext4_writepage(struct page *page, struct buffer_head *page_bufs = NULL; struct inode *inode = page->mapping->host; - trace_ext4_writepage(inode, page); + trace_ext4_writepage(page); size = i_size_read(inode); if (page->index == size >> PAGE_CACHE_SHIFT) len = size & ~PAGE_CACHE_MASK; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index eb71dd59f2e4..6ed859d56850 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3578,8 +3578,8 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, free += next - bit; trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); - trace_ext4_mb_release_inode_pa(sb, pa->pa_inode, pa, - grp_blk_start + bit, next - bit); + trace_ext4_mb_release_inode_pa(pa, grp_blk_start + bit, + next - bit); mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); bit = next + 1; } @@ -3608,7 +3608,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b, ext4_group_t group; ext4_grpblk_t bit; - trace_ext4_mb_release_group_pa(sb, pa); + trace_ext4_mb_release_group_pa(pa); BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); BUG_ON(group != e4b->bd_group && pa->pa_len != 0); diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index e09592d2f916..5ce2b2f5f524 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -26,7 +26,7 @@ TRACE_EVENT(ext4_free_inode, __field( umode_t, mode ) __field( uid_t, uid ) __field( gid_t, gid ) - __field( blkcnt_t, blocks ) + __field( __u64, blocks ) ), TP_fast_assign( @@ -40,9 +40,8 @@ TRACE_EVENT(ext4_free_inode, TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - __entry->mode, __entry->uid, __entry->gid, - (unsigned long long) __entry->blocks) + (unsigned long) __entry->ino, __entry->mode, + __entry->uid, __entry->gid, __entry->blocks) ); TRACE_EVENT(ext4_request_inode, @@ -178,7 +177,7 @@ 
TRACE_EVENT(ext4_begin_ordered_truncate, TP_printk("dev %d,%d ino %lu new_size %lld", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (long long) __entry->new_size) + __entry->new_size) ); DECLARE_EVENT_CLASS(ext4__write_begin, @@ -204,7 +203,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin, __entry->flags = flags; ), - TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", + TP_printk("dev %d,%d ino %lu pos %lld len %u flags %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->pos, __entry->len, __entry->flags) @@ -248,7 +247,7 @@ DECLARE_EVENT_CLASS(ext4__write_end, __entry->copied = copied; ), - TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", + TP_printk("dev %d,%d ino %lu pos %lld len %u copied %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->pos, __entry->len, __entry->copied) @@ -286,29 +285,6 @@ DEFINE_EVENT(ext4__write_end, ext4_da_write_end, TP_ARGS(inode, pos, len, copied) ); -TRACE_EVENT(ext4_writepage, - TP_PROTO(struct inode *inode, struct page *page), - - TP_ARGS(inode, page), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( ino_t, ino ) - __field( pgoff_t, index ) - - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->index = page->index; - ), - - TP_printk("dev %d,%d ino %lu page_index %lu", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, __entry->index) -); - TRACE_EVENT(ext4_da_writepages, TP_PROTO(struct inode *inode, struct writeback_control *wbc), @@ -341,7 +317,7 @@ TRACE_EVENT(ext4_da_writepages, ), TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " - "range_start %llu range_end %llu sync_mode %d" + "range_start %lld range_end %lld sync_mode %d" "for_kupdate %d range_cyclic %d writeback_index %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->nr_to_write, @@ -449,7 +425,14 @@ DECLARE_EVENT_CLASS(ext4__page_op, TP_printk("dev %d,%d ino %lu page_index %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->index) + (unsigned long) __entry->index) +); + +DEFINE_EVENT(ext4__page_op, ext4_writepage, + + TP_PROTO(struct page *page), + + TP_ARGS(page) ); DEFINE_EVENT(ext4__page_op, ext4_readpage, @@ -489,7 +472,7 @@ TRACE_EVENT(ext4_invalidatepage, TP_printk("dev %d,%d ino %lu page_index %lu offset %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->index, __entry->offset) + (unsigned long) __entry->index, __entry->offset) ); TRACE_EVENT(ext4_discard_blocks, @@ -562,12 +545,10 @@ DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa, ); TRACE_EVENT(ext4_mb_release_inode_pa, - TP_PROTO(struct super_block *sb, - struct inode *inode, - struct ext4_prealloc_space *pa, + TP_PROTO(struct ext4_prealloc_space *pa, unsigned long long block, unsigned int count), - TP_ARGS(sb, inode, pa, block, count), + TP_ARGS(pa, block, count), TP_STRUCT__entry( __field( dev_t, dev ) @@ -578,8 +559,8 @@ TRACE_EVENT(ext4_mb_release_inode_pa, ), TP_fast_assign( - __entry->dev = sb->s_dev; - __entry->ino = inode->i_ino; + __entry->dev = pa->pa_inode->i_sb->s_dev; + __entry->ino = pa->pa_inode->i_ino; __entry->block = block; __entry->count = count; ), @@ -591,10 +572,9 @@ TRACE_EVENT(ext4_mb_release_inode_pa, ); TRACE_EVENT(ext4_mb_release_group_pa, - TP_PROTO(struct super_block *sb, - struct ext4_prealloc_space *pa), + TP_PROTO(struct ext4_prealloc_space *pa), - TP_ARGS(sb, pa), + TP_ARGS(pa), 
TP_STRUCT__entry( __field( dev_t, dev ) @@ -604,7 +584,7 @@ TRACE_EVENT(ext4_mb_release_group_pa, ), TP_fast_assign( - __entry->dev = sb->s_dev; + __entry->dev = pa->pa_inode->i_sb->s_dev; __entry->pa_pstart = pa->pa_pstart; __entry->pa_len = pa->pa_len; ), @@ -666,10 +646,10 @@ TRACE_EVENT(ext4_request_blocks, __field( ino_t, ino ) __field( unsigned int, flags ) __field( unsigned int, len ) - __field( __u64, logical ) + __field( __u32, logical ) + __field( __u32, lleft ) + __field( __u32, lright ) __field( __u64, goal ) - __field( __u64, lleft ) - __field( __u64, lright ) __field( __u64, pleft ) __field( __u64, pright ) ), @@ -687,17 +667,13 @@ TRACE_EVENT(ext4_request_blocks, __entry->pright = ar->pright; ), - TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu " - "lleft %llu lright %llu pleft %llu pright %llu ", + TP_printk("dev %d,%d ino %lu flags %u len %u lblk %u goal %llu " + "lleft %u lright %u pleft %llu pright %llu ", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - __entry->flags, __entry->len, - (unsigned long long) __entry->logical, - (unsigned long long) __entry->goal, - (unsigned long long) __entry->lleft, - (unsigned long long) __entry->lright, - (unsigned long long) __entry->pleft, - (unsigned long long) __entry->pright) + (unsigned long) __entry->ino, __entry->flags, + __entry->len, __entry->logical, __entry->goal, + __entry->lleft, __entry->lright, __entry->pleft, + __entry->pright) ); TRACE_EVENT(ext4_allocate_blocks, @@ -711,10 +687,10 @@ TRACE_EVENT(ext4_allocate_blocks, __field( __u64, block ) __field( unsigned int, flags ) __field( unsigned int, len ) - __field( __u64, logical ) + __field( __u32, logical ) + __field( __u32, lleft ) + __field( __u32, lright ) __field( __u64, goal ) - __field( __u64, lleft ) - __field( __u64, lright ) __field( __u64, pleft ) __field( __u64, pright ) ), @@ -733,17 +709,13 @@ TRACE_EVENT(ext4_allocate_blocks, __entry->pright = ar->pright; ), - TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu " - "goal %llu lleft %llu lright %llu pleft %llu pright %llu", + TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %u " + "goal %llu lleft %u lright %u pleft %llu pright %llu", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - __entry->flags, __entry->len, __entry->block, - (unsigned long long) __entry->logical, - (unsigned long long) __entry->goal, - (unsigned long long) __entry->lleft, - (unsigned long long) __entry->lright, - (unsigned long long) __entry->pleft, - (unsigned long long) __entry->pright) + (unsigned long) __entry->ino, __entry->flags, + __entry->len, __entry->block, __entry->logical, + __entry->goal, __entry->lleft, __entry->lright, + __entry->pleft, __entry->pright) ); TRACE_EVENT(ext4_free_blocks, @@ -755,10 +727,10 @@ TRACE_EVENT(ext4_free_blocks, TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) - __field( umode_t, mode ) + __field( umode_t, mode ) __field( __u64, block ) __field( unsigned long, count ) - __field( int, flags ) + __field( int, flags ) ), TP_fast_assign( @@ -798,7 +770,7 @@ TRACE_EVENT(ext4_sync_file_enter, __entry->parent = dentry->d_parent->d_inode->i_ino; ), - TP_printk("dev %d,%d ino %ld parent %ld datasync %d ", + TP_printk("dev %d,%d ino %lu parent %lu datasync %d ", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned long) __entry->parent, __entry->datasync) @@ -821,7 +793,7 @@ TRACE_EVENT(ext4_sync_file_exit, __entry->dev = inode->i_sb->s_dev; ), - TP_printk("dev %d,%d 
ino %ld ret %d", + TP_printk("dev %d,%d ino %lu ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->ret) @@ -1005,7 +977,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc, __entry->result_len = len; ), - TP_printk("dev %d,%d inode %lu extent %u/%d/%u ", + TP_printk("dev %d,%d inode %lu extent %u/%d/%d ", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->result_group, __entry->result_start, @@ -1093,7 +1065,7 @@ TRACE_EVENT(ext4_da_update_reserve_space, "allocated_meta_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->mode, (unsigned long long) __entry->i_blocks, + __entry->mode, __entry->i_blocks, __entry->used_blocks, __entry->reserved_data_blocks, __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) ); @@ -1127,7 +1099,7 @@ TRACE_EVENT(ext4_da_reserve_space, "reserved_data_blocks %d reserved_meta_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->mode, (unsigned long long) __entry->i_blocks, + __entry->mode, __entry->i_blocks, __entry->md_needed, __entry->reserved_data_blocks, __entry->reserved_meta_blocks) ); @@ -1164,7 +1136,7 @@ TRACE_EVENT(ext4_da_release_space, "allocated_meta_blocks %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - __entry->mode, (unsigned long long) __entry->i_blocks, + __entry->mode, __entry->i_blocks, __entry->freed_blocks, __entry->reserved_data_blocks, __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) ); @@ -1239,14 +1211,15 @@ TRACE_EVENT(ext4_direct_IO_enter, __entry->rw = rw; ), - TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d", + TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (unsigned long long) __entry->pos, __entry->len, __entry->rw) + __entry->pos, __entry->len, __entry->rw) ); TRACE_EVENT(ext4_direct_IO_exit, - TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret), + TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, + int rw, int ret), TP_ARGS(inode, offset, len, rw, ret), @@ -1268,10 +1241,10 @@ TRACE_EVENT(ext4_direct_IO_exit, __entry->ret = ret; ), - TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d", + TP_printk("dev %d,%d ino %lu pos %lld len %lu rw %d ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (unsigned long long) __entry->pos, __entry->len, + __entry->pos, __entry->len, __entry->rw, __entry->ret) ); @@ -1296,15 +1269,15 @@ TRACE_EVENT(ext4_fallocate_enter, __entry->mode = mode; ), - TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d", + TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %d", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, - (unsigned long long) __entry->pos, - (unsigned long long) __entry->len, __entry->mode) + (unsigned long) __entry->ino, __entry->pos, + __entry->len, __entry->mode) ); TRACE_EVENT(ext4_fallocate_exit, - TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret), + TP_PROTO(struct inode *inode, loff_t offset, + unsigned int max_blocks, int ret), TP_ARGS(inode, offset, max_blocks, ret), @@ -1312,7 +1285,7 @@ TRACE_EVENT(ext4_fallocate_exit, __field( ino_t, ino ) __field( dev_t, dev ) __field( loff_t, pos ) - __field( unsigned, blocks ) + __field( unsigned int, blocks ) __field( int, ret ) ), @@ -1324,10 +1297,10 @@ TRACE_EVENT(ext4_fallocate_exit, __entry->ret = ret; ), - TP_printk("dev %d,%d 
ino %ld pos %llu blocks %d ret %d", + TP_printk("dev %d,%d ino %lu pos %lld blocks %u ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (unsigned long long) __entry->pos, __entry->blocks, + __entry->pos, __entry->blocks, __entry->ret) ); @@ -1350,7 +1323,7 @@ TRACE_EVENT(ext4_unlink_enter, __entry->dev = dentry->d_inode->i_sb->s_dev; ), - TP_printk("dev %d,%d ino %ld size %lld parent %ld", + TP_printk("dev %d,%d ino %lu size %lld parent %lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->size, (unsigned long) __entry->parent) @@ -1373,7 +1346,7 @@ TRACE_EVENT(ext4_unlink_exit, __entry->ret = ret; ), - TP_printk("dev %d,%d ino %ld ret %d", + TP_printk("dev %d,%d ino %lu ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, __entry->ret) @@ -1387,7 +1360,7 @@ DECLARE_EVENT_CLASS(ext4__truncate, TP_STRUCT__entry( __field( ino_t, ino ) __field( dev_t, dev ) - __field( blkcnt_t, blocks ) + __field( __u64, blocks ) ), TP_fast_assign( @@ -1396,9 +1369,9 @@ DECLARE_EVENT_CLASS(ext4__truncate, __entry->blocks = inode->i_blocks; ), - TP_printk("dev %d,%d ino %lu blocks %lu", + TP_printk("dev %d,%d ino %lu blocks %llu", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, (unsigned long) __entry->blocks) + (unsigned long) __entry->ino, __entry->blocks) ); DEFINE_EVENT(ext4__truncate, ext4_truncate_enter, @@ -1417,7 +1390,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit, DECLARE_EVENT_CLASS(ext4__map_blocks_enter, TP_PROTO(struct inode *inode, ext4_lblk_t lblk, - unsigned len, unsigned flags), + unsigned int len, unsigned int flags), TP_ARGS(inode, lblk, len, flags), @@ -1425,8 +1398,8 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter, __field( ino_t, ino ) __field( dev_t, dev ) __field( ext4_lblk_t, lblk ) - __field( unsigned, len ) - __field( unsigned, flags ) + __field( unsigned int, len ) + __field( unsigned int, flags ) ), TP_fast_assign( @@ -1440,7 +1413,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_enter, TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (unsigned) __entry->lblk, __entry->len, __entry->flags) + __entry->lblk, __entry->len, __entry->flags) ); DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter, @@ -1459,7 +1432,7 @@ DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter, DECLARE_EVENT_CLASS(ext4__map_blocks_exit, TP_PROTO(struct inode *inode, ext4_lblk_t lblk, - ext4_fsblk_t pblk, unsigned len, int ret), + ext4_fsblk_t pblk, unsigned int len, int ret), TP_ARGS(inode, lblk, pblk, len, ret), @@ -1468,7 +1441,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit, __field( dev_t, dev ) __field( ext4_lblk_t, lblk ) __field( ext4_fsblk_t, pblk ) - __field( unsigned, len ) + __field( unsigned int, len ) __field( int, ret ) ), @@ -1484,7 +1457,7 @@ DECLARE_EVENT_CLASS(ext4__map_blocks_exit, TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (unsigned) __entry->lblk, (unsigned long long) __entry->pblk, + __entry->lblk, __entry->pblk, __entry->len, __entry->ret) ); @@ -1524,7 +1497,7 @@ TRACE_EVENT(ext4_ext_load_extent, TP_printk("dev %d,%d ino %lu lblk %u pblk %llu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, - (unsigned) __entry->lblk, (unsigned long long) __entry->pblk) + __entry->lblk, __entry->pblk) ); TRACE_EVENT(ext4_load_inode, -- cgit v1.2.3 From 
957df4535d06a8e009101239937ca5e50a6218c6 Mon Sep 17 00:00:00 2001 From: Vasily Averin Date: Mon, 6 Jun 2011 11:33:12 +0400 Subject: possible memory corruption in cifs_parse_mount_options() The error path taken after the mountdata check frees an uninitialized mountdata_copy. Signed-off-by: Vasily Averin Signed-off-by: Steve French --- fs/cifs/connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 84c730701c09..fb31c2c1bb17 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -784,7 +784,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, struct smb_vol *vol) { char *value, *data, *end; - char *mountdata_copy, *options; + char *mountdata_copy = NULL, *options; unsigned int temp_len, i, j; char separator[2]; short int override_uid = -1; -- cgit v1.2.3 From 243e2dd38e1b322b29a1714034cc60b84d3d5e07 Mon Sep 17 00:00:00 2001 From: Darren Salt Date: Mon, 6 Jun 2011 16:58:16 +0000 Subject: CIFS ACL support needs CONFIG_KEYS, so depend on it Build fails if CONFIG_KEYS is not selected. Signed-off-by: Darren Salt Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 66c68ab9e7df..53ed1ad2c112 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -149,7 +149,7 @@ config CIFS_FSCACHE config CIFS_ACL bool "Provide CIFS ACL support (EXPERIMENTAL)" - depends on EXPERIMENTAL && CIFS_XATTR + depends on EXPERIMENTAL && CIFS_XATTR && KEYS help Allows to fetch CIFS/NTFS ACL from the server. The DACL blob is handed over to the application/caller. -- cgit v1.2.3 From b084f598df36b62dfae83c10ed17f0b66b50f442 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 31 May 2011 12:24:58 -0400 Subject: nfsd: fix dependency of nfsd on auth_rpcgss Commit b0b0c0a26e84 "nfsd: add proc file listing kernel's gss_krb5 enctypes" added an unnecessary dependency of nfsd on the auth_rpcgss module. It's a little ad hoc, but since the only piece of information nfsd needs from rpcsec_gss_krb5 is a single static string, one solution is just to share it with an include file. Cc: stable@kernel.org Reported-by: Michael Guntsche Cc: Kevin Coffman Signed-off-by: J.
Bruce Fields --- fs/nfsd/nfsctl.c | 19 ++++++------------- include/linux/sunrpc/gss_krb5_enctypes.h | 4 ++++ net/sunrpc/auth_gss/gss_krb5_mech.c | 3 ++- 3 files changed, 12 insertions(+), 14 deletions(-) create mode 100644 include/linux/sunrpc/gss_krb5_enctypes.h (limited to 'fs') diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 1f5eae40f34e..2b1449dd2f49 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "idmap.h" #include "nfsd.h" @@ -189,18 +190,10 @@ static struct file_operations export_features_operations = { .release = single_release, }; -#ifdef CONFIG_SUNRPC_GSS +#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE) static int supported_enctypes_show(struct seq_file *m, void *v) { - struct gss_api_mech *k5mech; - - k5mech = gss_mech_get_by_name("krb5"); - if (k5mech == NULL) - goto out; - if (k5mech->gm_upcall_enctypes != NULL) - seq_printf(m, k5mech->gm_upcall_enctypes); - gss_mech_put(k5mech); -out: + seq_printf(m, KRB5_SUPPORTED_ENCTYPES); return 0; } @@ -215,7 +208,7 @@ static struct file_operations supported_enctypes_ops = { .llseek = seq_lseek, .release = single_release, }; -#endif /* CONFIG_SUNRPC_GSS */ +#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */ extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); @@ -1427,9 +1420,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent) [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, -#ifdef CONFIG_SUNRPC_GSS +#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE) [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, -#endif /* CONFIG_SUNRPC_GSS */ +#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */ #ifdef CONFIG_NFSD_V4 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR}, diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h new file mode 100644 index 000000000000..ec6234eee89c --- /dev/null +++ b/include/linux/sunrpc/gss_krb5_enctypes.h @@ -0,0 +1,4 @@ +/* + * Dumb way to share this static piece of information with nfsd + */ +#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 0a9a2ec2e469..c3b75333b821 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -43,6 +43,7 @@ #include #include #include +#include #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_AUTH @@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = { .gm_ops = &gss_kerberos_ops, .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), .gm_pfs = gss_kerberos_pfs, - .gm_upcall_enctypes = "18,17,16,23,3,1,2", + .gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES, }; static int __init init_kerberos_module(void) -- cgit v1.2.3 From be1f4084b4824301e640e81d63b6275cd99ee6a1 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 6 Jun 2011 11:22:17 -0700 Subject: nfsd: v4 support requires CRYPTO nfsd V4 support uses crypto interfaces, so select CRYPTO to fix build errors in 2.6.39: ERROR: "crypto_destroy_tfm" [fs/nfsd/nfsd.ko] undefined! ERROR: "crypto_alloc_base" [fs/nfsd/nfsd.ko] undefined! 
Reported-by: Wakko Warner Signed-off-by: Randy Dunlap Cc: stable@kernel.org Signed-off-by: J. Bruce Fields --- fs/nfsd/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 18b3e8975fe0..fbb2a5ef5817 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -82,6 +82,7 @@ config NFSD_V4 select NFSD_V3 select FS_POSIX_ACL select SUNRPC_GSS + select CRYPTO help This option enables support in your system's NFS server for version 4 of the NFS protocol (RFC 3530). -- cgit v1.2.3 From 7d751f6f8c679f51b73d01a1b5269347a929004c Mon Sep 17 00:00:00 2001 From: Casey Bodley Date: Fri, 3 Jun 2011 12:21:23 -0400 Subject: nfsd: link returns nfserr_delay when breaking lease Fix for commit 4795bb37effb7b8fe77e2d2034545d062d3788a8, "nfsd: break lease on unlink, link, and rename": if the LINK operation breaks a delegation, it returns NFS4ERR_NOENT (which is not a valid error in RFC 5661) instead of NFS4ERR_DELAY. The return value of nfsd_break_lease() in nfsd_link() must be converted from host_err to err. Signed-off-by: Casey Bodley Cc: stable@kernel.org Signed-off-by: J. Bruce Fields --- fs/nfsd/vfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index d5718273bb32..f3fb61b69a1e 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1660,8 +1660,10 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, if (!dold->d_inode) goto out_drop_write; host_err = nfsd_break_lease(dold->d_inode); - if (host_err) + if (host_err) { + err = nfserrno(host_err); goto out_drop_write; + } host_err = vfs_link(dold, dirp, dnew); if (!host_err) { err = nfserrno(commit_metadata(ffhp)); -- cgit v1.2.3 From 9c4843ea576107a3c1fb94f2f758f198e9fe9e54 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 6 Jun 2011 15:40:23 -0400 Subject: cifs: silence printk when establishing first session on socket When signing is enabled, the first session that's established on a socket will cause a printk like this to pop: CIFS VFS: Unexpected SMB signature This is because the key exchange hasn't happened yet, so the signature field is bogus. Don't try to check the signature on the socket until the first session has been established. Also, eliminate the specific check for SMB_COM_NEGOTIATE since this check covers that case too. Cc: stable@kernel.org Cc: Shirish Pargaonkar Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsencrypt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index dfbd9f1f373d..5a0ee7f2af06 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -184,7 +184,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, if (cifs_pdu == NULL || server == NULL) return -EINVAL; - if (cifs_pdu->Command == SMB_COM_NEGOTIATE) + if (!server->session_estab) return 0; if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) { -- cgit v1.2.3 From 9054760ff585a7fa436599990b63a585ae89ff4d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 5 Jun 2011 19:22:56 +0100 Subject: lmLogOpen() broken failure exit Callers of lmLogOpen() expect it to return -E... on failure exits, which is what it returns, except for the case of blkdev_get_by_dev() failure. In that case lmLogOpen() returns the error with the wrong sign...
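The shape of the bug, as a minimal sketch rather than a quote of the jfs code: PTR_ERR() on an ERR_PTR value already yields a negative errno, so negating it hands callers a positive number they will not treat as an error.

    bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, log);
    if (IS_ERR(bdev)) {
            rc = PTR_ERR(bdev);     /* correct: already -Exxx */
            /* the old rc = -PTR_ERR(bdev) flipped the sign */
            goto free;
    }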
Signed-off-by: Al Viro Acked-by: Dave Kleikamp --- fs/jfs/jfs_logmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 278e3fb40b71..583636f745e5 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1123,7 +1123,7 @@ int lmLogOpen(struct super_block *sb) bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, log); if (IS_ERR(bdev)) { - rc = -PTR_ERR(bdev); + rc = PTR_ERR(bdev); goto free; } -- cgit v1.2.3 From e6bc45d65df8599fdbae73be9cec4ceed274db53 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 6 Jun 2011 19:19:40 -0400 Subject: vfs: make unlink() and rmdir() return ENOENT in preference to EROFS If user space attempts to remove a non-existent file or directory, and the file system is mounted read-only, return ENOENT instead of EROFS. Either error code is arguably valid/correct, but ENOENT is a more specific error message. Reported-by: Michael Tokarev Signed-off-by: "Theodore Ts'o" Signed-off-by: Al Viro --- fs/namei.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index e2e4e8d032ee..9802345df5e7 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2624,6 +2624,10 @@ static long do_rmdir(int dfd, const char __user *pathname) error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto exit2; + if (!dentry->d_inode) { + error = -ENOENT; + goto exit3; + } error = mnt_want_write(nd.path.mnt); if (error) goto exit3; @@ -2709,11 +2713,10 @@ static long do_unlinkat(int dfd, const char __user *pathname) error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { /* Why not before? Because we want correct error value */ - if (nd.last.name[nd.last.len]) - goto slashes; inode = dentry->d_inode; - if (inode) - ihold(inode); + if (nd.last.name[nd.last.len] || !inode) + goto slashes; + ihold(inode); error = mnt_want_write(nd.path.mnt); if (error) goto exit2; -- cgit v1.2.3 From 70b666c3b4cb2b96098d80e6f515e4bc6d37db5a Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 27 May 2011 09:24:26 -0700 Subject: ceph: use ihold when we already have an inode ref We should use ihold whenever we already have a stable inode ref, even when we aren't holding i_lock. This avoids adding new and unnecessary locking dependencies. 
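As a rough sketch of the rule being applied (only the properties this patch relies on are assumed): igrab() takes the inode lock and may return NULL if the inode is being torn down, whereas ihold() simply bumps the reference count, which is all that is needed when the caller already holds a stable reference.

    /* before: conditional grab that takes locks internally */
    req->r_inode = igrab(inode);

    /* after: the caller already owns a reference, just pin another one */
    req->r_inode = inode;
    ihold(inode);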
Signed-off-by: Sage Weil --- fs/ceph/addr.c | 2 +- fs/ceph/caps.c | 10 ++++------ fs/ceph/dir.c | 11 +++++++---- fs/ceph/export.c | 4 ++-- fs/ceph/file.c | 3 ++- fs/ceph/inode.c | 18 ++++++++++-------- fs/ceph/ioctl.c | 6 ++++-- fs/ceph/locks.c | 3 ++- fs/ceph/snap.c | 2 +- fs/ceph/xattr.c | 6 ++++-- 10 files changed, 37 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 33da49dc3cc6..5a3953db8118 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -453,7 +453,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc) int err; struct inode *inode = page->mapping->host; BUG_ON(!inode); - igrab(inode); + ihold(inode); err = writepage_nounlock(page, wbc); unlock_page(page); iput(inode); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 1f72b00447c4..f605753c8fe9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2940,14 +2940,12 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) while (!list_empty(&mdsc->cap_dirty)) { ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info, i_dirty_item); - inode = igrab(&ci->vfs_inode); + inode = &ci->vfs_inode; + ihold(inode); dout("flush_dirty_caps %p\n", inode); spin_unlock(&mdsc->cap_dirty_lock); - if (inode) { - ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, - NULL); - iput(inode); - } + ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL); + iput(inode); spin_lock(&mdsc->cap_dirty_lock); } spin_unlock(&mdsc->cap_dirty_lock); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 33729e822bb9..ef8f08c343e8 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -308,7 +308,8 @@ more: req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_dentry = dget(filp->f_dentry); /* hints to request -> mds selection code */ req->r_direct_mode = USE_AUTH_MDS; @@ -787,10 +788,12 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir, req->r_dentry_drop = CEPH_CAP_FILE_SHARED; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; err = ceph_mdsc_do_request(mdsc, dir, req); - if (err) + if (err) { d_drop(dentry); - else if (!req->r_reply_info.head->is_dentry) - d_instantiate(dentry, igrab(old_dentry->d_inode)); + } else if (!req->r_reply_info.head->is_dentry) { + ihold(old_dentry->d_inode); + d_instantiate(dentry, old_dentry->d_inode); + } ceph_mdsc_put_request(req); return err; } diff --git a/fs/ceph/export.c b/fs/ceph/export.c index a610d3d67488..f67b687550de 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -109,7 +109,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, err = ceph_mdsc_do_request(mdsc, NULL, req); inode = req->r_target_inode; if (inode) - igrab(inode); + ihold(inode); ceph_mdsc_put_request(req); if (!inode) return ERR_PTR(-ESTALE); @@ -167,7 +167,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb, err = ceph_mdsc_do_request(mdsc, NULL, req); inode = req->r_target_inode; if (inode) - igrab(inode); + ihold(inode); ceph_mdsc_put_request(req); if (!inode) return ERR_PTR(err ? 
err : -ESTALE); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 203252d88d9f..8c5ac4e72832 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -191,7 +191,8 @@ int ceph_open(struct inode *inode, struct file *file) err = PTR_ERR(req); goto out; } - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_num_caps = 1; err = ceph_mdsc_do_request(mdsc, parent_inode, req); if (!err) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 70b6a4839c38..d8858e96ab18 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1101,10 +1101,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, goto done; } req->r_dentry = dn; /* may have spliced */ - igrab(in); + ihold(in); } else if (ceph_ino(in) == vino.ino && ceph_snap(in) == vino.snap) { - igrab(in); + ihold(in); } else { dout(" %p links to %p %llx.%llx, not %llx.%llx\n", dn, in, ceph_ino(in), ceph_snap(in), @@ -1144,7 +1144,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, goto done; } req->r_dentry = dn; /* may have spliced */ - igrab(in); + ihold(in); rinfo->head->is_dentry = 1; /* fool notrace handlers */ } @@ -1328,7 +1328,7 @@ void ceph_queue_writeback(struct inode *inode) if (queue_work(ceph_inode_to_client(inode)->wb_wq, &ceph_inode(inode)->i_wb_work)) { dout("ceph_queue_writeback %p\n", inode); - igrab(inode); + ihold(inode); } else { dout("ceph_queue_writeback %p failed\n", inode); } @@ -1353,7 +1353,7 @@ void ceph_queue_invalidate(struct inode *inode) if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, &ceph_inode(inode)->i_pg_inv_work)) { dout("ceph_queue_invalidate %p\n", inode); - igrab(inode); + ihold(inode); } else { dout("ceph_queue_invalidate %p failed\n", inode); } @@ -1477,7 +1477,7 @@ void ceph_queue_vmtruncate(struct inode *inode) if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, &ci->i_vmtruncate_work)) { dout("ceph_queue_vmtruncate %p\n", inode); - igrab(inode); + ihold(inode); } else { dout("ceph_queue_vmtruncate %p failed, pending=%d\n", inode, ci->i_truncate_pending); @@ -1738,7 +1738,8 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) __mark_inode_dirty(inode, inode_dirty_flags); if (mask) { - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_inode_drop = release; req->r_args.setattr.mask = cpu_to_le32(mask); req->r_num_caps = 1; @@ -1779,7 +1780,8 @@ int ceph_do_getattr(struct inode *inode, int mask) req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); if (IS_ERR(req)) return PTR_ERR(req); - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_num_caps = 1; req->r_args.getattr.mask = cpu_to_le32(mask); err = ceph_mdsc_do_request(mdsc, NULL, req); diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index 8888c9ba68db..ef0b5f48e13a 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -73,7 +73,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg) USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; req->r_args.setlayout.layout.fl_stripe_unit = @@ -135,7 +136,8 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg) if (IS_ERR(req)) return PTR_ERR(req); - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_args.setlayout.layout.fl_stripe_unit = cpu_to_le32(l.stripe_unit); diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 476b329867d4..7f0f72c53f7e 100644 --- 
a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -23,7 +23,8 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); /* mds requires start and length rather than start and end */ if (LLONG_MAX == fl->fl_end) diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 24067d68a554..54b14de2e729 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -722,7 +722,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc) ci = list_first_entry(&mdsc->snap_flush_list, struct ceph_inode_info, i_snap_flush_item); inode = &ci->vfs_inode; - igrab(inode); + ihold(inode); spin_unlock(&mdsc->snap_flush_lock); spin_lock(&inode->i_lock); __ceph_flush_snaps(ci, &session, 0); diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index f2b628696180..f42d730f1b66 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -665,7 +665,8 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name, err = PTR_ERR(req); goto out; } - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_inode_drop = CEPH_CAP_XATTR_SHARED; req->r_num_caps = 1; req->r_args.setxattr.flags = cpu_to_le32(flags); @@ -795,7 +796,8 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name) USE_AUTH_MDS); if (IS_ERR(req)) return PTR_ERR(req); - req->r_inode = igrab(inode); + req->r_inode = inode; + ihold(inode); req->r_inode_drop = CEPH_CAP_XATTR_SHARED; req->r_num_caps = 1; req->r_path2 = kstrdup(name, GFP_NOFS); -- cgit v1.2.3 From c3cd62839aaa2cdb2b99687c9e44f1b300a4aece Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 1 Jun 2011 16:08:44 -0700 Subject: ceph: fix short sync reads from the OSD If we get a short read from the OSD because the object is small, we need to zero the remainder of the buffer. For O_DIRECT reads, the attempted range is not trimmed to i_size by the VFS, so we were actually looping indefinitely. Fix by trimming by i_size, and the unconditionally zeroing the trailing range. Reported-by: Jeff Wu Signed-off-by: Sage Weil --- fs/ceph/file.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8c5ac4e72832..b654f403139e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -283,7 +283,7 @@ int ceph_release(struct inode *inode, struct file *file) static int striped_read(struct inode *inode, u64 off, u64 len, struct page **pages, int num_pages, - int *checkeof, bool align_to_pages, + int *checkeof, bool o_direct, unsigned long buf_align) { struct ceph_fs_client *fsc = ceph_inode_to_client(inode); @@ -308,7 +308,7 @@ static int striped_read(struct inode *inode, io_align = off & ~PAGE_MASK; more: - if (align_to_pages) + if (o_direct) page_align = (pos - io_align + buf_align) & ~PAGE_MASK; else page_align = pos & ~PAGE_MASK; @@ -346,20 +346,22 @@ more: } if (was_short) { - /* was original extent fully inside i_size? */ - if (pos + left <= inode->i_size) { - dout("zero tail\n"); - ceph_zero_page_vector_range(page_off + read, len - read, + /* did we bounce off eof? 
*/ + if (pos + left > inode->i_size) + *checkeof = 1; + + /* zero trailing bytes (inside i_size) */ + if (left > 0 && pos < inode->i_size) { + if (pos + left > inode->i_size) + left = inode->i_size - pos; + + dout("zero tail %d\n", left); + ceph_zero_page_vector_range(page_off + read, left, pages); - read = len; - goto out; + read += left; } - - /* check i_size */ - *checkeof = 1; } -out: if (ret >= 0) ret = read; dout("striped_read returns %d\n", ret); @@ -659,7 +661,7 @@ out: /* hit EOF or hole? */ if (statret == 0 && *ppos < inode->i_size) { - dout("aio_read sync_read hit hole, reading more\n"); + dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size); read += ret; base += ret; len -= ret; -- cgit v1.2.3 From 0e98728fa32d338907631349a8cc2afa07c0cb9a Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 7 Jun 2011 20:40:35 -0700 Subject: ceph: fix ENOENT logic in striped_read Getting ENOENT is equivalent to reading 0 bytes. Make that correction before setting up the hit_stripe and was_short flags. Fixes the following case: dd if=/dev/zero of=/mnt/fs_depot/dd3 bs=1 seek=1048576 count=0 dd if=/mnt/fs_depot/dd3 of=/root/ddout1 skip=8 bs=500 count=2 iflag=direct Reported-by: Henry C Chang Signed-off-by: Sage Weil --- fs/ceph/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index b654f403139e..9542f07d0b93 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -318,10 +318,10 @@ more: ci->i_truncate_seq, ci->i_truncate_size, page_pos, pages_left, page_align); - hit_stripe = this_len < left; - was_short = ret >= 0 && ret < this_len; if (ret == -ENOENT) ret = 0; + hit_stripe = this_len < left; + was_short = ret >= 0 && ret < this_len; dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read, ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); -- cgit v1.2.3 From 0c1f91f27140cf3b6e38dc4e892adac241c73a20 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 25 May 2011 14:56:12 -0700 Subject: ceph: unwind canceled flock state If we request a lock and then abort (e.g., ^C), we need to send a matching unlock request to the MDS to unwind our lock attempt to avoid indefinitely blocking other clients. 
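As a rough user-space analogue of the unwind (illustration only, not part of the patch; mds_lock_message() and do_lock() are made-up names): an interrupted blocking lock request is followed by a matching unlock, so the remote side does not keep the aborted attempt registered.

#include <stdio.h>

#define ERESTARTSYS 512

static int mds_lock_message(const char *op)
{
	printf("-> MDS: %s\n", op);
	/* pretend the caller was interrupted while waiting for "setlock" */
	return (op[0] == 's') ? -ERESTARTSYS : 0;
}

static int do_lock(void)
{
	int err = mds_lock_message("setlock");

	if (err == -ERESTARTSYS)
		/* unwind the canceled attempt with a matching unlock */
		mds_lock_message("unlock");
	return err;
}

int main(void)
{
	printf("do_lock() = %d\n", do_lock());
	return 0;
}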
Reported-by: Brian Chrisman Signed-off-by: Sage Weil --- fs/ceph/locks.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 7f0f72c53f7e..80576d05d687 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -33,11 +33,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, length = fl->fl_end - fl->fl_start + 1; dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " - "length: %llu, wait: %d, type`: %d", (int)lock_type, + "length: %llu, wait: %d, type: %d", (int)lock_type, (int)operation, (u64)fl->fl_pid, fl->fl_start, length, wait, fl->fl_type); - req->r_args.filelock_change.rule = lock_type; req->r_args.filelock_change.type = cmd; req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); @@ -71,7 +70,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, } ceph_mdsc_put_request(req); dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " - "length: %llu, wait: %d, type`: %d, err code %d", (int)lock_type, + "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type, (int)operation, (u64)fl->fl_pid, fl->fl_start, length, wait, fl->fl_type, err); return err; @@ -110,16 +109,20 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) dout("mds locked, locking locally"); err = posix_lock_file(file, fl, NULL); if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { - /* undo! This should only happen if the kernel detects - * local deadlock. */ + /* undo! This should only happen if + * the kernel detects local + * deadlock. */ ceph_lock_message(CEPH_LOCK_FCNTL, op, file, CEPH_LOCK_UNLOCK, 0, fl); - dout("got %d on posix_lock_file, undid lock", err); + dout("got %d on posix_lock_file, undid lock", + err); } } - } else { - dout("mds returned error code %d", err); + } else if (err == -ERESTARTSYS) { + dout("undoing lock\n"); + ceph_lock_message(CEPH_LOCK_FCNTL, op, file, + CEPH_LOCK_UNLOCK, 0, fl); } return err; } @@ -156,8 +159,11 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) file, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on flock_lock_file_wait, undid lock", err); } - } else { - dout("mds error code %d", err); + } else if (err == -ERESTARTSYS) { + dout("undoing lock\n"); + ceph_lock_message(CEPH_LOCK_FLOCK, + CEPH_MDS_OP_SETFILELOCK, + file, CEPH_LOCK_UNLOCK, 0, fl); } return err; } -- cgit v1.2.3 From 83fb086e0ecd879a4676cf12fc7afc1f9ecd1784 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 8 Jun 2011 07:35:24 -0400 Subject: cifs: trivial: add space in fsc error message Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fb31c2c1bb17..bb659eb73810 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1391,7 +1391,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, "/proc/fs/cifs/LookupCacheEnabled to 0\n"); } else if (strnicmp(data, "fsc", 3) == 0) { #ifndef CONFIG_CIFS_FSCACHE - cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE" + cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE " "kernel config option set"); goto cifs_parse_mount_err; #endif -- cgit v1.2.3 From 86d4a77ba3dc4ace238a0556541a41df2bd71d49 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 25 May 2011 13:03:16 -0400 Subject: Btrfs: cache bitmaps when searching for a cluster If we are looking for a cluster in a particularly sparse 
or fragmented block group, we will do a lot of looping through the free space tree looking for various things, and if we need to look at bitmaps we will endup doing the whole dance twice. So instead add the bitmap entries to a temporary list so if we have to do the bitmap search we can just look through the list of entries we've found quickly instead of having to loop through the entire tree again. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 54 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 49 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index ad144736a5fd..930c07f79b3d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2144,6 +2144,7 @@ again: */ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2166,6 +2167,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, * extent entry. */ while (entry->bitmap) { + if (list_empty(&entry->list)) + list_add_tail(&entry->list, bitmaps); node = rb_next(&entry->offset_index); if (!node) return -ENOSPC; @@ -2185,8 +2188,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, return -ENOSPC; entry = rb_entry(node, struct btrfs_free_space, offset_index); - if (entry->bitmap) + if (entry->bitmap) { + if (list_empty(&entry->list)) + list_add_tail(&entry->list, bitmaps); continue; + } + /* * we haven't filled the empty size and the window is * very large. reset and try again @@ -2240,6 +2247,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, */ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; @@ -2250,10 +2258,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, if (ctl->total_bitmaps == 0) return -ENOSPC; + /* + * First check our cached list of bitmaps and see if there is an entry + * here that will work. + */ + list_for_each_entry(entry, bitmaps, list) { + if (entry->bytes < min_bytes) + continue; + ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset, + bytes, min_bytes); + if (!ret) + return 0; + } + + /* + * If we do have entries on our list and we are here then we didn't find + * anything, so go ahead and get the next entry after the last entry in + * this list and start the search from there. 
+ */ + if (!list_empty(bitmaps)) { + entry = list_entry(bitmaps->prev, struct btrfs_free_space, + list); + node = rb_next(&entry->offset_index); + if (!node) + return -ENOSPC; + entry = rb_entry(node, struct btrfs_free_space, offset_index); + goto search; + } + entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); if (!entry) return -ENOSPC; +search: node = &entry->offset_index; do { entry = rb_entry(node, struct btrfs_free_space, offset_index); @@ -2284,6 +2321,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, u64 offset, u64 bytes, u64 empty_size) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; + struct list_head bitmaps; + struct btrfs_free_space *entry, *tmp; u64 min_bytes; int ret; @@ -2322,11 +2361,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, goto out; } - ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes, - min_bytes); + INIT_LIST_HEAD(&bitmaps); + ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, + bytes, min_bytes); if (ret) - ret = setup_cluster_bitmap(block_group, cluster, offset, - bytes, min_bytes); + ret = setup_cluster_bitmap(block_group, cluster, &bitmaps, + offset, bytes, min_bytes); + + /* Clear our temporary list */ + list_for_each_entry_safe(entry, tmp, &bitmaps, list) + list_del_init(&entry->list); if (!ret) { atomic_inc(&block_group->count); -- cgit v1.2.3 From 3de85bb95cc50d0977cbb7a0c605e894be4c790d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 25 May 2011 13:07:37 -0400 Subject: Btrfs: noinline the cluster searching functions When profiling the find cluster code it's hard to tell where we are spending our time because the bitmap and non-bitmap functions get inlined by the compiler, so make that not happen. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 930c07f79b3d..f56caacfd8ad 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -2142,10 +2142,11 @@ again: /* * This searches the block group for just extents to fill the cluster with. */ -static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_cluster *cluster, - struct list_head *bitmaps, - u64 offset, u64 bytes, u64 min_bytes) +static noinline int +setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, + u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *first = NULL; @@ -2245,10 +2246,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, * This specifically looks for bitmaps that may work in the cluster, we assume * that we have already failed to find extents that will work. 
*/ -static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, - struct btrfs_free_cluster *cluster, - struct list_head *bitmaps, - u64 offset, u64 bytes, u64 min_bytes) +static noinline int +setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_cluster *cluster, + struct list_head *bitmaps, u64 offset, u64 bytes, + u64 min_bytes) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct btrfs_free_space *entry; -- cgit v1.2.3 From f2bb8f5cfb3bce595b2de251ed7638047fc4e530 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 25 May 2011 13:10:16 -0400 Subject: Btrfs: don't commit the transaction if we dont have enough pinned bytes I noticed when running an enospc test that we would get stuck committing the transaction in check_data_space even though we truly didn't have enough space. So check to see if bytes_pinned is bigger than num_bytes, if it's not don't commit the transaction. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5b9b6b6df242..0d0a3fe77bb7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3089,6 +3089,13 @@ alloc: } goto again; } + + /* + * If we have less pinned bytes than we want to allocate then + * don't bother committing the transaction, it won't help us. + */ + if (data_sinfo->bytes_pinned < bytes) + committed = 1; spin_unlock(&data_sinfo->lock); /* commit the current transaction and try again */ -- cgit v1.2.3 From 2cdc342c204dba69ca3b2ec43d8e6ff41ed920b8 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 27 May 2011 14:07:49 -0400 Subject: Btrfs: fix bitmap regression In cleaning up the clustering code I accidently introduced a regression by adding bitmap entries to the cluster rb tree. The problem is if we've maxed out the number of bitmaps we can have for the block group we can only add free space to the bitmaps, but since the bitmap is on the cluster we can't find it and we try to create another one. This would result in a panic because the total bitmaps was bigger than the max bitmaps that were allowed. This patch fixes this by checking to see if we have a cluster, and then looking at the cluster rb tree to see if it has a bitmap entry and if it does and that space belongs to that bitmap, go ahead and add it to that bitmap. I could hit this panic every time with an fs_mark test within a couple of minutes. With this patch I no longer hit the panic and fs_mark goes to completion. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 88 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 69 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index f56caacfd8ad..8258ccf85dbd 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1417,6 +1417,23 @@ again: return 0; } +static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl, + struct btrfs_free_space *info, u64 offset, + u64 bytes) +{ + u64 bytes_to_set = 0; + u64 end; + + end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); + + bytes_to_set = min(end - offset, bytes); + + bitmap_set_bits(ctl, info, offset, bytes_to_set); + + return bytes_to_set; + +} + static bool use_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { @@ -1453,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl, return true; } +static struct btrfs_free_space_op free_space_op = { + .recalc_thresholds = recalculate_thresholds, + .use_bitmap = use_bitmap, +}; + static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, struct btrfs_free_space *info) { struct btrfs_free_space *bitmap_info; + struct btrfs_block_group_cache *block_group = NULL; int added = 0; - u64 bytes, offset, end; + u64 bytes, offset, bytes_added; int ret; bytes = info->bytes; @@ -1467,6 +1490,47 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, if (!ctl->op->use_bitmap(ctl, info)) return 0; + if (ctl->op == &free_space_op) + block_group = ctl->private; + + /* + * Since we link bitmaps right into the cluster we need to see if we + * have a cluster here, and if so and it has our bitmap we need to add + * the free space to that bitmap. + */ + if (block_group && !list_empty(&block_group->cluster_list)) { + struct btrfs_free_cluster *cluster; + struct rb_node *node; + struct btrfs_free_space *entry; + + cluster = list_entry(block_group->cluster_list.next, + struct btrfs_free_cluster, + block_group_list); + spin_lock(&cluster->lock); + node = rb_first(&cluster->root); + if (!node) { + spin_unlock(&cluster->lock); + goto again; + } + + entry = rb_entry(node, struct btrfs_free_space, offset_index); + if (!entry->bitmap) { + spin_unlock(&cluster->lock); + goto again; + } + + if (entry->offset == offset_to_bitmap(ctl, offset)) { + bytes_added = add_bytes_to_bitmap(ctl, entry, + offset, bytes); + bytes -= bytes_added; + offset += bytes_added; + } + spin_unlock(&cluster->lock); + if (!bytes) { + ret = 1; + goto out; + } + } again: bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); @@ -1475,19 +1539,10 @@ again: goto new_bitmap; } - end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); - - if (offset >= bitmap_info->offset && offset + bytes > end) { - bitmap_set_bits(ctl, bitmap_info, offset, end - offset); - bytes -= end - offset; - offset = end; - added = 0; - } else if (offset >= bitmap_info->offset && offset + bytes <= end) { - bitmap_set_bits(ctl, bitmap_info, offset, bytes); - bytes = 0; - } else { - BUG(); - } + bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes); + bytes -= bytes_added; + offset += bytes_added; + added = 0; if (!bytes) { ret = 1; @@ -1766,11 +1821,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, "\n", count); } -static struct btrfs_free_space_op free_space_op = { - .recalc_thresholds = recalculate_thresholds, - .use_bitmap = use_bitmap, -}; - void btrfs_init_free_space_ctl(struct btrfs_block_group_cache 
*block_group) { struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; -- cgit v1.2.3 From 723bda2083d44edbd6be0f0b09f902120dc07442 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 27 May 2011 16:11:38 -0400 Subject: Btrfs: fix the allocator loop logic I was testing with empty_cluster = 0 to try and reproduce a problem and kept hitting early enospc panics. This was because our loop logic was a little confused. So this is what I did 1) Make the loop variable the ultimate decider on wether we should loop again isntead of checking to see if we had an uncached bg, empty size or empty cluster. 2) Increment loop before checking to see what we are on to make the loop definitions make more sense. 3) If we are on the chunk alloc loop don't set empty_size/empty_cluster to 0 unless we didn't actually allocate a chunk. If we did allocate a chunk we should be able to easily setup a new cluster so clearing empty_size/empty_cluster makes us less efficient. This kept me from hitting panics while trying to reproduce the other problem. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/extent-tree.c | 48 +++++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0d0a3fe77bb7..b42efc2ded51 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5218,9 +5218,7 @@ loop: * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try * again */ - if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && - (found_uncached_bg || empty_size || empty_cluster || - allowed_chunk_alloc)) { + if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) { index = 0; if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { found_uncached_bg = false; @@ -5260,32 +5258,36 @@ loop: goto search; } - if (loop < LOOP_CACHING_WAIT) { - loop++; - goto search; - } + loop++; if (loop == LOOP_ALLOC_CHUNK) { - empty_size = 0; - empty_cluster = 0; - } + if (allowed_chunk_alloc) { + ret = do_chunk_alloc(trans, root, num_bytes + + 2 * 1024 * 1024, data, + CHUNK_ALLOC_LIMITED); + allowed_chunk_alloc = 0; + if (ret == 1) + done_chunk_alloc = 1; + } else if (!done_chunk_alloc && + space_info->force_alloc == + CHUNK_ALLOC_NO_FORCE) { + space_info->force_alloc = CHUNK_ALLOC_LIMITED; + } - if (allowed_chunk_alloc) { - ret = do_chunk_alloc(trans, root, num_bytes + - 2 * 1024 * 1024, data, - CHUNK_ALLOC_LIMITED); - allowed_chunk_alloc = 0; - done_chunk_alloc = 1; - } else if (!done_chunk_alloc && - space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) { - space_info->force_alloc = CHUNK_ALLOC_LIMITED; + /* + * We didn't allocate a chunk, go ahead and drop the + * empty size and loop again. + */ + if (!done_chunk_alloc) + loop = LOOP_NO_EMPTY_SIZE; } - if (loop < LOOP_NO_EMPTY_SIZE) { - loop++; - goto search; + if (loop == LOOP_NO_EMPTY_SIZE) { + empty_size = 0; + empty_cluster = 0; } - ret = -ENOSPC; + + goto search; } else if (!ins->objectid) { ret = -ENOSPC; } else if (ins->objectid) { -- cgit v1.2.3 From f6a398298d34af66ec3a2d82a44a4dbc5277357d Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 6 Jun 2011 10:50:35 -0400 Subject: Btrfs: fix duplicate checking logic When merging my code into the integration test the second check for duplicate entries got screwed up. This patch fixes it by dropping ret2 and just using ret for the return value, and checking if we got an error before adding the bitmap to the local list. 
Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/free-space-cache.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 8258ccf85dbd..38f3fd923043 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, pgoff_t index = 0; unsigned long first_page_offset; int num_checksums; - int ret = 0, ret2; + int ret = 0; INIT_LIST_HEAD(&bitmaps); @@ -421,11 +421,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, goto free_cache; } spin_lock(&ctl->tree_lock); - ret2 = link_free_space(ctl, e); + ret = link_free_space(ctl, e); ctl->total_bitmaps++; ctl->op->recalc_thresholds(ctl); spin_unlock(&ctl->tree_lock); - list_add_tail(&e->list, &bitmaps); if (ret) { printk(KERN_ERR "Duplicate entries in " "free space cache, dumping\n"); @@ -434,6 +433,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, page_cache_release(page); goto free_cache; } + list_add_tail(&e->list, &bitmaps); } num_entries--; -- cgit v1.2.3 From 25b8b936ed44814a5ce6fc3b2a21401f33cd56f6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 8 Jun 2011 14:36:54 -0400 Subject: Btrfs: don't map extent buffer if path->skip_locking is set Arne's scrub stuff exposed a problem with mapping the extent buffer in reada_for_search. He searches the commit root with multiple threads and with skip_locking set, so we can race and overwrite node->map_token since node isn't locked. So fix this so that we only map the extent buffer if we don't already have a map_token and skip_locking isn't set. Without this patch scrub would panic almost immediately, with the patch it doesn't panic anymore. 
Thanks, Reported-by: Arne Jansen Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d84089349c82..2e667868e0d2 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root, u32 nr; u32 blocksize; u32 nscan = 0; + bool map = true; if (level != 1) return; @@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; + if (node->map_token || path->skip_locking) + map = false; + while (1) { - if (!node->map_token) { + if (map && !node->map_token) { unsigned long offset = btrfs_node_key_ptr_offset(nr); map_private_extent_buffer(node, offset, sizeof(struct btrfs_key_ptr), @@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root, if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { gen = btrfs_node_ptr_generation(node, nr); - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; @@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root, if ((nread > 65536 || nscan > 32)) break; } - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; } -- cgit v1.2.3 From 3473f3c06a36865ae05993041fff35ee928342a7 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 9 Jun 2011 10:15:17 -0400 Subject: Btrfs: unlock the trans lock properly In btrfs_wait_for_commit if we came upon a transaction that had committed we just exited, but that's bad since we are holding the trans_lock. So break instead so that the lock is dropped. Thanks, Reported-by: David Sterba Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index dd719662340e..6b2e4786d189 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -349,7 +349,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid) list) { if (t->in_commit) { if (t->commit_done) - goto out; + break; cur_trans = t; atomic_inc(&cur_trans->use_count); break; -- cgit v1.2.3 From dac853ae89043f1b7752875300faf614de43c74b Mon Sep 17 00:00:00 2001 From: Mathias Krause Date: Thu, 9 Jun 2011 20:05:18 +0200 Subject: exec: delay address limit change until point of no return Unconditionally changing the address limit to USER_DS and not restoring it to its old value in the error path is wrong because it prevents us using kernel memory on repeated calls to this function. This, in fact, breaks the fallback of hard coded paths to the init program from being ever successful if the first candidate fails to load. With this patch applied switching to USER_DS is delayed until the point of no return is reached which makes it possible to have a multi-arch rootfs with one arch specific init binary for each of the (hard coded) probed paths. Since the address limit is already set to USER_DS when start_thread() will be invoked, this redundancy can be safely removed. 
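A rough user-space analogue of the ordering (illustration only, not part of the patch; all names here are made up): process-wide state is flipped only after the last step that can still fail, so later fallback candidates still see the original state.

#include <stdio.h>
#include <string.h>

static char addr_limit[16] = "KERNEL_DS";	/* stands in for the address limit */

static int try_binary(const char *path)
{
	/* earlier candidates may fail; nothing irreversible has happened yet */
	if (strcmp(path, "/sbin/init") != 0)
		return -1;

	/* point of no return: only now change the process-wide limit */
	strcpy(addr_limit, "USER_DS");
	return 0;
}

int main(void)
{
	const char *candidates[] = { "/bin/init", "/sbin/init" };

	for (unsigned int i = 0; i < 2; i++) {
		if (try_binary(candidates[i]) == 0) {
			printf("loaded %s, limit now %s\n", candidates[i], addr_limit);
			return 0;
		}
		printf("%s failed, limit still %s\n", candidates[i], addr_limit);
	}
	return 1;
}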
Signed-off-by: Mathias Krause Cc: Al Viro Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- arch/x86/kernel/process_32.c | 1 - arch/x86/kernel/process_64.c | 1 - fs/exec.c | 5 +---- 3 files changed, 1 insertion(+), 6 deletions(-) (limited to 'fs') diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 8d128783af47..a3d0dc59067b 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { set_user_gs(regs, 0); regs->fs = 0; - set_fs(USER_DS); regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6c9dd922ac0d..ca6f7ab8df33 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip, regs->cs = _cs; regs->ss = _ss; regs->flags = X86_EFLAGS_IF; - set_fs(USER_DS); /* * Free the old FP and other extended state */ diff --git a/fs/exec.c b/fs/exec.c index ea5f748906a8..97e0d52d72fd 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1093,6 +1093,7 @@ int flush_old_exec(struct linux_binprm * bprm) bprm->mm = NULL; /* We're using it now */ + set_fs(USER_DS); current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD); flush_thread(); current->personality &= ~bprm->per_clear; @@ -1357,10 +1358,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) if (retval) return retval; - /* kernel module loader fixup */ - /* so we don't try to load run modprobe in kernel space. */ - set_fs(USER_DS); - retval = audit_bprm(bprm); if (retval) return retval; -- cgit v1.2.3 From ad3e34bba4b64ab8e1f5ea1a17768e1a0d9648ea Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 8 Jun 2011 14:45:50 -0400 Subject: Btrfs: don't map extent buffer if path->skip_locking is set Arne's scrub stuff exposed a problem with mapping the extent buffer in reada_for_search. He searches the commit root with multiple threads and with skip_locking set, so we can race and overwrite node->map_token since node isn't locked. So fix this so that we only map the extent buffer if we don't already have a map_token and skip_locking isn't set. Without this patch scrub would panic almost immediately, with the patch it doesn't panic anymore. 
Thanks, Reported-by: Arne Jansen Signed-off-by: Josef Bacik --- fs/btrfs/ctree.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index d84089349c82..2e667868e0d2 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root, u32 nr; u32 blocksize; u32 nscan = 0; + bool map = true; if (level != 1) return; @@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root, nritems = btrfs_header_nritems(node); nr = slot; + if (node->map_token || path->skip_locking) + map = false; + while (1) { - if (!node->map_token) { + if (map && !node->map_token) { unsigned long offset = btrfs_node_key_ptr_offset(nr); map_private_extent_buffer(node, offset, sizeof(struct btrfs_key_ptr), @@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root, if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { gen = btrfs_node_ptr_generation(node, nr); - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; @@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root, if ((nread > 65536 || nscan > 32)) break; } - if (node->map_token) { + if (map && node->map_token) { unmap_extent_buffer(node, node->map_token, KM_USER1); node->map_token = NULL; } -- cgit v1.2.3 From 8c51032f978bac5bec5dae0c5de4f85db97c1cc9 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Fri, 3 Jun 2011 10:09:26 +0200 Subject: btrfs: scrub: errors in tree enumeration due to the semantics of btrfs_search_slot the path can point to an invalid slot when ret > 0. This condition went unnoticed, which in turn could have led to an incomplete scrubbing. Signed-off-by: Arne Jansen --- fs/btrfs/scrub.c | 57 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index df50fd1eca8f..d5a4108cedaf 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -804,18 +804,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) - goto out; - - l = path->nodes[0]; - slot = path->slots[0]; - btrfs_item_key_to_cpu(l, &key, slot); - if (key.objectid != logical) { - ret = btrfs_previous_item(root, path, 0, - BTRFS_EXTENT_ITEM_KEY); - if (ret < 0) - goto out; - } + goto out_noplug; + /* + * we might miss half an extent here, but that doesn't matter, + * as it's only the prefetch + */ while (1) { l = path->nodes[0]; slot = path->slots[0]; @@ -824,7 +818,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, if (ret == 0) continue; if (ret < 0) - goto out; + goto out_noplug; break; } @@ -906,15 +900,20 @@ again: ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; - - l = path->nodes[0]; - slot = path->slots[0]; - btrfs_item_key_to_cpu(l, &key, slot); - if (key.objectid != logical) { + if (ret > 0) { ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY); if (ret < 0) goto out; + if (ret > 0) { + /* there's no smaller item, so stick with the + * larger one */ + btrfs_release_path(path); + ret = btrfs_search_slot(NULL, root, &key, + path, 0, 0); + if (ret < 0) + goto out; + } } while (1) { @@ -989,6 +988,7 @@ next: out: blk_finish_plug(&plug); +out_noplug: btrfs_free_path(path); return ret < 0 ? 
ret : 0; } @@ -1064,8 +1064,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) - goto out; - ret = 0; + break; + if (ret > 0) { + if (path->slots[0] >= + btrfs_header_nritems(path->nodes[0])) { + ret = btrfs_next_leaf(root, path); + if (ret) + break; + } + } l = path->nodes[0]; slot = path->slots[0]; @@ -1075,7 +1082,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) if (found_key.objectid != sdev->dev->devid) break; - if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY) break; if (found_key.offset >= end) @@ -1104,7 +1111,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) cache = btrfs_lookup_block_group(fs_info, chunk_offset); if (!cache) { ret = -ENOENT; - goto out; + break; } ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, chunk_offset, length); @@ -1116,9 +1123,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) btrfs_release_path(path); } -out: btrfs_free_path(path); - return ret; + + /* + * ret can still be 1 from search_slot or next_leaf, + * that's not an error + */ + return ret < 0 ? ret : 0; } static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) -- cgit v1.2.3 From 632dd772fcbde2ba37c0e8983bd38ef4a1eac906 Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Fri, 10 Jun 2011 12:07:07 +0200 Subject: btrfs: reinitialize scrub workers Scrub starts the workers each time a scrub starts and stops them after it finished. This patch adds an initialization for the workers before each start, otherwise the workers behave strangely. Signed-off-by: Arne Jansen --- fs/btrfs/disk-io.c | 2 -- fs/btrfs/scrub.c | 6 +++++- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a203d363184d..7bbbfebe47e4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1668,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb, init_waitqueue_head(&fs_info->scrub_pause_wait); init_rwsem(&fs_info->scrub_super_lock); fs_info->scrub_workers_refcnt = 0; - btrfs_init_workers(&fs_info->scrub_workers, "scrub", - fs_info->thread_pool_size, &fs_info->generic_worker); sb->s_blocksize = 4096; sb->s_blocksize_bits = blksize_bits(4096); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index d5a4108cedaf..92cac19388ed 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1166,8 +1166,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) struct btrfs_fs_info *fs_info = root->fs_info; mutex_lock(&fs_info->scrub_lock); - if (fs_info->scrub_workers_refcnt == 0) + if (fs_info->scrub_workers_refcnt == 0) { + btrfs_init_workers(&fs_info->scrub_workers, "scrub", + fs_info->thread_pool_size, &fs_info->generic_worker); + fs_info->scrub_workers.idle_thresh = 4; btrfs_start_workers(&fs_info->scrub_workers, 1); + } ++fs_info->scrub_workers_refcnt; mutex_unlock(&fs_info->scrub_lock); -- cgit v1.2.3 From 6eef3125886df260ca0e8758d141308152226f6a Mon Sep 17 00:00:00 2001 From: Arne Jansen Date: Fri, 10 Jun 2011 13:04:58 +0200 Subject: btrfs: remove unneeded includes from scrub.c Signed-off-by: Arne Jansen --- fs/btrfs/scrub.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 92cac19388ed..a8d03d5efb5d 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -16,13 +16,7 @@ * Boston, MA 021110-1307, USA. 
*/ -#include -#include -#include #include -#include -#include -#include #include "ctree.h" #include "volumes.h" #include "disk-io.h" -- cgit v1.2.3 From 38e87880666091fe9c572a7a2ed2e771d97ca5aa Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 10 Jun 2011 16:36:57 -0400 Subject: Btrfs: make sure to recheck for bitmaps in clusters Josef recently changed the free extent cache to look in the block group cluster for any bitmaps before trying to add a new bitmap for the same offset. This avoids BUG_ON()s due covering duplicate ranges. But it didn't go quite far enough. A given free range might span between one or more bitmaps or free space entries. The code has looping to cover this, but it doesn't check for clustered bitmaps every time. This shuffles our gotos to check for a bitmap in the cluster for every new bitmap entry we try to add. Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 38f3fd923043..9f985a429877 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1492,7 +1492,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, if (ctl->op == &free_space_op) block_group = ctl->private; - +again: /* * Since we link bitmaps right into the cluster we need to see if we * have a cluster here, and if so and it has our bitmap we need to add @@ -1510,13 +1510,13 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, node = rb_first(&cluster->root); if (!node) { spin_unlock(&cluster->lock); - goto again; + goto no_cluster_bitmap; } entry = rb_entry(node, struct btrfs_free_space, offset_index); if (!entry->bitmap) { spin_unlock(&cluster->lock); - goto again; + goto no_cluster_bitmap; } if (entry->offset == offset_to_bitmap(ctl, offset)) { @@ -1531,7 +1531,8 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, goto out; } } -again: + +no_cluster_bitmap: bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1, 0); if (!bitmap_info) { -- cgit v1.2.3 From 38e880540f983045da7a00fbc50daad238207fc5 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 10 Jun 2011 18:43:13 +0000 Subject: Btrfs: clear current->journal_info on async transaction commit Normally current->jouranl_info is cleared by commit_transaction. For an async snap or subvol creation, though, it runs in a work queue. Clear it in btrfs_commit_transaction_async() to avoid leaking a non-NULL journal_info when we return to userspace. When the actual commit runs in the other thread it won't care that it's current->journal_info is already NULL. 
Signed-off-by: Sage Weil Tested-by: Jim Schutt Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 6b2e4786d189..2b3590b9fe98 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1118,8 +1118,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, wait_current_trans_commit_start_and_unblock(root, cur_trans); else wait_current_trans_commit_start(root, cur_trans); - put_transaction(cur_trans); + if (current->journal_info == trans) + current->journal_info = NULL; + + put_transaction(cur_trans); return 0; } -- cgit v1.2.3 From 9eb9104c665aae2401a1723c044669eb10240072 Mon Sep 17 00:00:00 2001 From: richard kennedy Date: Tue, 7 Jun 2011 10:46:32 +0000 Subject: btrfs: remove 64bit alignment padding to allow extent_buffer to fit into one fewer cacheline Reorder extent_buffer to remove 8 bytes of alignment padding on 64 bit builds. This shrinks its size to 128 bytes allowing it to fit into one fewer cache lines and allows more objects per slab in its kmem_cache. slabinfo extent_buffer reports :- before:- Sizes (bytes) Slabs ---------------------------------- Object : 136 Total : 123 SlabObj: 136 Full : 121 SlabSiz: 4096 Partial: 0 Loss : 0 CpuSlab: 2 Align : 8 Objects: 30 after :- Object : 128 Total : 4 SlabObj: 128 Full : 2 SlabSiz: 4096 Partial: 0 Loss : 0 CpuSlab: 2 Align : 8 Objects: 32 Signed-off-by: Richard Kennedy Signed-off-by: Chris Mason --- fs/btrfs/extent_io.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 4e8445a4757c..a11a92ee2d30 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -126,9 +126,9 @@ struct extent_buffer { unsigned long map_len; struct page *first_page; unsigned long bflags; - atomic_t refs; struct list_head leak_list; struct rcu_head rcu_head; + atomic_t refs; /* the spinlock is used to protect most operations */ spinlock_t lock; -- cgit v1.2.3 From 027ed2f0044e95a97ed34db2d55a9ca95ba84385 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 8 Jun 2011 08:27:56 +0000 Subject: Btrfs: avoid stack bloat in btrfs_ioctl_fs_info() The size of struct btrfs_ioctl_fs_info_args is as big as 1KB, so don't declare the variable on stack. 
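A rough user-space analogue of the pattern (illustration only, not part of the patch; struct fs_info_args and its fields are made up): the ~1KB argument block lives on the heap instead of the stack, and it is freed on every exit path.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fs_info_args {			/* stand-in for the ~1KB ioctl argument block */
	unsigned long long max_id;
	unsigned long long num_devices;
	unsigned char fsid[16];
	unsigned char reserved[1000];
};

static int fill_fs_info(struct fs_info_args *out)
{
	struct fs_info_args *fi = calloc(1, sizeof(*fi));	/* heap, not a stack variable */

	if (!fi)
		return -1;
	fi->num_devices = 2;
	fi->max_id = 1;
	memcpy(out, fi, sizeof(*out));	/* plays the role of copy_to_user() */
	free(fi);			/* released on every exit path */
	return 0;
}

int main(void)
{
	struct fs_info_args *args = calloc(1, sizeof(*args));

	if (!args)
		return 1;
	if (fill_fs_info(args) == 0)
		printf("devices=%llu max_id=%llu\n", args->num_devices, args->max_id);
	free(args);
	return 0;
}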
Signed-off-by: Li Zefan Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ac37040e426a..b793d112d1f6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2054,29 +2054,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg) static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) { - struct btrfs_ioctl_fs_info_args fi_args; + struct btrfs_ioctl_fs_info_args *fi_args; struct btrfs_device *device; struct btrfs_device *next; struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; + int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; - fi_args.num_devices = fs_devices->num_devices; - fi_args.max_id = 0; - memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid)); + fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL); + if (!fi_args) + return -ENOMEM; + + fi_args->num_devices = fs_devices->num_devices; + memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid)); mutex_lock(&fs_devices->device_list_mutex); list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { - if (device->devid > fi_args.max_id) - fi_args.max_id = device->devid; + if (device->devid > fi_args->max_id) + fi_args->max_id = device->devid; } mutex_unlock(&fs_devices->device_list_mutex); - if (copy_to_user(arg, &fi_args, sizeof(fi_args))) - return -EFAULT; + if (copy_to_user(arg, fi_args, sizeof(*fi_args))) + ret = -EFAULT; - return 0; + kfree(fi_args); + return ret; } static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) -- cgit v1.2.3 From 5be76758f35ec6578e5b9b150aa513ac26bd9c54 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 9 Jun 2011 10:02:51 +0000 Subject: btrfs: fix unlocked access of delalloc_inodes list_splice_init will make delalloc_inodes empty, but without a spinlock around, this may produce corrupted list head, accessed in many placess, The race window is very tight and nobody seems to have hit it so far. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a203d363184d..33b744a5ac03 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2911,9 +2911,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) INIT_LIST_HEAD(&splice); - list_splice_init(&root->fs_info->delalloc_inodes, &splice); - spin_lock(&root->fs_info->delalloc_lock); + list_splice_init(&root->fs_info->delalloc_inodes, &splice); while (!list_empty(&splice)) { btrfs_inode = list_entry(splice.next, struct btrfs_inode, -- cgit v1.2.3 From 08d2f347e877e489ca098c87a6fd2e872fef9767 Mon Sep 17 00:00:00 2001 From: Jan Schmidt Date: Wed, 4 May 2011 16:18:50 +0200 Subject: Btrfs: fix extent state leak on failed nodatasum reads When encountering an EIO while reading from a nodatasum extent, we insert an error record into the inode's failure tree. btrfs_readpage_end_io_hook returns early for nodatasum inodes. We'd better clear the failure tree in that case, otherwise the kernel complains about BUG extent_state: Objects remaining on kmem_cache_close() on rmmod. 
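A rough user-space analogue of why the early return leaks (illustration only, not part of the patch; names are made up): bailing out with a plain "return 0" skips the shared cleanup label, while jumping to it keeps every exit path releasing the recorded failure state.

#include <stdio.h>
#include <stdlib.h>

static int end_io_check(int nodatasum, int **failure_record)
{
	int *rec = malloc(sizeof(*rec));	/* stands in for the failure-tree entry */

	if (!rec)
		return -1;
	*rec = 0;
	*failure_record = rec;

	if (nodatasum)
		goto good;	/* a plain "return 0" here would leave "rec" behind */

	/* ... checksum verification would run here ... */

good:
	free(rec);		/* single cleanup point for the successful exits */
	*failure_record = NULL;
	return 0;
}

int main(void)
{
	int *rec = NULL;
	int ret = end_io_check(1, &rec);

	printf("ret=%d record=%p\n", ret, (void *)rec);
	return 0;
}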
Signed-off-by: Jan Schmidt Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 02ff4a1b968b..113913ae36e0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1986,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, } if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) - return 0; + goto good; if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { -- cgit v1.2.3 From 22b63a2971c5657dfc1bf4514f9410fc90c8b2c2 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Wed, 9 Feb 2011 16:05:31 +0200 Subject: Btrfs - use %pU to print fsid Get rid of FIXME comment. Uuids from dmesg are now the same as uuids given by btrfs-progs. Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index da541dfca2e3..1efa56e18f9b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, transid = btrfs_super_generation(disk_super); if (disk_super->label[0]) printk(KERN_INFO "device label %s ", disk_super->label); - else { - /* FIXME, make a readl uuid parser */ - printk(KERN_INFO "device fsid %llx-%llx ", - *(unsigned long long *)disk_super->fsid, - *(unsigned long long *)(disk_super->fsid + 8)); - } + else + printk(KERN_INFO "device fsid %pU ", disk_super->fsid); printk(KERN_CONT "devid %llu transid %llu %s\n", (unsigned long long)devid, (unsigned long long)transid, path); ret = device_list_add(path, disk_super, devid, fs_devices_ret); -- cgit v1.2.3 From fe744fdb74f2417d8571faefa45f72b0ead25f89 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Wed, 25 May 2011 23:00:27 +0900 Subject: nilfs2: fix incorrect block address termination in node concatenation nilfs_btree_delete function wrongly terminates virtual block address of the btree node held by its parent at index 0. When concatenating the index-0 node with its right sibling node, nilfs_btree_delete terminates the block address of index-0 node instead of the right sibling node which should be deleted. This bug not only wears disk space in the long run, but also causes file system corruption. This will fix it. 
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 7eafe468a29c..fd4f05b91942 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -1356,20 +1356,19 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, struct buffer_head *bh; struct nilfs_btree_node *node, *parent, *sib; __u64 sibptr; - int pindex, level, ncmin, ncmax, ncblk, ret; + int pindex, dindex, level, ncmin, ncmax, ncblk, ret; ret = 0; stats->bs_nblocks = 0; ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); ncblk = nilfs_btree_nchildren_per_block(btree); - for (level = NILFS_BTREE_LEVEL_NODE_MIN; + for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index; level < nilfs_btree_height(btree) - 1; level++) { node = nilfs_btree_get_nonroot_node(path, level); path[level].bp_oldreq.bpr_ptr = - nilfs_btree_node_get_ptr(node, path[level].bp_index, - ncblk); + nilfs_btree_node_get_ptr(node, dindex, ncblk); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); if (ret < 0) @@ -1383,6 +1382,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); pindex = path[level + 1].bp_index; + dindex = pindex; if (pindex > 0) { /* left sibling */ @@ -1421,6 +1421,14 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, path[level].bp_sib_bh = bh; path[level].bp_op = nilfs_btree_concat_right; stats->bs_nblocks++; + /* + * When merging right sibling node + * into the current node, pointer to + * the right sibling node must be + * terminated instead. The adjustment + * below is required for that. + */ + dindex = pindex + 1; /* continue; */ } } else { @@ -1443,7 +1451,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, node = nilfs_btree_get_root(btree); path[level].bp_oldreq.bpr_ptr = - nilfs_btree_node_get_ptr(node, path[level].bp_index, + nilfs_btree_node_get_ptr(node, dindex, NILFS_BTREE_ROOT_NCHILDREN_MAX); ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); -- cgit v1.2.3 From d40990537c9ea85dfe75dbe0ffba5e1002dfdf3f Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Wed, 25 May 2011 23:00:27 +0900 Subject: nilfs2: fix missing block address termination in btree node shrinking nilfs_btree_delete function does not terminate part of virtual block addresses when shrinking the last remaining child node into the root node. The missing address termination causes that dead btree node blocks persist and chip away free disk space. This fixes the leak bug on the btree node deletion. 
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/btree.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index fd4f05b91942..b2e3ff347620 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -1346,6 +1346,11 @@ static void nilfs_btree_shrink(struct nilfs_bmap *btree, path[level].bp_bh = NULL; } +static void nilfs_btree_nop(struct nilfs_bmap *btree, + struct nilfs_btree_path *path, + int level, __u64 *keyp, __u64 *ptrp) +{ +} static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, struct nilfs_btree_path *path, @@ -1439,16 +1444,22 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, NILFS_BTREE_ROOT_NCHILDREN_MAX) { path[level].bp_op = nilfs_btree_shrink; stats->bs_nblocks += 2; + level++; + path[level].bp_op = nilfs_btree_nop; + goto shrink_root_child; } else { path[level].bp_op = nilfs_btree_do_delete; stats->bs_nblocks++; + goto out; } - - goto out; - } } + /* child of the root node is deleted */ + path[level].bp_op = nilfs_btree_do_delete; + stats->bs_nblocks++; + +shrink_root_child: node = nilfs_btree_get_root(btree); path[level].bp_oldreq.bpr_ptr = nilfs_btree_node_get_ptr(node, dindex, @@ -1458,10 +1469,6 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, if (ret < 0) goto err_out_child_node; - /* child of the root node is deleted */ - path[level].bp_op = nilfs_btree_do_delete; - stats->bs_nblocks++; - /* success */ out: *levelp = level; -- cgit v1.2.3 From 071d73cfe5c38cf62338b952bd350ff3de541b75 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 10 Jun 2011 00:33:06 +0900 Subject: nilfs2: fix problem in setting checkpoint interval Checkpoint generation interval of nilfs goes wrong after user has changed the interval parameter with nilfs-tune tool. segctord starting. Construction interval = 5 seconds, CP frequency < 30 seconds segctord starting. Construction interval = 0 seconds, CP frequency < 30 seconds This turned out to be caused by a trivial bug in initialization code of log writer. This will fix it. Reported-by: Andrea Gelmini Signed-off-by: Ryusuke Konishi --- fs/nilfs2/segment.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 141646e88fb5..bb24ab6c282f 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2573,7 +2573,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb, sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK; if (nilfs->ns_interval) - sci->sc_interval = nilfs->ns_interval; + sci->sc_interval = HZ * nilfs->ns_interval; if (nilfs->ns_watermark) sci->sc_watermark = nilfs->ns_watermark; return sci; -- cgit v1.2.3 From 30b4caf5d73af5c99cf1b2b46496d8bc35330992 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 8 Jun 2011 03:56:44 +0000 Subject: Btrfs: use join_transaction in btrfs_evict_inode() The WARN_ON() in start_transaction() was triggered while balancing. The cause is btrfs_relocate_chunk() started a transaction and then called iput() on the inode that stores free space cache, and iput() called btrfs_start_transaction() again. 
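A rough user-space analogue of the nesting problem (illustration only, not part of the patch; names are made up): "start" asserts that no transaction is running, so a path that may already be inside one must "join" the running transaction instead.

#include <assert.h>
#include <stdio.h>

static int transaction_depth;

static void start_transaction(void)
{
	assert(transaction_depth == 0);	/* plays the role of the WARN_ON */
	transaction_depth = 1;
}

static void join_transaction(void)
{
	/* reuse the running transaction if there is one */
	transaction_depth++;
}

static void end_transaction(void)
{
	transaction_depth--;
}

static void evict_inode(void)
{
	join_transaction();		/* safe even if a transaction is already running */
	/* ... drop the inode's items ... */
	end_transaction();
}

int main(void)
{
	start_transaction();		/* e.g. the chunk relocation path */
	evict_inode();			/* reached via iput() while that transaction runs */
	end_transaction();
	printf("depth back to %d\n", transaction_depth);
	return 0;
}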
Reported-by: Tsutomu Itoh Signed-off-by: Li Zefan Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 113913ae36e0..c15636b17874 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3646,7 +3646,7 @@ void btrfs_evict_inode(struct inode *inode) btrfs_i_size_write(inode, 0); while (1) { - trans = btrfs_start_transaction(root, 0); + trans = btrfs_join_transaction(root); BUG_ON(IS_ERR(trans)); trans->block_rsv = root->orphan_block_rsv; -- cgit v1.2.3 From ff78fca2a03c08436535d3f7152a30752d8131d1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Jun 2011 09:42:17 -0400 Subject: fix leak in proc_set_super() set_anon_super() can fail... Signed-off-by: Al Viro --- fs/proc/root.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/proc/root.c b/fs/proc/root.c index a9000e9cfee5..d6c3b416529b 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -28,11 +28,12 @@ static int proc_test_super(struct super_block *sb, void *data) static int proc_set_super(struct super_block *sb, void *data) { - struct pid_namespace *ns; - - ns = (struct pid_namespace *)data; - sb->s_fs_info = get_pid_ns(ns); - return set_anon_super(sb, NULL); + int err = set_anon_super(sb, NULL); + if (!err) { + struct pid_namespace *ns = (struct pid_namespace *)data; + sb->s_fs_info = get_pid_ns(ns); + } + return err; } static struct dentry *proc_mount(struct file_system_type *fs_type, -- cgit v1.2.3 From b1c27ab3f93daede979f804afc38b189c2f17c60 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Jun 2011 10:07:03 -0400 Subject: ubifs: split allocation of ubifs_info into a separate function preparation to ubifs sget() race fixes Signed-off-by: Al Viro --- fs/ubifs/super.c | 87 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 48 insertions(+), 39 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index b5aeb5a8ebed..ddc3b02e8cf0 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1971,6 +1971,53 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode) return ERR_PTR(-EINVAL); } +static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi) +{ + struct ubifs_info *c; + + c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL); + if (c) { + spin_lock_init(&c->cnt_lock); + spin_lock_init(&c->cs_lock); + spin_lock_init(&c->buds_lock); + spin_lock_init(&c->space_lock); + spin_lock_init(&c->orphan_lock); + init_rwsem(&c->commit_sem); + mutex_init(&c->lp_mutex); + mutex_init(&c->tnc_mutex); + mutex_init(&c->log_mutex); + mutex_init(&c->mst_mutex); + mutex_init(&c->umount_mutex); + mutex_init(&c->bu_mutex); + mutex_init(&c->write_reserve_mutex); + init_waitqueue_head(&c->cmt_wq); + c->buds = RB_ROOT; + c->old_idx = RB_ROOT; + c->size_tree = RB_ROOT; + c->orph_tree = RB_ROOT; + INIT_LIST_HEAD(&c->infos_list); + INIT_LIST_HEAD(&c->idx_gc); + INIT_LIST_HEAD(&c->replay_list); + INIT_LIST_HEAD(&c->replay_buds); + INIT_LIST_HEAD(&c->uncat_list); + INIT_LIST_HEAD(&c->empty_list); + INIT_LIST_HEAD(&c->freeable_list); + INIT_LIST_HEAD(&c->frdi_idx_list); + INIT_LIST_HEAD(&c->unclean_leb_list); + INIT_LIST_HEAD(&c->old_buds); + INIT_LIST_HEAD(&c->orph_list); + INIT_LIST_HEAD(&c->orph_new); + c->no_chk_data_crc = 1; + + c->highest_inum = UBIFS_FIRST_INO; + c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM; + + ubi_get_volume_info(ubi, &c->vi); + ubi_get_device_info(c->vi.ubi_num, &c->di); 
+ } + return c; +} + static int ubifs_fill_super(struct super_block *sb, void *data, int silent) { struct ubi_volume_desc *ubi = sb->s_fs_info; @@ -1978,49 +2025,11 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) struct inode *root; int err; - c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL); + c = alloc_ubifs_info(ubi); if (!c) return -ENOMEM; - spin_lock_init(&c->cnt_lock); - spin_lock_init(&c->cs_lock); - spin_lock_init(&c->buds_lock); - spin_lock_init(&c->space_lock); - spin_lock_init(&c->orphan_lock); - init_rwsem(&c->commit_sem); - mutex_init(&c->lp_mutex); - mutex_init(&c->tnc_mutex); - mutex_init(&c->log_mutex); - mutex_init(&c->mst_mutex); - mutex_init(&c->umount_mutex); - mutex_init(&c->bu_mutex); - mutex_init(&c->write_reserve_mutex); - init_waitqueue_head(&c->cmt_wq); - c->buds = RB_ROOT; - c->old_idx = RB_ROOT; - c->size_tree = RB_ROOT; - c->orph_tree = RB_ROOT; - INIT_LIST_HEAD(&c->infos_list); - INIT_LIST_HEAD(&c->idx_gc); - INIT_LIST_HEAD(&c->replay_list); - INIT_LIST_HEAD(&c->replay_buds); - INIT_LIST_HEAD(&c->uncat_list); - INIT_LIST_HEAD(&c->empty_list); - INIT_LIST_HEAD(&c->freeable_list); - INIT_LIST_HEAD(&c->frdi_idx_list); - INIT_LIST_HEAD(&c->unclean_leb_list); - INIT_LIST_HEAD(&c->old_buds); - INIT_LIST_HEAD(&c->orph_list); - INIT_LIST_HEAD(&c->orph_new); - c->no_chk_data_crc = 1; - c->vfs_sb = sb; - c->highest_inum = UBIFS_FIRST_INO; - c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM; - - ubi_get_volume_info(ubi, &c->vi); - ubi_get_device_info(c->vi.ubi_num, &c->di); - /* Re-open the UBI device in read-write mode */ c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE); if (IS_ERR(c->ubi)) { -- cgit v1.2.3 From d251ed271d528afb407cc2ede30923e34cb209a5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Jun 2011 10:24:33 -0400 Subject: ubifs: fix sget races * allocate ubifs_info in ->mount(), fill it enough for sb_test() and set ->s_fs_info to it in set() callback passed to sget(). * do *not* free it in ->put_super(); do that in ->kill_sb() after we'd done kill_anon_super(). * don't free it in ubifs_fill_super() either - deactivate_locked_super() done by caller when ubifs_fill_super() returns an error will take care of that sucker. * get rid of kludge with passing ubi to ubifs_fill_super() in ->s_fs_info; we only need it in alloc_ubifs_info(), so ubifs_fill_super() will need only ubifs_info. Which it will find in ->s_fs_info just fine, no need to reassign anything... As the result, sb_test() becomes safe to apply to all superblocks that can be found by sget() (and a kludge with temporary use of ->s_fs_info to store a pointer to very different structure goes away). 
Signed-off-by: Al Viro --- fs/ubifs/super.c | 54 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ddc3b02e8cf0..8c892c2d5300 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1848,7 +1848,6 @@ static void ubifs_put_super(struct super_block *sb) bdi_destroy(&c->bdi); ubi_close_volume(c->ubi); mutex_unlock(&c->umount_mutex); - kfree(c); } static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) @@ -2020,21 +2019,16 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi) static int ubifs_fill_super(struct super_block *sb, void *data, int silent) { - struct ubi_volume_desc *ubi = sb->s_fs_info; - struct ubifs_info *c; + struct ubifs_info *c = sb->s_fs_info; struct inode *root; int err; - c = alloc_ubifs_info(ubi); - if (!c) - return -ENOMEM; - c->vfs_sb = sb; /* Re-open the UBI device in read-write mode */ c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE); if (IS_ERR(c->ubi)) { err = PTR_ERR(c->ubi); - goto out_free; + goto out; } /* @@ -2100,24 +2094,29 @@ out_bdi: bdi_destroy(&c->bdi); out_close: ubi_close_volume(c->ubi); -out_free: - kfree(c); +out: return err; } static int sb_test(struct super_block *sb, void *data) { - dev_t *dev = data; + struct ubifs_info *c1 = data; struct ubifs_info *c = sb->s_fs_info; - return c->vi.cdev == *dev; + return c->vi.cdev == c1->vi.cdev; +} + +static int sb_set(struct super_block *sb, void *data) +{ + sb->s_fs_info = data; + return set_anon_super(sb, NULL); } static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, const char *name, void *data) { struct ubi_volume_desc *ubi; - struct ubi_volume_info vi; + struct ubifs_info *c; struct super_block *sb; int err; @@ -2134,19 +2133,24 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, name, (int)PTR_ERR(ubi)); return ERR_CAST(ubi); } - ubi_get_volume_info(ubi, &vi); - dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id); + c = alloc_ubifs_info(ubi); + if (!c) { + err = -ENOMEM; + goto out_close; + } + + dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); - sb = sget(fs_type, &sb_test, &set_anon_super, &vi.cdev); + sb = sget(fs_type, sb_test, sb_set, c); if (IS_ERR(sb)) { err = PTR_ERR(sb); - goto out_close; + kfree(c); } if (sb->s_root) { struct ubifs_info *c1 = sb->s_fs_info; - + kfree(c); /* A new mount point for already mounted UBIFS */ dbg_gen("this ubi volume is already mounted"); if (!!(flags & MS_RDONLY) != c1->ro_mount) { @@ -2155,11 +2159,6 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, } } else { sb->s_flags = flags; - /* - * Pass 'ubi' to 'fill_super()' in sb->s_fs_info where it is - * replaced by 'c'. - */ - sb->s_fs_info = ubi; err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 
1 : 0); if (err) goto out_deact; @@ -2179,11 +2178,18 @@ out_close: return ERR_PTR(err); } +static void kill_ubifs_super(struct super_block *s) +{ + struct ubifs_info *c = s->s_fs_info; + kill_anon_super(s); + kfree(c); +} + static struct file_system_type ubifs_fs_type = { .name = "ubifs", .owner = THIS_MODULE, .mount = ubifs_mount, - .kill_sb = kill_anon_super, + .kill_sb = kill_ubifs_super, }; /* -- cgit v1.2.3 From dde194a64bb5c3fd05d965775dc92e8a4920a53a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 12 Jun 2011 16:01:21 -0400 Subject: afs: fix sget() races, close leak on umount * set ->s_fs_info in set() callback passed to sget() * allocate the thing and set it up enough for afs_test_super() before making it visible * have it freed in ->kill_sb() (current tree simply leaks it) * have ->put_super() leave ->s_fs_info->volume alone; it's too early for dropping it; do that from ->kill_sb() after having called kill_anon_super(). Signed-off-by: Al Viro --- fs/afs/super.c | 73 +++++++++++++++++++++++++--------------------------------- 1 file changed, 32 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/afs/super.c b/fs/afs/super.c index fb240e8766d6..b7d48d7eaa1e 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -31,8 +31,8 @@ static void afs_i_init_once(void *foo); static struct dentry *afs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); +static void afs_kill_super(struct super_block *sb); static struct inode *afs_alloc_inode(struct super_block *sb); -static void afs_put_super(struct super_block *sb); static void afs_destroy_inode(struct inode *inode); static int afs_statfs(struct dentry *dentry, struct kstatfs *buf); @@ -40,7 +40,7 @@ struct file_system_type afs_fs_type = { .owner = THIS_MODULE, .name = "afs", .mount = afs_mount, - .kill_sb = kill_anon_super, + .kill_sb = afs_kill_super, .fs_flags = 0, }; @@ -50,7 +50,6 @@ static const struct super_operations afs_super_ops = { .drop_inode = afs_drop_inode, .destroy_inode = afs_destroy_inode, .evict_inode = afs_evict_inode, - .put_super = afs_put_super, .show_options = generic_show_options, }; @@ -282,19 +281,25 @@ static int afs_parse_device_name(struct afs_mount_params *params, */ static int afs_test_super(struct super_block *sb, void *data) { - struct afs_mount_params *params = data; + struct afs_super_info *as1 = data; struct afs_super_info *as = sb->s_fs_info; - return as->volume == params->volume; + return as->volume == as1->volume; +} + +static int afs_set_super(struct super_block *sb, void *data) +{ + sb->s_fs_info = data; + return set_anon_super(sb, NULL); } /* * fill in the superblock */ -static int afs_fill_super(struct super_block *sb, void *data) +static int afs_fill_super(struct super_block *sb, + struct afs_mount_params *params) { - struct afs_mount_params *params = data; - struct afs_super_info *as = NULL; + struct afs_super_info *as = sb->s_fs_info; struct afs_fid fid; struct dentry *root = NULL; struct inode *inode = NULL; @@ -302,22 +307,11 @@ static int afs_fill_super(struct super_block *sb, void *data) _enter(""); - /* allocate a superblock info record */ - as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); - if (!as) { - _leave(" = -ENOMEM"); - return -ENOMEM; - } - - afs_get_volume(params->volume); - as->volume = params->volume; - /* fill in the superblock */ sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; - sb->s_fs_info = as; sb->s_bdi = &as->volume->bdi; /* allocate the 
root inode and dentry */ @@ -326,7 +320,7 @@ static int afs_fill_super(struct super_block *sb, void *data) fid.unique = 1; inode = afs_iget(sb, params->key, &fid, NULL, NULL); if (IS_ERR(inode)) - goto error_inode; + return PTR_ERR(inode); if (params->autocell) set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags); @@ -342,16 +336,8 @@ static int afs_fill_super(struct super_block *sb, void *data) _leave(" = 0"); return 0; -error_inode: - ret = PTR_ERR(inode); - inode = NULL; error: iput(inode); - afs_put_volume(as->volume); - kfree(as); - - sb->s_fs_info = NULL; - _leave(" = %d", ret); return ret; } @@ -367,6 +353,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type, struct afs_volume *vol; struct key *key; char *new_opts = kstrdup(options, GFP_KERNEL); + struct afs_super_info *as; int ret; _enter(",,%s,%p", dev_name, options); @@ -399,12 +386,22 @@ static struct dentry *afs_mount(struct file_system_type *fs_type, ret = PTR_ERR(vol); goto error; } - params.volume = vol; + + /* allocate a superblock info record */ + as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL); + if (!as) { + ret = -ENOMEM; + afs_put_volume(vol); + goto error; + } + as->volume = vol; /* allocate a deviceless superblock */ - sb = sget(fs_type, afs_test_super, set_anon_super, ¶ms); + sb = sget(fs_type, afs_test_super, afs_set_super, as); if (IS_ERR(sb)) { ret = PTR_ERR(sb); + afs_put_volume(vol); + kfree(as); goto error; } @@ -422,16 +419,16 @@ static struct dentry *afs_mount(struct file_system_type *fs_type, } else { _debug("reuse"); ASSERTCMP(sb->s_flags, &, MS_ACTIVE); + afs_put_volume(vol); + kfree(as); } - afs_put_volume(params.volume); afs_put_cell(params.cell); kfree(new_opts); _leave(" = 0 [%p]", sb); return dget(sb->s_root); error: - afs_put_volume(params.volume); afs_put_cell(params.cell); key_put(params.key); kfree(new_opts); @@ -439,18 +436,12 @@ error: return ERR_PTR(ret); } -/* - * finish the unmounting process on the superblock - */ -static void afs_put_super(struct super_block *sb) +static void afs_kill_super(struct super_block *sb) { struct afs_super_info *as = sb->s_fs_info; - - _enter(""); - + kill_anon_super(sb); afs_put_volume(as->volume); - - _leave(""); + kfree(as); } /* -- cgit v1.2.3 From a685e08987d1edf1995b76511d4c98ea0e905377 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 8 Jun 2011 21:13:01 -0400 Subject: Delay struct net freeing while there's a sysfs instance refering to it * new refcount in struct net, controlling actual freeing of the memory * new method in kobj_ns_type_operations (->drop_ns()) * ->current_ns() semantics change - it's supposed to be followed by corresponding ->drop_ns(). For struct net in case of CONFIG_NET_NS it bumps the new refcount; net_drop_ns() decrements it and calls net_free() if the last reference has been dropped. Method renamed to ->grab_current_ns(). * old net_free() callers call net_drop_ns() instead. * sysfs_exit_ns() is gone, along with a large part of callchain leading to it; now that the references stored in ->ns[...] stay valid we do not need to hunt them down and replace them with NULL. That fixes problems in sysfs_lookup() and sysfs_readdir(), along with getting rid of sb->s_instances abuse. Note that struct net *shutdown* logics has not changed - net_cleanup() is called exactly when it used to be called. The only thing postponed by having a sysfs instance refering to that struct net is actual freeing of memory occupied by struct net. 
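As a hedged illustration of the new pairing (these helpers are hypothetical and not part of the patch; the real users are sysfs_mount() and free_sysfs_super_info() in the diff below), every tag obtained from kobj_ns_grab_current() must eventually be handed to kobj_ns_drop(), and for the network namespace type only that final drop, via net_drop_ns(), frees the struct net; putting the last ->count reference still only shuts the namespace down.

#include <linux/kobject_ns.h>

/* Hypothetical consumer: the grabbed tag stays safe to dereference and
 * compare until it is dropped, even if the namespace has already exited. */
static void *example_grab_net_tag(void)
{
        return kobj_ns_grab_current(KOBJ_NS_TYPE_NET);  /* bumps net->passive */
}

static void example_release_net_tag(void *tag)
{
        kobj_ns_drop(KOBJ_NS_TYPE_NET, tag);    /* last drop -> net_drop_ns() -> net_free() */
}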
Signed-off-by: Al Viro --- fs/sysfs/mount.c | 37 +++++++++++-------------------------- fs/sysfs/sysfs.h | 2 +- include/linux/kobject_ns.h | 10 ++++++---- include/linux/sysfs.h | 7 ------- include/net/net_namespace.h | 10 +++++++++- lib/kobject.c | 26 +++++++++----------------- net/core/net-sysfs.c | 23 +++++++++-------------- net/core/net_namespace.c | 12 ++++++++++-- 8 files changed, 55 insertions(+), 72 deletions(-) (limited to 'fs') diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index 266895783b47..e34f0d99ea4e 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c @@ -95,6 +95,14 @@ static int sysfs_set_super(struct super_block *sb, void *data) return error; } +static void free_sysfs_super_info(struct sysfs_super_info *info) +{ + int type; + for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) + kobj_ns_drop(type, info->ns[type]); + kfree(info); +} + static struct dentry *sysfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { @@ -108,11 +116,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, return ERR_PTR(-ENOMEM); for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) - info->ns[type] = kobj_ns_current(type); + info->ns[type] = kobj_ns_grab_current(type); sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info); if (IS_ERR(sb) || sb->s_fs_info != info) - kfree(info); + free_sysfs_super_info(info); if (IS_ERR(sb)) return ERR_CAST(sb); if (!sb->s_root) { @@ -131,12 +139,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, static void sysfs_kill_sb(struct super_block *sb) { struct sysfs_super_info *info = sysfs_info(sb); - /* Remove the superblock from fs_supers/s_instances * so we can't find it, before freeing sysfs_super_info. */ kill_anon_super(sb); - kfree(info); + free_sysfs_super_info(info); } static struct file_system_type sysfs_fs_type = { @@ -145,28 +152,6 @@ static struct file_system_type sysfs_fs_type = { .kill_sb = sysfs_kill_sb, }; -void sysfs_exit_ns(enum kobj_ns_type type, const void *ns) -{ - struct super_block *sb; - - mutex_lock(&sysfs_mutex); - spin_lock(&sb_lock); - list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) { - struct sysfs_super_info *info = sysfs_info(sb); - /* - * If we see a superblock on the fs_supers/s_instances - * list the unmount has not completed and sb->s_fs_info - * points to a valid struct sysfs_super_info. - */ - /* Ignore superblocks with the wrong ns */ - if (info->ns[type] != ns) - continue; - info->ns[type] = NULL; - } - spin_unlock(&sb_lock); - mutex_unlock(&sysfs_mutex); -} - int __init sysfs_init(void) { int err = -ENOMEM; diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 3d28af31d863..2ed2404f3113 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -136,7 +136,7 @@ struct sysfs_addrm_cxt { * instance). */ struct sysfs_super_info { - const void *ns[KOBJ_NS_TYPES]; + void *ns[KOBJ_NS_TYPES]; }; #define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info)) extern struct sysfs_dirent sysfs_root; diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index 82cb5bf461fb..f66b065a8b5f 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h @@ -32,15 +32,17 @@ enum kobj_ns_type { /* * Callbacks so sysfs can determine namespaces - * @current_ns: return calling task's namespace + * @grab_current_ns: return a new reference to calling task's namespace * @netlink_ns: return namespace to which a sock belongs (right?) * @initial_ns: return the initial namespace (i.e. 
init_net_ns) + * @drop_ns: drops a reference to namespace */ struct kobj_ns_type_operations { enum kobj_ns_type type; - const void *(*current_ns)(void); + void *(*grab_current_ns)(void); const void *(*netlink_ns)(struct sock *sk); const void *(*initial_ns)(void); + void (*drop_ns)(void *); }; int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); @@ -48,9 +50,9 @@ int kobj_ns_type_registered(enum kobj_ns_type type); const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); -const void *kobj_ns_current(enum kobj_ns_type type); +void *kobj_ns_grab_current(enum kobj_ns_type type); const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); const void *kobj_ns_initial(enum kobj_ns_type type); -void kobj_ns_exit(enum kobj_ns_type type, const void *ns); +void kobj_ns_drop(enum kobj_ns_type type, void *ns); #endif /* _LINUX_KOBJECT_NS_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index c3acda60eee0..e2696d76a599 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -177,9 +177,6 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); void sysfs_put(struct sysfs_dirent *sd); -/* Called to clear a ns tag when it is no longer valid */ -void sysfs_exit_ns(enum kobj_ns_type type, const void *tag); - int __must_check sysfs_init(void); #else /* CONFIG_SYSFS */ @@ -338,10 +335,6 @@ static inline void sysfs_put(struct sysfs_dirent *sd) { } -static inline void sysfs_exit_ns(int type, const void *tag) -{ -} - static inline int __must_check sysfs_init(void) { return 0; diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 2bf9ed9ef26b..aef430d779bd 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -35,8 +35,11 @@ struct netns_ipvs; #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) struct net { + atomic_t passive; /* To decided when the network + * namespace should be freed. + */ atomic_t count; /* To decided when the network - * namespace should be freed. + * namespace should be shut down. */ #ifdef NETNS_REFCNT_DEBUG atomic_t use_count; /* To track references we @@ -154,6 +157,9 @@ int net_eq(const struct net *net1, const struct net *net2) { return net1 == net2; } + +extern void net_drop_ns(void *); + #else static inline struct net *get_net(struct net *net) @@ -175,6 +181,8 @@ int net_eq(const struct net *net1, const struct net *net2) { return 1; } + +#define net_drop_ns NULL #endif diff --git a/lib/kobject.c b/lib/kobject.c index 82dc34c095c2..640bd98a4c8a 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -948,14 +948,14 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj) } -const void *kobj_ns_current(enum kobj_ns_type type) +void *kobj_ns_grab_current(enum kobj_ns_type type) { - const void *ns = NULL; + void *ns = NULL; spin_lock(&kobj_ns_type_lock); if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && kobj_ns_ops_tbl[type]) - ns = kobj_ns_ops_tbl[type]->current_ns(); + ns = kobj_ns_ops_tbl[type]->grab_current_ns(); spin_unlock(&kobj_ns_type_lock); return ns; @@ -987,23 +987,15 @@ const void *kobj_ns_initial(enum kobj_ns_type type) return ns; } -/* - * kobj_ns_exit - invalidate a namespace tag - * - * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET) - * @ns: the actual namespace being invalidated - * - * This is called when a tag is no longer valid. 
For instance, - * when a network namespace exits, it uses this helper to - * make sure no sb's sysfs_info points to the now-invalidated - * netns. - */ -void kobj_ns_exit(enum kobj_ns_type type, const void *ns) +void kobj_ns_drop(enum kobj_ns_type type, void *ns) { - sysfs_exit_ns(type, ns); + spin_lock(&kobj_ns_type_lock); + if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && + kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns) + kobj_ns_ops_tbl[type]->drop_ns(ns); + spin_unlock(&kobj_ns_type_lock); } - EXPORT_SYMBOL(kobject_get); EXPORT_SYMBOL(kobject_put); EXPORT_SYMBOL(kobject_del); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 11b98bc2aa8f..33d2a1fba131 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1179,9 +1179,14 @@ static void remove_queue_kobjects(struct net_device *net) #endif } -static const void *net_current_ns(void) +static void *net_grab_current_ns(void) { - return current->nsproxy->net_ns; + struct net *ns = current->nsproxy->net_ns; +#ifdef CONFIG_NET_NS + if (ns) + atomic_inc(&ns->passive); +#endif + return ns; } static const void *net_initial_ns(void) @@ -1196,22 +1201,13 @@ static const void *net_netlink_ns(struct sock *sk) struct kobj_ns_type_operations net_ns_type_operations = { .type = KOBJ_NS_TYPE_NET, - .current_ns = net_current_ns, + .grab_current_ns = net_grab_current_ns, .netlink_ns = net_netlink_ns, .initial_ns = net_initial_ns, + .drop_ns = net_drop_ns, }; EXPORT_SYMBOL_GPL(net_ns_type_operations); -static void net_kobj_ns_exit(struct net *net) -{ - kobj_ns_exit(KOBJ_NS_TYPE_NET, net); -} - -static struct pernet_operations kobj_net_ops = { - .exit = net_kobj_ns_exit, -}; - - #ifdef CONFIG_HOTPLUG static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) { @@ -1339,6 +1335,5 @@ EXPORT_SYMBOL(netdev_class_remove_file); int netdev_kobject_init(void) { kobj_ns_type_register(&net_ns_type_operations); - register_pernet_subsys(&kobj_net_ops); return class_register(&net_class); } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 6c6b86d0da15..cdcbc3cb00a9 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -128,6 +128,7 @@ static __net_init int setup_net(struct net *net) LIST_HEAD(net_exit_list); atomic_set(&net->count, 1); + atomic_set(&net->passive, 1); #ifdef NETNS_REFCNT_DEBUG atomic_set(&net->use_count, 0); @@ -210,6 +211,13 @@ static void net_free(struct net *net) kmem_cache_free(net_cachep, net); } +void net_drop_ns(void *p) +{ + struct net *ns = p; + if (ns && atomic_dec_and_test(&ns->passive)) + net_free(ns); +} + struct net *copy_net_ns(unsigned long flags, struct net *old_net) { struct net *net; @@ -230,7 +238,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net) } mutex_unlock(&net_mutex); if (rv < 0) { - net_free(net); + net_drop_ns(net); return ERR_PTR(rv); } return net; @@ -286,7 +294,7 @@ static void cleanup_net(struct work_struct *work) /* Finally it is safe to free my network namespace structure */ list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { list_del_init(&net->exit_list); - net_free(net); + net_drop_ns(net); } } static DECLARE_WORK(net_cleanup_work, cleanup_net); -- cgit v1.2.3 From 1fb74cda1b5e9c6207225fda5ef7504e815ce0e0 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Sun, 12 Jun 2011 22:44:10 -0400 Subject: jbd2: Remove obsolete parameters in the comments for some jbd2 functions credits isn't a parameter for jbd2_journal_get_write_access and jbd2_journal_get_undo_access. So remove the corresponding comments. 
Acked-by: Jan Kara Cc: Randy Dunlap Signed-off-by: Tao Ma Signed-off-by: "Theodore Ts'o" --- fs/jbd2/transaction.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 3eec82d32fd4..547b101049e5 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -814,7 +814,6 @@ out: * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update. * @handle: transaction to add buffer modifications to * @bh: bh to be used for metadata writes - * @credits: variable that will receive credits for the buffer * * Returns an error code or 0 on success. * @@ -932,7 +931,6 @@ out: * non-rewindable consequences * @handle: transaction * @bh: buffer to undo - * @credits: store the number of taken credits here (if not NULL) * * Sometimes there is a need to distinguish between metadata which has * been committed to disk and that which has not. The ext3fs code uses -- cgit v1.2.3 From d4c208b86b8be4254eba0e74071496e599f94639 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 13 Jun 2011 12:45:48 +0200 Subject: block: use the passed in @bdev when claiming if partno is zero 6b4517a791 (block: implement bd_claiming and claiming block) introduced claiming block to support O_EXCL blkdev opens properly. bd_start_claiming() looks up the part 0 bdev and starts claiming block. The function assumed that there is only one part 0 bdev and always used bdget_disk(disk, 0) to look it up; unfortunately, this isn't true for some drivers (floppy) which use multiple block devices to denote different operating parameters for the same physical device. There can be multiple part 0 bdev's for the same device number. This incorrect assumption caused the wrong bdev to be used during claiming leading to unbalanced bd_holders as reported in the following bug. https://bugzilla.kernel.org/show_bug.cgi?id=28522 This patch updates bd_start_claiming() such that it uses the bdev specified as argument if its partno is zero. Note that this means that different bdev's can be used for the same device and O_EXCL check can be effectively bypassed. It has always been broken that way and floppy is fortunately on its way out. Leave that breakage alone. Signed-off-by: Tejun Heo Reported-by: Alex Villacis Lasso Tested-by: Alex Villacis Lasso Cc: stable@kernel.org # >= v2.6.36 Signed-off-by: Jens Axboe --- fs/block_dev.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 1a2421f908f0..610e8e0b04b8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, if (!disk) return ERR_PTR(-ENXIO); - whole = bdget_disk(disk, 0); + /* + * Normally, @bdev should equal what's returned from bdget_disk() + * if partno is 0; however, some drivers (floppy) use multiple + * bdev's for the same physical device and @bdev may be one of the + * aliases. Keep @bdev if partno is 0. This means claimer + * tracking is broken for those devices but it has always been that + * way. + */ + if (partno) + whole = bdget_disk(disk, 0); + else + whole = bdgrab(bdev); + module_put(disk->fops->owner); put_disk(disk); if (!whole) -- cgit v1.2.3 From ac08aedfa5d3de0dcb3825b598d16c2e57991f54 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Jun 2011 11:28:50 -0400 Subject: Btrfs: check the return value from set_anon_super Al Viro noticed we weren't checking for set_anon_super failures. 
This adds the required checks. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9f68c6898653..20c111b3fa0d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1312,7 +1312,9 @@ again: spin_lock_init(&root->cache_lock); init_waitqueue_head(&root->cache_wait); - set_anon_super(&root->anon_super, NULL); + ret = set_anon_super(&root->anon_super, NULL); + if (ret) + goto fail; if (btrfs_root_refs(&root->root_item) == 0) { ret = -ENOENT; -- cgit v1.2.3 From f4c44016218a6fce357715b9bbabbbbe1f69853c Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Jun 2011 11:30:47 -0400 Subject: Btrfs: drop the delalloc_bytes check in shrink_delalloc Even when delalloc_bytes is zero, we may need to sleep while waiting for delalloc space. Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b42efc2ded51..1f61bf5b4960 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3314,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, if (reserved == 0) return 0; - /* nothing to shrink - nothing to reclaim */ - if (root->fs_info->delalloc_bytes == 0) - return 0; - max_reclaim = min(reserved, to_reclaim); while (loops < 1024) { -- cgit v1.2.3 From de1b794130b130e77ffa975bb58cb843744f9ae5 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 13 Jun 2011 15:38:22 -0400 Subject: jbd2: Fix oops in jbd2_journal_remove_journal_head() jbd2_journal_remove_journal_head() can oops when trying to access journal_head returned by bh2jh(). This is caused for example by the following race: TASK1 TASK2 jbd2_journal_commit_transaction() ... processing t_forget list __jbd2_journal_refile_buffer(jh); if (!jh->b_transaction) { jbd_unlock_bh_state(bh); jbd2_journal_try_to_free_buffers() jbd2_journal_grab_journal_head(bh) jbd_lock_bh_state(bh) __journal_try_to_free_buffer() jbd2_journal_put_journal_head(jh) jbd2_journal_remove_journal_head(bh); jbd2_journal_put_journal_head() in TASK2 sees that b_jcount == 0 and buffer is not part of any transaction and thus frees journal_head before TASK1 gets to doing so. Note that even buffer_head can be released by try_to_free_buffers() after jbd2_journal_put_journal_head() which adds even larger opportunity for oops (but I didn't see this happen in reality). Fix the problem by making transactions hold their own journal_head reference (in b_jcount). That way we don't have to remove journal_head explicitely via jbd2_journal_remove_journal_head() and instead just remove journal_head when b_jcount drops to zero. The result of this is that [__]jbd2_journal_refile_buffer(), [__]jbd2_journal_unfile_buffer(), and __jdb2_journal_remove_checkpoint() can free journal_head which needs modification of a few callers. Also we have to be careful because once journal_head is removed, buffer_head might be freed as well. So we have to get our own buffer_head reference where it matters. 
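In caller terms the new rule can be summarised with a small hypothetical helper (example_put_jh() is illustrative, not part of the patch; the real equivalents are jbd2_journal_unfile_buffer() and jbd2_journal_refile_buffer() in the diff below): take a private buffer_head reference before any call that can drop the last b_jcount, since both the journal_head and, once its jbd reference on the bh is gone, the buffer_head itself may be freed by that call.

#include <linux/jbd2.h>
#include <linux/buffer_head.h>

/* Hypothetical caller following the convention introduced by this patch. */
static void example_put_jh(struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        get_bh(bh);                             /* keep bh alive across the put */
        /* ... last use of jh while we still hold a b_jcount reference ... */
        jbd2_journal_put_journal_head(jh);      /* may free jh and drop bh's jbd ref */
        __brelse(bh);                           /* drop our private reference */
}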
Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/jbd2/checkpoint.c | 28 +++++++++------- fs/jbd2/commit.c | 33 +++++++++++-------- fs/jbd2/journal.c | 91 ++++++++++++++++----------------------------------- fs/jbd2/transaction.c | 67 +++++++++++++++++++------------------ include/linux/jbd2.h | 2 -- 5 files changed, 99 insertions(+), 122 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 6a79fd0a1a32..2c62c5aae82f 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -97,10 +97,14 @@ static int __try_to_free_cp_buf(struct journal_head *jh) if (jh->b_jlist == BJ_None && !buffer_locked(bh) && !buffer_dirty(bh) && !buffer_write_io_error(bh)) { + /* + * Get our reference so that bh cannot be freed before + * we unlock it + */ + get_bh(bh); JBUFFER_TRACE(jh, "remove from checkpoint list"); ret = __jbd2_journal_remove_checkpoint(jh) + 1; jbd_unlock_bh_state(bh); - jbd2_journal_remove_journal_head(bh); BUFFER_TRACE(bh, "release"); __brelse(bh); } else { @@ -223,8 +227,8 @@ restart: spin_lock(&journal->j_list_lock); goto restart; } + get_bh(bh); if (buffer_locked(bh)) { - atomic_inc(&bh->b_count); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); wait_on_buffer(bh); @@ -243,7 +247,6 @@ restart: */ released = __jbd2_journal_remove_checkpoint(jh); jbd_unlock_bh_state(bh); - jbd2_journal_remove_journal_head(bh); __brelse(bh); } @@ -284,7 +287,7 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, int ret = 0; if (buffer_locked(bh)) { - atomic_inc(&bh->b_count); + get_bh(bh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); wait_on_buffer(bh); @@ -316,12 +319,12 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh, ret = 1; if (unlikely(buffer_write_io_error(bh))) ret = -EIO; + get_bh(bh); J_ASSERT_JH(jh, !buffer_jbddirty(bh)); BUFFER_TRACE(bh, "remove from checkpoint"); __jbd2_journal_remove_checkpoint(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); - jbd2_journal_remove_journal_head(bh); __brelse(bh); } else { /* @@ -554,7 +557,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal) /* * journal_clean_one_cp_list * - * Find all the written-back checkpoint buffers in the given list and release them. + * Find all the written-back checkpoint buffers in the given list and + * release them. * * Called with the journal locked. * Called with j_list_lock held. @@ -663,8 +667,8 @@ out: * checkpoint lists. * * The function returns 1 if it frees the transaction, 0 otherwise. + * The function can free jh and bh. * - * This function is called with the journal locked. * This function is called with j_list_lock held. * This function is called with jbd_lock_bh_state(jh2bh(jh)) */ @@ -684,13 +688,14 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) } journal = transaction->t_journal; + JBUFFER_TRACE(jh, "removing from transaction"); __buffer_unlink(jh); jh->b_cp_transaction = NULL; + jbd2_journal_put_journal_head(jh); if (transaction->t_checkpoint_list != NULL || transaction->t_checkpoint_io_list != NULL) goto out; - JBUFFER_TRACE(jh, "transaction has no more buffers"); /* * There is one special case to worry about: if we have just pulled the @@ -701,10 +706,8 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) * The locking here around t_state is a bit sleazy. * See the comment at the end of jbd2_journal_commit_transaction(). 
*/ - if (transaction->t_state != T_FINISHED) { - JBUFFER_TRACE(jh, "belongs to running/committing transaction"); + if (transaction->t_state != T_FINISHED) goto out; - } /* OK, that was the last buffer for the transaction: we can now safely remove this transaction from the log */ @@ -723,7 +726,6 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) wake_up(&journal->j_wait_logspace); ret = 1; out: - JBUFFER_TRACE(jh, "exit"); return ret; } @@ -742,6 +744,8 @@ void __jbd2_journal_insert_checkpoint(struct journal_head *jh, J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh))); J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); + /* Get reference for checkpointing transaction */ + jbd2_journal_grab_journal_head(jh2bh(jh)); jh->b_cp_transaction = transaction; if (!transaction->t_checkpoint_list) { diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 7f21cf3aaf92..eef6979821a4 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -848,10 +848,16 @@ restart_loop: while (commit_transaction->t_forget) { transaction_t *cp_transaction; struct buffer_head *bh; + int try_to_free = 0; jh = commit_transaction->t_forget; spin_unlock(&journal->j_list_lock); bh = jh2bh(jh); + /* + * Get a reference so that bh cannot be freed before we are + * done with it. + */ + get_bh(bh); jbd_lock_bh_state(bh); J_ASSERT_JH(jh, jh->b_transaction == commit_transaction); @@ -914,28 +920,27 @@ restart_loop: __jbd2_journal_insert_checkpoint(jh, commit_transaction); if (is_journal_aborted(journal)) clear_buffer_jbddirty(bh); - JBUFFER_TRACE(jh, "refile for checkpoint writeback"); - __jbd2_journal_refile_buffer(jh); - jbd_unlock_bh_state(bh); } else { J_ASSERT_BH(bh, !buffer_dirty(bh)); - /* The buffer on BJ_Forget list and not jbddirty means + /* + * The buffer on BJ_Forget list and not jbddirty means * it has been freed by this transaction and hence it * could not have been reallocated until this * transaction has committed. *BUT* it could be * reallocated once we have written all the data to * disk and before we process the buffer on BJ_Forget - * list. */ - JBUFFER_TRACE(jh, "refile or unfile freed buffer"); - __jbd2_journal_refile_buffer(jh); - if (!jh->b_transaction) { - jbd_unlock_bh_state(bh); - /* needs a brelse */ - jbd2_journal_remove_journal_head(bh); - release_buffer_page(bh); - } else - jbd_unlock_bh_state(bh); + * list. + */ + if (!jh->b_next_transaction) + try_to_free = 1; } + JBUFFER_TRACE(jh, "refile or unfile buffer"); + __jbd2_journal_refile_buffer(jh); + jbd_unlock_bh_state(bh); + if (try_to_free) + release_buffer_page(bh); /* Drops bh reference */ + else + __brelse(bh); cond_resched_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 9a7826990304..0dfa5b598e68 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2078,10 +2078,9 @@ static void journal_free_journal_head(struct journal_head *jh) * When a buffer has its BH_JBD bit set it is immune from being released by * core kernel code, mainly via ->b_count. * - * A journal_head may be detached from its buffer_head when the journal_head's - * b_transaction, b_cp_transaction and b_next_transaction pointers are NULL. - * Various places in JBD call jbd2_journal_remove_journal_head() to indicate that the - * journal_head can be dropped if needed. + * A journal_head is detached from its buffer_head when the journal_head's + * b_jcount reaches zero. 
Running transaction (b_transaction) and checkpoint + * transaction (b_cp_transaction) hold their references to b_jcount. * * Various places in the kernel want to attach a journal_head to a buffer_head * _before_ attaching the journal_head to a transaction. To protect the @@ -2094,17 +2093,16 @@ static void journal_free_journal_head(struct journal_head *jh) * (Attach a journal_head if needed. Increments b_jcount) * struct journal_head *jh = jbd2_journal_add_journal_head(bh); * ... + * (Get another reference for transaction) + * jbd2_journal_grab_journal_head(bh); * jh->b_transaction = xxx; + * (Put original reference) * jbd2_journal_put_journal_head(jh); - * - * Now, the journal_head's b_jcount is zero, but it is safe from being released - * because it has a non-zero b_transaction. */ /* * Give a buffer_head a journal_head. * - * Doesn't need the journal lock. * May sleep. */ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh) @@ -2168,61 +2166,29 @@ static void __journal_remove_journal_head(struct buffer_head *bh) struct journal_head *jh = bh2jh(bh); J_ASSERT_JH(jh, jh->b_jcount >= 0); - - get_bh(bh); - if (jh->b_jcount == 0) { - if (jh->b_transaction == NULL && - jh->b_next_transaction == NULL && - jh->b_cp_transaction == NULL) { - J_ASSERT_JH(jh, jh->b_jlist == BJ_None); - J_ASSERT_BH(bh, buffer_jbd(bh)); - J_ASSERT_BH(bh, jh2bh(jh) == bh); - BUFFER_TRACE(bh, "remove journal_head"); - if (jh->b_frozen_data) { - printk(KERN_WARNING "%s: freeing " - "b_frozen_data\n", - __func__); - jbd2_free(jh->b_frozen_data, bh->b_size); - } - if (jh->b_committed_data) { - printk(KERN_WARNING "%s: freeing " - "b_committed_data\n", - __func__); - jbd2_free(jh->b_committed_data, bh->b_size); - } - bh->b_private = NULL; - jh->b_bh = NULL; /* debug, really */ - clear_buffer_jbd(bh); - __brelse(bh); - journal_free_journal_head(jh); - } else { - BUFFER_TRACE(bh, "journal_head was locked"); - } + J_ASSERT_JH(jh, jh->b_transaction == NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + J_ASSERT_JH(jh, jh->b_cp_transaction == NULL); + J_ASSERT_JH(jh, jh->b_jlist == BJ_None); + J_ASSERT_BH(bh, buffer_jbd(bh)); + J_ASSERT_BH(bh, jh2bh(jh) == bh); + BUFFER_TRACE(bh, "remove journal_head"); + if (jh->b_frozen_data) { + printk(KERN_WARNING "%s: freeing b_frozen_data\n", __func__); + jbd2_free(jh->b_frozen_data, bh->b_size); } + if (jh->b_committed_data) { + printk(KERN_WARNING "%s: freeing b_committed_data\n", __func__); + jbd2_free(jh->b_committed_data, bh->b_size); + } + bh->b_private = NULL; + jh->b_bh = NULL; /* debug, really */ + clear_buffer_jbd(bh); + journal_free_journal_head(jh); } /* - * jbd2_journal_remove_journal_head(): if the buffer isn't attached to a transaction - * and has a zero b_jcount then remove and release its journal_head. If we did - * see that the buffer is not used by any transaction we also "logically" - * decrement ->b_count. - * - * We in fact take an additional increment on ->b_count as a convenience, - * because the caller usually wants to do additional things with the bh - * after calling here. - * The caller of jbd2_journal_remove_journal_head() *must* run __brelse(bh) at some - * time. Once the caller has run __brelse(), the buffer is eligible for - * reaping by try_to_free_buffers(). - */ -void jbd2_journal_remove_journal_head(struct buffer_head *bh) -{ - jbd_lock_bh_journal_head(bh); - __journal_remove_journal_head(bh); - jbd_unlock_bh_journal_head(bh); -} - -/* - * Drop a reference on the passed journal_head. 
If it fell to zero then try to + * Drop a reference on the passed journal_head. If it fell to zero then * release the journal_head from the buffer_head. */ void jbd2_journal_put_journal_head(struct journal_head *jh) @@ -2232,11 +2198,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh) jbd_lock_bh_journal_head(bh); J_ASSERT_JH(jh, jh->b_jcount > 0); --jh->b_jcount; - if (!jh->b_jcount && !jh->b_transaction) { + if (!jh->b_jcount) { __journal_remove_journal_head(bh); + jbd_unlock_bh_journal_head(bh); __brelse(bh); - } - jbd_unlock_bh_journal_head(bh); + } else + jbd_unlock_bh_journal_head(bh); } /* diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 547b101049e5..2d7109414cdd 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -30,6 +30,7 @@ #include static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh); +static void __jbd2_journal_unfile_buffer(struct journal_head *jh); /* * jbd2_get_transaction: obtain a new transaction_t object. @@ -764,7 +765,6 @@ repeat: if (!jh->b_transaction) { JBUFFER_TRACE(jh, "no transaction"); J_ASSERT_JH(jh, !jh->b_next_transaction); - jh->b_transaction = transaction; JBUFFER_TRACE(jh, "file as BJ_Reserved"); spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); @@ -895,8 +895,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) * committed and so it's safe to clear the dirty bit. */ clear_buffer_dirty(jh2bh(jh)); - jh->b_transaction = transaction; - /* first access by this transaction */ jh->b_modified = 0; @@ -1230,8 +1228,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); } else { __jbd2_journal_unfile_buffer(jh); - jbd2_journal_remove_journal_head(bh); - __brelse(bh); if (!buffer_jbd(bh)) { spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); @@ -1554,19 +1550,32 @@ void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) mark_buffer_dirty(bh); /* Expose it to the VM */ } -void __jbd2_journal_unfile_buffer(struct journal_head *jh) +/* + * Remove buffer from all transactions. + * + * Called with bh_state lock and j_list_lock + * + * jh and bh may be already freed when this function returns. + */ +static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; + jbd2_journal_put_journal_head(jh); } void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) { - jbd_lock_bh_state(jh2bh(jh)); + struct buffer_head *bh = jh2bh(jh); + + /* Get reference so that buffer cannot be freed before we unlock it */ + get_bh(bh); + jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); __jbd2_journal_unfile_buffer(jh); spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(jh2bh(jh)); + jbd_unlock_bh_state(bh); + __brelse(bh); } /* @@ -1593,8 +1602,6 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) if (jh->b_jlist == BJ_None) { JBUFFER_TRACE(jh, "remove from checkpoint list"); __jbd2_journal_remove_checkpoint(jh); - jbd2_journal_remove_journal_head(bh); - __brelse(bh); } } spin_unlock(&journal->j_list_lock); @@ -1657,7 +1664,6 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, /* * We take our own ref against the journal_head here to avoid * having to add tons of locking around each instance of - * jbd2_journal_remove_journal_head() and * jbd2_journal_put_journal_head(). 
*/ jh = jbd2_journal_grab_journal_head(bh); @@ -1695,10 +1701,9 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) int may_free = 1; struct buffer_head *bh = jh2bh(jh); - __jbd2_journal_unfile_buffer(jh); - if (jh->b_cp_transaction) { JBUFFER_TRACE(jh, "on running+cp transaction"); + __jbd2_journal_temp_unlink_buffer(jh); /* * We don't want to write the buffer anymore, clear the * bit so that we don't confuse checks in @@ -1709,8 +1714,7 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) may_free = 0; } else { JBUFFER_TRACE(jh, "on running transaction"); - jbd2_journal_remove_journal_head(bh); - __brelse(bh); + __jbd2_journal_unfile_buffer(jh); } return may_free; } @@ -1988,6 +1992,8 @@ void __jbd2_journal_file_buffer(struct journal_head *jh, if (jh->b_transaction) __jbd2_journal_temp_unlink_buffer(jh); + else + jbd2_journal_grab_journal_head(bh); jh->b_transaction = transaction; switch (jlist) { @@ -2039,9 +2045,10 @@ void jbd2_journal_file_buffer(struct journal_head *jh, * already started to be used by a subsequent transaction, refile the * buffer on that transaction's metadata list. * - * Called under journal->j_list_lock - * + * Called under j_list_lock * Called under jbd_lock_bh_state(jh2bh(jh)) + * + * jh and bh may be already free when this function returns */ void __jbd2_journal_refile_buffer(struct journal_head *jh) { @@ -2065,6 +2072,11 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); + /* + * We set b_transaction here because b_next_transaction will inherit + * our jh reference and thus __jbd2_journal_file_buffer() must not + * take a new one. + */ jh->b_transaction = jh->b_next_transaction; jh->b_next_transaction = NULL; if (buffer_freed(bh)) @@ -2081,30 +2093,21 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh) } /* - * For the unlocked version of this call, also make sure that any - * hanging journal_head is cleaned up if necessary. - * - * __jbd2_journal_refile_buffer is usually called as part of a single locked - * operation on a buffer_head, in which the caller is probably going to - * be hooking the journal_head onto other lists. In that case it is up - * to the caller to remove the journal_head if necessary. For the - * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be - * doing anything else to the buffer so we need to do the cleanup - * ourselves to avoid a jh leak. - * - * *** The journal_head may be freed by this call! *** + * __jbd2_journal_refile_buffer() with necessary locking added. We take our + * bh reference so that we can safely unlock bh. + * + * The jh and bh may be freed by this call. 
*/ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); + /* Get reference so that buffer cannot be freed before we unlock it */ + get_bh(bh); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); - __jbd2_journal_refile_buffer(jh); jbd_unlock_bh_state(bh); - jbd2_journal_remove_journal_head(bh); - spin_unlock(&journal->j_list_lock); __brelse(bh); } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 4ecb7b16b278..d087c2e7b2aa 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1024,7 +1024,6 @@ struct journal_s /* Filing buffers */ extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *); -extern void __jbd2_journal_unfile_buffer(struct journal_head *); extern void __jbd2_journal_refile_buffer(struct journal_head *); extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *); extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int); @@ -1165,7 +1164,6 @@ extern void jbd2_journal_release_jbd_inode(journal_t *journal, struct jbd2_in */ struct journal_head *jbd2_journal_add_journal_head(struct buffer_head *bh); struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh); -void jbd2_journal_remove_journal_head(struct buffer_head *bh); void jbd2_journal_put_journal_head(struct journal_head *jh); /* -- cgit v1.2.3 From cd51875d53ae1459a2b09b4338166a218c0635a7 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Thu, 9 Jun 2011 12:58:53 +0400 Subject: CIFS: Fix sparse error cifs_sb_master_tlink was declared as inline, but without a definition. Remove the declaration and move the definition up. Signed-off-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index bb659eb73810..9190018f97e1 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2149,7 +2149,10 @@ cifs_put_tlink(struct tcon_link *tlink) } static inline struct tcon_link * -cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb); +cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) +{ + return cifs_sb->master_tlink; +} static int compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) @@ -3484,12 +3487,6 @@ out: return tcon; } -static inline struct tcon_link * -cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) -{ - return cifs_sb->master_tlink; -} - struct cifs_tcon * cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) { -- cgit v1.2.3 From 7fdbaa1b8daa1009b705985b903e3d2ebccad456 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 10 Jun 2011 16:14:57 -0400 Subject: cifs: don't allow cifs_reconnect to exit with NULL socket pointer It's possible for the following set of events to happen: cifsd calls cifs_reconnect which reconnects the socket. A userspace process then calls cifs_negotiate_protocol to handle the NEGOTIATE and gets a reply. But, while processing the reply, cifsd calls cifs_reconnect again. Eventually the GlobalMid_Lock is dropped and the reply from the earlier NEGOTIATE completes and the tcpStatus is set to CifsGood. cifs_reconnect then goes through and closes the socket and sets the pointer to zero, but because the status is now CifsGood, the new socket is not created and cifs_reconnect exits with the socket pointer set to NULL. 
Fix this by only setting the tcpStatus to CifsGood if the tcpStatus is CifsNeedNegotiate, and by making sure that generic_ip_connect is always called at least once in cifs_reconnect. Note that this is not a perfect fix for this issue. It's still possible that the NEGOTIATE reply is handled after the socket has been closed and reconnected. In that case, the socket state will look correct but it no NEGOTIATE was performed on it be for the wrong socket. In that situation though the server should just shut down the socket on the next attempted send, rather than causing the oops that occurs today. Cc: # .38.x: fd88ce9: [CIFS] cifs: clarify the meaning of tcpStatus == CifsGood Reported-and-Tested-by: Ben Greear Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9190018f97e1..fa73e2a51cf2 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -152,7 +152,7 @@ cifs_reconnect(struct TCP_Server_Info *server) mid_entry->callback(mid_entry); } - while (server->tcpStatus == CifsNeedReconnect) { + do { try_to_freeze(); /* we should try only the port we connected to before */ @@ -167,7 +167,7 @@ cifs_reconnect(struct TCP_Server_Info *server) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); } - } + } while (server->tcpStatus == CifsNeedReconnect); return rc; } @@ -3374,7 +3374,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) } if (rc == 0) { spin_lock(&GlobalMid_Lock); - if (server->tcpStatus != CifsExiting) + if (server->tcpStatus == CifsNeedNegotiate) server->tcpStatus = CifsGood; else rc = -EHOSTDOWN; -- cgit v1.2.3 From 3e715513643f0207c8f3c22010b54954cd697474 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 13 Jun 2011 11:50:41 -0400 Subject: cifs: show sec= option in /proc/mounts Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 989442dcfb45..e9def996e383 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -352,6 +352,37 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) } } +static void +cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server) +{ + seq_printf(s, ",sec="); + + switch (server->secType) { + case LANMAN: + seq_printf(s, "lanman"); + break; + case NTLMv2: + seq_printf(s, "ntlmv2"); + break; + case NTLM: + seq_printf(s, "ntlm"); + break; + case Kerberos: + seq_printf(s, "krb5"); + break; + case RawNTLMSSP: + seq_printf(s, "ntlmssp"); + break; + default: + /* shouldn't ever happen */ + seq_printf(s, "unknown"); + break; + } + + if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + seq_printf(s, "i"); +} + /* * cifs_show_options() is for displaying mount options in /proc/mounts. 
* Not all settable options are displayed but most of the important @@ -365,6 +396,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) struct sockaddr *srcaddr; srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; + cifs_show_security(s, tcon->ses->server); + seq_printf(s, ",unc=%s", tcon->treeName); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) -- cgit v1.2.3 From 8d1bca328b7c17af33bcf966d799c556ecbf370f Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sat, 11 Jun 2011 21:17:10 -0400 Subject: cifs: correctly handle NULL tcon pointer in CIFSTCon Long ago (in commit 00e485b0), I added some code to handle share-level passwords in CIFSTCon. That code ignored the fact that it's legit to pass in a NULL tcon pointer when connecting to the IPC$ share on the server. This wasn't really a problem until recently as we only called CIFSTCon this way when the server returned -EREMOTE. With the introduction of commit c1508ca2 however, it gets called this way on every mount, causing an oops when share-level security is in effect. Fix this by simply treating a NULL tcon pointer as if user-level security were in effect. I'm not aware of any servers that protect the IPC$ share with a specific password anyway. Also, add a comment to the top of CIFSTCon to ensure that we don't make the same mistake again. Cc: Reported-by: Martijn Uffing Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fa73e2a51cf2..12cf72dd0c42 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3174,6 +3174,10 @@ out: return rc; } +/* + * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon + * pointer may be NULL. + */ int CIFSTCon(unsigned int xid, struct cifs_ses *ses, const char *tree, struct cifs_tcon *tcon, @@ -3208,7 +3212,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, pSMB->AndXCommand = 0xFF; pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); bcc_ptr = &pSMB->Password[0]; - if ((ses->server->sec_mode) & SECMODE_USER) { + if (!tcon || (ses->server->sec_mode & SECMODE_USER)) { pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ *bcc_ptr = 0; /* password is null byte */ bcc_ptr++; /* skip password */ -- cgit v1.2.3 From 773e9b442693b250aa6c452cb0cf5a9343f51cef Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 7 Jun 2011 20:57:14 -0700 Subject: ceph: fix page alignment corrections dd if=/dev/urandom of=/mnt/fs_depot/dd10 bs=500 seek=8388 count=1 dd if=/mnt/fs_depot/dd10 of=/root/dd10out bs=500 skip=8388 count=1 Reported-by: Henry C Chang Signed-off-by: Sage Weil --- fs/ceph/file.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 9542f07d0b93..2be0f35afdfb 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -290,7 +290,6 @@ static int striped_read(struct inode *inode, struct ceph_inode_info *ci = ceph_inode(inode); u64 pos, this_len; int io_align, page_align; - int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ int left, pages_left; int read; struct page **page_pos; @@ -326,12 +325,11 @@ more: ret, hit_stripe ? " HITSTRIPE" : "", was_short ? 
" SHORT" : ""); if (ret > 0) { - int didpages = - ((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT; + int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; if (read < pos - off) { dout(" zero gap %llu to %llu\n", off + read, pos); - ceph_zero_page_vector_range(page_off + read, + ceph_zero_page_vector_range(page_align + read, pos - off - read, pages); } pos += ret; @@ -356,7 +354,7 @@ more: left = inode->i_size - pos; dout("zero tail %d\n", left); - ceph_zero_page_vector_range(page_off + read, left, + ceph_zero_page_vector_range(page_align + read, left, pages); read += left; } -- cgit v1.2.3 From d7f124f129a6aea99938e0d4172c741b56fefeda Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 13 Jun 2011 16:22:18 -0700 Subject: ceph: fix sync and dio writes across stripe boundaries We were iterating across stripe boundaries properly, but not moving the write buffer pointer forward. This caused us to rewrite the same data after the break. Fix by adjusting the data pointer forward, and recalculating the io and buffer alignment after the break. Signed-off-by: Sage Weil --- fs/ceph/file.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 2be0f35afdfb..4698a5c553dc 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -476,9 +476,6 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, else pos = *offset; - io_align = pos & ~PAGE_MASK; - buf_align = (unsigned long)data & ~PAGE_MASK; - ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); if (ret < 0) return ret; @@ -502,6 +499,8 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, * boundary. this isn't atomic, unfortunately. :( */ more: + io_align = pos & ~PAGE_MASK; + buf_align = (unsigned long)data & ~PAGE_MASK; len = left; if (file->f_flags & O_DIRECT) { /* write from beginning of first page, regardless of @@ -591,6 +590,7 @@ out: pos += len; written += len; left -= len; + data += written; if (left) goto more; -- cgit v1.2.3 From 1123d93963cbd2546449d4d9f0c568e323cb0ac6 Mon Sep 17 00:00:00 2001 From: Max Asbock Date: Mon, 13 Jun 2011 10:18:32 -0700 Subject: timerfd: Fix wakeup of processes when timer is cancelled on clock change Currently processes waiting with poll on cancelable timerfd timers are not woken up when the timers are canceled. When the system time is set the clock_was_set() function calls timerfd_clock_was_set() to cancel and wake up processes waiting on potential cancelable timerfd timers. However the wake up currently has no effect because in the case of timerfd_read it is dependent on ctx->ticks not being 0. timerfd_poll also requires ctx->ticks being non zero. As a consequence processes waiting on cancelable timers only get woken up when the timers expire. This patch fixes this by incrementing ctx->ticks before calling wake_up. Signed-off-by: Max Asbock Cc: kay.sievers@vrfy.org Cc: virtuoso@slind.org Cc: johnstul Link: http://lkml.kernel.org/r/1307985512.4710.41.camel@w-amax.beaverton.ibm.com Signed-off-by: Thomas Gleixner --- fs/timerfd.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/timerfd.c b/fs/timerfd.c index f67acbdda5e8..dffeb3795af1 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr) /* * Called when the clock was set to cancel the timers in the cancel - * list. + * list. This will wake up processes waiting on these timers. 
The + * wake-up requires ctx->ticks to be non zero, therefore we increment + * it before calling wake_up_locked(). */ void timerfd_clock_was_set(void) { @@ -76,6 +78,7 @@ void timerfd_clock_was_set(void) spin_lock_irqsave(&ctx->wqh.lock, flags); if (ctx->moffs.tv64 != moffs.tv64) { ctx->moffs.tv64 = KTIME_MAX; + ctx->ticks++; wake_up_locked(&ctx->wqh); } spin_unlock_irqrestore(&ctx->wqh.lock, flags); -- cgit v1.2.3 From 040d15c86747cf44fcf6b8ee19d805d4ef20caf3 Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 14 Jun 2011 15:51:18 +0000 Subject: [CIFS] trivial cleanup fscache cFYI and cERROR messages ... for uniformity and cleaner debug logs. Signed-off-by: Suresh Jayaraman Signed-off-by: Steve French --- fs/cifs/cache.c | 6 +++--- fs/cifs/fscache.c | 51 ++++++++++++++++++++++++--------------------------- 2 files changed, 27 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index dd8584d35a14..545509c3313b 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c @@ -92,7 +92,7 @@ static uint16_t cifs_server_get_key(const void *cookie_netfs_data, break; default: - cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family); + cERROR(1, "Unknown network family '%d'", sa->sa_family); key_len = 0; break; } @@ -152,7 +152,7 @@ static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer, sharename = extract_sharename(tcon->treeName); if (IS_ERR(sharename)) { - cFYI(1, "CIFS: couldn't extract sharename\n"); + cFYI(1, "%s: couldn't extract sharename\n", __func__); sharename = NULL; return 0; } @@ -302,7 +302,7 @@ static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data) pagevec_init(&pvec, 0); first = 0; - cFYI(1, "cifs inode 0x%p now uncached", cifsi); + cFYI(1, "%s: cifs inode 0x%p now uncached", __func__, cifsi); for (;;) { nr_pages = pagevec_lookup(&pvec, diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index d368a47ba5eb..816696621ec9 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -28,14 +28,14 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) server->fscache = fscache_acquire_cookie(cifs_fscache_netfs.primary_index, &cifs_fscache_server_index_def, server); - cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server, - server->fscache); + cFYI(1, "%s: (0x%p/0x%p)", __func__, server, + server->fscache); } void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) { - cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server, - server->fscache); + cFYI(1, "%s: (0x%p/0x%p)", __func__, server, + server->fscache); fscache_relinquish_cookie(server->fscache, 0); server->fscache = NULL; } @@ -47,13 +47,13 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) tcon->fscache = fscache_acquire_cookie(server->fscache, &cifs_fscache_super_index_def, tcon); - cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)", - server->fscache, tcon->fscache); + cFYI(1, "%s: (0x%p/0x%p)", __func__, server->fscache, + tcon->fscache); } void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) { - cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); + cFYI(1, "%s: (0x%p)", __func__, tcon->fscache); fscache_relinquish_cookie(tcon->fscache, 0); tcon->fscache = NULL; } @@ -70,8 +70,8 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode) if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) { cifsi->fscache = fscache_acquire_cookie(tcon->fscache, &cifs_fscache_inode_object_def, cifsi); - cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache, - cifsi->fscache); + cFYI(1, 
"%s: got FH cookie (0x%p/0x%p)", __func__, + tcon->fscache, cifsi->fscache); } } @@ -80,8 +80,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode) struct cifsInodeInfo *cifsi = CIFS_I(inode); if (cifsi->fscache) { - cFYI(1, "CIFS releasing inode cookie (0x%p)", - cifsi->fscache); + cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache); fscache_relinquish_cookie(cifsi->fscache, 0); cifsi->fscache = NULL; } @@ -92,8 +91,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode) struct cifsInodeInfo *cifsi = CIFS_I(inode); if (cifsi->fscache) { - cFYI(1, "CIFS disabling inode cookie (0x%p)", - cifsi->fscache); + cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache); fscache_relinquish_cookie(cifsi->fscache, 1); cifsi->fscache = NULL; } @@ -121,8 +119,8 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode) cifs_sb_master_tcon(cifs_sb)->fscache, &cifs_fscache_inode_object_def, cifsi); - cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p", - cifsi->fscache, old); + cFYI(1, "%s: new cookie 0x%p oldcookie 0x%p", + __func__, cifsi->fscache, old); } } @@ -132,8 +130,8 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp) struct inode *inode = page->mapping->host; struct cifsInodeInfo *cifsi = CIFS_I(inode); - cFYI(1, "CIFS: fscache release page (0x%p/0x%p)", - page, cifsi->fscache); + cFYI(1, "%s: (0x%p/0x%p)", __func__, page, + cifsi->fscache); if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) return 0; } @@ -144,8 +142,7 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp) static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, int error) { - cFYI(1, "CFS: readpage_from_fscache_complete (0x%p/%d)", - page, error); + cFYI(1, "%s: (0x%p/%d)", __func__, page, error); if (!error) SetPageUptodate(page); unlock_page(page); @@ -158,7 +155,7 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) { int ret; - cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p", + cFYI(1, "%s: (fsc:%p, p:%p, i:0x%p", __func__, CIFS_I(inode)->fscache, page, inode); ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page, cifs_readpage_from_fscache_complete, @@ -167,11 +164,11 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) switch (ret) { case 0: /* page found in fscache, read submitted */ - cFYI(1, "CIFS: readpage_from_fscache: submitted"); + cFYI(1, "%s: submitted", __func__); return ret; case -ENOBUFS: /* page won't be cached */ case -ENODATA: /* page not in cache */ - cFYI(1, "CIFS: readpage_from_fscache %d", ret); + cFYI(1, "%s: %d", __func__, ret); return 1; default: @@ -190,7 +187,7 @@ int __cifs_readpages_from_fscache(struct inode *inode, { int ret; - cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)", + cFYI(1, "%s: (0x%p/%u/0x%p)", __func__, CIFS_I(inode)->fscache, *nr_pages, inode); ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, pages, nr_pages, @@ -199,12 +196,12 @@ int __cifs_readpages_from_fscache(struct inode *inode, mapping_gfp_mask(mapping)); switch (ret) { case 0: /* read submitted to the cache for all pages */ - cFYI(1, "CIFS: readpages_from_fscache: submitted"); + cFYI(1, "%s: submitted", __func__); return ret; case -ENOBUFS: /* some pages are not cached and can't be */ case -ENODATA: /* some pages are not cached */ - cFYI(1, "CIFS: readpages_from_fscache: no page"); + cFYI(1, "%s: no page", __func__); return 1; default: @@ -218,7 +215,7 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page) { int ret; - cFYI(1, "CIFS: 
readpage_to_fscache(fsc: %p, p: %p, i: %p", + cFYI(1, "%s: (fsc: %p, p: %p, i: %p)", __func__, CIFS_I(inode)->fscache, page, inode); ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL); if (ret != 0) @@ -230,7 +227,7 @@ void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode) struct cifsInodeInfo *cifsi = CIFS_I(inode); struct fscache_cookie *cookie = cifsi->fscache; - cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie); + cFYI(1, "%s: (0x%p/0x%p)", __func__, page, cookie); fscache_wait_on_page_write(cookie, page); fscache_uncache_page(cookie, page); } -- cgit v1.2.3 From c46a131c0c0f4c2457e6b1e430c578a5cb057334 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 5 Jun 2011 11:12:31 +0000 Subject: xfs: fix ->mknod() return value on xfs_get_acl() failure ->mknod() should return negative on errors and PTR_ERR() gives already negative value... Signed-off-by: Al Viro Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_iops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index dd21784525a8..d44d92cd12b1 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -182,7 +182,7 @@ xfs_vn_mknod( if (IS_POSIXACL(dir)) { default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(default_acl)) - return -PTR_ERR(default_acl); + return PTR_ERR(default_acl); if (!default_acl) mode &= ~current_umask(); -- cgit v1.2.3 From 1252b3013b790c77e1c4f077a40542f86df37fb4 Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 14 Jun 2011 16:19:54 +0000 Subject: [CIFS] update cifs version to 1.73 Signed-off-by: Steve French --- fs/cifs/cifsfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 64313f778ebf..0900e1658c96 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "1.72" +#define CIFS_VERSION "1.73" #endif /* _CIFSFS_H */ -- cgit v1.2.3 From 9e3bd4e24e94d60d2e0762e919aab6c9a7fc0c5b Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Tue, 31 May 2011 21:46:50 -0400 Subject: NFS: fix umount of pnfs filesystems Unmounting a pnfs filesystem hangs using filelayout and possibly others. This fixes the use of the rcu protected node by making use of a new 'tmpnode' for the temporary purge list. Also, the spinlock shouldn't be held when calling synchronize_rcu(). 
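As a side note, the reason a second hlist_node is needed can be sketched in plain user-space C (illustrative only, none of this is kernel code): an entry pulled off the RCU-visible hash chain may still be walked by readers until a grace period has passed, so the temporary purge list has to use its own link field, and the grace-period wait has to happen only after the spinlock is dropped.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

struct devid {
	struct node hash_link;	/* RCU readers may still follow this link */
	struct node tmp_link;	/* private purge list uses its own link   */
	int id;
};

static void push(struct node **head, struct node *n)
{
	n->next = *head;
	*head = n;
}

static void wait_for_readers(void)
{
	/* stands in for synchronize_rcu(); must not be called under a spinlock */
}

int main(void)
{
	struct node *tmp = NULL;
	struct devid *d = calloc(1, sizeof(*d));

	d->id = 1;
	/* under the lock: unhash d, then queue it via its second link */
	push(&tmp, &d->tmp_link);
	/* lock dropped here; only now is it safe to block */
	wait_for_readers();
	while (tmp) {
		struct devid *victim = (struct devid *)
			((char *)tmp - offsetof(struct devid, tmp_link));
		tmp = tmp->next;
		printf("freeing device %d\n", victim->id);
		free(victim);
	}
	return 0;
}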
Signed-off-by: Weston Andros Adamson Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.h | 1 + fs/nfs/pnfs_dev.c | 16 +++++++++++----- 2 files changed, 12 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 48d0a8e4d062..96bf4e6f45be 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -186,6 +186,7 @@ int pnfs_ld_read_done(struct nfs_read_data *); /* pnfs_dev.c */ struct nfs4_deviceid_node { struct hlist_node node; + struct hlist_node tmpnode; const struct pnfs_layoutdriver_type *ld; const struct nfs_client *nfs_client; struct nfs4_deviceid deviceid; diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c index c65e133ce9c0..5944d4b369a2 100644 --- a/fs/nfs/pnfs_dev.c +++ b/fs/nfs/pnfs_dev.c @@ -174,6 +174,7 @@ nfs4_init_deviceid_node(struct nfs4_deviceid_node *d, const struct nfs4_deviceid *id) { INIT_HLIST_NODE(&d->node); + INIT_HLIST_NODE(&d->tmpnode); d->ld = ld; d->nfs_client = nfs_client; d->deviceid = *id; @@ -238,24 +239,29 @@ static void _deviceid_purge_client(const struct nfs_client *clp, long hash) { struct nfs4_deviceid_node *d; - struct hlist_node *n, *next; + struct hlist_node *n; HLIST_HEAD(tmp); + spin_lock(&nfs4_deviceid_lock); rcu_read_lock(); hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node) if (d->nfs_client == clp && atomic_read(&d->ref)) { hlist_del_init_rcu(&d->node); - hlist_add_head(&d->node, &tmp); + hlist_add_head(&d->tmpnode, &tmp); } rcu_read_unlock(); + spin_unlock(&nfs4_deviceid_lock); if (hlist_empty(&tmp)) return; synchronize_rcu(); - hlist_for_each_entry_safe(d, n, next, &tmp, node) + while (!hlist_empty(&tmp)) { + d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode); + hlist_del(&d->tmpnode); if (atomic_dec_and_test(&d->ref)) d->ld->free_deviceid_node(d); + } } void @@ -263,8 +269,8 @@ nfs4_deviceid_purge_client(const struct nfs_client *clp) { long h; - spin_lock(&nfs4_deviceid_lock); + if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS)) + return; for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++) _deviceid_purge_client(clp, h); - spin_unlock(&nfs4_deviceid_lock); } -- cgit v1.2.3 From 0b760113a3a155269a3fba93a409c640031dd68f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 31 May 2011 15:15:34 -0400 Subject: NLM: Don't hang forever on NLM unlock requests If the NLM daemon is killed on the NFS server, we can currently end up hanging forever on an 'unlock' request, instead of aborting. Basically, if the rpcbind request fails, or the server keeps returning garbage, we really want to quit instead of retrying. 
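The retry-budget idea can be shown with a trivial stand-alone sketch (user space, purely illustrative; the helper and error value below are made up): keep a small per-request counter and stop rebinding once it runs out instead of looping forever.

#include <errno.h>
#include <stdio.h>

static int try_rebind(void)
{
	return -ECONNREFUSED;	/* pretend rpcbind keeps failing */
}

int main(void)
{
	int rebind_retry = 2;	/* mirrors the small tk_rebind_retry budget */
	int err;

	while ((err = try_rebind()) < 0) {
		if (rebind_retry-- == 0) {
			fprintf(stderr, "giving up, err=%d\n", err);
			return 1;
		}
		/* a real client would sleep a few seconds before retrying */
	}
	return 0;
}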
Tested-by: Vasily Averin Signed-off-by: Trond Myklebust Cc: stable@kernel.org --- fs/lockd/clntproc.c | 8 +++++++- include/linux/sunrpc/sched.h | 3 ++- net/sunrpc/clnt.c | 3 +++ net/sunrpc/sched.c | 1 + 4 files changed, 13 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index adb45ec9038c..e374050a911c 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -708,7 +708,13 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) if (task->tk_status < 0) { dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); - goto retry_rebind; + switch (task->tk_status) { + case -EACCES: + case -EIO: + goto die; + default: + goto retry_rebind; + } } if (status == NLM_LCK_DENIED_GRACE_PERIOD) { rpc_delay(task, NLMCLNT_GRACE_WAIT); diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index f73c482ec9c6..fe2d8e6b923b 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -84,7 +84,8 @@ struct rpc_task { #endif unsigned char tk_priority : 2,/* Task priority */ tk_garb_retry : 2, - tk_cred_retry : 2; + tk_cred_retry : 2, + tk_rebind_retry : 2; }; #define tk_xprt tk_client->cl_xprt diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index b84d7395535e..566bcfd067f6 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1175,6 +1175,9 @@ call_bind_status(struct rpc_task *task) status = -EOPNOTSUPP; break; } + if (task->tk_rebind_retry == 0) + break; + task->tk_rebind_retry--; rpc_delay(task, 3*HZ); goto retry_timeout; case -ETIMEDOUT: diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 6b43ee7221d5..a27406b1654f 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -792,6 +792,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta /* Initialize retry counters */ task->tk_garb_retry = 2; task->tk_cred_retry = 2; + task->tk_rebind_retry = 2; task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; task->tk_owner = current->tgid; -- cgit v1.2.3 From 0f66b5984df2fe1617c05900a39a7ef493ca9de9 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 16 Oct 2010 22:07:46 -0700 Subject: NFS41: do not update isize if inode needs layoutcommit nfs_update_inode will update isize if there is no queued pages. For pNFS, layoutcommit is supposed to change file size on server, the same effect as queued pages. nfs_update_inode may be called when dirty pages are written back (nfsi->npages==0) but layoutcommit is not sent, and it will change client file size according to server file size. Then client ends up losing what it just writes back in pNFS path. So we should skip updating client file size if file needs layoutcommit. Signed-off-by: Peng Tao Cc: stable@kernel.org [2.6.39] Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 144f2a3c7185..3f1eb8180373 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1294,7 +1294,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (new_isize != cur_isize) { /* Do we perhaps have any outstanding writes, or has * the file grown beyond our last write? 
*/ - if (nfsi->npages == 0 || new_isize > cur_isize) { + if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) || + new_isize > cur_isize) { i_size_write(inode, new_isize); invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; } -- cgit v1.2.3 From c9c30dd5f73dccaa326a54dfcf490316946aea87 Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Sat, 11 Jun 2011 17:08:39 -0400 Subject: NFSv4.1: deprecate headerpadsz in CREATE_SESSION We don't support header padding yet so better off ditching it Reported-by: Sid Moore Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 4 ---- fs/nfs/nfs4xdr.c | 10 ++++++---- include/linux/nfs_xdr.h | 1 - 3 files changed, 6 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d2c4b59c896d..3ca449789545 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5098,7 +5098,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) if (mxresp_sz == 0) mxresp_sz = NFS_MAX_FILE_IO_SIZE; /* Fore channel attributes */ - args->fc_attrs.headerpadsz = 0; args->fc_attrs.max_rqst_sz = mxrqst_sz; args->fc_attrs.max_resp_sz = mxresp_sz; args->fc_attrs.max_ops = NFS4_MAX_OPS; @@ -5111,7 +5110,6 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) args->fc_attrs.max_ops, args->fc_attrs.max_reqs); /* Back channel attributes */ - args->bc_attrs.headerpadsz = 0; args->bc_attrs.max_rqst_sz = PAGE_SIZE; args->bc_attrs.max_resp_sz = PAGE_SIZE; args->bc_attrs.max_resp_sz_cached = 0; @@ -5131,8 +5129,6 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args struct nfs4_channel_attrs *sent = &args->fc_attrs; struct nfs4_channel_attrs *rcvd = &session->fc_attrs; - if (rcvd->headerpadsz > sent->headerpadsz) - return -EINVAL; if (rcvd->max_resp_sz > sent->max_resp_sz) return -EINVAL; /* diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index d869a5e5464b..c4b7d6c04948 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1725,7 +1725,7 @@ static void encode_create_session(struct xdr_stream *xdr, *p++ = cpu_to_be32(args->flags); /*flags */ /* Fore Channel */ - *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ + *p++ = cpu_to_be32(0); /* header padding size */ *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */ *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */ @@ -1734,7 +1734,7 @@ static void encode_create_session(struct xdr_stream *xdr, *p++ = cpu_to_be32(0); /* rdmachannel_attrs */ /* Back Channel */ - *p++ = cpu_to_be32(args->fc_attrs.headerpadsz); /* header padding size */ + *p++ = cpu_to_be32(0); /* header padding size */ *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */ @@ -4997,12 +4997,14 @@ static int decode_chan_attrs(struct xdr_stream *xdr, struct nfs4_channel_attrs *attrs) { __be32 *p; - u32 nr_attrs; + u32 nr_attrs, val; p = xdr_inline_decode(xdr, 28); if (unlikely(!p)) goto out_overflow; - attrs->headerpadsz = be32_to_cpup(p++); + val = be32_to_cpup(p++); /* headerpadsz */ + if (val) + return -EINVAL; /* no support for header padding yet */ attrs->max_rqst_sz = be32_to_cpup(p++); attrs->max_resp_sz = be32_to_cpup(p++); attrs->max_resp_sz_cached = be32_to_cpup(p++); diff --git a/include/linux/nfs_xdr.h 
b/include/linux/nfs_xdr.h index 5e8444a11adf..00848d86ffb2 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -158,7 +158,6 @@ struct nfs_seqid; /* nfs41 sessions channel attributes */ struct nfs4_channel_attrs { - u32 headerpadsz; u32 max_rqst_sz; u32 max_resp_sz; u32 max_resp_sz_cached; -- cgit v1.2.3 From 1d92a08da23848a38eece4df7eaa4e8ec0e6c699 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 14 Jun 2011 12:07:38 -0400 Subject: NFSv4.1: Fix a refcounting issue in the pNFS device id cache When we add something to the global device id cache, we need to bump the reference count, so that the cache itself holds a reference. Signed-off-by: Trond Myklebust --- fs/nfs/pnfs_dev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c index 5944d4b369a2..f0f8e1e22f6c 100644 --- a/fs/nfs/pnfs_dev.c +++ b/fs/nfs/pnfs_dev.c @@ -209,6 +209,7 @@ nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new) hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]); spin_unlock(&nfs4_deviceid_lock); + atomic_inc(&new->ref); return new; } -- cgit v1.2.3 From 533eb4611c9eea53072eb6a61d5a6393b6a77ed7 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Mon, 13 Jun 2011 18:25:56 -0400 Subject: NFSv4.1: allow nfs_fhget to succeed with mounted on fileid Commit 28331a46d88459788c8fca72dbb0415cd7f514c9 "Ensure we request the ordinary fileid when doing readdirplus" changed the meaning of NFS_ATTR_FATTR_FILEID which used to be set when FATTR4_WORD1_MOUNTED_ON_FILED was requested. Allow nfs_fhget to succeed with only a mounted on fileid when crossing a mountpoint or a referral. Ask for the fileid of the absent file system if mounted_on_fileid is not supported. Signed-off-by: Andy Adamson cc:stable@kernel.org [2.6.39] Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 3 ++- fs/nfs/internal.h | 11 +++++++++++ fs/nfs/nfs4proc.c | 33 +++++++++++++++++++++++---------- 3 files changed, 36 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 3f1eb8180373..6f4850deb272 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -256,7 +256,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) nfs_attr_check_mountpoint(sb, fattr); - if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0 && (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) + if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) && + !nfs_attr_use_mounted_on_fileid(fattr)) goto out_no_inode; if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0) goto out_no_inode; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b9056cbe68d6..2a55347a2daa 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -45,6 +45,17 @@ static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT; } +static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr) +{ + if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) || + (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) && + ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0))) + return 0; + + fattr->fileid = fattr->mounted_on_fileid; + return 1; +} + struct nfs_clone_mount { const struct super_block *sb; const struct dentry *dentry; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 3ca449789545..289c539bd6b3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2265,12 +2265,14 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, return nfs4_map_errors(status); } +static void 
nfs_fixup_referral_attributes(struct nfs_fattr *fattr); /* * Get locations and (maybe) other attributes of a referral. * Note that we'll actually follow the referral later when * we detect fsid mismatch in inode revalidation */ -static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct nfs_fattr *fattr, struct nfs_fh *fhandle) +static int nfs4_get_referral(struct inode *dir, const struct qstr *name, + struct nfs_fattr *fattr, struct nfs_fh *fhandle) { int status = -ENOMEM; struct page *page = NULL; @@ -2288,15 +2290,16 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct goto out; /* Make sure server returned a different fsid for the referral */ if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) { - dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name); + dprintk("%s: server did not return a different fsid for" + " a referral at %s\n", __func__, name->name); status = -EIO; goto out; } + /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */ + nfs_fixup_referral_attributes(&locations->fattr); + /* replace the lookup nfs_fattr with the locations nfs_fattr */ memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr)); - fattr->valid |= NFS_ATTR_FATTR_V4_REFERRAL; - if (!fattr->mode) - fattr->mode = S_IFDIR; memset(fhandle, 0, sizeof(struct nfs_fh)); out: if (page) @@ -4667,11 +4670,15 @@ static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, return len; } +/* + * nfs_fhget will use either the mounted_on_fileid or the fileid + */ static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) { - if (!((fattr->valid & NFS_ATTR_FATTR_FILEID) && - (fattr->valid & NFS_ATTR_FATTR_FSID) && - (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) + if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) || + (fattr->valid & NFS_ATTR_FATTR_FILEID)) && + (fattr->valid & NFS_ATTR_FATTR_FSID) && + (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL))) return; fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | @@ -4686,7 +4693,6 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, struct nfs_server *server = NFS_SERVER(dir); u32 bitmask[2] = { [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS, - [1] = FATTR4_WORD1_MOUNTED_ON_FILEID, }; struct nfs4_fs_locations_arg args = { .dir_fh = NFS_FH(dir), @@ -4705,11 +4711,18 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, int status; dprintk("%s: start\n", __func__); + + /* Ask for the fileid of the absent filesystem if mounted_on_fileid + * is not supported */ + if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID) + bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID; + else + bitmask[0] |= FATTR4_WORD0_FILEID; + nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; fs_locations->nlocations = 0; status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); - nfs_fixup_referral_attributes(&fs_locations->fattr); dprintk("%s: returned status = %d\n", __func__, status); return status; } -- cgit v1.2.3 From cec765cf5891c7fc3d905832b481bfb6fd55825d Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Mon, 13 Jun 2011 18:36:17 -0400 Subject: NFSv4.1: allow zero fh array in filelayout decode layout Signed-off-by: Andy Adamson cc:stable@kernel.org [2.6.39] Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c 
b/fs/nfs/nfs4filelayout.c index 426908809c97..5d6f369b15d0 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -552,13 +552,18 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, __func__, nfl_util, fl->num_fh, fl->first_stripe_index, fl->pattern_offset); - if (!fl->num_fh) + /* Note that a zero value for num_fh is legal for STRIPE_SPARSE. + * Futher checking is done in filelayout_check_layout */ + if (fl->num_fh < 0 || fl->num_fh > + max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT)) goto out_err; - fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), - gfp_flags); - if (!fl->fh_array) - goto out_err; + if (fl->num_fh > 0) { + fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *), + gfp_flags); + if (!fl->fh_array) + goto out_err; + } for (i = 0; i < fl->num_fh; i++) { /* Do we want to use a mempool here? */ -- cgit v1.2.3 From a2e1d4f2e5ed83850de92a491ef225824cb457bd Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Mon, 13 Jun 2011 18:54:53 -0400 Subject: nfs4.1: fix several problems with _pnfs_return_layout _pnfs_return_layout had the following problems: - it did not call pnfs_free_lseg_list on all paths - it unintentionally did a forgetful return when there was no outstanding io - it raced with concurrent LAYOUTGETS Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 8 ++++---- fs/nfs/pnfs.c | 8 +++++--- 2 files changed, 9 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 289c539bd6b3..5879b23e0c99 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5706,6 +5706,7 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) { struct nfs4_layoutreturn *lrp = calldata; struct nfs_server *server; + struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout; dprintk("--> %s\n", __func__); @@ -5717,16 +5718,15 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) nfs_restart_rpc(task, lrp->clp); return; } + spin_lock(&lo->plh_inode->i_lock); if (task->tk_status == 0) { - struct pnfs_layout_hdr *lo = NFS_I(lrp->args.inode)->layout; - if (lrp->res.lrs_present) { - spin_lock(&lo->plh_inode->i_lock); pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); - spin_unlock(&lo->plh_inode->i_lock); } else BUG_ON(!list_empty(&lo->plh_segs)); } + lo->plh_block_lgets--; + spin_unlock(&lo->plh_inode->i_lock); dprintk("<-- %s\n", __func__); } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 8c1309d852a6..25de6b27bdf4 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -634,12 +634,14 @@ _pnfs_return_layout(struct inode *ino) spin_lock(&ino->i_lock); lo = nfsi->layout; - if (!lo || !mark_matching_lsegs_invalid(lo, &tmp_list, NULL)) { + if (!lo) { spin_unlock(&ino->i_lock); - dprintk("%s: no layout segments to return\n", __func__); - goto out; + dprintk("%s: no layout to return\n", __func__); + return status; } stateid = nfsi->layout->plh_stateid; + mark_matching_lsegs_invalid(lo, &tmp_list, NULL); + lo->plh_block_lgets++; /* Reference matched in nfs4_layoutreturn_release */ get_layout_hdr(lo); spin_unlock(&ino->i_lock); -- cgit v1.2.3 From d771e3a43e23a37398b7e05a9d1b1036d698263c Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Tue, 14 Jun 2011 16:30:16 -0400 Subject: NFSv4.1: fix break condition in pnfs_find_lseg The break condition to skip out of the loop got broken when cmp_layout was change. Essentially, we want to stop looking once we know no layout on the remainder of the list can match the first byte of the looked-up range. 
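The intended loop invariant is easy to state in isolation. A stand-alone sketch (not the pnfs code; simplified to "does this entry contain the first byte we want"), assuming the list is kept sorted by offset:

#include <stdio.h>

struct range { unsigned long long offset, length; };

static const struct range *find(const struct range *tab, int n,
				unsigned long long want)
{
	int i;

	for (i = 0; i < n; i++) {
		if (tab[i].offset <= want &&
		    want < tab[i].offset + tab[i].length)
			return &tab[i];
		if (tab[i].offset > want)
			break;	/* later entries start even further out */
	}
	return NULL;
}

int main(void)
{
	const struct range layouts[] = { { 0, 4096 }, { 8192, 4096 } };

	printf("%s\n", find(layouts, 2, 5000) ? "hit" : "miss");
	return 0;
}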
Reported-by: Peng Tao Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 25de6b27bdf4..d066aad608ad 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -889,7 +889,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, ret = get_lseg(lseg); break; } - if (cmp_layout(range, &lseg->pls_range) > 0) + if (lseg->pls_range.offset > range->offset) break; } -- cgit v1.2.3 From c7fd06228b994190d8369a2a0acf5224e4e13d1a Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 15 Jun 2011 00:55:44 +0100 Subject: NFS: (d)printks should use %zd for ssize_t arguments (d)printks should use %zd for ssize_t arguments not %ld, otherwise they might get a warning. I see the following with MN10300. fs/nfs/objlayout/objlayout.c: In function 'objlayout_read_done': fs/nfs/objlayout/objlayout.c:294: warning: format '%ld' expects type 'long int', but argument 3 has type 'ssize_t' Signed-off-by: David Howells cc: Trond Myklebust cc: linux-nfs@vger.kernel.org Signed-off-by: Trond Myklebust --- fs/nfs/objlayout/objlayout.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index dc3956c0de80..1d06f8e2adea 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c @@ -291,7 +291,7 @@ objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync) struct nfs_read_data *rdata; state->status = status; - dprintk("%s: Begin status=%ld eof=%d\n", __func__, status, eof); + dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof); rdata = state->rpcdata; rdata->task.tk_status = status; if (status >= 0) { -- cgit v1.2.3 From 1ed3a8539af7b36aa5c977f304e80f7fc8d27bfc Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Wed, 15 Jun 2011 11:39:57 -0400 Subject: NFSv4.1: need to put_layout_hdr on _pnfs_return_layout error path We always get a reference on the layout header and we rely on nfs4_layoutreturn_release to put it. If we hit an allocation error before starting the rpc proc we bail out early without dereferncing the layout header properly. Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index d066aad608ad..8f9582281252 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -652,6 +652,7 @@ _pnfs_return_layout(struct inode *ino) lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); if (unlikely(lrp == NULL)) { status = -ENOMEM; + put_layout_hdr(lo); goto out; } -- cgit v1.2.3 From ea0ded748bdea78f9e2fefb571f7d6ce9edb4f89 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Wed, 15 Jun 2011 12:31:02 -0400 Subject: nfs4.1: prevent race that allowed use of freed layout in _pnfs_return_layout mark_matching_lsegs_invalid could put the last ref to the layout, so the get_layout_hdr needs to be called first. 
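The ordering rule being enforced, take your own reference before calling anything that can drop the last one, can be sketched stand-alone (illustrative user-space C, not the pnfs code):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

static void put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("freeing object\n");
		free(o);
	}
}

/* may drop the reference the caller was implicitly relying on */
static void invalidate(struct obj *o)
{
	put(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refs = 1;
	o->refs++;		/* take our own reference first ...        */
	invalidate(o);		/* ... so this cannot free it under us     */
	printf("still safe to use, refs=%d\n", o->refs);
	put(o);			/* matched by the eventual release path    */
	return 0;
}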
Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 8f9582281252..730d4dbbaf68 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -640,10 +640,10 @@ _pnfs_return_layout(struct inode *ino) return status; } stateid = nfsi->layout->plh_stateid; - mark_matching_lsegs_invalid(lo, &tmp_list, NULL); - lo->plh_block_lgets++; /* Reference matched in nfs4_layoutreturn_release */ get_layout_hdr(lo); + mark_matching_lsegs_invalid(lo, &tmp_list, NULL); + lo->plh_block_lgets++; spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); -- cgit v1.2.3 From 71d7aed014457147e8f71a843d5fbf03235e4a85 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 14 Jun 2011 14:24:32 -0400 Subject: Btrfs: fix path leakage on subvol deletion The delayed ref patch accidently removed the btrfs_free_path in btrfs_unlink_subvol, this puts it back and means we don't leak a path. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c15636b17874..5813dec5101c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3076,6 +3076,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); + btrfs_free_path(path); return 0; } -- cgit v1.2.3 From 8351583e3f6e430ce8f71913909a96ad5cc6a2f6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 14 Jun 2011 15:16:14 -0400 Subject: Btrfs: protect the pending_snapshots list with trans_lock Currently there is nothing protecting the pending_snapshots list on the transaction. We only hold the directory mutex that we are snapshotting and a read lock on the subvol_sem, so we could race with somebody else creating a snapshot in a different directory and end up with list corruption. So protect this list with the trans_lock. Thanks, Signed-off-by: Josef Bacik --- fs/btrfs/ioctl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b793d112d1f6..a3c4751e07db 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -482,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); BUG_ON(ret); + spin_lock(&root->fs_info->trans_lock); list_add(&pending_snapshot->list, &trans->transaction->pending_snapshots); + spin_unlock(&root->fs_info->trans_lock); if (async_transid) { *async_transid = trans->transid; ret = btrfs_commit_transaction_async(trans, -- cgit v1.2.3 From ed0ca14021e5ae3147602128641aa7f742ab227c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 14 Jun 2011 16:22:15 -0400 Subject: Btrfs: set no_trans_join after trying to expand the transaction We can lockup if we try to allow new writers join the transaction and we have flushoncommit set or have a pending snapshot. This is because we set no_trans_join and then loop around and try to wait for ordered extents again. The problem is the ordered endio stuff needs to join the transaction, which it can't do because no_trans_join is set. So instead wait until after this loop to set no_trans_join and then make sure to wait for num_writers == 1 in case anybody got started in between us exiting the loop and setting no_trans_join. This could easily be reproduced by mounting -o flushoncommit and running xfstest 13. It cannot be reproduced with this patch. 
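The commit ordering amounts to "close the door, then wait for the room to empty". A small pthread sketch of that pattern, purely illustrative and with made-up names, not btrfs code:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int num_writers = 1;	/* the committer itself */
static int no_join;

static void *writer(void *arg)
{
	pthread_mutex_lock(&lock);
	if (!no_join) {
		num_writers++;
		pthread_mutex_unlock(&lock);
		usleep(1000);			/* do some work */
		pthread_mutex_lock(&lock);
		num_writers--;
		pthread_cond_signal(&cond);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, writer, NULL);

	pthread_mutex_lock(&lock);
	no_join = 1;				/* block further joins */
	while (num_writers > 1)			/* wait out existing writers */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("safe to commit\n");
	pthread_join(t, NULL);
	return 0;
}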
Thanks, Reported-by: Jim Schutt Signed-off-by: Josef Bacik --- fs/btrfs/transaction.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2b3590b9fe98..56695595e036 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1241,12 +1241,20 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, schedule_timeout(1); finish_wait(&cur_trans->writer_wait, &wait); - spin_lock(&root->fs_info->trans_lock); - root->fs_info->trans_no_join = 1; - spin_unlock(&root->fs_info->trans_lock); } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); + /* + * Ok now we need to make sure to block out any other joins while we + * commit the transaction. We could have started a join before setting + * no_join so make sure to wait for num_writers to == 1 again. + */ + spin_lock(&root->fs_info->trans_lock); + root->fs_info->trans_no_join = 1; + spin_unlock(&root->fs_info->trans_lock); + wait_event(cur_trans->writer_wait, + atomic_read(&cur_trans->num_writers) == 1); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); -- cgit v1.2.3 From 9e2dfdb3081edfae66a49013517e80dd8a0469fa Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Wed, 15 Jun 2011 14:32:02 -0400 Subject: nfs4.1: mark layout as bad on error path in _pnfs_return_layout Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 730d4dbbaf68..539b94cb6c0f 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -652,6 +652,8 @@ _pnfs_return_layout(struct inode *ino) lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); if (unlikely(lrp == NULL)) { status = -ENOMEM; + set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags); + set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags); put_layout_hdr(lo); goto out; } -- cgit v1.2.3 From 793925334f32e9026c22baee5c3c340f47d4ef7e Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Wed, 15 Jun 2011 12:47:04 -0700 Subject: proc: Fix Oops on stat of /proc//ns/net Don't call iput with the inode half setup to be a namespace filedescriptor. Instead rearrange the code so that we don't initialize ei->ns_ops until after I ns_ops->get succeeds, preventing us from invoking ns_ops->put when ns_ops->get failed. Reported-by: Ingo Saitz Signed-off-by: Eric W. 
Biederman --- fs/proc/namespaces.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 781dec5bd682..be177f702acb 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -38,18 +38,21 @@ static struct dentry *proc_ns_instantiate(struct inode *dir, struct inode *inode; struct proc_inode *ei; struct dentry *error = ERR_PTR(-ENOENT); + void *ns; inode = proc_pid_make_inode(dir->i_sb, task); if (!inode) goto out; + ns = ns_ops->get(task); + if (!ns) + goto out_iput; + ei = PROC_I(inode); inode->i_mode = S_IFREG|S_IRUSR; inode->i_fop = &ns_file_operations; ei->ns_ops = ns_ops; - ei->ns = ns_ops->get(task); - if (!ei->ns) - goto out_iput; + ei->ns = ns; dentry->d_op = &pid_dentry_operations; d_add(dentry, inode); -- cgit v1.2.3 From 7f81c8890c15a10f5220bebae3b6dfae4961962a Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 15 Jun 2011 15:08:11 -0700 Subject: fs/exec.c: use BUILD_BUG_ON for VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP Commit a8bef8ff6ea1 ("mm: migration: avoid race between shift_arg_pages() and rmap_walk() during migration by not migrating temporary stacks") introduced a BUG_ON() to ensure that VM_STACK_FLAGS and VM_STACK_INCOMPLETE_SETUP do not overlap. The check is a compile time one, so BUILD_BUG_ON is more appropriate. Signed-off-by: Michal Hocko Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 97e0d52d72fd..b54f74f3cd80 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -277,7 +277,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) * use STACK_TOP because that can depend on attributes which aren't * configured yet. */ - BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); + BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; -- cgit v1.2.3 From 13fca640bb8ab611a50e0ba120b186faa2994d6c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 15 Jun 2011 21:53:52 -0700 Subject: Revert "fs/exec.c: use BUILD_BUG_ON for VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP" This reverts commit 7f81c8890c15a10f5220bebae3b6dfae4961962a. It turns out that it's not actually a build-time check on x86-64 UML, which does some seriously crazy stuff with VM_STACK_FLAGS. The VM_STACK_FLAGS define depends on the arch-supplied VM_STACK_DEFAULT_FLAGS value, and on x86-64 UML we have arch/um/sys-x86_64/shared/sysdep/vm-flags.h: #define VM_STACK_DEFAULT_FLAGS \ (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags) #define VM_STACK_DEFAULT_FLAGS vm_stack_flags (yes, seriously: two different #define's for that thing, with the first one being inside an "#ifdef TIF_IA32") It's possible that it is UML that should just be fixed in this area, but for now let's just undo the (very small) optimization. Reported-by: Randy Dunlap Acked-by: Andrew Morton Cc: Michal Hocko Cc: Richard Weinberger Signed-off-by: Linus Torvalds --- fs/exec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index b54f74f3cd80..97e0d52d72fd 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -277,7 +277,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) * use STACK_TOP because that can depend on attributes which aren't * configured yet. 
*/ - BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); + BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP); vma->vm_end = STACK_TOP_MAX; vma->vm_start = vma->vm_end - PAGE_SIZE; vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; -- cgit v1.2.3 From 50338b889dc504c69e0cb316ac92d1b9e51f3c8a Mon Sep 17 00:00:00 2001 From: Török Edwin Date: Thu, 16 Jun 2011 00:06:14 +0300 Subject: fix wrong iput on d_inode introduced by e6bc45d65d MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Git bisection shows that commit e6bc45d65df8599fdbae73be9cec4ceed274db53 causes BUG_ONs under high I/O load: kernel BUG at fs/inode.c:1368! [ 2862.501007] Call Trace: [ 2862.501007] [] d_kill+0xf8/0x140 [ 2862.501007] [] dput+0xc9/0x190 [ 2862.501007] [] fput+0x15f/0x210 [ 2862.501007] [] filp_close+0x61/0x90 [ 2862.501007] [] sys_close+0xb1/0x110 [ 2862.501007] [] system_call_fastpath+0x16/0x1b A reliable way to reproduce this bug is: Login to KDE, run 'rsnapshot sync', and apt-get install openjdk-6-jdk, and apt-get remove openjdk-6-jdk. The buggy part of the patch is this: struct inode *inode = NULL; ..... - if (nd.last.name[nd.last.len]) - goto slashes; inode = dentry->d_inode; - if (inode) - ihold(inode); + if (nd.last.name[nd.last.len] || !inode) + goto slashes; + ihold(inode) ... if (inode) iput(inode); /* truncate the inode here */ If nd.last.name[nd.last.len] is nonzero (and thus goto slashes branch is taken), and dentry->d_inode is non-NULL, then this code now does an additional iput on the inode, which is wrong. Fix this by only setting the inode variable if nd.last.name[nd.last.len] is 0. Reference: https://lkml.org/lkml/2011/6/15/50 Reported-by: Norbert Preining Reported-by: Török Edwin Cc: "Theodore Ts'o" Cc: Al Viro Signed-off-by: Török Edwin Signed-off-by: Al Viro --- fs/namei.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 9802345df5e7..6301963b161f 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2713,8 +2713,10 @@ static long do_unlinkat(int dfd, const char __user *pathname) error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { /* Why not before? Because we want correct error value */ + if (nd.last.name[nd.last.len]) + goto slashes; inode = dentry->d_inode; - if (nd.last.name[nd.last.len] || !inode) + if (!inode) goto slashes; ihold(inode); error = mnt_want_write(nd.path.mnt); -- cgit v1.2.3 From 8aef18845266f5c05904c610088f2d1ed58f6be3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 16 Jun 2011 15:10:06 +0100 Subject: VFS: Fix vfsmount overput on simultaneous automount [Kudos to dhowells for tracking that crap down] If two processes attempt to cause automounting on the same mountpoint at the same time, the vfsmount holding the mountpoint will be left with one too few references on it, causing a BUG when the kernel tries to clean up. The problem is that lock_mount() drops the caller's reference to the mountpoint's vfsmount in the case where it finds something already mounted on the mountpoint as it transits to the mounted filesystem and replaces path->mnt with the new mountpoint vfsmount. During a pathwalk, however, we don't take a reference on the vfsmount if it is the same as the one in the nameidata struct, but do_add_mount() doesn't know this. The fix is to make sure we have a ref on the vfsmount of the mountpoint before calling do_add_mount(). However, if lock_mount() doesn't transit, we're then left with an extra ref on the mountpoint vfsmount which needs releasing. 
We can handle that in follow_managed() by not making assumptions about what we can and what we cannot get from lookup_mnt() as the current code does. The callers of follow_managed() expect that reference to path->mnt will be grabbed iff path->mnt has been changed. follow_managed() and follow_automount() keep track of whether such reference has been grabbed and assume that it'll happen in those and only those cases that'll have us return with changed path->mnt. That assumption is almost correct - it breaks in case of racing automounts and in even harder to hit race between following a mountpoint and a couple of mount --move. The thing is, we don't need to make that assumption at all - after the end of loop in follow_manage() we can check if path->mnt has ended up unchanged and do mntput() if needed. The BUG can be reproduced with the following test program: #include #include #include #include #include int main(int argc, char **argv) { int pid, ws; struct stat buf; pid = fork(); stat(argv[1], &buf); if (pid > 0) wait(&ws); return 0; } and the following procedure: (1) Mount an NFS volume that on the server has something else mounted on a subdirectory. For instance, I can mount / from my server: mount warthog:/ /mnt -t nfs4 -r On the server /data has another filesystem mounted on it, so NFS will see a change in FSID as it walks down the path, and will mark /mnt/data as being a mountpoint. This will cause the automount code to be triggered. !!! Do not look inside the mounted fs at this point !!! (2) Run the above program on a file within the submount to generate two simultaneous automount requests: /tmp/forkstat /mnt/data/testfile (3) Unmount the automounted submount: umount /mnt/data (4) Unmount the original mount: umount /mnt At this point the kernel should throw a BUG with something like the following: BUG: Dentry ffff880032e3c5c0{i=2,n=} still in use (1) [unmount of nfs4 0:12] Note that the bug appears on the root dentry of the original mount, not the mountpoint and not the submount because sys_umount() hasn't got to its final mntput_no_expire() yet, but this isn't so obvious from the call trace: [] shrink_dcache_for_umount+0x69/0x82 [] generic_shutdown_super+0x37/0x15b [] ? nfs_super_return_all_delegations+0x2e/0x1b1 [nfs] [] kill_anon_super+0x1d/0x7e [] nfs4_kill_super+0x60/0xb6 [nfs] [] deactivate_locked_super+0x34/0x83 [] deactivate_super+0x6f/0x7b [] mntput_no_expire+0x18d/0x199 [] mntput+0x3b/0x44 [] release_mounts+0xa2/0xbf [] sys_umount+0x47a/0x4ba [] ? trace_hardirqs_on_caller+0x1fd/0x22f [] system_call_fastpath+0x16/0x1b as do_umount() is inlined. However, you can see release_mounts() in there. Note also that it may be necessary to have multiple CPU cores to be able to trigger this bug. 
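For reference, the test program quoted above, reformatted and with a plausible set of headers filled in (the header names were lost in the original listing, so treat them as a best guess):

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	int pid, ws;
	struct stat buf;

	pid = fork();
	stat(argv[1], &buf);	/* parent and child both stat the path */
	if (pid > 0)
		wait(&ws);
	return 0;
}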
Tested-by: Jeff Layton Tested-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/namei.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 6301963b161f..9e425e7e6c8f 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -812,6 +812,11 @@ static int follow_automount(struct path *path, unsigned flags, if (!mnt) /* mount collision */ return 0; + if (!*need_mntput) { + /* lock_mount() may release path->mnt on error */ + mntget(path->mnt); + *need_mntput = true; + } err = finish_automount(mnt, path); switch (err) { @@ -819,12 +824,9 @@ static int follow_automount(struct path *path, unsigned flags, /* Someone else made a mount here whilst we were busy */ return 0; case 0: - dput(path->dentry); - if (*need_mntput) - mntput(path->mnt); + path_put(path); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); - *need_mntput = true; return 0; default: return err; @@ -844,9 +846,10 @@ static int follow_automount(struct path *path, unsigned flags, */ static int follow_managed(struct path *path, unsigned flags) { + struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ unsigned managed; bool need_mntput = false; - int ret; + int ret = 0; /* Given that we're not holding a lock here, we retain the value in a * local variable for each dentry as we look at it so that we don't see @@ -861,7 +864,7 @@ static int follow_managed(struct path *path, unsigned flags) BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path->dentry, false); if (ret < 0) - return ret == -EISDIR ? 0 : ret; + break; } /* Transit to a mounted filesystem. */ @@ -887,14 +890,19 @@ static int follow_managed(struct path *path, unsigned flags) if (managed & DCACHE_NEED_AUTOMOUNT) { ret = follow_automount(path, flags, &need_mntput); if (ret < 0) - return ret == -EISDIR ? 0 : ret; + break; continue; } /* We didn't change the current path point */ break; } - return 0; + + if (need_mntput && path->mnt == mnt) + mntput(path->mnt); + if (ret == -EISDIR) + ret = 0; + return ret; } int follow_down_one(struct path *path) -- cgit v1.2.3 From 5e7f23373bf9a853e9256e81e86724cdd0a33c29 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Mon, 13 Jun 2011 22:31:12 +0100 Subject: afs: afs_fill_page reads too much, or wrong data afs_fill_page should read the page that is about to be written but the current implementation has a number of issues. If we aren't extending the file we always read PAGE_CACHE_SIZE at offset 0. If we are extending the file we try to read the entire file. Change afs_fill_page to read PAGE_CACHE_SIZE at the right offset, clamped to i_size. While here, avoid calling afs_fill_page when we are doing a PAGE_CACHE_SIZE write. 
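The clamping rule can be illustrated with a stand-alone helper (a sketch assuming 4K pages, not the AFS code itself): read one page at the write position, never past the current i_size, and skip the pre-read entirely for a full-page write.

#include <stdio.h>

#define PAGE_SIZE 4096ULL

static unsigned long long fill_len(unsigned long long pos,
				   unsigned long long i_size,
				   unsigned long long write_len)
{
	if (write_len == PAGE_SIZE)
		return 0;			/* whole page rewritten, no pre-read */
	if (pos + PAGE_SIZE > i_size)
		return i_size - pos;		/* clamp the read to EOF */
	return PAGE_SIZE;
}

int main(void)
{
	printf("%llu\n", fill_len(8192, 10000, 500));	/* -> 1808 */
	printf("%llu\n", fill_len(0, 10000, 4096));	/* -> 0    */
	return 0;
}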
Signed-off-by: Anton Blanchard Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/afs/write.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/afs/write.c b/fs/afs/write.c index 789b3afb3423..b806285ff853 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -84,23 +84,21 @@ void afs_put_writeback(struct afs_writeback *wb) * partly or wholly fill a page that's under preparation for writing */ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, - loff_t pos, unsigned len, struct page *page) + loff_t pos, struct page *page) { loff_t i_size; - unsigned eof; int ret; + int len; - _enter(",,%llu,%u", (unsigned long long)pos, len); - - ASSERTCMP(len, <=, PAGE_CACHE_SIZE); + _enter(",,%llu", (unsigned long long)pos); i_size = i_size_read(&vnode->vfs_inode); - if (pos + len > i_size) - eof = i_size; + if (pos + PAGE_CACHE_SIZE > i_size) + len = i_size - pos; else - eof = PAGE_CACHE_SIZE; + len = PAGE_CACHE_SIZE; - ret = afs_vnode_fetch_data(vnode, key, 0, eof, page); + ret = afs_vnode_fetch_data(vnode, key, pos, len, page); if (ret < 0) { if (ret == -ENOENT) { _debug("got NOENT from server" @@ -153,9 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping, *pagep = page; /* page won't leak in error case: it eventually gets cleaned off LRU */ - if (!PageUptodate(page)) { - _debug("not up to date"); - ret = afs_fill_page(vnode, key, pos, len, page); + if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { + ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page); if (ret < 0) { kfree(candidate); _leave(" = %d [prep]", ret); -- cgit v1.2.3 From f9f07b6c1372b1436aa6b45333445b443ffd8c95 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 14 Jun 2011 00:58:27 +0200 Subject: vfs: Fix data corruption after failed write in __block_write_begin() I've got a report of a file corruption from fsxlinux on ext3. The important operations to the page were: mapwrite to a hole partial write to the page read - found the page zeroed from the end of the normal write The culprit seems to be that if get_block() fails in __block_write_begin() (e.g. transient ENOSPC in ext3), the function does ClearPageUptodate(page). Thus when we retry the write, the logic in __block_write_begin() thinks zeroing of the page is needed and overwrites old data. In fact, I don't see why we should ever need to zero the uptodate bit here - either the page was uptodate when we entered __block_write_begin() and it should stay so when we leave it, or it was not uptodate and noone had right to set it uptodate during __block_write_begin() so it remains !uptodate when we leave as well. So just remove clearing of the bit. 
Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/buffer.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/buffer.c b/fs/buffer.c index 49c9aada0374..1a80b048ade8 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1902,10 +1902,8 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len, if (!buffer_uptodate(*wait_bh)) err = -EIO; } - if (unlikely(err)) { + if (unlikely(err)) page_zero_new_buffers(page, from, to); - ClearPageUptodate(page); - } return err; } EXPORT_SYMBOL(__block_write_begin); -- cgit v1.2.3 From 2e41ae225f742ded5b7d9847cd8bd605f27daba8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jun 2011 00:38:44 +0100 Subject: AFS: Set s_id in the superblock to the volume name Set s_id in the superblock to the name of the AFS volume that this superblock corresponds to. Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/afs/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/afs/super.c b/fs/afs/super.c index b7d48d7eaa1e..356dcf0929e8 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -313,6 +313,7 @@ static int afs_fill_super(struct super_block *sb, sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; sb->s_bdi = &as->volume->bdi; + strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id)); /* allocate the root inode and dentry */ fid.vid = as->volume->vid; -- cgit v1.2.3 From d6e43f751f252c68ca69fa6d18665d88d69ef8b7 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 14 Jun 2011 00:45:44 +0100 Subject: AFS: Use i_generation not i_version for the vnode uniquifier Store the AFS vnode uniquifier in the i_generation field, not the i_version field of the inode struct. i_version can then be given the AFS data version number. Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/afs/dir.c | 8 ++++---- fs/afs/fsclient.c | 3 ++- fs/afs/inode.c | 10 +++++----- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 20c106f24927..1b0b19550015 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -584,11 +584,11 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, success: d_add(dentry, inode); - _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }", + _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }", fid.vnode, fid.unique, dentry->d_inode->i_ino, - (unsigned long long)dentry->d_inode->i_version); + dentry->d_inode->i_generation); return NULL; } @@ -671,10 +671,10 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd) * been deleted and replaced, and the original vnode ID has * been reused */ if (fid.unique != vnode->fid.unique) { - _debug("%s: file deleted (uq %u -> %u I:%llu)", + _debug("%s: file deleted (uq %u -> %u I:%u)", dentry->d_name.name, fid.unique, vnode->fid.unique, - (unsigned long long)dentry->d_inode->i_version); + dentry->d_inode->i_generation); spin_lock(&vnode->lock); set_bit(AFS_VNODE_DELETED, &vnode->flags); spin_unlock(&vnode->lock); diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 4bd0218473a9..346e3289abd7 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -89,7 +89,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp, i_size_write(&vnode->vfs_inode, size); vnode->vfs_inode.i_uid = status->owner; vnode->vfs_inode.i_gid = status->group; - vnode->vfs_inode.i_version = vnode->fid.unique; + vnode->vfs_inode.i_generation = vnode->fid.unique; vnode->vfs_inode.i_nlink = status->nlink; mode = vnode->vfs_inode.i_mode; @@ -102,6 +102,7 @@ 
static void xdr_decode_AFSFetchStatus(const __be32 **_bp, vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server; vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; + vnode->vfs_inode.i_version = data_version; } expected_version = status->data_version; diff --git a/fs/afs/inode.c b/fs/afs/inode.c index db66c5201474..0fdab6e03d87 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -75,7 +75,8 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key) inode->i_ctime.tv_nsec = 0; inode->i_atime = inode->i_mtime = inode->i_ctime; inode->i_blocks = 0; - inode->i_version = vnode->fid.unique; + inode->i_generation = vnode->fid.unique; + inode->i_version = vnode->status.data_version; inode->i_mapping->a_ops = &afs_fs_aops; /* check to see whether a symbolic link is really a mountpoint */ @@ -100,7 +101,7 @@ static int afs_iget5_test(struct inode *inode, void *opaque) struct afs_iget_data *data = opaque; return inode->i_ino == data->fid.vnode && - inode->i_version == data->fid.unique; + inode->i_generation == data->fid.unique; } /* @@ -122,7 +123,7 @@ static int afs_iget5_set(struct inode *inode, void *opaque) struct afs_vnode *vnode = AFS_FS_I(inode); inode->i_ino = data->fid.vnode; - inode->i_version = data->fid.unique; + inode->i_generation = data->fid.unique; vnode->fid = data->fid; vnode->volume = data->volume; @@ -380,8 +381,7 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry, inode = dentry->d_inode; - _enter("{ ino=%lu v=%llu }", inode->i_ino, - (unsigned long long)inode->i_version); + _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); generic_fillattr(inode, stat); return 0; -- cgit v1.2.3 From a27a263bae072a499acc77b632238a6dacccf888 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 16 Jun 2011 12:02:23 +0000 Subject: xfs: make log devices with write back caches work There's no reason not to support cache flushing on external log devices. The only thing this really requires is flushing the data device first both in fsync and log commits. A side effect is that we also have to remove the barrier write test during mount, which has been superflous since the new FLUSH+FUA code anyway. Also use the chance to flush the RT subvolume write cache before the fsync commit, which is required for correct semantics. Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_file.c | 50 ++++++++++++++++++----------- fs/xfs/linux-2.6/xfs_super.c | 75 -------------------------------------------- fs/xfs/xfs_log.c | 11 ++++++- 3 files changed, 41 insertions(+), 95 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index f4213ba1ff85..7f782af286bf 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -131,19 +131,34 @@ xfs_file_fsync( { struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; int error = 0; int log_flushed = 0; trace_xfs_file_fsync(ip); - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); xfs_iflags_clear(ip, XFS_ITRUNCATED); xfs_ioend_wait(ip); + if (mp->m_flags & XFS_MOUNT_BARRIER) { + /* + * If we have an RT and/or log subvolume we need to make sure + * to flush the write cache the device used for file data + * first. This is to ensure newly written file data make + * it to disk before logging the new inode size in case of + * an extending write. 
+ */ + if (XFS_IS_REALTIME_INODE(ip)) + xfs_blkdev_issue_flush(mp->m_rtdev_targp); + else if (mp->m_logdev_targp != mp->m_ddev_targp) + xfs_blkdev_issue_flush(mp->m_ddev_targp); + } + /* * We always need to make sure that the required inode state is safe on * disk. The inode might be clean but we still might need to force the @@ -175,9 +190,9 @@ xfs_file_fsync( * updates. The sync transaction will also force the log. */ xfs_iunlock(ip, XFS_ILOCK_SHARED); - tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); error = xfs_trans_reserve(tp, 0, - XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0); + XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); if (error) { xfs_trans_cancel(tp, 0); return -error; @@ -209,28 +224,25 @@ xfs_file_fsync( * force the log. */ if (xfs_ipincount(ip)) { - error = _xfs_log_force_lsn(ip->i_mount, + error = _xfs_log_force_lsn(mp, ip->i_itemp->ili_last_lsn, XFS_LOG_SYNC, &log_flushed); } xfs_iunlock(ip, XFS_ILOCK_SHARED); } - if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) { - /* - * If the log write didn't issue an ordered tag we need - * to flush the disk cache for the data device now. - */ - if (!log_flushed) - xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp); - - /* - * If this inode is on the RT dev we need to flush that - * cache as well. - */ - if (XFS_IS_REALTIME_INODE(ip)) - xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); - } + /* + * If we only have a single device, and the log force about was + * a no-op we might have to flush the data device cache here. + * This can only happen for fdatasync/O_DSYNC if we were overwriting + * an already allocated file and thus do not have any metadata to + * commit. + */ + if ((mp->m_flags & XFS_MOUNT_BARRIER) && + mp->m_logdev_targp == mp->m_ddev_targp && + !XFS_IS_REALTIME_INODE(ip) && + !log_flushed) + xfs_blkdev_issue_flush(mp->m_ddev_targp); return -error; } diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 1e3a7ce804dc..a1a881e68a9a 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -627,68 +627,6 @@ xfs_blkdev_put( blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } -/* - * Try to write out the superblock using barriers. - */ -STATIC int -xfs_barrier_test( - xfs_mount_t *mp) -{ - xfs_buf_t *sbp = xfs_getsb(mp, 0); - int error; - - XFS_BUF_UNDONE(sbp); - XFS_BUF_UNREAD(sbp); - XFS_BUF_UNDELAYWRITE(sbp); - XFS_BUF_WRITE(sbp); - XFS_BUF_UNASYNC(sbp); - XFS_BUF_ORDERED(sbp); - - xfsbdstrat(mp, sbp); - error = xfs_buf_iowait(sbp); - - /* - * Clear all the flags we set and possible error state in the - * buffer. We only did the write to try out whether barriers - * worked and shouldn't leave any traces in the superblock - * buffer. 
- */ - XFS_BUF_DONE(sbp); - XFS_BUF_ERROR(sbp, 0); - XFS_BUF_UNORDERED(sbp); - - xfs_buf_relse(sbp); - return error; -} - -STATIC void -xfs_mountfs_check_barriers(xfs_mount_t *mp) -{ - int error; - - if (mp->m_logdev_targp != mp->m_ddev_targp) { - xfs_notice(mp, - "Disabling barriers, not supported with external log device"); - mp->m_flags &= ~XFS_MOUNT_BARRIER; - return; - } - - if (xfs_readonly_buftarg(mp->m_ddev_targp)) { - xfs_notice(mp, - "Disabling barriers, underlying device is readonly"); - mp->m_flags &= ~XFS_MOUNT_BARRIER; - return; - } - - error = xfs_barrier_test(mp); - if (error) { - xfs_notice(mp, - "Disabling barriers, trial barrier write failed"); - mp->m_flags &= ~XFS_MOUNT_BARRIER; - return; - } -} - void xfs_blkdev_issue_flush( xfs_buftarg_t *buftarg) @@ -1240,14 +1178,6 @@ xfs_fs_remount( switch (token) { case Opt_barrier: mp->m_flags |= XFS_MOUNT_BARRIER; - - /* - * Test if barriers are actually working if we can, - * else delay this check until the filesystem is - * marked writeable. - */ - if (!(mp->m_flags & XFS_MOUNT_RDONLY)) - xfs_mountfs_check_barriers(mp); break; case Opt_nobarrier: mp->m_flags &= ~XFS_MOUNT_BARRIER; @@ -1282,8 +1212,6 @@ xfs_fs_remount( /* ro -> rw */ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { mp->m_flags &= ~XFS_MOUNT_RDONLY; - if (mp->m_flags & XFS_MOUNT_BARRIER) - xfs_mountfs_check_barriers(mp); /* * If this is the first remount to writeable state we @@ -1465,9 +1393,6 @@ xfs_fs_fill_super( if (error) goto out_free_sb; - if (mp->m_flags & XFS_MOUNT_BARRIER) - xfs_mountfs_check_barriers(mp); - error = xfs_filestream_mount(mp); if (error) goto out_free_sb; diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 211930246f20..41d5b8f2bf92 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1372,8 +1372,17 @@ xlog_sync(xlog_t *log, XFS_BUF_ASYNC(bp); bp->b_flags |= XBF_LOG_BUFFER; - if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) + if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { + /* + * If we have an external log device, flush the data device + * before flushing the log to make sure all meta data + * written back from the AIL actually made it to disk + * before writing out the new log tail LSN in the log buffer. + */ + if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) + xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); XFS_BUF_ORDERED(bp); + } ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); -- cgit v1.2.3 From ee7b75fc4f3ae49e1f25bf56219bb5de3c29afaf Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 16 Jun 2011 13:15:41 -0400 Subject: NFSv4: Fix a readdir regression Commit 7ebb9315 (NFS: use secinfo when crossing mountpoints) introduces a regression when decoding an NFSv4 readdir entry that sets the rdattr_error field. By treating the resulting value as if it is a decoding error, the current code may cause us to skip valid readdir entries. 
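The shape of the fix is easiest to see outside the XDR code: keep the function's return value for genuine decode failures and hand the per-entry rdattr_error back through an out parameter, so the caller can act on it without discarding the entry. Below is a minimal standalone C model of that pattern; the helper name and the byte-order handling are simplified for illustration and are not the nfs4xdr code itself.

#include <stdio.h>

#define NFS4ERR_WRONGSEC 10016      /* protocol error value, RFC 3530 */

/*
 * Model of the fixed decoder: the return value reports only decode
 * failures, while the rdattr_error carried in the entry is handed back
 * through *res, so a per-entry error no longer looks like bad XDR.
 */
static int decode_rdattr_error(const unsigned int *attr, int present, int *res)
{
    if (!present)
        return 0;           /* attribute absent, nothing to report */
    *res = -(int)*attr;     /* pass the NFS error to the caller */
    return 0;               /* decoding itself succeeded */
}

int main(void)
{
    unsigned int wire = NFS4ERR_WRONGSEC;
    int err = 0;

    if (decode_rdattr_error(&wire, 1, &err) < 0)
        return 1;           /* only genuine decode failures land here */
    if (err == -NFS4ERR_WRONGSEC)
        printf("entry kept; fixing up security attributes (err=%d)\n", err);
    return 0;
}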
Reported-by: Andy Adamson Cc: stable@kernel.org [2.6.39] Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c4b7d6c04948..5db44a8dddf9 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -3098,7 +3098,7 @@ out_overflow: return -EIO; } -static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap) +static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res) { __be32 *p; @@ -3109,7 +3109,7 @@ static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap) if (unlikely(!p)) goto out_overflow; bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR; - return -be32_to_cpup(p); + *res = -be32_to_cpup(p); } return 0; out_overflow: @@ -4070,6 +4070,7 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, int status; umode_t fmode = 0; uint32_t type; + int32_t err; status = decode_attr_type(xdr, bitmap, &type); if (status < 0) @@ -4095,13 +4096,12 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, goto xdr_error; fattr->valid |= status; - status = decode_attr_error(xdr, bitmap); - if (status == -NFS4ERR_WRONGSEC) { - nfs_fixup_secinfo_attributes(fattr, fh); - status = 0; - } + err = 0; + status = decode_attr_error(xdr, bitmap, &err); if (status < 0) goto xdr_error; + if (err == -NFS4ERR_WRONGSEC) + nfs_fixup_secinfo_attributes(fattr, fh); status = decode_attr_filehandle(xdr, bitmap, fh); if (status < 0) -- cgit v1.2.3 From 879669961b11e7f40b518784863a259f735a72bf Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 17 Jun 2011 11:25:59 +0100 Subject: KEYS/DNS: Fix ____call_usermodehelper() to not lose the session keyring ____call_usermodehelper() now erases any credentials set by the subprocess_inf::init() function. The problem is that commit 17f60a7da150 ("capabilites: allow the application of capability limits to usermode helpers") creates and commits new credentials with prepare_kernel_cred() after the call to the init() function. This wipes all keyrings after umh_keys_init() is called. The best way to deal with this is to put the init() call just prior to the commit_creds() call, and pass the cred pointer to init(). That means that umh_keys_init() and suchlike can modify the credentials _before_ they are published and potentially in use by the rest of the system. This prevents request_key() from working as it is prevented from passing the session keyring it set up with the authorisation token to /sbin/request-key, and so the latter can't assume the authority to instantiate the key. This causes the in-kernel DNS resolver to fail with ENOKEY unconditionally. 
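The ordering that matters here, prepare the new credentials, let the helper's init() modify them, and only then commit, can be modelled in a few lines of standalone C. The struct and helper names below are simplified stand-ins for the kernel objects, not the real kmod/cred API.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel objects, not the real API. */
struct cred { void *session_keyring; };

struct subprocess_info {
    int (*init)(struct subprocess_info *info, struct cred *new);
    void *data;
};

static struct cred *prepare_cred(void)   { return calloc(1, sizeof(struct cred)); }
static void abort_cred(struct cred *c)   { free(c); }
static void commit_cred(struct cred *c)
{
    printf("committed creds with keyring %p\n", c->session_keyring);
    free(c);
}

/* The helper's init() now gets the new, not-yet-committed credentials. */
static int umh_keys_init(struct subprocess_info *info, struct cred *new)
{
    new->session_keyring = info->data;
    return 0;
}

/* Ordering from the fix: prepare, then init(new), then commit. */
static int run_helper(struct subprocess_info *sub)
{
    struct cred *new = prepare_cred();
    if (!new)
        return -1;
    if (sub->init && sub->init(sub, new)) {
        abort_cred(new);        /* a failed init must not leak the creds */
        return -1;
    }
    commit_cred(new);
    return 0;
}

int main(void)
{
    int keyring = 42;
    struct subprocess_info sub = { .init = umh_keys_init, .data = &keyring };
    return run_helper(&sub) ? 1 : 0;
}

Because init() runs against credentials that have not been committed yet, whatever it installs (here the session keyring) survives the commit instead of being wiped by a later prepare/commit pair.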
Signed-off-by: David Howells Acked-by: Eric Paris Tested-by: Jeff Layton Signed-off-by: Linus Torvalds --- fs/exec.c | 2 +- include/linux/kmod.h | 8 ++++---- kernel/kmod.c | 16 +++++++++------- security/keys/request_key.c | 3 +-- 4 files changed, 15 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 97e0d52d72fd..6075a1e727ae 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1996,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file) * is a special value that we use to trap recursive * core dumps */ -static int umh_pipe_setup(struct subprocess_info *info) +static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) { struct file *rp, *wp; struct fdtable *fdt; diff --git a/include/linux/kmod.h b/include/linux/kmod.h index d4a5c84c503d..0da38cf7db7b 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h @@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; #endif -struct key; +struct cred; struct file; enum umh_wait { @@ -62,7 +62,7 @@ struct subprocess_info { char **envp; enum umh_wait wait; int retval; - int (*init)(struct subprocess_info *info); + int (*init)(struct subprocess_info *info, struct cred *new); void (*cleanup)(struct subprocess_info *info); void *data; }; @@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, /* Set various pieces of state into the subprocess_info structure */ void call_usermodehelper_setfns(struct subprocess_info *info, - int (*init)(struct subprocess_info *info), + int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *info), void *data); @@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info); static inline int call_usermodehelper_fns(char *path, char **argv, char **envp, enum umh_wait wait, - int (*init)(struct subprocess_info *info), + int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *), void *data) { struct subprocess_info *info; diff --git a/kernel/kmod.c b/kernel/kmod.c index ad6a81c58b44..47613dfb7b28 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data) */ set_user_nice(current, 0); - if (sub_info->init) { - retval = sub_info->init(sub_info); - if (retval) - goto fail; - } - retval = -ENOMEM; new = prepare_kernel_cred(current); if (!new) @@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data) new->cap_inheritable); spin_unlock(&umh_sysctl_lock); + if (sub_info->init) { + retval = sub_info->init(sub_info, new); + if (retval) { + abort_creds(new); + goto fail; + } + } + commit_creds(new); retval = kernel_execve(sub_info->path, @@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup); * context in which call_usermodehelper_exec is called. */ void call_usermodehelper_setfns(struct subprocess_info *info, - int (*init)(struct subprocess_info *info), + int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *info), void *data) { diff --git a/security/keys/request_key.c b/security/keys/request_key.c index d31862e0aa1c..8e319a416eec 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -71,9 +71,8 @@ EXPORT_SYMBOL(complete_request_key); * This is called in context of freshly forked kthread before kernel_execve(), * so we can simply install the desired session_keyring at this point. 
*/ -static int umh_keys_init(struct subprocess_info *info) +static int umh_keys_init(struct subprocess_info *info, struct cred *cred) { - struct cred *cred = (struct cred*)current_cred(); struct key *keyring = info->data; return install_session_keyring_to_cred(cred, keyring); -- cgit v1.2.3 From 7585717f304f5ed005cc4ad933a69aab3efbd136 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 13 Jun 2011 20:00:16 -0400 Subject: Btrfs: fix relocation races The recent commit to get rid of our trans_mutex introduced some races with block group relocation. The problem is that relocation needs to do some record keeping about each root, and it was relying on the transaction mutex to coordinate things in subtle ways. This fix adds a mutex just for the relocation code and makes sure it doesn't have a big impact on normal operations. The race is really fixed in btrfs_record_root_in_trans, which is where we step back and wait for the relocation code to finish accounting setup. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 14 ++++++++++ fs/btrfs/disk-io.c | 1 + fs/btrfs/relocation.c | 30 ++++++++++++++------- fs/btrfs/transaction.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++--- 4 files changed, 105 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8490ee063709..a2c91a102b72 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -967,6 +967,12 @@ struct btrfs_fs_info { struct srcu_struct subvol_srcu; spinlock_t trans_lock; + /* + * the reloc mutex goes with the trans lock, it is taken + * during commit to protect us from the relocation code + */ + struct mutex reloc_mutex; + struct list_head trans_list; struct list_head hashers; struct list_head dead_roots; @@ -1172,6 +1178,14 @@ struct btrfs_root { u32 type; u64 highest_objectid; + + /* btrfs_record_root_in_trans is a multi-step process, + * and it can race with the balancing code. But the + * race is very small, and only the first time the root + * is added to each transaction. 
So in_trans_setup + * is used to tell us when more checks are required + */ + unsigned long in_trans_setup; int ref_cows; int track_dirty; int in_radix; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 20c111b3fa0d..0b2b4b759136 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1620,6 +1620,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); + mutex_init(&fs_info->reloc_mutex); init_completion(&fs_info->kobj_unregister); fs_info->tree_root = tree_root; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f25b10a22a0a..086b1e6b8614 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1368,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, int ret; if (!root->reloc_root) - return 0; + goto out; reloc_root = root->reloc_root; root_item = &reloc_root->root_item; @@ -1390,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans, ret = btrfs_update_root(trans, root->fs_info->tree_root, &reloc_root->root_key, root_item); BUG_ON(ret); + +out: return 0; } @@ -2142,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err) u64 num_bytes = 0; int ret; - spin_lock(&root->fs_info->trans_lock); + mutex_lock(&root->fs_info->reloc_mutex); rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; rc->merging_rsv_size += rc->nodes_relocated * 2; - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->reloc_mutex); + again: if (!err) { num_bytes = rc->merging_rsv_size; @@ -2214,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc) int ret; again: root = rc->extent_root; - spin_lock(&root->fs_info->trans_lock); + + /* + * this serializes us with btrfs_record_root_in_transaction, + * we have to make sure nobody is in the middle of + * adding their roots to the list while we are + * doing this splice + */ + mutex_lock(&root->fs_info->reloc_mutex); list_splice_init(&rc->reloc_roots, &reloc_roots); - spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->reloc_mutex); while (!list_empty(&reloc_roots)) { found = 1; @@ -3590,17 +3600,19 @@ next: static void set_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - spin_lock(&fs_info->trans_lock); + + mutex_lock(&fs_info->reloc_mutex); fs_info->reloc_ctl = rc; - spin_unlock(&fs_info->trans_lock); + mutex_unlock(&fs_info->reloc_mutex); } static void unset_reloc_control(struct reloc_control *rc) { struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; - spin_lock(&fs_info->trans_lock); + + mutex_lock(&fs_info->reloc_mutex); fs_info->reloc_ctl = NULL; - spin_unlock(&fs_info->trans_lock); + mutex_unlock(&fs_info->reloc_mutex); } static int check_extent_flags(u64 flags) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 2b3590b9fe98..833996a0c628 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -126,28 +126,85 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail) * to make sure the old root from before we joined the transaction is deleted * when the transaction commits */ -int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, +static int record_root_in_trans(struct btrfs_trans_handle *trans, struct btrfs_root *root) { if (root->ref_cows && root->last_trans < trans->transid) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); 
+ /* + * see below for in_trans_setup usage rules + * we have the reloc mutex held now, so there + * is only one writer in this function + */ + root->in_trans_setup = 1; + + /* make sure readers find in_trans_setup before + * they find our root->last_trans update + */ + smp_wmb(); + spin_lock(&root->fs_info->fs_roots_radix_lock); if (root->last_trans == trans->transid) { spin_unlock(&root->fs_info->fs_roots_radix_lock); return 0; } - root->last_trans = trans->transid; radix_tree_tag_set(&root->fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); spin_unlock(&root->fs_info->fs_roots_radix_lock); + root->last_trans = trans->transid; + + /* this is pretty tricky. We don't want to + * take the relocation lock in btrfs_record_root_in_trans + * unless we're really doing the first setup for this root in + * this transaction. + * + * Normally we'd use root->last_trans as a flag to decide + * if we want to take the expensive mutex. + * + * But, we have to set root->last_trans before we + * init the relocation root, otherwise, we trip over warnings + * in ctree.c. The solution used here is to flag ourselves + * with root->in_trans_setup. When this is 1, we're still + * fixing up the reloc trees and everyone must wait. + * + * When this is zero, they can trust root->last_trans and fly + * through btrfs_record_root_in_trans without having to take the + * lock. smp_wmb() makes sure that all the writes above are + * done before we pop in the zero below + */ btrfs_init_reloc_root(trans, root); + smp_wmb(); + root->in_trans_setup = 0; } return 0; } + +int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, + struct btrfs_root *root) +{ + if (!root->ref_cows) + return 0; + + /* + * see record_root_in_trans for comments about in_trans_setup usage + * and barriers + */ + smp_rmb(); + if (root->last_trans == trans->transid && + !root->in_trans_setup) + return 0; + + mutex_lock(&root->fs_info->reloc_mutex); + record_root_in_trans(trans, root); + mutex_unlock(&root->fs_info->reloc_mutex); + + return 0; +} + /* wait for commit against the current transaction to become unblocked * when this is done, it is safe to start a new transaction, but the current * transaction might not be fully on disk. 
@@ -882,7 +939,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, parent = dget_parent(dentry); parent_inode = parent->d_inode; parent_root = BTRFS_I(parent_inode)->root; - btrfs_record_root_in_trans(trans, parent_root); + record_root_in_trans(trans, parent_root); /* * insert the directory item @@ -900,7 +957,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); - btrfs_record_root_in_trans(trans, root); + record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -1247,6 +1304,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, } while (atomic_read(&cur_trans->num_writers) > 1 || (should_grow && cur_trans->num_joined != joined)); + /* + * the reloc mutex makes sure that we stop + * the balancing code from coming in and moving + * extents around in the middle of the commit + */ + mutex_lock(&root->fs_info->reloc_mutex); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); @@ -1312,6 +1376,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, root->fs_info->running_transaction = NULL; root->fs_info->trans_no_join = 0; spin_unlock(&root->fs_info->trans_lock); + mutex_unlock(&root->fs_info->reloc_mutex); wake_up(&root->fs_info->transaction_wait); -- cgit v1.2.3 From 3ed4498caf381a73d6259d3ffacc914b17a507ec Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 13 Jun 2011 17:54:22 +0000 Subject: btrfs: fix dereference of ERR_PTR value smatch reports: btrfs_recover_log_trees error: 'wc.replay_dest' dereferencing possible ERR_PTR() Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 592396c6dc47..4ce8a9f41d1e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3177,7 +3177,7 @@ again: tmp_key.offset = (u64)-1; wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); - BUG_ON(!wc.replay_dest); + BUG_ON(IS_ERR_OR_NULL(wc.replay_dest)); wc.replay_dest->log_root = log; btrfs_record_root_in_trans(trans, wc.replay_dest); -- cgit v1.2.3 From 9fe6a50fb764f508dd2de47a66e62e51388791fb Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Thu, 16 Jun 2011 09:04:57 +0000 Subject: btrfs: Remove unused sysfs code Removes code no longer used. The sysfs file itself is kept, because the btrfs developers expressed interest in putting new entries to sysfs. 
Signed-off-by: Maarten Lankhorst Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 - fs/btrfs/disk-io.c | 1 - fs/btrfs/sysfs.c | 146 ----------------------------------------------------- 3 files changed, 148 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a2c91a102b72..8e948ec1ee6b 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1195,7 +1195,6 @@ struct btrfs_root { struct btrfs_key defrag_max; int defrag_running; char *name; - int in_sysfs; /* the dirty list is only used by non-reference counted roots */ struct list_head dirty_list; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0b2b4b759136..c25ef5a0ccd6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, root->last_trans = 0; root->highest_objectid = 0; root->name = NULL; - root->in_sysfs = 0; root->inode_tree = RB_ROOT; INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); root->block_rsv = NULL; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index c3c223ae6691..daac9ae6d731 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -28,152 +28,6 @@ #include "disk-io.h" #include "transaction.h" -static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_root_used(&root->root_item)); -} - -static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_root_limit(&root->root_item)); -} - -static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf) -{ - - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_super_bytes_used(&fs->super_copy)); -} - -static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_super_total_bytes(&fs->super_copy)); -} - -static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)btrfs_super_sectorsize(&fs->super_copy)); -} - -/* this is for root attrs (subvols/snapshots) */ -struct btrfs_root_attr { - struct attribute attr; - ssize_t (*show)(struct btrfs_root *, char *); - ssize_t (*store)(struct btrfs_root *, const char *, size_t); -}; - -#define ROOT_ATTR(name, mode, show, store) \ -static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \ - show, store) - -ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL); -ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL); - -static struct attribute *btrfs_root_attrs[] = { - &btrfs_root_attr_blocks_used.attr, - &btrfs_root_attr_block_limit.attr, - NULL, -}; - -/* this is for super attrs (actual full fs) */ -struct btrfs_super_attr { - struct attribute attr; - ssize_t (*show)(struct btrfs_fs_info *, char *); - ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t); -}; - -#define SUPER_ATTR(name, mode, show, store) \ -static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \ - show, store) - -SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL); -SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL); -SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL); - -static struct attribute *btrfs_super_attrs[] = { - &btrfs_super_attr_blocks_used.attr, - &btrfs_super_attr_total_blocks.attr, - &btrfs_super_attr_blocksize.attr, - NULL, -}; - -static ssize_t 
btrfs_super_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, - super_kobj); - struct btrfs_super_attr *a = container_of(attr, - struct btrfs_super_attr, - attr); - - return a->show ? a->show(fs, buf) : 0; -} - -static ssize_t btrfs_super_attr_store(struct kobject *kobj, - struct attribute *attr, - const char *buf, size_t len) -{ - struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, - super_kobj); - struct btrfs_super_attr *a = container_of(attr, - struct btrfs_super_attr, - attr); - - return a->store ? a->store(fs, buf, len) : 0; -} - -static ssize_t btrfs_root_attr_show(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - struct btrfs_root *root = container_of(kobj, struct btrfs_root, - root_kobj); - struct btrfs_root_attr *a = container_of(attr, - struct btrfs_root_attr, - attr); - - return a->show ? a->show(root, buf) : 0; -} - -static ssize_t btrfs_root_attr_store(struct kobject *kobj, - struct attribute *attr, - const char *buf, size_t len) -{ - struct btrfs_root *root = container_of(kobj, struct btrfs_root, - root_kobj); - struct btrfs_root_attr *a = container_of(attr, - struct btrfs_root_attr, - attr); - return a->store ? a->store(root, buf, len) : 0; -} - -static void btrfs_super_release(struct kobject *kobj) -{ - struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info, - super_kobj); - complete(&fs->kobj_unregister); -} - -static void btrfs_root_release(struct kobject *kobj) -{ - struct btrfs_root *root = container_of(kobj, struct btrfs_root, - root_kobj); - complete(&root->kobj_unregister); -} - -static const struct sysfs_ops btrfs_super_attr_ops = { - .show = btrfs_super_attr_show, - .store = btrfs_super_attr_store, -}; - -static const struct sysfs_ops btrfs_root_attr_ops = { - .show = btrfs_root_attr_show, - .store = btrfs_root_attr_store, -}; - /* /sys/fs/btrfs/ entry */ static struct kset *btrfs_kset; -- cgit v1.2.3 From 19fd294957e426bfdd8e19085096467ec18df5c4 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 15 Jun 2011 10:47:30 +0000 Subject: btrfs: fix wrong reservation when doing delayed inode operations We have migrated the space for the delayed inode items from trans_block_rsv to global_block_rsv, but we forgot to set trans->block_rsv to global_block_rsv when we doing delayed inode operations, and the following Oops happened: [ 9792.654889] ------------[ cut here ]------------ [ 9792.654898] WARNING: at fs/btrfs/extent-tree.c:5681 btrfs_alloc_free_block+0xca/0x27c [btrfs]() [ 9792.654899] Hardware name: To Be Filled By O.E.M. [ 9792.654900] Modules linked in: btrfs zlib_deflate libcrc32c ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 ip6table_filter ip6_tables arc4 rt61pci rt2x00pci rt2x00lib snd_hda_codec_hdmi mac80211 snd_hda_codec_realtek cfg80211 snd_hda_intel edac_core snd_seq rfkill pcspkr serio_raw snd_hda_codec eeprom_93cx6 edac_mce_amd sp5100_tco i2c_piix4 k10temp snd_hwdep snd_seq_device snd_pcm floppy r8169 xhci_hcd mii snd_timer snd soundcore snd_page_alloc ipv6 firewire_ohci pata_acpi ata_generic firewire_core pata_via crc_itu_t radeon ttm drm_kms_helper drm i2c_algo_bit i2c_core [last unloaded: scsi_wait_scan] [ 9792.654919] Pid: 2762, comm: rm Tainted: G W 2.6.39+ #1 [ 9792.654920] Call Trace: [ 9792.654922] [] warn_slowpath_common+0x83/0x9b [ 9792.654925] [] warn_slowpath_null+0x1a/0x1c [ 9792.654933] [] btrfs_alloc_free_block+0xca/0x27c [btrfs] [ 9792.654945] [] ? 
map_extent_buffer+0x6e/0xa8 [btrfs] [ 9792.654953] [] __btrfs_cow_block+0xfc/0x30c [btrfs] [ 9792.654963] [] ? btrfs_buffer_uptodate+0x47/0x58 [btrfs] [ 9792.654970] [] ? read_block_for_search+0x94/0x368 [btrfs] [ 9792.654978] [] btrfs_cow_block+0xfe/0x146 [btrfs] [ 9792.654986] [] btrfs_search_slot+0x14d/0x4b6 [btrfs] [ 9792.654997] [] ? map_extent_buffer+0x6e/0xa8 [btrfs] [ 9792.655022] [] btrfs_lookup_inode+0x2f/0x8f [btrfs] [ 9792.655025] [] ? _cond_resched+0xe/0x22 [ 9792.655027] [] ? mutex_lock+0x29/0x50 [ 9792.655039] [] btrfs_update_delayed_inode+0x72/0x137 [btrfs] [ 9792.655051] [] btrfs_run_delayed_items+0x90/0xdb [btrfs] [ 9792.655062] [] btrfs_commit_transaction+0x228/0x654 [btrfs] [ 9792.655064] [] ? remove_wait_queue+0x3a/0x3a [ 9792.655075] [] btrfs_evict_inode+0x14d/0x202 [btrfs] [ 9792.655077] [] evict+0x71/0x111 [ 9792.655079] [] iput+0x12a/0x132 [ 9792.655081] [] do_unlinkat+0x106/0x155 [ 9792.655083] [] ? path_put+0x1f/0x23 [ 9792.655085] [] ? audit_syscall_entry+0x145/0x171 [ 9792.655087] [] ? putname+0x34/0x36 [ 9792.655090] [] sys_unlinkat+0x29/0x2b [ 9792.655092] [] system_call_fastpath+0x16/0x1b [ 9792.655093] ---[ end trace 02b696eb02b3f768 ]--- This patch fix it by setting the reservation of the transaction handle to the correct one. Reported-by: Josef Bacik Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 25 ++++++++++++++++++++----- fs/btrfs/delayed-inode.h | 1 - 2 files changed, 20 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 6462c29d2d37..fc515b787e8c 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -297,7 +297,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) item->data_len = data_len; item->ins_or_del = 0; item->bytes_reserved = 0; - item->block_rsv = NULL; item->delayed_node = NULL; atomic_set(&item->refs, 1); } @@ -593,10 +592,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, num_bytes = btrfs_calc_trans_metadata_size(root, 1); ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); - if (!ret) { + if (!ret) item->bytes_reserved = num_bytes; - item->block_rsv = dst_rsv; - } return ret; } @@ -604,10 +601,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, struct btrfs_delayed_item *item) { + struct btrfs_block_rsv *rsv; + if (!item->bytes_reserved) return; - btrfs_block_rsv_release(root, item->block_rsv, + rsv = &root->fs_info->global_block_rsv; + btrfs_block_rsv_release(root, rsv, item->bytes_reserved); } @@ -1014,6 +1014,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_delayed_root *delayed_root; struct btrfs_delayed_node *curr_node, *prev_node; struct btrfs_path *path; + struct btrfs_block_rsv *block_rsv; int ret = 0; path = btrfs_alloc_path(); @@ -1021,6 +1022,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; + block_rsv = trans->block_rsv; + trans->block_rsv = &root->fs_info->global_block_rsv; + delayed_root = btrfs_get_delayed_root(root); curr_node = btrfs_first_delayed_node(delayed_root); @@ -1045,6 +1049,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, } btrfs_free_path(path); + trans->block_rsv = block_rsv; return ret; } @@ -1052,6 +1057,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, struct btrfs_delayed_node 
*node) { struct btrfs_path *path; + struct btrfs_block_rsv *block_rsv; int ret; path = btrfs_alloc_path(); @@ -1059,6 +1065,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; + block_rsv = trans->block_rsv; + trans->block_rsv = &node->root->fs_info->global_block_rsv; + ret = btrfs_insert_delayed_items(trans, path, node->root, node); if (!ret) ret = btrfs_delete_delayed_items(trans, path, node->root, node); @@ -1066,6 +1075,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, ret = btrfs_update_delayed_inode(trans, node->root, path, node); btrfs_free_path(path); + trans->block_rsv = block_rsv; return ret; } @@ -1116,6 +1126,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) struct btrfs_path *path; struct btrfs_delayed_node *delayed_node = NULL; struct btrfs_root *root; + struct btrfs_block_rsv *block_rsv; unsigned long nr = 0; int need_requeue = 0; int ret; @@ -1134,6 +1145,9 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) if (IS_ERR(trans)) goto free_path; + block_rsv = trans->block_rsv; + trans->block_rsv = &root->fs_info->global_block_rsv; + ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); if (!ret) ret = btrfs_delete_delayed_items(trans, path, root, @@ -1176,6 +1190,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) nr = trans->blocks_used; + trans->block_rsv = block_rsv; btrfs_end_transaction_dmeta(trans, root); __btrfs_btree_balance_dirty(root, nr); free_path: diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index eb7d240aa648..cb79b6771e82 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -75,7 +75,6 @@ struct btrfs_delayed_item { struct list_head tree_list; /* used for batch insert/delete items */ struct list_head readdir_list; /* used for readdir items */ u64 bytes_reserved; - struct btrfs_block_rsv *block_rsv; struct btrfs_delayed_node *delayed_node; atomic_t refs; int ins_or_del; -- cgit v1.2.3 From 35a30d7ce54e087d8025a725d4e5a2fdee723a9f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 13 Jun 2011 15:18:23 +0000 Subject: btrfs: fix uninitialized return value When allocation fails in btrfs_read_fs_root_no_name, ret is not set although it is returned, holding a garbage value. Signed-off-by: David Sterba Reviewed-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index c25ef5a0ccd6..1ac8db5dc0a3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1299,12 +1299,12 @@ again: return root; root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); - if (!root->free_ino_ctl) - goto fail; root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), GFP_NOFS); - if (!root->free_ino_pinned) + if (!root->free_ino_pinned || !root->free_ino_ctl) { + ret = -ENOMEM; goto fail; + } btrfs_init_free_ino_ctl(root); mutex_init(&root->fs_commit_mutex); -- cgit v1.2.3 From e999376f094162aa425ae749aa1df95ab928d010 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 17 Jun 2011 16:14:09 -0400 Subject: Btrfs: avoid delayed metadata items during commits Snapshot creation has two phases. One is the initial snapshot setup, and the second is done during commit, while nobody is allowed to modify the root we are snapshotting. 
The delayed metadata insertion code can break that rule, it does a delayed inode update on the inode of the parent of the snapshot, and delayed directory item insertion. This makes sure to run the pending delayed operations before we record the snapshot root, which avoids corruptions. Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 7 +++++++ fs/btrfs/delayed-inode.h | 4 ++++ fs/btrfs/transaction.c | 27 +++++++++++++++++---------- 3 files changed, 28 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index fc515b787e8c..f1cbd028f7b3 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1237,6 +1237,13 @@ again: return 0; } +void btrfs_assert_delayed_root_empty(struct btrfs_root *root) +{ + struct btrfs_delayed_root *delayed_root; + delayed_root = btrfs_get_delayed_root(root); + WARN_ON(btrfs_first_delayed_node(delayed_root)); +} + void btrfs_balance_delayed_items(struct btrfs_root *root) { struct btrfs_delayed_root *delayed_root; diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index cb79b6771e82..d1a6a2915c66 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -137,4 +137,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, /* for init */ int __init btrfs_delayed_inode_init(void); void btrfs_delayed_inode_exit(void); + +/* for debugging */ +void btrfs_assert_delayed_root_empty(struct btrfs_root *root); + #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c073d85e14f3..51dcec86757f 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -957,6 +957,15 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_update_inode(trans, parent_root, parent_inode); BUG_ON(ret); + /* + * pull in the delayed directory update + * and the delayed inode item + * otherwise we corrupt the FS during + * snapshot + */ + ret = btrfs_run_delayed_items(trans, root); + BUG_ON(ret); + record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); @@ -1018,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, int ret; list_for_each_entry(pending, head, list) { - /* - * We must deal with the delayed items before creating - * snapshots, or we will create a snapthot with inconsistent - * information. 
- */ - ret = btrfs_run_delayed_items(trans, fs_info->fs_root); - BUG_ON(ret); - ret = create_pending_snapshot(trans, fs_info, pending); BUG_ON(ret); } @@ -1319,15 +1320,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, */ mutex_lock(&root->fs_info->reloc_mutex); - ret = create_pending_snapshots(trans, root->fs_info); + ret = btrfs_run_delayed_items(trans, root); BUG_ON(ret); - ret = btrfs_run_delayed_items(trans, root); + ret = create_pending_snapshots(trans, root->fs_info); BUG_ON(ret); ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); BUG_ON(ret); + /* + * make sure none of the code above managed to slip in a + * delayed item + */ + btrfs_assert_delayed_root_empty(root); + WARN_ON(cur_trans != trans->transaction); btrfs_scrub_pause(root); -- cgit v1.2.3 From c11760c6d80ab6aa20e383cf378a7287305f591c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 8 Jun 2011 10:30:03 -0700 Subject: isofs: fix bh leak in isofs_fill_super() error case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In isofs_fill_super(), when an iso_primary_descriptor is found, it is kept in pri_bh. The error cases don't properly release it. Fix it. Reported-and-tested-by: 김원석 Cc: Andrew Morton Signed-off-by: Linus Torvalds --- fs/isofs/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 3db5ba4568fc..b3cc8586984e 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -974,7 +974,7 @@ out_no_inode: out_no_read: printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n", __func__, s->s_id, iso_blknum, block); - goto out_freesbi; + goto out_freebh; out_bad_zone_size: printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n", sbi->s_log_zone_size); @@ -989,6 +989,7 @@ out_unknown_format: out_freebh: brelse(bh); + brelse(pri_bh); out_freesbi: kfree(opt.iocharset); kfree(sbi); -- cgit v1.2.3 From df18d127f4fed7a0284bcfa8d2843800cdb63b72 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Fri, 17 Jun 2011 16:25:51 -0400 Subject: pnfs-obj: No longer needed to take an extra ref at add_device Andy's last device_cache patches, already take an extra reference on the newly inserted device_id. So we can remove it from obj-io. Without this patch the device_ids are leaked. Andy's patches are not in Linus tree yet. So I'm not sure if they are scheduled for this Kernel or the next. This patch should be added as part of these. CC: Andy Adamson Signed-off-by: Boaz Harrosh Signed-off-by: Trond Myklebust --- fs/nfs/objlayout/objio_osd.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9cf208df1f25..eb4aafa9f521 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -108,7 +108,6 @@ _dev_list_add(const struct nfs_server *nfss, de = n; } - atomic_inc(&de->id_node.ref); return de; } -- cgit v1.2.3 From 105f4622104848ff1ee1f644d661bef9dec3eb27 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 7 Jun 2011 11:50:23 -0400 Subject: nfsd4: fix break_lease flags on nfsd open Thanks to Casey Bodley for pointing out that on a read open we pass 0, instead of O_RDONLY, to break_lease, with the result that a read open is treated like a write open for the purposes of lease breaking! Reported-by: Casey Bodley Cc: stable@kernel.org Signed-off-by: J. 
Bruce Fields --- fs/nfsd/vfs.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index f3fb61b69a1e..fd0acca5370a 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -696,7 +696,15 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor } #endif /* CONFIG_NFSD_V3 */ +static int nfsd_open_break_lease(struct inode *inode, int access) +{ + unsigned int mode; + if (access & NFSD_MAY_NOT_BREAK_LEASE) + return 0; + mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY; + return break_lease(inode, mode | O_NONBLOCK); +} /* * Open an existing file or directory. @@ -744,12 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (!inode->i_fop) goto out; - /* - * Check to see if there are any leases on this file. - * This may block while leases are broken. - */ - if (!(access & NFSD_MAY_NOT_BREAK_LEASE)) - host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0)); + host_err = nfsd_open_break_lease(inode, access); if (host_err) /* NOMEM or WOULDBLOCK */ goto out_nfserr; -- cgit v1.2.3 From 185bf87393afe6b966881e36c459949d90930a7a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 20 Jun 2011 10:10:24 +0300 Subject: ubifs: dereferencing an ERR_PTR in ubifs_mount() d251ed271d5 "ubifs: fix sget races" left out the goto from this error path so the static checkers complain that we're dereferencing "sb" when it's an ERR_PTR. Signed-off-by: Dan Carpenter Signed-off-by: Al Viro --- fs/ubifs/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 8c892c2d5300..529be0582029 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2146,6 +2146,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, if (IS_ERR(sb)) { err = PTR_ERR(sb); kfree(c); + goto out_close; } if (sb->s_root) { -- cgit v1.2.3 From 1712c20dae7b770b62b2e3272100b3b40af0157c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 19:59:04 -0400 Subject: bad_inode_permission() is safe from RCU mode return -EIO; is *not* a blocking operation, thank you very much. Nick, what the hell have you been smoking? 
Signed-off-by: Al Viro --- fs/bad_inode.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 9ad2369d9e35..bfcb18feb1df 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -231,9 +231,6 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer, static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags) { - if (flags & IPERM_FLAG_RCU) - return -ECHILD; - return -EIO; } -- cgit v1.2.3 From ec12781f192568f7ea860f440f890389ba393df7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 20:03:36 -0400 Subject: cifs_permission() doesn't need to bail out in RCU mode nothing potentially blocking except generic_permission(), which will DTRT Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index e9def996e383..2f0c58646c10 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -257,9 +257,6 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags) { struct cifs_sb_info *cifs_sb; - if (flags & IPERM_FLAG_RCU) - return -ECHILD; - cifs_sb = CIFS_SB(inode->i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { -- cgit v1.2.3 From 6b419951f1e44c8a46fccfc6551eca9a9980acd6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 20:11:43 -0400 Subject: coda_ioctl_permission() is safe in RCU mode return (mask & MAY_EXEC) ? -EACCES : 0; is non-blocking... Signed-off-by: Al Viro --- fs/coda/pioctl.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index 6cbb3afb36dc..cb140ef293e4 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c @@ -43,8 +43,6 @@ const struct file_operations coda_ioctl_operations = { /* the coda pioctl inode ops */ static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) { - if (flags & IPERM_FLAG_RCU) - return -ECHILD; return (mask & MAY_EXEC) ? -EACCES : 0; } -- cgit v1.2.3 From a63ab94d67879bc0630ea9821c582ddf58ba5527 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 20:17:22 -0400 Subject: logfs doesn't need ->permission() at all ... and never did, what with its ->permission() being what we do by default when ->permission is NULL... Signed-off-by: Al Viro --- fs/logfs/dir.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'fs') diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 9ed89d1663f8..1afae26cf236 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -555,13 +555,6 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry, return __logfs_create(dir, dentry, inode, target, destlen); } -static int logfs_permission(struct inode *inode, int mask, unsigned int flags) -{ - if (flags & IPERM_FLAG_RCU) - return -ECHILD; - return generic_permission(inode, mask, flags, NULL); -} - static int logfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { @@ -820,7 +813,6 @@ const struct inode_operations logfs_dir_iops = { .mknod = logfs_mknod, .rename = logfs_rename, .rmdir = logfs_rmdir, - .permission = logfs_permission, .symlink = logfs_symlink, .unlink = logfs_unlink, }; -- cgit v1.2.3 From 730e908f3539066d4aa01f4720ebfc750ce4d045 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 20:21:44 -0400 Subject: nilfs2_permission() doesn't need to bail out in RCU mode Nothing blocking except for generic_permission(). Which will DTRT. 
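A compressed model of why hooks like this are safe in rcu-walk: the filesystem's own checks are plain comparisons that never sleep, and anything that could sleep is delegated to generic_permission(), which bails out of rcu-walk on its own. The sketch below is standalone C with made-up flag values and a stub in place of the real VFS helper, not kernel code.

#include <errno.h>
#include <stdio.h>

#define IPERM_FLAG_RCU  0x0001      /* local value for the sketch only */
#define MAY_WRITE       0x0002

struct inode { int read_only; };

/*
 * Stub standing in for generic_permission(): the real one copes with
 * rcu-walk by itself (returning -ECHILD if it would have to block),
 * so this model simply grants access.
 */
static int generic_permission_stub(struct inode *inode, int mask, unsigned int flags)
{
    (void)inode; (void)mask; (void)flags;
    return 0;
}

/*
 * The per-filesystem hook after the cleanup: its own checks are plain
 * comparisons that never sleep, so no early -ECHILD bail-out is needed.
 */
static int fs_permission(struct inode *inode, int mask, unsigned int flags)
{
    if ((mask & MAY_WRITE) && inode->read_only)
        return -EROFS;              /* e.g. a snapshot is not writable */
    return generic_permission_stub(inode, mask, flags);
}

int main(void)
{
    struct inode snapshot = { .read_only = 1 };

    printf("write under rcu-walk: %d\n",
           fs_permission(&snapshot, MAY_WRITE, IPERM_FLAG_RCU));
    return 0;
}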
Signed-off-by: Al Viro --- fs/nilfs2/inode.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index b954878ad6ce..b9b45fc2903e 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -801,12 +801,7 @@ out_err: int nilfs_permission(struct inode *inode, int mask, unsigned int flags) { - struct nilfs_root *root; - - if (flags & IPERM_FLAG_RCU) - return -ECHILD; - - root = NILFS_I(inode)->i_root; + struct nilfs_root *root = NILFS_I(inode)->i_root; if ((mask & MAY_WRITE) && root && root->cno != NILFS_CPTREE_CURRENT_CNO) return -EROFS; /* snapshot is not writable */ -- cgit v1.2.3 From cf1279111686d9742cbc4145bc9d526c83f59fea Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 20:35:23 -0400 Subject: proc_fd_permission() is doesn't need to bail out in RCU mode nothing blocking except generic_permission() Signed-off-by: Al Viro --- fs/proc/base.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index 14def991d9dd..8a84210ca080 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2169,11 +2169,7 @@ static const struct file_operations proc_fd_operations = { */ static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) { - int rv; - - if (flags & IPERM_FLAG_RCU) - return -ECHILD; - rv = generic_permission(inode, mask, flags, NULL); + int rv = generic_permission(inode, mask, flags, NULL); if (rv == 0) return 0; if (task_pid(current) == proc_pid(inode)) -- cgit v1.2.3 From 1d29b5a2ed7eb8862c9b66daf475f3e4c1a40299 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 20:37:33 -0400 Subject: reiserfs_permission() doesn't need to bail out in RCU mode nothing blocking other than generic_permission() (and check_acl callback does bail out in RCU mode). Signed-off-by: Al Viro --- fs/reiserfs/xattr.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index e8a62f41b458..d78089690965 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -954,8 +954,6 @@ static int xattr_mount_check(struct super_block *s) int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) { - if (flags & IPERM_FLAG_RCU) - return -ECHILD; /* * We don't do permission checks on the internal objects. * Permissions are determined by the "owning" object. -- cgit v1.2.3 From 1aec7036d0c2996c86ce483ca0a28f3b20807b43 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Jun 2011 20:42:00 -0400 Subject: proc_sys_permission() is OK in RCU mode nothing blocking there, since all instances of sysctl ->permissions() method are non-blocking - both of them, that is. 
Signed-off-by: Al Viro --- fs/proc/proc_sysctl.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index f50133c11c24..d167de365a8d 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -304,9 +304,6 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags) struct ctl_table *table; int error; - if (flags & IPERM_FLAG_RCU) - return -ECHILD; - /* Executable files are not allowed under /proc/sys/ */ if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) return -EACCES; -- cgit v1.2.3 From 6291176bcd71a2766a19a10cbd9bab07d289e1d7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 19:20:48 -0400 Subject: kill obsolete comment for follow_down() Signed-off-by: Al Viro --- fs/namei.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 9e425e7e6c8f..975c40620fe9 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1011,9 +1011,6 @@ failed: * Follow down to the covering mount currently visible to userspace. At each * point, the filesystem owning that dentry may be queried as to whether the * caller is permitted to proceed or not. - * - * Care must be taken as namespace_sem may be held (indicated by mounting_here - * being true). */ int follow_down(struct path *path) { -- cgit v1.2.3 From 8e833fd2e1f0107ee7a4b6bc4de3c9f0e9b0ed41 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Jun 2011 01:56:53 -0400 Subject: fix comment in generic_permission() CAP_DAC_OVERRIDE is enough for MAY_EXEC on directory, even if no exec bits are set. Signed-off-by: Al Viro --- fs/namei.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 975c40620fe9..0223c41fb114 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -238,7 +238,8 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags, /* * Read/write DACs are always overridable. - * Executable DACs are overridable if at least one exec bit is set. + * Executable DACs are overridable for all directories and + * for non-directories that have least one exec bit set. */ if (!(mask & MAY_EXEC) || execute_ok(inode)) if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) -- cgit v1.2.3 From 206b6310fd0268a6ca50cf36f03b0f4eee5602ec Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Mon, 20 Jun 2011 10:30:04 -0500 Subject: jfs: old_agsize should be 64 bits in jfs_extendfs Signed-off-by: Dave Kleikamp --- fs/jfs/resize.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 8ea5efb5a34e..8d0c1c7c0820 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c @@ -80,7 +80,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) int log_formatted = 0; struct inode *iplist[1]; struct jfs_superblock *j_sb, *j_sb2; - uint old_agsize; + s64 old_agsize; int agsizechanged = 0; struct buffer_head *bh, *bh2; -- cgit v1.2.3 From 28e0fa894cd5996d3007ce82f07226f79beb7286 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Mon, 20 Jun 2011 10:32:46 -0500 Subject: jfs: Update agstart when resizing volume A comment indicates that the IAG's agstart does not need to be updated since it will always point to a block in the same aggregate group, but jfs_fsck isn't so forgiving and reports it as an error. I'm fixing this in jfsutils as well, so either a new kernel or new utilities will be sufficient to fix the problem. 
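The update applied in diExtendFS is plain arithmetic: shift the stored agstart right by the new log2 AG size to find the AG it now falls in, then shift left again to snap it onto that AG's starting block. A standalone sketch of that calculation follows; the field and helper names are simplified and are not the on-disk IAG layout.

#include <stdio.h>

/* db_agl2size is log2 of the aggregate group size in blocks (simplified). */
static long long ag_align(long long agstart, int db_agl2size)
{
    long long n = agstart >> db_agl2size;   /* AG number under the new geometry */

    return n << db_agl2size;                /* starting block of that AG */
}

int main(void)
{
    long long agstart = 3LL << 15;  /* started AG 3 when AGs were 2^15 blocks */
    int new_l2 = 16;                /* the resize doubled the AG size */

    printf("agstart %lld now falls in AG %lld, rewritten to %lld\n",
           agstart, agstart >> new_l2, ag_align(agstart, new_l2));
    return 0;
}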
Signed-off-by: Dave Kleikamp --- fs/jfs/jfs_imap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index ed53a4740168..0533e8f3d19e 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -2921,10 +2921,9 @@ int diExtendFS(struct inode *ipimap, struct inode *ipbmap) continue; } - /* agstart that computes to the same ag is treated as same; */ agstart = le64_to_cpu(iagp->agstart); - /* iagp->agstart = agstart & ~(mp->db_agsize - 1); */ n = agstart >> mp->db_agl2size; + iagp->agstart = cpu_to_le64((s64)n << mp->db_agl2size); /* compute backed inodes */ numinos = (EXTSPERIAG - le32_to_cpu(iagp->nfreeexts)) -- cgit v1.2.3 From d31b53e3cd069e02290ed8a648aa8c7618d6fe77 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Mon, 20 Jun 2011 10:53:46 -0500 Subject: JFS: Don't save agno in the inode Resizing the file system can result in an in-memory inode being remapped to a different aggregate group (AG). A cached AG number can cause problems when trying to free or allocate inodes. Instead, save the IAG's agstart address and calculate the agno when we need it. Signed-off-by: Dave Kleikamp --- fs/jfs/file.c | 6 +++--- fs/jfs/jfs_imap.c | 9 ++++----- fs/jfs/jfs_incore.h | 3 ++- 3 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/jfs/file.c b/fs/jfs/file.c index c5ce6c1d1ff4..2f3f531f3606 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -66,9 +66,9 @@ static int jfs_open(struct inode *inode, struct file *file) struct jfs_inode_info *ji = JFS_IP(inode); spin_lock_irq(&ji->ag_lock); if (ji->active_ag == -1) { - ji->active_ag = ji->agno; - atomic_inc( - &JFS_SBI(inode->i_sb)->bmap->db_active[ji->agno]); + struct jfs_sb_info *jfs_sb = JFS_SBI(inode->i_sb); + ji->active_ag = BLKTOAG(addressPXD(&ji->ixpxd), jfs_sb); + atomic_inc( &jfs_sb->bmap->db_active[ji->active_ag]); } spin_unlock_irq(&ji->ag_lock); } diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index 0533e8f3d19e..b78b2f978f04 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -397,7 +397,7 @@ int diRead(struct inode *ip) release_metapage(mp); /* set the ag for the inode */ - JFS_IP(ip)->agno = BLKTOAG(agstart, sbi); + JFS_IP(ip)->agstart = agstart; JFS_IP(ip)->active_ag = -1; return (rc); @@ -901,7 +901,7 @@ int diFree(struct inode *ip) /* get the allocation group for this ino. 
*/ - agno = JFS_IP(ip)->agno; + agno = BLKTOAG(JFS_IP(ip)->agstart, JFS_SBI(ip->i_sb)); /* Lock the AG specific inode map information */ @@ -1315,12 +1315,11 @@ int diFree(struct inode *ip) static inline void diInitInode(struct inode *ip, int iagno, int ino, int extno, struct iag * iagp) { - struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb); struct jfs_inode_info *jfs_ip = JFS_IP(ip); ip->i_ino = (iagno << L2INOSPERIAG) + ino; jfs_ip->ixpxd = iagp->inoext[extno]; - jfs_ip->agno = BLKTOAG(le64_to_cpu(iagp->agstart), sbi); + jfs_ip->agstart = le64_to_cpu(iagp->agstart); jfs_ip->active_ag = -1; } @@ -1379,7 +1378,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip) */ /* get the ag number of this iag */ - agno = JFS_IP(pip)->agno; + agno = BLKTOAG(JFS_IP(pip)->agstart, JFS_SBI(pip->i_sb)); if (atomic_read(&JFS_SBI(pip->i_sb)->bmap->db_active[agno])) { /* diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index 1439f119ec83..5097839b7709 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h @@ -50,8 +50,9 @@ struct jfs_inode_info { short btindex; /* btpage entry index*/ struct inode *ipimap; /* inode map */ unsigned long cflag; /* commit flags */ + long agstart; /* agstart of the containing IAG */ u16 bxflag; /* xflag of pseudo buffer? */ - unchar agno; /* ag number */ + unchar pad; signed char active_ag; /* ag currently allocating from */ lid_t blid; /* lid of pseudo buffer? */ lid_t atlhead; /* anonymous tlock list head */ -- cgit v1.2.3 From 19345cb299e8234006c5125151ab723e851a1d24 Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Sun, 19 Jun 2011 18:33:46 -0400 Subject: NFSv4.1: file layout must consider pg_bsize for coalescing Otherwise we end up overflowing the rpc buffer size on the receive end. Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 6 ++++-- fs/nfs/pagelist.c | 3 ++- include/linux/nfs_page.h | 3 +++ 3 files changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 5d6f369b15d0..0bafcc91c27f 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -30,6 +30,7 @@ */ #include +#include #include "internal.h" #include "nfs4filelayout.h" @@ -666,8 +667,9 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, u64 p_stripe, r_stripe; u32 stripe_unit; - if (!pnfs_generic_pg_test(pgio, prev, req)) - return 0; + if (!pnfs_generic_pg_test(pgio, prev, req) || + !nfs_generic_pg_test(pgio, prev, req)) + return false; if (!pgio->pg_lseg) return 1; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 7913961aff22..009855716286 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -204,7 +204,7 @@ nfs_wait_on_request(struct nfs_page *req) TASK_UNINTERRUPTIBLE); } -static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) +bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) { /* * FIXME: ideally we should be able to coalesce all requests @@ -218,6 +218,7 @@ static bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_p return desc->pg_count + req->wb_bytes <= desc->pg_bsize; } +EXPORT_SYMBOL_GPL(nfs_generic_pg_test); /** * nfs_pageio_init - initialise a page io descriptor diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 3a34e80ae92f..25311b3bedf8 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -92,6 +92,9 @@ extern int nfs_pageio_add_request(struct 
nfs_pageio_descriptor *, struct nfs_page *); extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); +extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, + struct nfs_page *prev, + struct nfs_page *req); extern int nfs_wait_on_request(struct nfs_page *); extern void nfs_unlock_request(struct nfs_page *req); extern int nfs_set_page_tag_locked(struct nfs_page *req); -- cgit v1.2.3 From ecc90462b428db2ad2ee5081c45496ed10f3a633 Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Mon, 20 Jun 2011 17:53:24 -0500 Subject: jfs: agstart field must be 64 bits The previous patch added the agstart field to jfs_ip, but declared it a long. We need to make sure its 64 bits on every platform. Signed-off-by: Dave Kleikamp --- fs/jfs/jfs_incore.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index 5097839b7709..584a4a1a6e81 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h @@ -50,7 +50,7 @@ struct jfs_inode_info { short btindex; /* btpage entry index*/ struct inode *ipimap; /* inode map */ unsigned long cflag; /* commit flags */ - long agstart; /* agstart of the containing IAG */ + u64 agstart; /* agstart of the containing IAG */ u16 bxflag; /* xflag of pseudo buffer? */ unchar pad; signed char active_ag; /* ag currently allocating from */ -- cgit v1.2.3 From 8f7d5efbef8718a774ac5e347b4ec069f17fd9b4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 10 Jun 2011 13:30:22 -0400 Subject: NFSv4.1: Fix some issues with pnfs_generic_pg_test 1. If the intention is to coalesce requests 'prev' and 'req' then we have to ensure at least that we have a layout starting at req_offset(prev). 2. If we're only requesting a minimal layout of length desc->pg_count, we need to test the length actually returned by the server before we allow the coalescing to occur. 3. We need to deal correctly with (pgio->lseg == NULL) 4. Fixup the test guarding the pnfs_update_layout. 
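A simplified user-space model of the flow items 1-3 describe (the names are stand-ins, not the kernel's, and the fake server below never grants a layout): the layout is requested only on the first coalesce call of a series, it is requested starting at the offset of the already-accepted request ('prev'), and a NULL layout is tolerated instead of being dereferenced.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct layout_model { uint64_t offset, length; };

/* pretend pnfs_update_layout(): here the server never hands out a layout */
static struct layout_model *update_layout_model(uint64_t offset, uint64_t count)
{
	(void)offset; (void)count;
	return NULL;
}

static bool pg_test_model(struct layout_model **lseg, uint64_t pg_count,
			  uint64_t prev_bytes, uint64_t prev_offset)
{
	if (*lseg == NULL) {
		if (pg_count != prev_bytes)
			return true;		/* not the first call for this series */
		*lseg = update_layout_model(prev_offset, pg_count);
		if (*lseg == NULL)
			return true;		/* no layout: let the generic test decide */
	}
	/* with a layout in hand, the real code now checks the request against it */
	return true;
}

int main(void)
{
	struct layout_model *lseg = NULL;

	printf("first call, no layout granted: %d\n",
	       pg_test_model(&lseg, 4096, 4096, 0));
	return 0;
}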
Signed-off-by: Trond Myklebust --- fs/nfs/objlayout/objio_osd.c | 3 +++ fs/nfs/pnfs.c | 12 +++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index eb4aafa9f521..8ff2ea3f10ef 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -1000,6 +1000,9 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio, if (!pnfs_generic_pg_test(pgio, prev, req)) return false; + if (pgio->pg_lseg == NULL) + return true; + return pgio->pg_count + req->wb_bytes <= OBJIO_LSEG(pgio->pg_lseg)->max_io_size; } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 539b94cb6c0f..0f42e02436ee 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1064,19 +1064,21 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, gfp_flags = GFP_NOFS; } - if (pgio->pg_count == prev->wb_bytes) { + if (pgio->pg_lseg == NULL) { + if (pgio->pg_count != prev->wb_bytes) + return true; /* This is first coelesce call for a series of nfs_pages */ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, prev->wb_context, - req_offset(req), + req_offset(prev), pgio->pg_count, access_type, gfp_flags); - return true; + if (pgio->pg_lseg == NULL) + return true; } - if (pgio->pg_lseg && - req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset, + if (req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset, pgio->pg_lseg->pls_range.length)) return false; -- cgit v1.2.3 From 19982ba8562e33083cb5bbb59a74855d8a9624ea Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 10 Jun 2011 13:30:23 -0400 Subject: NFSv4.1: Fix an off-by-one error in pnfs_generic_pg_test And document what is going on there... Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0f42e02436ee..29c0ca7fc347 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1078,11 +1078,22 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, return true; } - if (req_offset(req) > end_offset(pgio->pg_lseg->pls_range.offset, - pgio->pg_lseg->pls_range.length)) - return false; - - return true; + /* + * Test if a nfs_page is fully contained in the pnfs_layout_range. + * Note that this test makes several assumptions: + * - that the previous nfs_page in the struct nfs_pageio_descriptor + * is known to lie within the range. + * - that the nfs_page being tested is known to be contiguous with the + * previous nfs_page. + * - Layout ranges are page aligned, so we only have to test the + * start offset of the request. + * + * Please also note that 'end_offset' is actually the offset of the + * first byte that lies outside the pnfs_layout_range. FIXME? 
+ * + */ + return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset, + pgio->pg_lseg->pls_range.length); } EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); -- cgit v1.2.3 From 1650add23578b5ca35c1f1e863987180a8c03779 Mon Sep 17 00:00:00 2001 From: Bryan Schumaker Date: Thu, 2 Jun 2011 15:07:35 -0400 Subject: NFS: Fix decode_secinfo_maxsz I initially did the calculation in bytes, and not words Signed-off-by: Bryan Schumaker Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 5db44a8dddf9..6870bc61ceec 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -255,7 +255,7 @@ static int nfs4_stat_to_errno(int); #define decode_fs_locations_maxsz \ (0) #define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz) -#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 4 + (NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN))) +#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4)) #if defined(CONFIG_NFS_V4_1) #define NFS4_MAX_MACHINE_NAME_LEN (64) -- cgit v1.2.3 From a288eecce5253cc1565d400a52b9b476a157e040 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 17 Jun 2011 16:50:37 +0200 Subject: ptrace: kill trivial tracehooks At this point, tracehooks aren't useful to mainline kernel and mostly just add an extra layer of obfuscation. Although they have comments, without actual in-kernel users, it is difficult to tell what are their assumptions and they're actually trying to achieve. To mainline kernel, they just aren't worth keeping around. This patch kills the following trivial tracehooks. * Ones testing whether task is ptraced. Replace with ->ptrace test. tracehook_expect_breakpoints() tracehook_consider_ignored_signal() tracehook_consider_fatal_signal() * ptrace_event() wrappers. Call directly. tracehook_report_exec() tracehook_report_exit() tracehook_report_vfork_done() * ptrace_release_task() wrapper. Call directly. tracehook_finish_release_task() * noop tracehook_prepare_release_task() tracehook_report_death() This doesn't introduce any behavior change. 
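A toy model of the first conversion pattern (the struct below is a stand-in for task_struct): the removed predicate hooks were one-line wrappers around a ptrace check, so callers now simply test ->ptrace themselves.

#include <stdio.h>

struct task_model { unsigned int ptrace; };	/* stand-in for task_struct */

/* Old call sites went through a wrapper like this... */
static int fatal_signal_wrapper_model(const struct task_model *t)
{
	return t->ptrace != 0;
}

int main(void)
{
	struct task_model traced = { .ptrace = 1 }, plain = { .ptrace = 0 };

	/* ...and the open-coded replacement gives the same answers. */
	printf("traced: wrapper=%d direct=%d\n",
	       fatal_signal_wrapper_model(&traced), traced.ptrace != 0);
	printf("plain:  wrapper=%d direct=%d\n",
	       fatal_signal_wrapper_model(&plain), plain.ptrace != 0);
	return 0;
}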
Signed-off-by: Tejun Heo Cc: Christoph Hellwig Cc: Martin Schwidefsky Signed-off-by: Oleg Nesterov --- arch/s390/kernel/traps.c | 4 +- fs/exec.c | 2 +- include/linux/tracehook.h | 156 ---------------------------------------------- kernel/exit.c | 7 +-- kernel/fork.c | 2 +- kernel/signal.c | 8 +-- mm/nommu.c | 3 +- 7 files changed, 11 insertions(+), 171 deletions(-) (limited to 'fs') diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a65d2e82f61d..a63d34c3611e 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -331,7 +331,7 @@ void __kprobes do_per_trap(struct pt_regs *regs) { if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) return; - if (tracehook_consider_fatal_signal(current, SIGTRAP)) + if (current->ptrace) force_sig(SIGTRAP, current); } @@ -425,7 +425,7 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) return; if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) { - if (tracehook_consider_fatal_signal(current, SIGTRAP)) + if (current->ptrace) force_sig(SIGTRAP, current); else signal = SIGILL; diff --git a/fs/exec.c b/fs/exec.c index a9f2b3631bdb..b37030d0a50b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1384,7 +1384,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) bprm->recursion_depth = depth; if (retval >= 0) { if (depth == 0) - tracehook_report_exec(fmt, bprm, regs); + ptrace_event(PTRACE_EVENT_EXEC, 0); put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 3b68aa842a92..8b06d4f2b814 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -51,21 +51,6 @@ #include struct linux_binprm; -/** - * tracehook_expect_breakpoints - guess if task memory might be touched - * @task: current task, making a new mapping - * - * Return nonzero if @task is expected to want breakpoint insertion in - * its memory at some point. A zero return is no guarantee it won't - * be done, but this is a hint that it's known to be likely. - * - * May be called with @task->mm->mmap_sem held for writing. - */ -static inline int tracehook_expect_breakpoints(struct task_struct *task) -{ - return (task->ptrace & PT_PTRACED) != 0; -} - /* * ptrace report for syscall entry and exit looks identical. */ @@ -183,42 +168,6 @@ static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) return NULL; } -/** - * tracehook_report_exec - a successful exec was completed - * @fmt: &struct linux_binfmt that performed the exec - * @bprm: &struct linux_binprm containing exec details - * @regs: user-mode register state - * - * An exec just completed, we are shortly going to return to user mode. - * The freshly initialized register state can be seen and changed in @regs. - * The name, file and other pointers in @bprm are still on hand to be - * inspected, but will be freed as soon as this returns. - * - * Called with no locks, but with some kernel resources held live - * and a reference on @fmt->module. - */ -static inline void tracehook_report_exec(struct linux_binfmt *fmt, - struct linux_binprm *bprm, - struct pt_regs *regs) -{ - ptrace_event(PTRACE_EVENT_EXEC, 0); -} - -/** - * tracehook_report_exit - task has begun to exit - * @exit_code: pointer to value destined for @current->exit_code - * - * @exit_code points to the value passed to do_exit(), which tracing - * might change here. 
This is almost the first thing in do_exit(), - * before freeing any resources or setting the %PF_EXITING flag. - * - * Called with no locks held. - */ -static inline void tracehook_report_exit(long *exit_code) -{ - ptrace_event(PTRACE_EVENT_EXIT, *exit_code); -} - /** * tracehook_prepare_clone - prepare for new child to be cloned * @clone_flags: %CLONE_* flags from clone/fork/vfork system call @@ -319,52 +268,6 @@ static inline void tracehook_report_clone_complete(int trace, ptrace_event(trace, pid); } -/** - * tracehook_report_vfork_done - vfork parent's child has exited or exec'd - * @child: child task, already running - * @pid: new child's PID in the parent's namespace - * - * Called after a %CLONE_VFORK parent has waited for the child to complete. - * The clone/vfork system call will return immediately after this. - * The @child pointer may be invalid if a self-reaping child died and - * tracehook_report_clone() took no action to prevent it from self-reaping. - * - * Called with no locks held. - */ -static inline void tracehook_report_vfork_done(struct task_struct *child, - pid_t pid) -{ - ptrace_event(PTRACE_EVENT_VFORK_DONE, pid); -} - -/** - * tracehook_prepare_release_task - task is being reaped, clean up tracing - * @task: task in %EXIT_DEAD state - * - * This is called in release_task() just before @task gets finally reaped - * and freed. This would be the ideal place to remove and clean up any - * tracing-related state for @task. - * - * Called with no locks held. - */ -static inline void tracehook_prepare_release_task(struct task_struct *task) -{ -} - -/** - * tracehook_finish_release_task - final tracing clean-up - * @task: task in %EXIT_DEAD state - * - * This is called in release_task() when @task is being in the middle of - * being reaped. After this, there must be no tracing entanglements. - * - * Called with write_lock_irq(&tasklist_lock) held. - */ -static inline void tracehook_finish_release_task(struct task_struct *task) -{ - ptrace_release_task(task); -} - /** * tracehook_signal_handler - signal handler setup is complete * @sig: number of signal being delivered @@ -388,41 +291,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info, ptrace_notify(SIGTRAP); } -/** - * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal - * @task: task receiving the signal - * @sig: signal number being sent - * - * Return zero iff tracing doesn't care to examine this ignored signal, - * so it can short-circuit normal delivery and never even get queued. - * - * Called with @task->sighand->siglock held. - */ -static inline int tracehook_consider_ignored_signal(struct task_struct *task, - int sig) -{ - return (task->ptrace & PT_PTRACED) != 0; -} - -/** - * tracehook_consider_fatal_signal - suppress special handling of fatal signal - * @task: task receiving the signal - * @sig: signal number being sent - * - * Return nonzero to prevent special handling of this termination signal. - * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is - * ignored, in which case force_sig() is about to reset it to %SIG_DFL. - * When this returns zero, this signal might cause a quick termination - * that does not give the debugger a chance to intercept the signal. - * - * Called with or without @task->sighand->siglock held. 
- */ -static inline int tracehook_consider_fatal_signal(struct task_struct *task, - int sig) -{ - return (task->ptrace & PT_PTRACED) != 0; -} - #define DEATH_REAP -1 #define DEATH_DELAYED_GROUP_LEADER -2 @@ -457,30 +325,6 @@ static inline int tracehook_notify_death(struct task_struct *task, return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER; } -/** - * tracehook_report_death - task is dead and ready to be reaped - * @task: @current task now exiting - * @signal: return value from tracheook_notify_death() - * @death_cookie: value passed back from tracehook_notify_death() - * @group_dead: nonzero if this was the last thread in the group to die - * - * Thread has just become a zombie or is about to self-reap. If positive, - * @signal is the signal number just sent to the parent (usually %SIGCHLD). - * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is - * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie. - * The @death_cookie was passed back by tracehook_notify_death(). - * - * If normal reaping is not inhibited, @task->exit_state might be changing - * in parallel. - * - * Called without locks. - */ -static inline void tracehook_report_death(struct task_struct *task, - int signal, void *death_cookie, - int group_dead) -{ -} - #ifdef TIF_NOTIFY_RESUME /** * set_notify_resume - cause tracehook_notify_resume() to be called diff --git a/kernel/exit.c b/kernel/exit.c index e5cc05644609..d49134a7f250 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -169,7 +169,6 @@ void release_task(struct task_struct * p) struct task_struct *leader; int zap_leader; repeat: - tracehook_prepare_release_task(p); /* don't need to get the RCU readlock here - the process is dead and * can't be modifying its own credentials. But shut RCU-lockdep up */ rcu_read_lock(); @@ -179,7 +178,7 @@ repeat: proc_flush_task(p); write_lock_irq(&tasklist_lock); - tracehook_finish_release_task(p); + ptrace_release_task(p); __exit_signal(p); /* @@ -868,8 +867,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead) wake_up_process(tsk->signal->group_exit_task); write_unlock_irq(&tasklist_lock); - tracehook_report_death(tsk, signal, cookie, group_dead); - /* If the process is dead, release it - nobody will wait for it */ if (signal == DEATH_REAP) release_task(tsk); @@ -924,7 +921,7 @@ NORET_TYPE void do_exit(long code) */ set_fs(USER_DS); - tracehook_report_exit(&code); + ptrace_event(PTRACE_EVENT_EXIT, code); validate_creds_for_do_exit(tsk); diff --git a/kernel/fork.c b/kernel/fork.c index 0276c30401a0..d4f0dff9d617 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1527,7 +1527,7 @@ long do_fork(unsigned long clone_flags, freezer_do_not_count(); wait_for_completion(&vfork); freezer_count(); - tracehook_report_vfork_done(p, nr); + ptrace_event(PTRACE_EVENT_VFORK_DONE, nr); } } else { nr = PTR_ERR(p); diff --git a/kernel/signal.c b/kernel/signal.c index 0f3370872506..1550aee34f42 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -87,7 +87,7 @@ static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) /* * Tracers may want to know about even ignored signals. 
*/ - return !tracehook_consider_ignored_signal(t, sig); + return !t->ptrace; } /* @@ -493,7 +493,8 @@ int unhandled_signal(struct task_struct *tsk, int sig) return 1; if (handler != SIG_IGN && handler != SIG_DFL) return 0; - return !tracehook_consider_fatal_signal(tsk, sig); + /* if ptraced, let the tracer determine */ + return !tsk->ptrace; } /* @@ -981,8 +982,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) if (sig_fatal(p, sig) && !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && !sigismember(&t->real_blocked, sig) && - (sig == SIGKILL || - !tracehook_consider_fatal_signal(t, sig))) { + (sig == SIGKILL || !t->ptrace)) { /* * This signal will be fatal to the whole group. */ diff --git a/mm/nommu.c b/mm/nommu.c index 1fd0c51b10a6..54ae707bdae8 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -1087,7 +1086,7 @@ static unsigned long determine_vm_flags(struct file *file, * it's being traced - otherwise breakpoints set in it may interfere * with another untraced process */ - if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current)) + if ((flags & MAP_PRIVATE) && current->ptrace) vm_flags &= ~VM_MAYSHARE; return vm_flags; -- cgit v1.2.3 From 4b9d33e6d83cc05a8005a8f9a8b9677fa0f53626 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 17 Jun 2011 16:50:38 +0200 Subject: ptrace: kill clone/exec tracehooks At this point, tracehooks aren't useful to mainline kernel and mostly just add an extra layer of obfuscation. Although they have comments, without actual in-kernel users, it is difficult to tell what are their assumptions and they're actually trying to achieve. To mainline kernel, they just aren't worth keeping around. This patch kills the following clone and exec related tracehooks. tracehook_prepare_clone() tracehook_finish_clone() tracehook_report_clone() tracehook_report_clone_complete() tracehook_unsafe_exec() The changes are mostly trivial - logic is moved to the caller and comments are merged and adjusted appropriately. The only exception is in check_unsafe_exec() where LSM_UNSAFE_PTRACE* are OR'd to bprm->unsafe instead of setting it, which produces the same result as the field is always zero on entry. It also tests p->ptrace instead of (p->ptrace & PT_PTRACED) for consistency, which also gives the same result. This doesn't introduce any behavior change. 
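A user-space model of the event selection that do_fork() now open-codes in the hunk further below (constant values copied from the usual Linux definitions purely for illustration): CLONE_UNTRACED suppresses reporting, otherwise the clone flags pick a vfork/clone/fork event, and that event is reported only if the tracer actually enabled it.

#include <stdio.h>

#define CLONE_UNTRACED	0x00800000
#define CLONE_VFORK	0x00004000
#define CSIGNAL		0x000000ff
#define SIGCHLD		17

enum { EV_NONE, EV_FORK, EV_VFORK, EV_CLONE };

static int clone_trace_event(unsigned long clone_flags, int tracer_enabled)
{
	int trace;

	if (clone_flags & CLONE_UNTRACED)
		return EV_NONE;

	if (clone_flags & CLONE_VFORK)
		trace = EV_VFORK;
	else if ((clone_flags & CSIGNAL) != SIGCHLD)
		trace = EV_CLONE;
	else
		trace = EV_FORK;

	return tracer_enabled ? trace : EV_NONE;	/* skip if the tracer didn't ask */
}

int main(void)
{
	printf("fork():  %d\n", clone_trace_event(SIGCHLD, 1));			/* EV_FORK */
	printf("vfork(): %d\n", clone_trace_event(CLONE_VFORK | SIGCHLD, 1));	/* EV_VFORK */
	printf("thread:  %d\n", clone_trace_event(0, 1));			/* EV_CLONE */
	return 0;
}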
Signed-off-by: Tejun Heo Cc: Christoph Hellwig Signed-off-by: Oleg Nesterov --- fs/exec.c | 7 ++- include/linux/tracehook.h | 121 ---------------------------------------------- kernel/fork.c | 41 ++++++++++++---- 3 files changed, 38 insertions(+), 131 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index b37030d0a50b..8dca45b0dae8 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1224,7 +1224,12 @@ int check_unsafe_exec(struct linux_binprm *bprm) unsigned n_fs; int res = 0; - bprm->unsafe = tracehook_unsafe_exec(p); + if (p->ptrace) { + if (p->ptrace & PT_PTRACE_CAP) + bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP; + else + bprm->unsafe |= LSM_UNSAFE_PTRACE; + } n_fs = 1; spin_lock(&p->fs->lock); diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 8b06d4f2b814..bcc4ca762aee 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -129,27 +129,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) ptrace_report_syscall(regs); } -/** - * tracehook_unsafe_exec - check for exec declared unsafe due to tracing - * @task: current task doing exec - * - * Return %LSM_UNSAFE_* bits applied to an exec because of tracing. - * - * @task->signal->cred_guard_mutex is held by the caller through the do_execve(). - */ -static inline int tracehook_unsafe_exec(struct task_struct *task) -{ - int unsafe = 0; - int ptrace = task->ptrace; - if (ptrace & PT_PTRACED) { - if (ptrace & PT_PTRACE_CAP) - unsafe |= LSM_UNSAFE_PTRACE_CAP; - else - unsafe |= LSM_UNSAFE_PTRACE; - } - return unsafe; -} - /** * tracehook_tracer_task - return the task that is tracing the given task * @tsk: task to consider @@ -168,106 +147,6 @@ static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) return NULL; } -/** - * tracehook_prepare_clone - prepare for new child to be cloned - * @clone_flags: %CLONE_* flags from clone/fork/vfork system call - * - * This is called before a new user task is to be cloned. - * Its return value will be passed to tracehook_finish_clone(). - * - * Called with no locks held. - */ -static inline int tracehook_prepare_clone(unsigned clone_flags) -{ - int event = 0; - - if (clone_flags & CLONE_UNTRACED) - return 0; - - if (clone_flags & CLONE_VFORK) - event = PTRACE_EVENT_VFORK; - else if ((clone_flags & CSIGNAL) != SIGCHLD) - event = PTRACE_EVENT_CLONE; - else - event = PTRACE_EVENT_FORK; - - return ptrace_event_enabled(current, event) ? event : 0; -} - -/** - * tracehook_finish_clone - new child created and being attached - * @child: new child task - * @clone_flags: %CLONE_* flags from clone/fork/vfork system call - * @trace: return value from tracehook_prepare_clone() - * - * This is called immediately after adding @child to its parent's children list. - * The @trace value is that returned by tracehook_prepare_clone(). - * - * Called with current's siglock and write_lock_irq(&tasklist_lock) held. - */ -static inline void tracehook_finish_clone(struct task_struct *child, - unsigned long clone_flags, int trace) -{ - ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace); -} - -/** - * tracehook_report_clone - in parent, new child is about to start running - * @regs: parent's user register state - * @clone_flags: flags from parent's system call - * @pid: new child's PID in the parent's namespace - * @child: new child task - * - * Called after a child is set up, but before it has been started running. - * This is not a good place to block, because the child has not started - * yet. 
Suspend the child here if desired, and then block in - * tracehook_report_clone_complete(). This must prevent the child from - * self-reaping if tracehook_report_clone_complete() uses the @child - * pointer; otherwise it might have died and been released by the time - * tracehook_report_clone_complete() is called. - * - * Called with no locks held, but the child cannot run until this returns. - */ -static inline void tracehook_report_clone(struct pt_regs *regs, - unsigned long clone_flags, - pid_t pid, struct task_struct *child) -{ - if (unlikely(child->ptrace)) { - /* - * It doesn't matter who attached/attaching to this - * task, the pending SIGSTOP is right in any case. - */ - sigaddset(&child->pending.signal, SIGSTOP); - set_tsk_thread_flag(child, TIF_SIGPENDING); - } -} - -/** - * tracehook_report_clone_complete - new child is running - * @trace: return value from tracehook_prepare_clone() - * @regs: parent's user register state - * @clone_flags: flags from parent's system call - * @pid: new child's PID in the parent's namespace - * @child: child task, already running - * - * This is called just after the child has started running. This is - * just before the clone/fork syscall returns, or blocks for vfork - * child completion if @clone_flags has the %CLONE_VFORK bit set. - * The @child pointer may be invalid if a self-reaping child died and - * tracehook_report_clone() took no action to prevent it from self-reaping. - * - * Called with no locks held. - */ -static inline void tracehook_report_clone_complete(int trace, - struct pt_regs *regs, - unsigned long clone_flags, - pid_t pid, - struct task_struct *child) -{ - if (unlikely(trace)) - ptrace_event(trace, pid); -} - /** * tracehook_signal_handler - signal handler setup is complete * @sig: number of signal being delivered diff --git a/kernel/fork.c b/kernel/fork.c index d4f0dff9d617..3c72a5b321a7 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1340,7 +1340,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, } if (likely(p->pid)) { - tracehook_finish_clone(p, clone_flags, trace); + ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); if (thread_group_leader(p)) { if (is_child_reaper(pid)) @@ -1481,10 +1481,22 @@ long do_fork(unsigned long clone_flags, } /* - * When called from kernel_thread, don't do user tracing stuff. + * Determine whether and which event to report to ptracer. When + * called from kernel_thread or CLONE_UNTRACED is explicitly + * requested, no event is reported; otherwise, report if the event + * for the type of forking is enabled. */ - if (likely(user_mode(regs))) - trace = tracehook_prepare_clone(clone_flags); + if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) { + if (clone_flags & CLONE_VFORK) + trace = PTRACE_EVENT_VFORK; + else if ((clone_flags & CSIGNAL) != SIGCHLD) + trace = PTRACE_EVENT_CLONE; + else + trace = PTRACE_EVENT_FORK; + + if (likely(!ptrace_event_enabled(current, trace))) + trace = 0; + } p = copy_process(clone_flags, stack_start, regs, stack_size, child_tidptr, NULL, trace); @@ -1508,20 +1520,31 @@ long do_fork(unsigned long clone_flags, } audit_finish_fork(p); - tracehook_report_clone(regs, clone_flags, nr, p); + + /* + * Child is ready but hasn't started running yet. Queue + * SIGSTOP if it's gonna be ptraced - it doesn't matter who + * attached/attaching to this task, the pending SIGSTOP is + * right in any case. 
+ */ + if (unlikely(p->ptrace)) { + sigaddset(&p->pending.signal, SIGSTOP); + set_tsk_thread_flag(p, TIF_SIGPENDING); + } /* * We set PF_STARTING at creation in case tracing wants to * use this to distinguish a fully live task from one that - * hasn't gotten to tracehook_report_clone() yet. Now we - * clear it and set the child going. + * hasn't finished SIGSTOP raising yet. Now we clear it + * and set the child going. */ p->flags &= ~PF_STARTING; wake_up_new_task(p); - tracehook_report_clone_complete(trace, regs, - clone_flags, nr, p); + /* forking complete and child started to run, tell ptracer */ + if (unlikely(trace)) + ptrace_event(trace, nr); if (clone_flags & CLONE_VFORK) { freezer_do_not_count(); -- cgit v1.2.3 From 06d984737bac0545fe20bb5447ee488b95adb531 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 17 Jun 2011 16:50:40 +0200 Subject: ptrace: s/tracehook_tracer_task()/ptrace_parent()/ tracehook.h is on the way out. Rename tracehook_tracer_task() to ptrace_parent() and move it from tracehook.h to ptrace.h. Signed-off-by: Tejun Heo Cc: Christoph Hellwig Cc: John Johansen Cc: Stephen Smalley Signed-off-by: Oleg Nesterov --- fs/proc/array.c | 2 +- fs/proc/base.c | 2 +- include/linux/ptrace.h | 18 ++++++++++++++++++ include/linux/tracehook.h | 18 ------------------ security/apparmor/domain.c | 2 +- security/selinux/hooks.c | 4 ++-- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/proc/array.c b/fs/proc/array.c index 9b45ee84fbcc..3a1dafd228d1 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -172,7 +172,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0; tpid = 0; if (pid_alive(p)) { - struct task_struct *tracer = tracehook_tracer_task(p); + struct task_struct *tracer = ptrace_parent(p); if (tracer) tpid = task_pid_nr_ns(tracer, ns); } diff --git a/fs/proc/base.c b/fs/proc/base.c index 14def991d9dd..c883dad74b9a 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -216,7 +216,7 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task) if (task_is_stopped_or_traced(task)) { int match; rcu_read_lock(); - match = (tracehook_tracer_task(task) == current); + match = (ptrace_parent(task) == current); rcu_read_unlock(); if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH)) return mm; diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index b546fd6c3506..bb157bdd0c55 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -150,6 +150,24 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, unsigned long data); +/** + * ptrace_parent - return the task that is tracing the given task + * @task: task to consider + * + * Returns %NULL if no one is tracing @task, or the &struct task_struct + * pointer to its tracer. + * + * Must called under rcu_read_lock(). The pointer returned might be kept + * live only by RCU. During exec, this may be called with task_lock() held + * on @task, still held from when check_unsafe_exec() was called. 
+ */ +static inline struct task_struct *ptrace_parent(struct task_struct *task) +{ + if (unlikely(task->ptrace)) + return rcu_dereference(task->parent); + return NULL; +} + /** * ptrace_event_enabled - test whether a ptrace event is enabled * @task: ptracee of interest diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index bcc4ca762aee..7a1bd12aeffa 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -129,24 +129,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) ptrace_report_syscall(regs); } -/** - * tracehook_tracer_task - return the task that is tracing the given task - * @tsk: task to consider - * - * Returns NULL if no one is tracing @task, or the &struct task_struct - * pointer to its tracer. - * - * Must called under rcu_read_lock(). The pointer returned might be kept - * live only by RCU. During exec, this may be called with task_lock() - * held on @task, still held from when tracehook_unsafe_exec() was called. - */ -static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk) -{ - if (tsk->ptrace & PT_PTRACED) - return rcu_dereference(tsk->parent); - return NULL; -} - /** * tracehook_signal_handler - signal handler setup is complete * @sig: number of signal being delivered diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index c825c6e0b636..7312bf9f7afc 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -67,7 +67,7 @@ static int may_change_ptraced_domain(struct task_struct *task, int error = 0; rcu_read_lock(); - tracer = tracehook_tracer_task(task); + tracer = ptrace_parent(task); if (tracer) { /* released below */ cred = get_task_cred(tracer); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index a0d38459d650..fc07d18ed6fc 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2048,7 +2048,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm) u32 ptsid = 0; rcu_read_lock(); - tracer = tracehook_tracer_task(current); + tracer = ptrace_parent(current); if (likely(tracer != NULL)) { sec = __task_cred(tracer)->security; ptsid = sec->sid; @@ -5314,7 +5314,7 @@ static int selinux_setprocattr(struct task_struct *p, Otherwise, leave SID unchanged and fail. */ ptsid = 0; task_lock(p); - tracer = tracehook_tracer_task(p); + tracer = ptrace_parent(p); if (tracer) ptsid = task_sid(tracer); task_unlock(p); -- cgit v1.2.3 From 446b23a75804d7ffa4cca2d4d8f0afb822108c7e Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 20 Jun 2011 12:33:16 +0400 Subject: CIFS: Fix problem with 3.0-rc1 null user mount failure Figured it out: it was broken by b946845a9dc523c759cae2b6a0f6827486c3221a commit - "cifs: cifs_parse_mount_options: do not tokenize mount options in-place". So, as a quick fix I suggest to apply this patch. 
[PATCH] CIFS: Fix kfree() with constant string in a null user case Signed-off-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 12cf72dd0c42..19fdbda7aa95 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2937,7 +2937,11 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, if (volume_info->nullauth) { cFYI(1, "null user"); - volume_info->username = ""; + volume_info->username = kzalloc(1, GFP_KERNEL); + if (volume_info->username == NULL) { + rc = -ENOMEM; + goto out; + } } else if (volume_info->username) { /* BB fixme parse for domain name here */ cFYI(1, "Username: %s", volume_info->username); -- cgit v1.2.3 From 1190f6a067bf27b2ee7e06ec0776a17fe0f6c4d8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 22 Jun 2011 17:33:57 -0400 Subject: cifs: fix wsize negotiation to respect max buffer size and active signing (try #4) Hopefully last version. Base signing check on CAP_UNIX instead of tcon->unix_ext, also clean up the comments a bit more. According to Hongwei Sun's blog posting here: http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx CAP_LARGE_WRITEX is ignored when signing is active. Also, the maximum size for a write without CAP_LARGE_WRITEX should be the maxBuf that the server sent in the NEGOTIATE request. Fix the wsize negotiation to take this into account. While we're at it, alter the other wsize definitions to use sizeof(WRITE_REQ) to allow for slightly larger amounts of data to potentially be written per request. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 19fdbda7aa95..c761935eab8c 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2750,21 +2750,21 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, /* * When the server supports very large writes via POSIX extensions, we can - * allow up to 2^24 - PAGE_CACHE_SIZE. + * allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including + * the RFC1001 length. * * Note that this might make for "interesting" allocation problems during - * writeback however (as we have to allocate an array of pointers for the - * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. + * writeback however as we have to allocate an array of pointers for the + * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. */ -#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE) +#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4) /* - * When the server doesn't allow large posix writes, default to a wsize of - * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size - * described in RFC1001. This allows space for the header without going over - * that by default. + * When the server doesn't allow large posix writes, only allow a wsize of + * 128k minus the size of the WRITE_AND_X header. That allows for a write up + * to the maximum size described by RFC1002. */ -#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE) +#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4) /* * The default wsize is 1M. 
find_get_pages seems to return a maximum of 256 @@ -2783,11 +2783,18 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) /* can server support 24-bit write sizes? (via UNIX extensions) */ if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) - wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE); + wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE); - /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */ - if (!(server->capabilities & CAP_LARGE_WRITE_X)) - wsize = min_t(unsigned int, wsize, USHRT_MAX); + /* + * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set? + * Limit it to max buffer offered by the server, minus the size of the + * WRITEX header, not including the 4 byte RFC1001 length. + */ + if (!(server->capabilities & CAP_LARGE_WRITE_X) || + (!(server->capabilities & CAP_UNIX) && + (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)))) + wsize = min_t(unsigned int, wsize, + server->maxBuf - sizeof(WRITE_REQ) + 4); /* hard limit of CIFS_MAX_WSIZE */ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); -- cgit v1.2.3 From 778e24bb6dd8682318bb496d4bfdc32b501a6420 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 23 Jun 2011 01:34:59 +0000 Subject: xfs: reset inode per-lifetime state when recycling it XFS inodes has several per-lifetime state fields that determine the behaviour of the inode. These state fields are not all reset when an inode is reused from the reclaimable state. This can lead to unexpected behaviour of the new inode such as speculative preallocation not being truncated away in the expected manner for local files until the inode is subsequently truncated, freed or cycles out of the cache. It can also lead to an inode being considered to be a filestream inode or having been truncated when that is not the case. Rework the reinitialisation of the inode when it is recycled to ensure that it is pristine before it is reused. While there, also fix the resetting of state flags in the recycling error paths so the inode does not become unreclaimable. Signed-off-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/xfs_iget.c | 13 +++++++++---- fs/xfs/xfs_inode.h | 10 ++++++++++ 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index cb9b6d1469f7..3631783b2b53 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -253,16 +253,21 @@ xfs_iget_cache_hit( rcu_read_lock(); spin_lock(&ip->i_flags_lock); - ip->i_flags &= ~XFS_INEW; - ip->i_flags |= XFS_IRECLAIMABLE; - __xfs_inode_set_reclaim_tag(pag, ip); + ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); + ASSERT(ip->i_flags & XFS_IRECLAIMABLE); trace_xfs_iget_reclaim_fail(ip); goto out_error; } spin_lock(&pag->pag_ici_lock); spin_lock(&ip->i_flags_lock); - ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); + + /* + * Clear the per-lifetime state in the inode as we are now + * effectively a new inode and need to return to the initial + * state before reuse occurs. 
+ */ + ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; ip->i_flags |= XFS_INEW; __xfs_inode_clear_reclaim_tag(mp, pag, ip); inode->i_state = I_NEW; diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 3ae6d58e5473..964cfea77686 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -383,6 +383,16 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) #define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */ #define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */ +/* + * Per-lifetime flags need to be reset when re-using a reclaimable inode during + * inode lookup. Thi prevents unintended behaviour on the new inode from + * ocurring. + */ +#define XFS_IRECLAIM_RESET_FLAGS \ + (XFS_IRECLAIMABLE | XFS_IRECLAIM | \ + XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \ + XFS_IFILESTREAM); + /* * Flags for inode locking. * Bit ranges: 1<<1 - 1<<16-1 -- iolock/ilock modes (bitfield) -- cgit v1.2.3 From df4368a146d2b350b8398babfe11e2088f741d67 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 23 Jun 2011 01:35:00 +0000 Subject: xfs: clear XFS_IDIRTY_RELEASE on truncate down When an inode is truncated down, speculative preallocation is removed from the inode. This should also reset the state bits for controlling whether preallocation is subsequently removed when the file is next closed. The flag is not being cleared, so repeated operations on a file that first involve a truncate (e.g. multiple repeated dd invocations on a file) give different file layouts for the second and subsequent invocations. Fix this by clearing the XFS_IDIRTY_RELEASE state bit when the XFS_ITRUNCATED bit is detected in xfs_release() and hence ensure that speculative delalloc is removed on files that have been truncated down. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_vnodeops.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index b7a5fe7c52c8..619720705bc6 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -960,8 +960,11 @@ xfs_release( * be exposed to that problem. */ truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); - if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) - xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); + if (truncated) { + xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); + if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) + xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); + } } if (ip->i_d.di_nlink == 0) -- cgit v1.2.3 From 4a33821236f2ef3af0081e8a5eec1301cbed3125 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 23 Jun 2011 01:35:01 +0000 Subject: xfs: prevent bogus assert when trying to remove non-existent attribute If the attribute fork on an inode is in btree format and has multiple levels (i.e node format rather than leaf format), then a lookup failure will trigger an assert failure in xfs_da_path_shift if the flag XFS_DA_OP_OKNOENT is not set. This flag is used to indicate to the directory btree code that not finding an entry is not a fatal error. In the case of doing a lookup for a directory name removal, this is valid as a user cannot insert an arbitrary name to remove from the directory btree. However, in the case of the attribute tree, a user has direct control over the attribute name and can ask for any random name to be removed without any validation. 
In this case, fsstress is asking for a non-existent user.selinux attribute to be removed, and that is causing xfs_da_path_shift() to fall off the bottom of the tree where it asserts that a lookup failure is allowed. Because the flag is not set, we die a horrible death on a debug enable kernel. Prevent this assert from firing on attribute removes by adding the op_flag XFS_DA_OP_OKNOENT to atribute removal operations. Discovered when testing on a SELinux enabled system by fsstress in test 070 by trying to remove a non-existent user.selinux attribute. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_attr.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index c86375378810..01d2072fb6d4 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -489,6 +489,13 @@ xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) args.total = 0; args.whichfork = XFS_ATTR_FORK; + /* + * we have no control over the attribute names that userspace passes us + * to remove, so we have to allow the name lookup prior to attribute + * removal to fail. + */ + args.op_flags = XFS_DA_OP_OKNOENT; + /* * Attach the dquots to the inode. */ -- cgit v1.2.3 From 46e4edbf7ea9cf26665eb9f90c0fc7688d1a51ed Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 23 Jun 2011 23:59:32 +0200 Subject: Remove unneeded version.h includes from fs/ It was pointed out by 'make versioncheck' that some includes of linux/version.h were not needed in fs/ (fs/btrfs/ctree.h and fs/omfs/file.c). This patch removes them. Signed-off-by: Jesper Juhl Acked-by: Bob Copeland Signed-off-by: Linus Torvalds --- fs/btrfs/ctree.h | 1 - fs/omfs/file.c | 1 - 2 files changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 300628795fdb..f30ac05dbda7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -19,7 +19,6 @@ #ifndef __BTRFS_CTREE__ #define __BTRFS_CTREE__ -#include #include #include #include diff --git a/fs/omfs/file.c b/fs/omfs/file.c index d738a7e493dd..2c6d95257a4d 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -4,7 +4,6 @@ * Released under GPL v2. */ -#include #include #include #include -- cgit v1.2.3 From e4fb0edb7c03e5ec19b6f732f1dfbe911212dbde Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 20 Jun 2011 14:33:16 -0400 Subject: cifs: free blkcipher in smbhash This is currently leaked in the rc == 0 case. Reported-by: J. Bruce Fields Signed-off-by: Jeff Layton Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/smbencrypt.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 1525d5e662b6..1c5b770c3141 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -90,12 +90,10 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) sg_init_one(&sgout, out, 8); rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8); - if (rc) { + if (rc) cERROR(1, "could not encrypt crypt key rc: %d\n", rc); - crypto_free_blkcipher(tfm_des); - goto smbhash_err; - } + crypto_free_blkcipher(tfm_des); smbhash_err: return rc; } -- cgit v1.2.3 From 1973f0faeb4a5f35597793c65d3c94d8fd386e10 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 24 Jun 2011 13:13:29 -0400 Subject: Btrfs: make sure to record the transid in new inodes When we create a new inode, we aren't filling in the field that records the transaction that last changed this inode. 
If we then go to fsync that inode, it will be skipped because the field isn't filled in. Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5813dec5101c..87f1e0cf26f8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4520,6 +4520,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode_tree_add(inode); trace_btrfs_inode_new(inode); + btrfs_set_inode_last_trans(trans, inode); return inode; fail: -- cgit v1.2.3 From 9b8e072a31180eb5cd6991d08524d9c4fa235ade Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 21 Jun 2011 07:18:26 -0400 Subject: cifs: mark CONFIG_CIFS_NFSD_EXPORT as BROKEN This does not work properly with CIFS as current servers do not enable support for the FILE_OPEN_BY_FILE_ID on SMB NTCreateX and not all NFS clients handle ESTALE. For now, it just plain doesn't work. Mark it BROKEN to discourage distros from enabling it. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 53ed1ad2c112..f66cc1625150 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -156,6 +156,6 @@ config CIFS_ACL config CIFS_NFSD_EXPORT bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" - depends on CIFS && EXPERIMENTAL + depends on CIFS && EXPERIMENTAL && BROKEN help Allows NFS server to export a CIFS mounted share (nfsd over cifs) -- cgit v1.2.3 From dd8544661947ad6d8d87b3c9d4333bfa1583d1bc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 08:24:42 -0400 Subject: take bdi setup/destruction into cifs_mount/cifs_umount Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 12 +----------- fs/cifs/connect.c | 10 ++++++++++ 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 2f0c58646c10..5d3c4fa4b546 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -116,18 +116,12 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, spin_lock_init(&cifs_sb->tlink_tree_lock); cifs_sb->tlink_tree = RB_ROOT; - rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); - if (rc) - return rc; - - cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; - rc = cifs_mount(sb, cifs_sb, volume_info, devname); if (rc) { if (!silent) cERROR(1, "cifs_mount failed w/return code = %d", rc); - goto out_mount_failed; + return rc; } sb->s_magic = CIFS_MAGIC_NUMBER; @@ -171,9 +165,6 @@ out_no_root: iput(inode); cifs_umount(sb, cifs_sb); - -out_mount_failed: - bdi_destroy(&cifs_sb->bdi); return rc; } @@ -199,7 +190,6 @@ cifs_put_super(struct super_block *sb) } unload_nls(cifs_sb->local_nls); - bdi_destroy(&cifs_sb->bdi); kfree(cifs_sb); } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 12cf72dd0c42..78fd7557e35d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2983,6 +2983,13 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, struct tcon_link *tlink; #ifdef CONFIG_CIFS_DFS_UPCALL int referral_walks_count = 0; + + rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); + if (rc) + return rc; + + cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; + try_mount_again: /* cleanup activities if we're chasing a referral */ if (referral_walks_count) { @@ -3007,6 +3014,7 @@ try_mount_again: srvTcp = cifs_get_tcp_session(volume_info); if 
(IS_ERR(srvTcp)) { rc = PTR_ERR(srvTcp); + bdi_destroy(&cifs_sb->bdi); goto out; } @@ -3161,6 +3169,7 @@ mount_fail_check: cifs_put_smb_ses(pSesInfo); else cifs_put_tcp_session(srvTcp); + bdi_destroy(&cifs_sb->bdi); goto out; } @@ -3357,6 +3366,7 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) } spin_unlock(&cifs_sb->tlink_tree_lock); + bdi_destroy(&cifs_sb->bdi); return 0; } -- cgit v1.2.3 From 6d6861757dfadb7d6aec6bb34acd471210a755f9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 08:34:57 -0400 Subject: cifs: double free on mount failure if we get to out_super with ->s_root already set (e.g. with cifs_get_root() failure), we'll end up with cifs_put_super() called and ->mountdata freed twice. We'll also get cifs_sb freed twice and cifs_sb->local_nls dropped twice. The problem is, we can get to out_super both with and without ->s_root, which makes ->put_super() a bad place for such work. Switch to ->kill_sb(), have all that work done there after kill_anon_super(). Unlike ->put_super(), ->kill_sb() is called by deactivate_locked_super() whether we have ->s_root or not. Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5d3c4fa4b546..dc76c7bccb15 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -184,11 +184,13 @@ cifs_put_super(struct super_block *sb) rc = cifs_umount(sb, cifs_sb); if (rc) cERROR(1, "cifs_umount failed with return code %d", rc); - if (cifs_sb->mountdata) { - kfree(cifs_sb->mountdata); - cifs_sb->mountdata = NULL; - } +} +static void cifs_kill_sb(struct super_block *sb) +{ + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + kill_anon_super(sb); + kfree(cifs_sb->mountdata); unload_nls(cifs_sb->local_nls); kfree(cifs_sb); } @@ -729,8 +731,8 @@ out_shared: goto out; out_super: - kfree(cifs_sb->mountdata); deactivate_locked_super(sb); + goto out; out_cifs_sb: unload_nls(cifs_sb->local_nls); @@ -827,7 +829,7 @@ struct file_system_type cifs_fs_type = { .owner = THIS_MODULE, .name = "cifs", .mount = cifs_do_mount, - .kill_sb = kill_anon_super, + .kill_sb = cifs_kill_sb, /* .fs_flags */ }; const struct inode_operations cifs_dir_inode_ops = { -- cgit v1.2.3 From ca171baaad1420a29cca98be5bdf5596cd70b294 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 08:49:12 -0400 Subject: cifs: don't leak nls on mount failure if cifs_sb allocation fails, we still need to drop nls we'd stashed into volume_info - the one we would've copied to cifs_sb if we could allocate the latter. Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index dc76c7bccb15..bfab2bc83726 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -672,6 +672,7 @@ cifs_do_mount(struct file_system_type *fs_type, cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); if (cifs_sb == NULL) { root = ERR_PTR(-ENOMEM); + unload_nls(volume_info->local_nls); goto out; } -- cgit v1.2.3 From 2c6292ae4be00454882246d07f38cdf15a823c2a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:05:48 -0400 Subject: cifs: don't pass superblock to cifs_mount() To close sget() races we'll need to be able to set cifs_sb up before we get the superblock, so we'll want to be able to do cifs_mount() earlier. 
Fortunately, it's easy to do - setting ->s_maxbytes can be done in cifs_read_super(), ditto for ->s_time_gran and as for putting MS_POSIXACL into ->s_flags, we can mirror it in ->mnt_cifs_flags until cifs_read_super() is called. Kill unused 'devname' argument, while we are at it... Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifs_fs_sb.h | 1 + fs/cifs/cifsfs.c | 13 ++++++++++++- fs/cifs/cifsproto.h | 6 +++--- fs/cifs/connect.c | 28 ++++++++++------------------ 4 files changed, 26 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index ffb1459dc6ec..7260e11e21f8 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -42,6 +42,7 @@ #define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ #define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ #define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */ +#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */ struct cifs_sb_info { struct rb_root tlink_tree; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index bfab2bc83726..8f7451f3c8e6 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -116,7 +116,7 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, spin_lock_init(&cifs_sb->tlink_tree_lock); cifs_sb->tlink_tree = RB_ROOT; - rc = cifs_mount(sb, cifs_sb, volume_info, devname); + rc = cifs_mount(cifs_sb, volume_info); if (rc) { if (!silent) @@ -124,6 +124,17 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, return rc; } + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL) + sb->s_flags |= MS_POSIXACL; + + if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES) + sb->s_maxbytes = MAX_LFS_FILESIZE; + else + sb->s_maxbytes = MAX_NON_LFS; + + /* BB FIXME fix time_gran to be larger for LANMAN sessions */ + sb->s_time_gran = 100; + sb->s_magic = CIFS_MAGIC_NUMBER; sb->s_op = &cifs_super_ops; sb->s_bdi = &cifs_sb->bdi; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 953f84413c77..5814fe543f95 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -157,8 +157,7 @@ extern int cifs_match_super(struct super_block *, void *); extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, const char *devname); -extern int cifs_mount(struct super_block *, struct cifs_sb_info *, - struct smb_vol *, const char *); +extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *); extern int cifs_umount(struct super_block *, struct cifs_sb_info *); extern void cifs_dfs_release_automount_timer(void); void cifs_proc_init(void); @@ -218,7 +217,8 @@ extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo, struct dfs_info3_param **preferrals, int remap); extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, - struct super_block *sb, struct smb_vol *vol); + struct cifs_sb_info *cifs_sb, + struct smb_vol *vol); extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData); extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 78fd7557e35d..3011ac8c9249 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2546,7 +2546,7 @@ ip_connect(struct TCP_Server_Info *server) } void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, - struct super_block *sb, struct smb_vol *vol_info) + struct cifs_sb_info *cifs_sb, struct smb_vol *vol_info) { /* 
if we are reconnecting then should we check to see if * any requested capabilities changed locally e.g. via @@ -2600,22 +2600,23 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, cap &= ~CIFS_UNIX_POSIX_ACL_CAP; else if (CIFS_UNIX_POSIX_ACL_CAP & cap) { cFYI(1, "negotiated posix acl support"); - if (sb) - sb->s_flags |= MS_POSIXACL; + if (cifs_sb) + cifs_sb->mnt_cifs_flags |= + CIFS_MOUNT_POSIXACL; } if (vol_info && vol_info->posix_paths == 0) cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP; else if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) { cFYI(1, "negotiate posix pathnames"); - if (sb) - CIFS_SB(sb)->mnt_cifs_flags |= + if (cifs_sb) + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; } - if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { + if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) { if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { - CIFS_SB(sb)->rsize = 127 * 1024; + cifs_sb->rsize = 127 * 1024; cFYI(DBG2, "larger reads not supported by srv"); } } @@ -2971,8 +2972,7 @@ out: } int -cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, - struct smb_vol *volume_info, const char *devname) +cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) { int rc = 0; int xid; @@ -3026,14 +3026,6 @@ try_mount_again: goto mount_fail_check; } - if (pSesInfo->capabilities & CAP_LARGE_FILES) - sb->s_maxbytes = MAX_LFS_FILESIZE; - else - sb->s_maxbytes = MAX_NON_LFS; - - /* BB FIXME fix time_gran to be larger for LANMAN sessions */ - sb->s_time_gran = 100; - /* search for existing tcon to this server share */ tcon = cifs_get_tcon(pSesInfo, volume_info); if (IS_ERR(tcon)) { @@ -3046,7 +3038,7 @@ try_mount_again: if (tcon->ses->capabilities & CAP_UNIX) { /* reset of caps checks mount to see if unix extensions disabled for just this mount */ - reset_cifs_unix_caps(xid, tcon, sb, volume_info); + reset_cifs_unix_caps(xid, tcon, cifs_sb, volume_info); if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && (le64_to_cpu(tcon->fsUnixInfo.Capability) & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { -- cgit v1.2.3 From d687ca380f1a8f3043f42efd2403cbe58c846e70 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:14:27 -0400 Subject: cifs: leak on mount if we share superblock cifs_sb and nls end up leaked... Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8f7451f3c8e6..4162ee45d043 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -701,6 +701,8 @@ cifs_do_mount(struct file_system_type *fs_type, if (sb->s_fs_info) { cFYI(1, "Use existing superblock"); + unload_nls(cifs_sb->local_nls); + kfree(cifs_sb); goto out_shared; } -- cgit v1.2.3 From 5d3bc605cafe3f367b1c43b673bf643245c81626 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:17:28 -0400 Subject: cifs: allocate mountdata earlier pull mountdata allocation up, so that it won't stand in the way when we lift cifs_mount() to location before sget(). 
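A note on the shape of this change: doing the small, fallible allocations before the step that is hard to unwind keeps the later error handling a short, obvious ladder. A generic userspace sketch of that idea, with hypothetical names rather than the cifs code:

#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct mount_ctx { char *opts; };

/* stand-in for the expensive step with real side effects */
static int heavyweight_setup(struct mount_ctx *c) { (void)c; return 0; }

/* Cheap, fallible allocations first; the hard-to-undo step goes last. */
static int mount_ctx_init(struct mount_ctx *c, const char *opts)
{
	int rc;

	c->opts = strdup(opts);      /* pulled up: nothing to unwind if this fails */
	if (!c->opts)
		return -ENOMEM;

	rc = heavyweight_setup(c);   /* a failure here has only one allocation to undo */
	if (rc) {
		free(c->opts);
		c->opts = NULL;
	}
	return rc;
}

int main(void)
{
	struct mount_ctx c = { 0 };
	int rc = mount_ctx_init(&c, "user=example");
	free(c.opts);
	return rc ? 1 : 0;
}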
Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 4162ee45d043..ec19161dd278 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -687,6 +687,14 @@ cifs_do_mount(struct file_system_type *fs_type, goto out; } + cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); + if (cifs_sb->mountdata == NULL) { + root = ERR_PTR(-ENOMEM); + unload_nls(volume_info->local_nls); + kfree(cifs_sb); + goto out; + } + cifs_setup_cifs_sb(volume_info, cifs_sb); mnt_data.vol = volume_info; @@ -701,22 +709,12 @@ cifs_do_mount(struct file_system_type *fs_type, if (sb->s_fs_info) { cFYI(1, "Use existing superblock"); + kfree(cifs_sb->mountdata); unload_nls(cifs_sb->local_nls); kfree(cifs_sb); goto out_shared; } - /* - * Copy mount params for use in submounts. Better to do - * the copy here and deal with the error before cleanup gets - * complicated post-mount. - */ - cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); - if (cifs_sb->mountdata == NULL) { - root = ERR_PTR(-ENOMEM); - goto out_super; - } - sb->s_flags = flags; /* BB should we make this contingent on mount parm? */ sb->s_flags |= MS_NODIRATIME | MS_NOATIME; @@ -749,6 +747,7 @@ out_super: goto out; out_cifs_sb: + kfree(cifs_sb->mountdata); unload_nls(cifs_sb->local_nls); kfree(cifs_sb); -- cgit v1.2.3 From 2ced6f693581357b2a5bf8b031a702c624b12d0d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:20:04 -0400 Subject: cifs: initialize ->tlink_tree in cifs_setup_cifs_sb() no need to wait until cifs_read_super() and we need it done by the time cifs_mount() will be called. Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 3 --- fs/cifs/connect.c | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index ec19161dd278..61c7aa870f1c 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -113,9 +113,6 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, cifs_sb = CIFS_SB(sb); - spin_lock_init(&cifs_sb->tlink_tree_lock); - cifs_sb->tlink_tree = RB_ROOT; - rc = cifs_mount(cifs_sb, volume_info); if (rc) { diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 3011ac8c9249..9f09adf51edc 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2663,6 +2663,9 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, { INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); + spin_lock_init(&cifs_sb->tlink_tree_lock); + cifs_sb->tlink_tree = RB_ROOT; + if (pvolume_info->rsize > CIFSMaxBufSize) { cERROR(1, "rsize %d too large, using MaxBufSize", pvolume_info->rsize); -- cgit v1.2.3 From 2a9b99516c662d1713d58648e4a4c9aef72051bc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:27:16 -0400 Subject: sanitize cifs_umount() prototype a) superblock argument is unused b) it always returns 0 Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 6 ++---- fs/cifs/cifsproto.h | 2 +- fs/cifs/connect.c | 5 ++--- 3 files changed, 5 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 61c7aa870f1c..2af14d4577a7 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -172,7 +172,7 @@ out_no_root: if (inode) iput(inode); - cifs_umount(sb, cifs_sb); + cifs_umount(cifs_sb); return rc; } @@ -189,9 +189,7 @@ 
cifs_put_super(struct super_block *sb) return; } - rc = cifs_umount(sb, cifs_sb); - if (rc) - cERROR(1, "cifs_umount failed with return code %d", rc); + cifs_umount(cifs_sb); } static void cifs_kill_sb(struct super_block *sb) diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 5814fe543f95..257f312ede42 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -158,7 +158,7 @@ extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, const char *devname); extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *); -extern int cifs_umount(struct super_block *, struct cifs_sb_info *); +extern void cifs_umount(struct cifs_sb_info *); extern void cifs_dfs_release_automount_timer(void); void cifs_proc_init(void); void cifs_proc_clean(void); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9f09adf51edc..b2702226634e 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3339,8 +3339,8 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses, return rc; } -int -cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) +void +cifs_umount(struct cifs_sb_info *cifs_sb) { struct rb_root *root = &cifs_sb->tlink_tree; struct rb_node *node; @@ -3362,7 +3362,6 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) spin_unlock(&cifs_sb->tlink_tree_lock); bdi_destroy(&cifs_sb->bdi); - return 0; } int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) -- cgit v1.2.3 From 97d1152acec0647b72f8c6ecc57da0d6fed574de Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:29:57 -0400 Subject: cifs: pull cifs_mount() call up ... to the point prior to sget(). Now we have cifs_sb set up early enough. Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 2af14d4577a7..4004bc647a72 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -104,8 +104,7 @@ cifs_sb_deactive(struct super_block *sb) } static int -cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, - const char *devname, int silent) +cifs_read_super(struct super_block *sb) { struct inode *inode; struct cifs_sb_info *cifs_sb; @@ -113,14 +112,6 @@ cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, cifs_sb = CIFS_SB(sb); - rc = cifs_mount(cifs_sb, volume_info); - - if (rc) { - if (!silent) - cERROR(1, "cifs_mount failed w/return code = %d", rc); - return rc; - } - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL) sb->s_flags |= MS_POSIXACL; @@ -692,6 +683,17 @@ cifs_do_mount(struct file_system_type *fs_type, cifs_setup_cifs_sb(volume_info, cifs_sb); + rc = cifs_mount(cifs_sb, volume_info); + if (rc) { + if (!(flags & MS_SILENT)) + cERROR(1, "cifs_mount failed w/return code = %d", rc); + root = ERR_PTR(rc); + unload_nls(volume_info->local_nls); + kfree(cifs_sb->mountdata); + kfree(cifs_sb); + goto out; + } + mnt_data.vol = volume_info; mnt_data.cifs_sb = cifs_sb; mnt_data.flags = flags; @@ -699,11 +701,13 @@ cifs_do_mount(struct file_system_type *fs_type, sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data); if (IS_ERR(sb)) { root = ERR_CAST(sb); + cifs_umount(cifs_sb); goto out_cifs_sb; } if (sb->s_fs_info) { cFYI(1, "Use existing superblock"); + cifs_umount(cifs_sb); kfree(cifs_sb->mountdata); unload_nls(cifs_sb->local_nls); kfree(cifs_sb); @@ -715,8 +719,7 @@ 
cifs_do_mount(struct file_system_type *fs_type, sb->s_flags |= MS_NODIRATIME | MS_NOATIME; sb->s_fs_info = cifs_sb; - rc = cifs_read_super(sb, volume_info, dev_name, - flags & MS_SILENT ? 1 : 0); + rc = cifs_read_super(sb); if (rc) { root = ERR_PTR(rc); goto out_super; -- cgit v1.2.3 From 98ab494dd1d25388981114057cf9446250cc7dc7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:32:10 -0400 Subject: cifs: move cifs_umount() call into ->kill_sb() instead of calling it manually in case if cifs_read_super() fails to set ->s_root, just call it from ->kill_sb(). cifs_put_super() is gone now *and* we have cifs_sb shutdown and destruction done after the superblock is gone from ->s_instances. Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 4004bc647a72..15de4561dbc5 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -163,30 +163,14 @@ out_no_root: if (inode) iput(inode); - cifs_umount(cifs_sb); return rc; } -static void -cifs_put_super(struct super_block *sb) -{ - int rc = 0; - struct cifs_sb_info *cifs_sb; - - cFYI(1, "In cifs_put_super"); - cifs_sb = CIFS_SB(sb); - if (cifs_sb == NULL) { - cFYI(1, "Empty cifs superblock info passed to unmount"); - return; - } - - cifs_umount(cifs_sb); -} - static void cifs_kill_sb(struct super_block *sb) { struct cifs_sb_info *cifs_sb = CIFS_SB(sb); kill_anon_super(sb); + cifs_umount(cifs_sb); kfree(cifs_sb->mountdata); unload_nls(cifs_sb->local_nls); kfree(cifs_sb); @@ -537,7 +521,6 @@ static int cifs_drop_inode(struct inode *inode) } static const struct super_operations cifs_super_ops = { - .put_super = cifs_put_super, .statfs = cifs_statfs, .alloc_inode = cifs_alloc_inode, .destroy_inode = cifs_destroy_inode, -- cgit v1.2.3 From d757d71bfc30669a500b72792067e8d1c5d401a5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:42:43 -0400 Subject: cifs: pull freeing mountdata/dropping nls/freeing cifs_sb into cifs_umount() all callers of cifs_umount() proceed to do the same thing; pull it into cifs_umount() itself. 
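The shape of the change is the usual "move the duplicated tail into the callee" refactor: once every call site repeats the same trailing frees, the teardown routine should own them so no future caller can forget one. A generic before/after sketch with made-up names, not the cifs structures:

#include <stdlib.h>

struct conn { char *mountdata; char *nls; };

/*
 * Before, every caller had to remember the same tail:
 *     conn_teardown(c); free(c->mountdata); free(c->nls); free(c);
 * After, the teardown routine owns the whole object.
 */
static void conn_destroy(struct conn *c)
{
	/* ... connection teardown work would go here ... */
	free(c->mountdata);
	free(c->nls);
	free(c);
}

int main(void)
{
	struct conn *c = calloc(1, sizeof(*c));
	if (!c)
		return 1;
	conn_destroy(c);   /* single call, no per-caller cleanup tail */
	return 0;
}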
Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 15 +-------------- fs/cifs/connect.c | 3 +++ 2 files changed, 4 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 15de4561dbc5..46960b7ee433 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -171,9 +171,6 @@ static void cifs_kill_sb(struct super_block *sb) struct cifs_sb_info *cifs_sb = CIFS_SB(sb); kill_anon_super(sb); cifs_umount(cifs_sb); - kfree(cifs_sb->mountdata); - unload_nls(cifs_sb->local_nls); - kfree(cifs_sb); } static int @@ -685,15 +682,12 @@ cifs_do_mount(struct file_system_type *fs_type, if (IS_ERR(sb)) { root = ERR_CAST(sb); cifs_umount(cifs_sb); - goto out_cifs_sb; + goto out; } if (sb->s_fs_info) { cFYI(1, "Use existing superblock"); cifs_umount(cifs_sb); - kfree(cifs_sb->mountdata); - unload_nls(cifs_sb->local_nls); - kfree(cifs_sb); goto out_shared; } @@ -725,13 +719,6 @@ out_shared: out_super: deactivate_locked_super(sb); - goto out; - -out_cifs_sb: - kfree(cifs_sb->mountdata); - unload_nls(cifs_sb->local_nls); - kfree(cifs_sb); - out: cifs_cleanup_volume_info(&volume_info); return root; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index b2702226634e..ca7fbe3d51a5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3362,6 +3362,9 @@ cifs_umount(struct cifs_sb_info *cifs_sb) spin_unlock(&cifs_sb->tlink_tree_lock); bdi_destroy(&cifs_sb->bdi); + kfree(cifs_sb->mountdata); + unload_nls(cifs_sb->local_nls); + kfree(cifs_sb); } int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) -- cgit v1.2.3 From ee01a14d9ddcf3f832f9ceb837888501cb496e27 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:47:23 -0400 Subject: cifs: close sget() races have ->s_fs_info set by the set() callback passed to sget() Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 46960b7ee433..ba2b2da360d3 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -629,6 +629,13 @@ out: return dparent; } +static int cifs_set_super(struct super_block *sb, void *data) +{ + struct cifs_mnt_data *mnt_data = data; + sb->s_fs_info = mnt_data->cifs_sb; + return set_anon_super(sb, NULL); +} + static struct dentry * cifs_do_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) @@ -678,14 +685,14 @@ cifs_do_mount(struct file_system_type *fs_type, mnt_data.cifs_sb = cifs_sb; mnt_data.flags = flags; - sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data); + sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data); if (IS_ERR(sb)) { root = ERR_CAST(sb); cifs_umount(cifs_sb); goto out; } - if (sb->s_fs_info) { + if (sb->s_root) { cFYI(1, "Use existing superblock"); cifs_umount(cifs_sb); goto out_shared; @@ -694,7 +701,6 @@ cifs_do_mount(struct file_system_type *fs_type, sb->s_flags = flags; /* BB should we make this contingent on mount parm? */ sb->s_flags |= MS_NODIRATIME | MS_NOATIME; - sb->s_fs_info = cifs_sb; rc = cifs_read_super(sb); if (rc) { -- cgit v1.2.3 From fa18f1bdce898f0efd0c8639c901d826d01be04f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:50:44 -0400 Subject: cifs: more breakage on mount failures if cifs_get_root() fails, we end up with ->mount() returning NULL, which is not what callers expect. 
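The convention being restored here: ->mount() must hand the VFS either a valid dentry or an errno encoded with ERR_PTR(), because callers test the result with IS_ERR(); a bare NULL passes that test and is treated as success. A small userspace model of the helpers (the real ones live in include/linux/err.h; these stand-ins only mirror the idea):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *err = ERR_PTR(-22);   /* -EINVAL carried inside the pointer */
	void *bug = NULL;           /* what the broken path handed back */

	printf("IS_ERR(ERR_PTR(-22)) = %d, errno = %ld\n", IS_ERR(err), PTR_ERR(err));
	printf("IS_ERR(NULL) = %d (caller believes the mount succeeded)\n", IS_ERR(bug));
	return 0;
}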
Moreover, in case of superblock reuse we end up leaking a superblock reference... Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index ba2b2da360d3..234e9d08db76 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -710,19 +710,16 @@ cifs_do_mount(struct file_system_type *fs_type, sb->s_flags |= MS_ACTIVE; +out_shared: root = cifs_get_root(volume_info, sb); - if (root == NULL) + if (root == NULL) { + root = ERR_PTR(-EINVAL); /* XXX */ goto out_super; + } cFYI(1, "dentry root is: %p", root); goto out; -out_shared: - root = cifs_get_root(volume_info, sb); - if (root) - cFYI(1, "dentry root is: %p", root); - goto out; - out_super: deactivate_locked_super(sb); out: -- cgit v1.2.3 From 5c4f1ad7c6aa3b729bd3a93b80f9417d7e978c32 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 09:56:55 -0400 Subject: cifs: tidy cifs_do_mount() up a bit Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 234e9d08db76..9a6696a5eb78 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -656,16 +656,13 @@ cifs_do_mount(struct file_system_type *fs_type, cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); if (cifs_sb == NULL) { root = ERR_PTR(-ENOMEM); - unload_nls(volume_info->local_nls); - goto out; + goto out_nls; } cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); if (cifs_sb->mountdata == NULL) { root = ERR_PTR(-ENOMEM); - unload_nls(volume_info->local_nls); - kfree(cifs_sb); - goto out; + goto out_cifs_sb; } cifs_setup_cifs_sb(volume_info, cifs_sb); @@ -675,10 +672,7 @@ cifs_do_mount(struct file_system_type *fs_type, if (!(flags & MS_SILENT)) cERROR(1, "cifs_mount failed w/return code = %d", rc); root = ERR_PTR(rc); - unload_nls(volume_info->local_nls); - kfree(cifs_sb->mountdata); - kfree(cifs_sb); - goto out; + goto out_mountdata; } mnt_data.vol = volume_info; @@ -695,22 +689,20 @@ cifs_do_mount(struct file_system_type *fs_type, if (sb->s_root) { cFYI(1, "Use existing superblock"); cifs_umount(cifs_sb); - goto out_shared; - } - - sb->s_flags = flags; - /* BB should we make this contingent on mount parm? */ - sb->s_flags |= MS_NODIRATIME | MS_NOATIME; + } else { + sb->s_flags = flags; + /* BB should we make this contingent on mount parm? */ + sb->s_flags |= MS_NODIRATIME | MS_NOATIME; + + rc = cifs_read_super(sb); + if (rc) { + root = ERR_PTR(rc); + goto out_super; + } - rc = cifs_read_super(sb); - if (rc) { - root = ERR_PTR(rc); - goto out_super; + sb->s_flags |= MS_ACTIVE; } - sb->s_flags |= MS_ACTIVE; - -out_shared: root = cifs_get_root(volume_info, sb); if (root == NULL) { root = ERR_PTR(-EINVAL); /* XXX */ @@ -725,6 +717,14 @@ out_super: out: cifs_cleanup_volume_info(&volume_info); return root; + +out_mountdata: + kfree(cifs_sb->mountdata); +out_cifs_sb: + kfree(cifs_sb); +out_nls: + unload_nls(volume_info->local_nls); + goto out; } static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, -- cgit v1.2.3 From 9403c9c598e91d473c0582066e47ed2289292e45 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Jun 2011 10:02:59 -0400 Subject: cifs: propagate errors from cifs_get_root() to mount(2) ... 
instead of just failing with -EINVAL Acked-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 9a6696a5eb78..35f9154615fa 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -554,7 +554,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) full_path = cifs_build_path_to_root(vol, cifs_sb, cifs_sb_master_tcon(cifs_sb)); if (full_path == NULL) - return NULL; + return ERR_PTR(-ENOMEM); cFYI(1, "Get root dentry for %s", full_path); @@ -583,7 +583,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) dchild = d_alloc(dparent, &name); if (dchild == NULL) { dput(dparent); - dparent = NULL; + dparent = ERR_PTR(-ENOMEM); goto out; } } @@ -601,7 +601,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) if (rc) { dput(dchild); dput(dparent); - dparent = NULL; + dparent = ERR_PTR(rc); goto out; } alias = d_materialise_unique(dchild, inode); @@ -609,7 +609,7 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) dput(dchild); if (IS_ERR(alias)) { dput(dparent); - dparent = NULL; + dparent = ERR_PTR(-EINVAL); /* XXX */ goto out; } dchild = alias; @@ -704,10 +704,8 @@ cifs_do_mount(struct file_system_type *fs_type, } root = cifs_get_root(volume_info, sb); - if (root == NULL) { - root = ERR_PTR(-EINVAL); /* XXX */ + if (IS_ERR(root)) goto out_super; - } cFYI(1, "dentry root is: %p", root); goto out; -- cgit v1.2.3 From e0f5406727f1dfdc47b8ba4a0ff6eae4b0b5ed4c Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sat, 18 Jun 2011 20:26:38 +0000 Subject: Btrfs: fix type mismatch in find_free_extent() data parameter should be u64 because a full-sized chunk flags field is passed instead of 0/1 for distinguishing data from metadata. All underlying functions expect u64. Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1f61bf5b4960..71cd456fdb60 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4842,7 +4842,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, u64 num_bytes, u64 empty_size, u64 search_start, u64 search_end, u64 hint_byte, struct btrfs_key *ins, - int data) + u64 data) { int ret = 0; struct btrfs_root *root = orig_root->fs_info->extent_root; @@ -4869,7 +4869,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans, space_info = __find_space_info(root->fs_info, data); if (!space_info) { - printk(KERN_ERR "No space info for %d\n", data); + printk(KERN_ERR "No space info for %llu\n", data); return -ENOSPC; } -- cgit v1.2.3 From 9b90f5135320bc74dc6c9a8c74d69fd4821d9282 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 24 Jun 2011 16:02:51 +0000 Subject: Btrfs: make sure to update total_bitmaps when freeing cache V3 A user reported this bug again where we have more bitmaps than we are supposed to. This is because we failed to load the free space cache, but don't update the ctl->total_bitmaps counter when we remove entries from the tree. This patch fixes this problem and we should be good to go again. 
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 9f985a429877..bf0d61567f3d 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1893,9 +1893,12 @@ void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) while ((node = rb_last(&ctl->free_space_offset)) != NULL) { info = rb_entry(node, struct btrfs_free_space, offset_index); - unlink_free_space(ctl, info); - kfree(info->bitmap); - kmem_cache_free(btrfs_free_space_cachep, info); + if (!info->bitmap) { + unlink_free_space(ctl, info); + kmem_cache_free(btrfs_free_space_cachep, info); + } else { + free_bitmap(ctl, info); + } if (need_resched()) { spin_unlock(&ctl->tree_lock); cond_resched(); -- cgit v1.2.3 From 2f7e33d432d097a2a7f467b031bf18be91cb3d49 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Thu, 23 Jun 2011 07:27:13 +0000 Subject: btrfs: fix inconsonant inode information When iputting the inode, we may leave the delayed nodes if they have some delayed items that have not been dealt with. So when the inode is read again, we must look up the corresponding delayed node, and use the information in it to initialize the inode. Otherwise we will get inconsonant inode information, which may cause the same directory index number to be allocated again and hit the following oops: [ 5447.554187] err add delayed dir index item(name: pglog_0.965_0) into the insertion tree of the delayed node(root id: 262, inode id: 258, errno: -17) [ 5447.569766] ------------[ cut here ]------------ [ 5447.575361] kernel BUG at fs/btrfs/delayed-inode.c:1301! [SNIP] [ 5447.790721] Call Trace: [ 5447.793191] [] btrfs_insert_dir_item+0x189/0x1bb [btrfs] [ 5447.800156] [] btrfs_add_link+0x12b/0x191 [btrfs] [ 5447.806517] [] btrfs_add_nondir+0x31/0x58 [btrfs] [ 5447.812876] [] btrfs_create+0xf9/0x197 [btrfs] [ 5447.818961] [] vfs_create+0x72/0x92 [ 5447.824090] [] do_last+0x22c/0x40b [ 5447.829133] [] path_openat+0xc0/0x2ef [ 5447.834438] [] ? __perf_event_task_sched_out+0x24/0x44 [ 5447.841216] [] ? perf_event_task_sched_out+0x59/0x67 [ 5447.847846] [] do_filp_open+0x3d/0x87 [ 5447.853156] [] ? strncpy_from_user+0x43/0x4d [ 5447.859072] [] ? getname_flags+0x2e/0x80 [ 5447.864636] [] ? do_getname+0x14b/0x173 [ 5447.870112] [] ? audit_getname+0x16/0x26 [ 5447.875682] [] ? spin_lock+0xe/0x10 [ 5447.880882] [] do_sys_open+0x69/0xae [ 5447.886153] [] sys_open+0x20/0x22 [ 5447.891114] [] system_call_fastpath+0x16/0x1b Fix it by reusing the old delayed node.
Reported-by: Jim Schutt Signed-off-by: Miao Xie Tested-by: Jim Schutt Signed-off-by: Chris Mason --- fs/btrfs/delayed-inode.c | 104 ++++++++++++++++++++++++++++++++++++----------- fs/btrfs/delayed-inode.h | 1 + fs/btrfs/inode.c | 12 +++++- 3 files changed, 91 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index f1cbd028f7b3..98c68e658a9b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -82,19 +82,16 @@ static inline struct btrfs_delayed_root *btrfs_get_delayed_root( return root->fs_info->delayed_root; } -static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( - struct inode *inode) +static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode) { - struct btrfs_delayed_node *node; struct btrfs_inode *btrfs_inode = BTRFS_I(inode); struct btrfs_root *root = btrfs_inode->root; u64 ino = btrfs_ino(inode); - int ret; + struct btrfs_delayed_node *node; -again: node = ACCESS_ONCE(btrfs_inode->delayed_node); if (node) { - atomic_inc(&node->refs); /* can be accessed */ + atomic_inc(&node->refs); return node; } @@ -102,8 +99,10 @@ again: node = radix_tree_lookup(&root->delayed_nodes_tree, ino); if (node) { if (btrfs_inode->delayed_node) { + atomic_inc(&node->refs); /* can be accessed */ + BUG_ON(btrfs_inode->delayed_node != node); spin_unlock(&root->inode_lock); - goto again; + return node; } btrfs_inode->delayed_node = node; atomic_inc(&node->refs); /* can be accessed */ @@ -113,6 +112,23 @@ again: } spin_unlock(&root->inode_lock); + return NULL; +} + +static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( + struct inode *inode) +{ + struct btrfs_delayed_node *node; + struct btrfs_inode *btrfs_inode = BTRFS_I(inode); + struct btrfs_root *root = btrfs_inode->root; + u64 ino = btrfs_ino(inode); + int ret; + +again: + node = btrfs_get_delayed_node(inode); + if (node) + return node; + node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); if (!node) return ERR_PTR(-ENOMEM); @@ -548,19 +564,6 @@ struct btrfs_delayed_item *__btrfs_next_delayed_item( return next; } -static inline struct btrfs_delayed_node *btrfs_get_delayed_node( - struct inode *inode) -{ - struct btrfs_inode *btrfs_inode = BTRFS_I(inode); - struct btrfs_delayed_node *delayed_node; - - delayed_node = btrfs_inode->delayed_node; - if (delayed_node) - atomic_inc(&delayed_node->refs); - - return delayed_node; -} - static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, u64 root_id) { @@ -1404,8 +1407,7 @@ end: int btrfs_inode_delayed_dir_index_count(struct inode *inode) { - struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; - int ret = 0; + struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); if (!delayed_node) return -ENOENT; @@ -1415,11 +1417,14 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode) * a new directory index is added into the delayed node and index_cnt * is updated now. So we needn't lock the delayed node. 
*/ - if (!delayed_node->index_cnt) + if (!delayed_node->index_cnt) { + btrfs_release_delayed_node(delayed_node); return -EINVAL; + } BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; - return ret; + btrfs_release_delayed_node(delayed_node); + return 0; } void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, @@ -1613,6 +1618,57 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, inode->i_ctime.tv_nsec); } +int btrfs_fill_inode(struct inode *inode, u32 *rdev) +{ + struct btrfs_delayed_node *delayed_node; + struct btrfs_inode_item *inode_item; + struct btrfs_timespec *tspec; + + delayed_node = btrfs_get_delayed_node(inode); + if (!delayed_node) + return -ENOENT; + + mutex_lock(&delayed_node->mutex); + if (!delayed_node->inode_dirty) { + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return -ENOENT; + } + + inode_item = &delayed_node->inode_item; + + inode->i_uid = btrfs_stack_inode_uid(inode_item); + inode->i_gid = btrfs_stack_inode_gid(inode_item); + btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); + inode->i_mode = btrfs_stack_inode_mode(inode_item); + inode->i_nlink = btrfs_stack_inode_nlink(inode_item); + inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); + BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); + BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item); + inode->i_rdev = 0; + *rdev = btrfs_stack_inode_rdev(inode_item); + BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); + + tspec = btrfs_inode_atime(inode_item); + inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec); + inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + + tspec = btrfs_inode_mtime(inode_item); + inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec); + inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + + tspec = btrfs_inode_ctime(inode_item); + inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec); + inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + + inode->i_generation = BTRFS_I(inode)->generation; + BTRFS_I(inode)->index_cnt = (u64)-1; + + mutex_unlock(&delayed_node->mutex); + btrfs_release_delayed_node(delayed_node); + return 0; +} + int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode) { diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index d1a6a2915c66..8d27af4bd8b9 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -119,6 +119,7 @@ void btrfs_kill_delayed_inode_items(struct inode *inode); int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); +int btrfs_fill_inode(struct inode *inode, u32 *rdev); /* Used for drop dead root */ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 87f1e0cf26f8..447612d3a16a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2509,6 +2509,11 @@ static void btrfs_read_locked_inode(struct inode *inode) int maybe_acls; u32 rdev; int ret; + bool filled = false; + + ret = btrfs_fill_inode(inode, &rdev); + if (!ret) + filled = true; path = btrfs_alloc_path(); BUG_ON(!path); @@ -2520,6 +2525,10 @@ static void btrfs_read_locked_inode(struct inode *inode) goto make_bad; leaf = path->nodes[0]; + + if (filled) + goto cache_acl; + inode_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item); if (!leaf->map_token) @@ -2556,7 +2565,7 @@ static void btrfs_read_locked_inode(struct 
inode *inode) BTRFS_I(inode)->index_cnt = (u64)-1; BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); - +cache_acl: /* * try to precache a NULL acl entry for files that don't have * any xattrs or acls @@ -2572,7 +2581,6 @@ static void btrfs_read_locked_inode(struct inode *inode) } btrfs_free_path(path); - inode_item = NULL; switch (inode->i_mode & S_IFMT) { case S_IFREG: -- cgit v1.2.3 From 087806b1281563e4ae7a5bce3155f894af5f4118 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 22 Jun 2011 23:10:26 +0200 Subject: redefine thread_group_leader() as exit_signal >= 0 Change de_thread() to set old_leader->exit_signal = -1. This is good for the consistency, it is no longer the leader and all sub-threads have exit_signal = -1 set by copy_process(CLONE_THREAD). And this allows us to micro-optimize thread_group_leader(), it can simply check exit_signal >= 0. This also makes sense because we should move ->group_leader from task_struct to signal_struct. Signed-off-by: Oleg Nesterov Reviewed-by: Tejun Heo --- fs/exec.c | 1 + include/linux/sched.h | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index 8dca45b0dae8..c3d517bfdd27 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -963,6 +963,7 @@ static int de_thread(struct task_struct *tsk) leader->group_leader = tsk; tsk->exit_signal = SIGCHLD; + leader->exit_signal = -1; BUG_ON(leader->exit_state != EXIT_ZOMBIE); leader->exit_state = EXIT_DEAD; diff --git a/include/linux/sched.h b/include/linux/sched.h index 39acee2c8929..b38ed51d5c64 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2284,8 +2284,10 @@ static inline int get_nr_threads(struct task_struct *tsk) return tsk->signal->nr_threads; } -/* de_thread depends on thread_group_leader not being a pid based check */ -#define thread_group_leader(p) (p == p->group_leader) +static inline bool thread_group_leader(struct task_struct *p) +{ + return p->exit_signal >= 0; +} /* Do to the insanities of de_thread it is possible for a process * to have the pid of the thread group leader without actually being -- cgit v1.2.3 From 2b4b2482e70eba10dd98653a3a5ac68126565e24 Mon Sep 17 00:00:00 2001 From: Bob Liu Date: Mon, 27 Jun 2011 16:18:06 -0700 Subject: romfs: fix romfs_get_unmapped_area() argument check romfs_get_unmapped_area() checks argument `len' without considering PAGE_ALIGN which will cause do_mmap_pgoff() return -EINVAL error after commit f67d9b1576c ("nommu: add page_align to mmap"). Fix the check by changing it in same way ramfs_nommu_get_unmapped_area() was changed in ramfs/file-nommu.c. 
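Concrete numbers make the failure easy to see: for a 5000-byte file, mapping the whole file now arrives with len already rounded up to 8192, so the old byte-based test rejects a valid request while the page-based test from this patch accepts it (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_SHIFT 12

int main(void)
{
	unsigned long isize = 5000;   /* file is a little over one page */
	unsigned long pgoff = 0;
	unsigned long len = 8192;     /* 5000 rounded up to a page multiple by mmap */
	unsigned long offset = pgoff << PAGE_SHIFT;

	/* old byte-based check: spuriously rejects this mapping */
	int old_reject = offset > isize || len > isize || offset > isize - len;

	/* page-based check, as in the patched function */
	unsigned long lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int new_reject = (pgoff >= maxpages) || (maxpages - pgoff < lpages);

	printf("old check rejects: %d, new check rejects: %d\n", old_reject, new_reject);
	return 0;
}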
Signed-off-by: Bob Liu Cc: David Howells Cc: Paul Mundt Acked-by: Greg Ungerer Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/romfs/mmap-nommu.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c index f0511e816967..eed99428f104 100644 --- a/fs/romfs/mmap-nommu.c +++ b/fs/romfs/mmap-nommu.c @@ -27,14 +27,18 @@ static unsigned long romfs_get_unmapped_area(struct file *file, { struct inode *inode = file->f_mapping->host; struct mtd_info *mtd = inode->i_sb->s_mtd; - unsigned long isize, offset; + unsigned long isize, offset, maxpages, lpages; if (!mtd) goto cant_map_directly; + /* the mapping mustn't extend beyond the EOF */ + lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; isize = i_size_read(inode); offset = pgoff << PAGE_SHIFT; - if (offset > isize || len > isize || offset > isize - len) + + maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; + if ((pgoff >= maxpages) || (maxpages - pgoff < lpages)) return (unsigned long) -EINVAL; /* we need to call down to the MTD layer to do the actual mapping */ -- cgit v1.2.3 From 08142579b6ca35883c1ed066a2681de6f6917062 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 27 Jun 2011 16:18:10 -0700 Subject: mm: fix assertion mapping->nrpages == 0 in end_writeback() Under heavy memory and filesystem load, users observe the assertion mapping->nrpages == 0 in end_writeback() trigger. This can be caused by page reclaim reclaiming the last page from a mapping in the following race: CPU0 CPU1 ... shrink_page_list() __remove_mapping() __delete_from_page_cache() radix_tree_delete() evict_inode() truncate_inode_pages() truncate_inode_pages_range() pagevec_lookup() - finds nothing end_writeback() mapping->nrpages != 0 -> BUG page->mapping = NULL mapping->nrpages-- Fix the problem by doing a reliable check of mapping->nrpages under mapping->tree_lock in end_writeback(). Analyzed by Jay , lost in LKML, and dug out by Miklos Szeredi . Cc: Jay Cc: Miklos Szeredi Signed-off-by: Jan Kara Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/inode.c | 7 +++++++ include/linux/fs.h | 1 + mm/truncate.c | 5 +++++ 3 files changed, 13 insertions(+) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 0f7e88a7803f..43566d17d1b8 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -423,7 +423,14 @@ EXPORT_SYMBOL(remove_inode_hash); void end_writeback(struct inode *inode) { might_sleep(); + /* + * We have to cycle tree_lock here because reclaim can be still in the + * process of removing the last page (in __delete_from_page_cache()) + * and we must not free mapping under it. 
+ */ + spin_lock_irq(&inode->i_data.tree_lock); BUG_ON(inode->i_data.nrpages); + spin_unlock_irq(&inode->i_data.tree_lock); BUG_ON(!list_empty(&inode->i_data.private_list)); BUG_ON(!(inode->i_state & I_FREEING)); BUG_ON(inode->i_state & I_CLEAR); diff --git a/include/linux/fs.h b/include/linux/fs.h index 6e73e2e9ae33..b5b979247863 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -639,6 +639,7 @@ struct address_space { struct prio_tree_root i_mmap; /* tree of private and shared mappings */ struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ struct mutex i_mmap_mutex; /* protect tree, count, list */ + /* Protected by tree_lock together with the radix tree */ unsigned long nrpages; /* number of total pages */ pgoff_t writeback_index;/* writeback starts here */ const struct address_space_operations *a_ops; /* methods */ diff --git a/mm/truncate.c b/mm/truncate.c index 29a9b8a5a31a..e13f22efaad7 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -304,6 +304,11 @@ EXPORT_SYMBOL(truncate_inode_pages_range); * @lstart: offset from which to truncate * * Called under (and serialised by) inode->i_mutex. + * + * Note: When this function returns, there can be a page in the process of + * deletion (inside __delete_from_page_cache()) in the specified range. Thus + * mapping->nrpages can be non-zero when this function returns even after + * truncation of the whole mapping. */ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) { -- cgit v1.2.3 From 1d1221f375c94ef961ba8574ac4f85c8870ddd51 Mon Sep 17 00:00:00 2001 From: Vasiliy Kulikov Date: Fri, 24 Jun 2011 16:08:38 +0400 Subject: proc: restrict access to /proc/PID/io /proc/PID/io may be used for gathering private information. E.g. for openssh and vsftpd daemons wchars/rchars may be used to learn the precise password length. Restrict it to processes being able to ptrace the target process. ptrace_may_access() is needed to prevent keeping open file descriptor of "io" file, executing setuid binary and gathering io information of the setuid'ed process. 
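For context, the file being restricted is a short list of "key: value" counters, so sampling it is trivial; that is exactly why rchar/wchar deltas can reveal, for example, how many bytes of a password a daemon just read. A minimal reader of the format (illustrative only; with this patch the read is refused with -EACCES unless the caller may ptrace the target):

#include <stdio.h>

/* Dump /proc/<pid>/io, e.g. "rchar: 12345", "wchar: 678", ... */
int main(int argc, char **argv)
{
	char path[64], line[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/io", argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}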
Signed-off-by: Vasiliy Kulikov Signed-off-by: Linus Torvalds --- fs/proc/base.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index 8a84210ca080..fc5bc2767692 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2708,6 +2708,9 @@ static int do_io_accounting(struct task_struct *task, char *buffer, int whole) struct task_io_accounting acct = task->ioac; unsigned long flags; + if (!ptrace_may_access(task, PTRACE_MODE_READ)) + return -EACCES; + if (whole && lock_task_sighand(task, &flags)) { struct task_struct *t = task; @@ -2839,7 +2842,7 @@ static const struct pid_entry tgid_base_stuff[] = { REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), #endif #ifdef CONFIG_TASK_IO_ACCOUNTING - INF("io", S_IRUGO, proc_tgid_io_accounting), + INF("io", S_IRUSR, proc_tgid_io_accounting), #endif #ifdef CONFIG_HARDWALL INF("hardwall", S_IRUGO, proc_pid_hardwall), @@ -3181,7 +3184,7 @@ static const struct pid_entry tid_base_stuff[] = { REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), #endif #ifdef CONFIG_TASK_IO_ACCOUNTING - INF("io", S_IRUGO, proc_tid_io_accounting), + INF("io", S_IRUSR, proc_tid_io_accounting), #endif #ifdef CONFIG_HARDWALL INF("hardwall", S_IRUGO, proc_pid_hardwall), -- cgit v1.2.3 From 2bea038c52e88badbbd5b420c1de918f7f2579e9 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 16 Jun 2011 11:35:46 -0400 Subject: pnfs: write: Set mds_offset in the generic layer - it is needed by all LDs In current pnfs tree, all the layouts set mds_offset in their .write_pagelist member. mds_offset is only used by generic layer and should be handled by it. This patch is for upstream. It is needed in this -rc series to fix a bug in objects layout_commit. I'll send patches for objects and blocks to be squashed into current pnfs tree. TODO: It looks like the read path needs the same patch. Signed-off-by: Boaz Harrosh Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 1 - fs/nfs/write.c | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 0bafcc91c27f..f9d03abcd04c 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -398,7 +398,6 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync) * this offset and save the original offset. */ data->args.offset = filelayout_get_dserver_offset(lseg, offset); - data->mds_offset = offset; /* Perform an asynchronous write */ status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient, diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e268e3b23497..727168059684 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -864,6 +864,8 @@ static int nfs_write_rpcsetup(struct nfs_page *req, data->args.fh = NFS_FH(inode); data->args.offset = req_offset(req) + offset; + /* pnfs_set_layoutcommit needs this */ + data->mds_offset = data->args.offset; data->args.pgbase = req->wb_pgbase + offset; data->args.pages = data->pagevec; data->args.count = count; -- cgit v1.2.3 From 50176ddefa4a942419cb693dd2d8345bfdcde67c Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Tue, 31 May 2011 16:35:50 -0500 Subject: hfsplus: add missing call to bio_put() hfsplus leaks bio objects by failing to call bio_put() on the bios it allocates. Add the missing call to fix the leak. 
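The rule behind the one-line fix is general: a reference taken by an _alloc()/_get() must be dropped on every exit path, and converging those paths on a single release-then-return tail keeps that true even when new error cases appear later. A generic sketch of that structure, with stand-in names rather than the hfsplus code:

#include <stdlib.h>
#include <errno.h>

struct resource { int unused; };

static struct resource *resource_alloc(void) { return malloc(sizeof(struct resource)); }
static void resource_put(struct resource *r) { free(r); }
static int do_io(struct resource *r) { (void)r; return 0; /* pretend the I/O worked */ }

static int submit_and_wait(void)
{
	struct resource *r;
	int ret = 0;

	r = resource_alloc();
	if (!r)
		return -ENOMEM;

	if (do_io(r) != 0)
		ret = -EIO;      /* remember the error, but fall through to the release */

	resource_put(r);         /* runs on the success and the error path alike */
	return ret;
}

int main(void)
{
	return submit_and_wait() ? 1 : 0;
}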
Signed-off-by: Seth Forshee Cc: # .38.x, .39.x Signed-off-by: Christoph Hellwig --- fs/hfsplus/wrapper.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 3031d81f5f0f..4ac88ff79aa6 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -36,6 +36,7 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector, { DECLARE_COMPLETION_ONSTACK(wait); struct bio *bio; + int ret = 0; bio = bio_alloc(GFP_NOIO, 1); bio->bi_sector = sector; @@ -54,8 +55,10 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector, wait_for_completion(&wait); if (!bio_flagged(bio, BIO_UPTODATE)) - return -EIO; - return 0; + ret = -EIO; + + bio_put(bio); + return ret; } static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd) -- cgit v1.2.3 From 032016a56a1e9c83646435b32e4416d499e1f1ce Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Fri, 24 Jun 2011 01:15:02 +0400 Subject: hfsplus: Fix double iput of the same inode in hfsplus_fill_super() There is a misprint in resource deallocation code on error path in hfsplus_fill_super(): the sbi->alloc_file inode is iput twice, while the root inode in not iput at all. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: Christoph Hellwig --- fs/hfsplus/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index b49b55584c84..84a47b709f51 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -500,7 +500,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) out_put_hidden_dir: iput(sbi->hidden_dir); out_put_root: - iput(sbi->alloc_file); + iput(root); out_put_alloc_file: iput(sbi->alloc_file); out_close_cat_tree: -- cgit v1.2.3 From 2b4f9ca8a575ce6d7ddb59d668e2be250bf86a8f Mon Sep 17 00:00:00 2001 From: Anton Salikhmetov Date: Thu, 16 Dec 2010 18:08:42 +0200 Subject: hfsplus: assignments inside `if' condition clean-up Make assignments outside `if' conditions for unicode.c module where the checkpatch.pl script reported this coding style error. 
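The cleanup is mechanical: an assignment buried in a condition such as `if (ce1 && (cc = ce1[0]))` is easy to misread as a comparison, so it becomes a plain assignment followed by a plain test. A tiny before/after illustration with hypothetical values:

#include <stdio.h>

int main(void)
{
	const unsigned short table[] = { 0x1234, 0 };
	const unsigned short *ce1 = table;
	unsigned short cc;

	/* before: assignment hidden inside the condition */
	if (ce1 && (cc = ce1[0]))
		printf("before style: cc = 0x%x\n", cc);

	/* after: the assignment stands on its own, then gets tested */
	if (ce1)
		cc = ce1[0];
	else
		cc = 0;
	if (cc)
		printf("after style:  cc = 0x%x\n", cc);

	return 0;
}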
Signed-off-by: Anton Salikhmetov Signed-off-by: Christoph Hellwig --- fs/hfsplus/unicode.c | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index a3f0bfcc881e..a32998f29f0b 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c @@ -142,7 +142,11 @@ int hfsplus_uni2asc(struct super_block *sb, /* search for single decomposed char */ if (likely(compose)) ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0); - if (ce1 && (cc = ce1[0])) { + if (ce1) + cc = ce1[0]; + else + cc = 0; + if (cc) { /* start of a possibly decomposed Hangul char */ if (cc != 0xffff) goto done; @@ -209,7 +213,8 @@ int hfsplus_uni2asc(struct super_block *sb, i++; ce2 = ce1; } - if ((cc = ce2[0])) { + cc = ce2[0]; + if (cc) { ip += i; ustrlen -= i; goto done; @@ -301,7 +306,11 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr, while (outlen < HFSPLUS_MAX_STRLEN && len > 0) { size = asc2unichar(sb, astr, len, &c); - if (decompose && (dstr = decompose_unichar(c, &dsize))) { + if (decompose) + dstr = decompose_unichar(c, &dsize); + else + dstr = NULL; + if (dstr) { if (outlen + dsize > HFSPLUS_MAX_STRLEN) break; do { @@ -346,15 +355,23 @@ int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode, astr += size; len -= size; - if (decompose && (dstr = decompose_unichar(c, &dsize))) { + if (decompose) + dstr = decompose_unichar(c, &dsize); + else + dstr = NULL; + if (dstr) { do { c2 = *dstr++; - if (!casefold || (c2 = case_fold(c2))) + if (casefold) + c2 = case_fold(c2); + if (!casefold || c2) hash = partial_name_hash(c2, hash); } while (--dsize > 0); } else { c2 = c; - if (!casefold || (c2 = case_fold(c2))) + if (casefold) + c2 = case_fold(c2); + if (!casefold || c2) hash = partial_name_hash(c2, hash); } } @@ -422,12 +439,14 @@ int hfsplus_compare_dentry(const struct dentry *parent, c1 = *dstr1; c2 = *dstr2; if (casefold) { - if (!(c1 = case_fold(c1))) { + c1 = case_fold(c1); + if (!c1) { dstr1++; dsize1--; continue; } - if (!(c2 = case_fold(c2))) { + c2 = case_fold(c2); + if (!c2) { dstr2++; dsize2--; continue; -- cgit v1.2.3 From bf1a1b31fa3ea24e3a90821d69a5c3da066f7d6c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2011 09:34:17 +0100 Subject: hfsplus: fix overflow in hfsplus_get_block For filesystems larger than 2TB the final sector number passed to map_bh might overflow the range representable in a 32-bit data type. Make sure we use a sector_t for it and the arithmetics calculating it. 
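The overflow is easy to reproduce in isolation: shifting a 32-bit block number happens in 32-bit arithmetic, so the high bits are lost before the result is widened to sector_t; casting the operand first, as the fix does, keeps the full value. A self-contained demonstration (the shift of 3 assumes 4 KiB allocation blocks over 512-byte sectors):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t dblock = 0x30000000u;  /* a block number deep into a large volume */
	int fs_shift = 3;               /* 4096-byte blocks / 512-byte sectors */

	uint64_t wrapped = dblock << fs_shift;             /* shift done in 32 bits */
	uint64_t correct = (uint64_t)dblock << fs_shift;   /* what the fix does */

	printf("32-bit shift: 0x%llx\n", (unsigned long long)wrapped);
	printf("64-bit shift: 0x%llx\n", (unsigned long long)correct);
	return 0;
}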
Signed-off-by: Christoph Hellwig --- fs/hfsplus/extents.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index b1991a2a08e0..b9c1a4b5ba89 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -209,6 +209,7 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock, struct hfsplus_inode_info *hip = HFSPLUS_I(inode); int res = -EIO; u32 ablock, dblock, mask; + sector_t sector; int was_dirty = 0; int shift; @@ -255,10 +256,12 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock, done: dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", inode->i_ino, (long long)iblock, dblock); + mask = (1 << sbi->fs_shift) - 1; - map_bh(bh_result, sb, - (dblock << sbi->fs_shift) + sbi->blockoffset + - (iblock & mask)); + sector = ((sector_t)dblock << sbi->fs_shift) + + sbi->blockoffset + (iblock & mask); + map_bh(bh_result, sb, sector); + if (create) { set_buffer_new(bh_result); hip->phys_size += sb->s_blocksize; -- cgit v1.2.3 From 4ba2d5fdcfd19de0dedf394ddc48db2f219fa89a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2011 09:34:22 +0100 Subject: hfsplus: fix overflow in hfsplus_read_wrapper For partitions larger than 2TB or at such an offset the hfs wrapper code in hfsplus might overflow the range representable in a 32-bit data type. Make sure we use a sector_t for the arithmetics leading to it. I'm not sure this code can be readed at all as hfs itself never supported such large volumes. Signed-off-by: Christoph Hellwig --- fs/hfsplus/wrapper.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 4ac88ff79aa6..4b86468125c1 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -172,8 +172,9 @@ reread: if (!hfsplus_read_mdb(sbi->s_vhdr, &wd)) goto out_free_backup_vhdr; wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT; - part_start += wd.ablk_start + wd.embed_start * wd.ablk_size; - part_size = wd.embed_count * wd.ablk_size; + part_start += (sector_t)wd.ablk_start + + (sector_t)wd.embed_start * wd.ablk_size; + part_size = (sector_t)wd.embed_count * wd.ablk_size; goto reread; default: /* -- cgit v1.2.3 From c6d5f5fa658f2569a7baaff5acda261a1316cee9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 16 Feb 2011 09:34:26 +0100 Subject: hfsplus: lift the 2TB size limit Replace the hardcoded 2TB limit with a dynamic limit based on the block size now that we have fixed the few overflows preventing operation with large volumes. 
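The replacement limit is plain arithmetic: a volume is usable only if its last 512-byte sector still fits in sector_t and its last page still fits in the page cache index, which is roughly what generic_check_addressable() derives from the block size and block count. The worked numbers below use assumed type widths (32-bit indices, 4 KiB pages) to show where the familiar 2 TiB and 16 TiB ceilings come from:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tib = 1024ULL * 1024 * 1024 * 1024;

	/* 32-bit sector_t, i.e. a 32-bit kernel without large block device support */
	uint64_t by_sector = (UINT32_MAX + 1ULL) * 512;
	/* 32-bit page cache index with 4 KiB pages */
	uint64_t by_page = (UINT32_MAX + 1ULL) * 4096;

	printf("512-byte sector index limit: %llu TiB\n", (unsigned long long)(by_sector / tib));
	printf("4 KiB page index limit:      %llu TiB\n", (unsigned long long)(by_page / tib));
	return 0;
}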
Signed-off-by: Christoph Hellwig --- fs/hfsplus/super.c | 9 +++++++++ fs/hfsplus/wrapper.c | 4 ---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 84a47b709f51..acaef57e3606 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -393,6 +393,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) if (!sbi->rsrc_clump_blocks) sbi->rsrc_clump_blocks = 1; + err = generic_check_addressable(sbi->alloc_blksz_shift, + sbi->total_blocks); + if (err) { + printk(KERN_ERR "hfs: filesystem size too large.\n"); + goto out_free_vhdr; + } + /* Set up operations so we can load metadata */ sb->s_op = &hfsplus_sops; sb->s_maxbytes = MAX_LFS_FILESIZE; @@ -417,6 +424,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) sb->s_flags |= MS_RDONLY; } + err = -EINVAL; + /* Load metadata objects (B*Trees) */ sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); if (!sbi->ext_tree) { diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 4b86468125c1..2f933e83f5c5 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -141,10 +141,6 @@ int hfsplus_read_wrapper(struct super_block *sb) if (hfsplus_get_last_session(sb, &part_start, &part_size)) goto out; - if ((u64)part_start + part_size > 0x100000000ULL) { - pr_err("hfs: volumes larger than 2TB are not supported yet\n"); - goto out; - } error = -ENOMEM; sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL); -- cgit v1.2.3 From 55b3286d3dfd74a1daa5c902cb76575ba0187388 Mon Sep 17 00:00:00 2001 From: Masatake YAMATO Date: Thu, 30 Jun 2011 21:37:10 +0900 Subject: dlm: show addresses in configfs Display all addresses the dlm is using for the local node from the configfs file config/dlm//comms//addr_list Also make the addr file write only. Signed-off-by: Masatake YAMATO Signed-off-by: David Teigland --- fs/dlm/config.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 57 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 9b026ea8baa9..ad3b5a8535d0 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -28,7 +28,8 @@ * /config/dlm//spaces//nodes//weight * /config/dlm//comms//nodeid * /config/dlm//comms//local - * /config/dlm//comms//addr + * /config/dlm//comms//addr (write only) + * /config/dlm//comms//addr_list (read only) * The level is useless, but I haven't figured out how to avoid it. 
*/ @@ -80,6 +81,7 @@ static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf, size_t len); static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len); +static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf); static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf); static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf, size_t len); @@ -190,6 +192,7 @@ enum { COMM_ATTR_NODEID = 0, COMM_ATTR_LOCAL, COMM_ATTR_ADDR, + COMM_ATTR_ADDR_LIST, }; struct comm_attribute { @@ -217,14 +220,22 @@ static struct comm_attribute comm_attr_local = { static struct comm_attribute comm_attr_addr = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "addr", - .ca_mode = S_IRUGO | S_IWUSR }, + .ca_mode = S_IWUSR }, .store = comm_addr_write, }; +static struct comm_attribute comm_attr_addr_list = { + .attr = { .ca_owner = THIS_MODULE, + .ca_name = "addr_list", + .ca_mode = S_IRUGO }, + .show = comm_addr_list_read, +}; + static struct configfs_attribute *comm_attrs[] = { [COMM_ATTR_NODEID] = &comm_attr_nodeid.attr, [COMM_ATTR_LOCAL] = &comm_attr_local.attr, [COMM_ATTR_ADDR] = &comm_attr_addr.attr, + [COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list.attr, NULL, }; @@ -720,6 +731,50 @@ static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len) return len; } +static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf) +{ + ssize_t s; + ssize_t allowance; + int i; + struct sockaddr_storage *addr; + struct sockaddr_in *addr_in; + struct sockaddr_in6 *addr_in6; + + /* Taken from ip6_addr_string() defined in lib/vsprintf.c */ + char buf0[sizeof("AF_INET6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255\n")]; + + + /* Derived from SIMPLE_ATTR_SIZE of fs/configfs/file.c */ + allowance = 4096; + buf[0] = '\0'; + + for (i = 0; i < cm->addr_count; i++) { + addr = cm->addr[i]; + + switch(addr->ss_family) { + case AF_INET: + addr_in = (struct sockaddr_in *)addr; + s = sprintf(buf0, "AF_INET %pI4\n", &addr_in->sin_addr.s_addr); + break; + case AF_INET6: + addr_in6 = (struct sockaddr_in6 *)addr; + s = sprintf(buf0, "AF_INET6 %pI6\n", &addr_in6->sin6_addr); + break; + default: + s = sprintf(buf0, "%s\n", ""); + break; + } + allowance -= s; + if (allowance >= 0) + strcat(buf, buf0); + else { + allowance += s; + break; + } + } + return 4096 - allowance; +} + static ssize_t show_node(struct config_item *i, struct configfs_attribute *a, char *buf) { -- cgit v1.2.3 From ee1b3ea9e6171d7a842527a44873f9f51e6f239b Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 21 Jun 2011 07:18:26 -0400 Subject: cifs: set socket send and receive timeouts before attempting connect Benjamin S. reported that he was unable to suspend his machine while it had a cifs share mounted. The freezer caused this to spew when he tried it: -----------------------[snip]------------------ PM: Syncing filesystems ... done. Freezing user space processes ... (elapsed 0.01 seconds) done. Freezing remaining freezable tasks ... Freezing of tasks failed after 20.01 seconds (1 tasks refusing to freeze, wq_busy=0): cifsd S ffff880127f7b1b0 0 1821 2 0x00800000 ffff880127f7b1b0 0000000000000046 ffff88005fe008a8 ffff8800ffffffff ffff880127cee6b0 0000000000011100 ffff880127737fd8 0000000000004000 ffff880127737fd8 0000000000011100 ffff880127f7b1b0 ffff880127736010 Call Trace: [] ? sk_reset_timer+0xf/0x19 [] ? tcp_connect+0x43c/0x445 [] ? tcp_v4_connect+0x40d/0x47f [] ? schedule_timeout+0x21/0x1ad [] ? _raw_spin_lock_bh+0x9/0x1f [] ? release_sock+0x19/0xef [] ? inet_stream_connect+0x14c/0x24a [] ? 
autoremove_wake_function+0x0/0x2a [] ? ipv4_connect+0x39c/0x3b5 [cifs] [] ? cifs_reconnect+0x1fc/0x28a [cifs] [] ? cifs_demultiplex_thread+0x397/0xb9f [cifs] [] ? perf_event_exit_task+0xb9/0x1bf [] ? cifs_demultiplex_thread+0x0/0xb9f [cifs] [] ? cifs_demultiplex_thread+0x0/0xb9f [cifs] [] ? kthread+0x7a/0x82 [] ? kernel_thread_helper+0x4/0x10 [] ? kthread+0x0/0x82 [] ? kernel_thread_helper+0x0/0x10 Restarting tasks ... done. -----------------------[snip]------------------ We do attempt to perform a try_to_freeze in cifs_reconnect, but the connection attempt itself seems to be taking longer than 20s to time out. The connect timeout is governed by the socket send and receive timeouts, so we can shorten that period by setting those timeouts before attempting the connect instead of after. Adam Williamson tested the patch and said that it seems to have fixed suspending on his laptop when a cifs share is mounted. Reported-by: Benjamin S Tested-by: Adam Williamson Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 7f540df52527..c8cb83ef6f6f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2474,14 +2474,6 @@ generic_ip_connect(struct TCP_Server_Info *server) if (rc < 0) return rc; - rc = socket->ops->connect(socket, saddr, slen, 0); - if (rc < 0) { - cFYI(1, "Error %d connecting to server", rc); - sock_release(socket); - server->ssocket = NULL; - return rc; - } - /* * Eventually check for other socket options to change from * the default. sock_setsockopt not used because it expects @@ -2510,6 +2502,14 @@ generic_ip_connect(struct TCP_Server_Info *server) socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); + rc = socket->ops->connect(socket, saddr, slen, 0); + if (rc < 0) { + cFYI(1, "Error %d connecting to server", rc); + sock_release(socket); + server->ssocket = NULL; + return rc; + } + if (sport == htons(RFC1001_PORT)) rc = ip_rfc1001_connect(server); -- cgit v1.2.3 From bb188d7e64deb0e9cf13a99f44ae0065de5352d6 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Wed, 29 Jun 2011 04:13:39 +0200 Subject: ptrace: make former thread ID available via PTRACE_GETEVENTMSG after PTRACE_EVENT_EXEC stop When a multithreaded program execs under ptrace, all traced threads report WIFEXITED status, except for the thread group leader and the thread which execs. Unless the tracer tracks the thread group relationship between tracees, which is a nontrivial task, it will not detect that the execed thread no longer exists. This patch allows the tracer to figure out which thread performed the exec, by requesting PTRACE_GETEVENTMSG at the PTRACE_EVENT_EXEC stop. Another, smaller problem solved by this patch is that the tracer can now figure out which of several concurrent execs in a multithreaded program succeeded.
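For illustration only (this sketch is not part of the patch): a user-space tracer that has already set PTRACE_O_TRACEEXEC on the tracee could pick up the former thread ID at the exec stop roughly as below. The function name is made up and error handling is omitted.

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Minimal sketch: assumes PTRACE_O_TRACEEXEC was set on the tracee earlier. */
static void report_exec_stop(pid_t tracee)
{
	int status;
	unsigned long old_tid;

	waitpid(tracee, &status, 0);
	if (WIFSTOPPED(status) &&
	    (status >> 8) == (SIGTRAP | (PTRACE_EVENT_EXEC << 8))) {
		/* With this patch the event message carries the pre-exec thread ID. */
		ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &old_tid);
		printf("exec was performed by former thread %lu\n", old_tid);
	}
}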
Signed-off-by: Denys Vlasenko Signed-off-by: Oleg Nesterov --- fs/exec.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index c3d517bfdd27..b08367abf30e 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1358,6 +1358,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) unsigned int depth = bprm->recursion_depth; int try,retval; struct linux_binfmt *fmt; + pid_t old_pid; retval = security_bprm_check(bprm); if (retval) @@ -1371,6 +1372,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) if (retval) return retval; + /* Need to fetch pid before load_binary changes it */ + rcu_read_lock(); + old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent)); + rcu_read_unlock(); + retval = -ENOENT; for (try=0; try<2; try++) { read_lock(&binfmt_lock); @@ -1390,7 +1396,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) bprm->recursion_depth = depth; if (retval >= 0) { if (depth == 0) - ptrace_event(PTRACE_EVENT_EXEC, 0); + ptrace_event(PTRACE_EVENT_EXEC, + old_pid); put_binfmt(fmt); allow_write_access(bprm->file); if (bprm->file) -- cgit v1.2.3 From c282af49901679b63c139b8330b0aba027aa3b56 Mon Sep 17 00:00:00 2001 From: "Bryn M. Reeves" Date: Fri, 1 Jul 2011 15:49:23 -0500 Subject: dlm: use vmalloc for hash tables Allocate dlm hash tables in the vmalloc area to allow a greater maximum size without restructuring of the hash table code. Signed-off-by: Bryn M. Reeves Signed-off-by: David Teigland --- fs/dlm/lockspace.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 14cbf4099753..493d1e7161a4 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -463,7 +463,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, size = dlm_config.ci_rsbtbl_size; ls->ls_rsbtbl_size = size; - ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS); + ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size); if (!ls->ls_rsbtbl) goto out_lsfree; for (i = 0; i < size; i++) { @@ -475,7 +475,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, size = dlm_config.ci_lkbtbl_size; ls->ls_lkbtbl_size = size; - ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS); + ls->ls_lkbtbl = vmalloc(sizeof(struct dlm_lkbtable) * size); if (!ls->ls_lkbtbl) goto out_rsbfree; for (i = 0; i < size; i++) { @@ -487,7 +487,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, size = dlm_config.ci_dirtbl_size; ls->ls_dirtbl_size = size; - ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS); + ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size); if (!ls->ls_dirtbl) goto out_lkbfree; for (i = 0; i < size; i++) { @@ -603,11 +603,11 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, spin_unlock(&lslist_lock); kfree(ls->ls_recover_buf); out_dirfree: - kfree(ls->ls_dirtbl); + vfree(ls->ls_dirtbl); out_lkbfree: - kfree(ls->ls_lkbtbl); + vfree(ls->ls_lkbtbl); out_rsbfree: - kfree(ls->ls_rsbtbl); + vfree(ls->ls_rsbtbl); out_lsfree: if (do_unreg) kobject_put(&ls->ls_kobj); @@ -721,7 +721,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) */ dlm_dir_clear(ls); - kfree(ls->ls_dirtbl); + vfree(ls->ls_dirtbl); /* * Free all lkb's on lkbtbl[] lists. 
@@ -745,7 +745,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) } dlm_astd_resume(); - kfree(ls->ls_lkbtbl); + vfree(ls->ls_lkbtbl); /* * Free all rsb's on rsbtbl[] lists @@ -770,7 +770,7 @@ static int release_lockspace(struct dlm_ls *ls, int force) } } - kfree(ls->ls_rsbtbl); + vfree(ls->ls_rsbtbl); /* * Free structures on any other lists -- cgit v1.2.3 From 549c999a768a7a144c60a0faa58f34c48f39112b Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 26 May 2011 16:57:14 +0300 Subject: UBIFS: return EROFS in case of broken commit If a commit failed and UBIFS is in a broken state, it switches to R/O mode. Most operations return -EROFS in this case, except for commit, which returns -EINVAL. Make it return -EROFS too for consistency. This is also important for our power cut emulation testing. Signed-off-by: Artem Bityutskiy --- fs/ubifs/commit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index 87cd0ead8633..8ab03d12d5c0 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -418,7 +418,7 @@ int ubifs_run_commit(struct ubifs_info *c) spin_lock(&c->cs_lock); if (c->cmt_state == COMMIT_BROKEN) { - err = -EINVAL; + err = -EROFS; goto out; } @@ -444,7 +444,7 @@ int ubifs_run_commit(struct ubifs_info *c) * re-check it. */ if (c->cmt_state == COMMIT_BROKEN) { - err = -EINVAL; + err = -EROFS; goto out_cmt_unlock; } -- cgit v1.2.3 From ae380ce04731579f45f27b3a84d7d8d8ee1f9b1b Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 19 May 2011 14:13:16 +0300 Subject: UBIFS: lessen the size of debugging info data structure This patch lessens the 'struct ubifs_debug_info' size by 90 bytes by allocating fewer bytes for the debugfs root directory name. It introduces macros for the name pattern and length instead of hard-coding 100 bytes. It also makes UBIFS use 'snprintf()' and teaches it to gracefully catch situations when the name array is too short. Additionally, this patch makes 2 unrelated changes - I just thought they do not deserve separate commits: simplifies 'ubifs_assert()' for the non-debugging case and makes 'dbg_debugfs_init()' properly verify the debugfs return code, which may be an error code or NULL, so we should use 'IS_ERR_OR_NULL()' instead of 'IS_ERR()'. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 18 +++++++++++++----- fs/ubifs/debug.h | 11 +++++++++-- 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 0bb2bcef0de9..c9609a63512e 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2815,8 +2815,8 @@ static struct dentry *dfs_rootdir; int dbg_debugfs_init(void) { dfs_rootdir = debugfs_create_dir("ubifs", NULL); - if (IS_ERR(dfs_rootdir)) { - int err = PTR_ERR(dfs_rootdir); + if (IS_ERR_OR_NULL(dfs_rootdir)) { + int err = dfs_rootdir ?
PTR_ERR(dfs_rootdir) : -ENODEV; ubifs_err("cannot create \"ubifs\" debugfs directory, " "error %d\n", err); return err; @@ -2880,12 +2880,20 @@ static const struct file_operations dfs_fops = { */ int dbg_debugfs_init_fs(struct ubifs_info *c) { - int err; + int err, n; const char *fname; struct dentry *dent; struct ubifs_debug_info *d = c->dbg; - sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id); + n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, + c->vi.ubi_num, c->vi.vol_id); + if (n == UBIFS_DFS_DIR_LEN) { + /* The array size is too small */ + fname = UBIFS_DFS_DIR_NAME; + dent = ERR_PTR(-EINVAL); + goto out; + } + fname = d->dfs_dir_name; dent = debugfs_create_dir(fname, dfs_rootdir); if (IS_ERR_OR_NULL(dent)) @@ -2916,7 +2924,7 @@ out_remove: debugfs_remove_recursive(d->dfs_dir); out: err = dent ? PTR_ERR(dent) : -ENODEV; - ubifs_err("cannot create \"%s\" debugfs directory, error %d\n", + ubifs_err("cannot create \"%s\" debugfs filr or directory, error %d\n", fname, err); return err; } diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index a811ac4a26bb..b59c43a4149c 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -33,6 +33,13 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c, #include +/* + * The UBIFS debugfs directory name pattern and maximum name length (3 for "ubi" + * + 1 for "_" and plus 2x2 for 2 UBI numbers and 1 for the trailing zero byte. + */ +#define UBIFS_DFS_DIR_NAME "ubi%d_%d" +#define UBIFS_DFS_DIR_LEN (3 + 1 + 2*2 + 1) + /** * ubifs_debug_info - per-FS debugging information. * @old_zroot: old index root - used by 'dbg_check_old_index()' @@ -84,7 +91,7 @@ struct ubifs_debug_info { long long saved_free; int saved_idx_gc_cnt; - char dfs_dir_name[100]; + char dfs_dir_name[UBIFS_DFS_DIR_LEN + 1]; struct dentry *dfs_dir; struct dentry *dfs_dump_lprops; struct dentry *dfs_dump_budg; @@ -313,7 +320,7 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); /* Use "if (0)" to make compiler check arguments even if debugging is off */ #define ubifs_assert(expr) do { \ - if (0 && (expr)) \ + if (0) \ printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \ __func__, __LINE__, current->pid); \ } while (0) -- cgit v1.2.3 From bfcf677decd8051c305b1d8fda407d069c2361e3 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 24 May 2011 14:10:03 +0300 Subject: UBIFS: dump stack when pnode or nnode reading fails When we fail to read a pnode or nnode - print stacktrace if debugging is enabled. Signed-off-by: Artem Bityutskiy --- fs/ubifs/lpt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index ef5155e109a2..04713cd22679 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -1247,6 +1247,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) out: ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs); + dbg_dump_stack(); kfree(nnode); return err; } @@ -1312,6 +1313,7 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) out: ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs); dbg_dump_pnode(c, pnode, parent, iip); + dbg_dump_stack(); dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip)); kfree(pnode); return err; -- cgit v1.2.3 From 4315fb4072905e45da94d51e2c1e86fa41c5fc5f Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 25 May 2011 17:32:42 +0300 Subject: UBIFS: improve inode dumping function Teach 'dbg_dump_inode()' dump directory entries for directory inodes. 
This requires few additional changes: 1. The 'c' argument of 'dbg_dump_inode()' cannot be const any more. 2. Users of 'dbg_dump_inode()' should not have 'tnc_mutex' locked. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- fs/ubifs/debug.h | 6 +++--- fs/ubifs/tnc.c | 3 ++- 3 files changed, 60 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index c9609a63512e..4a2170dce0db 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -91,6 +91,28 @@ static const char *get_key_type(int type) } } +static const char *get_dent_type(int type) +{ + switch (type) { + case UBIFS_ITYPE_REG: + return "file"; + case UBIFS_ITYPE_DIR: + return "dir"; + case UBIFS_ITYPE_LNK: + return "symlink"; + case UBIFS_ITYPE_BLK: + return "blkdev"; + case UBIFS_ITYPE_CHR: + return "char dev"; + case UBIFS_ITYPE_FIFO: + return "fifo"; + case UBIFS_ITYPE_SOCK: + return "socket"; + default: + return "unknown/invalid type"; + } +} + static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key, char *buffer) { @@ -234,9 +256,13 @@ static void dump_ch(const struct ubifs_ch *ch) printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len)); } -void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode) +void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode) { const struct ubifs_inode *ui = ubifs_inode(inode); + struct qstr nm = { .name = NULL }; + union ubifs_key key; + struct ubifs_dent_node *dent, *pdent = NULL; + int count = 2; printk(KERN_DEBUG "Dump in-memory inode:"); printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino); @@ -270,6 +296,32 @@ void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode) printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read); printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row); printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len); + + if (!S_ISDIR(inode->i_mode)) + return; + + printk(KERN_DEBUG "List of directory entries:\n"); + ubifs_assert(!mutex_is_locked(&c->tnc_mutex)); + + lowest_dent_key(c, &key, inode->i_ino); + while (1) { + dent = ubifs_tnc_next_ent(c, &key, &nm); + if (IS_ERR(dent)) { + if (PTR_ERR(dent) != -ENOENT) + printk(KERN_DEBUG "error %ld\n", PTR_ERR(dent)); + break; + } + + printk(KERN_DEBUG "\t%d: %s (%s)\n", + count++, dent->name, get_dent_type(dent->type)); + + nm.name = dent->name; + nm.len = le16_to_cpu(dent->nlen); + kfree(pdent); + pdent = dent; + key_read(c, &dent->key, &key); + } + kfree(pdent); } void dbg_dump_node(const struct ubifs_info *c, const void *node) @@ -1167,12 +1219,14 @@ int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir) "but calculated size is %llu", dir->i_ino, (unsigned long long)i_size_read(dir), (unsigned long long)size); + dbg_dump_inode(c, dir); dump_stack(); return -EINVAL; } if (dir->i_nlink != nlink) { ubifs_err("directory inode %lu has nlink %u, but calculated " "nlink is %u", dir->i_ino, dir->i_nlink, nlink); + dbg_dump_inode(c, dir); dump_stack(); return -EINVAL; } diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index b59c43a4149c..c6ad9ea15e3c 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -214,7 +214,7 @@ const char *dbg_cstate(int cmt_state); const char *dbg_jhead(int jhead); const char *dbg_get_key_dump(const struct ubifs_info *c, const union ubifs_key *key); -void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode); +void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode); void dbg_dump_node(const 
struct ubifs_info *c, const void *node); void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum, int offs); @@ -364,7 +364,7 @@ static inline const char *dbg_jhead(int jhead) { return ""; } static inline const char * dbg_get_key_dump(const struct ubifs_info *c, const union ubifs_key *key) { return ""; } -static inline void dbg_dump_inode(const struct ubifs_info *c, +static inline void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode) { return; } static inline void dbg_dump_node(const struct ubifs_info *c, const void *node) { return; } @@ -418,7 +418,7 @@ static inline int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) { return 0; } static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; } static inline int dbg_check_dir_size(struct ubifs_info *c, - const struct inode *dir) { return 0; } + const struct inode *dir) { return 0; } static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; } static inline int dbg_check_idx_size(struct ubifs_info *c, long long idx_size) { return 0; } diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 91b4213dde84..48b6ee6fa848 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -3337,9 +3337,10 @@ out_dump: ubifs_err("inode %lu has size %lld, but there are data at offset %lld " "(data key %s)", (unsigned long)inode->i_ino, size, ((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key)); + mutex_unlock(&c->tnc_mutex); dbg_dump_inode(c, inode); dbg_dump_stack(); - err = -EINVAL; + return -EINVAL; out_unlock: mutex_unlock(&c->tnc_mutex); -- cgit v1.2.3 From 1b51e98365e42c27eb2664f8353bc62ab8c55c85 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 25 May 2011 17:38:29 +0300 Subject: UBIFS: rename dbg_check_dir_size function Since this function is not only about size checking, rename it to 'dbg_check_dir()'. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 2 +- fs/ubifs/debug.h | 4 ++-- fs/ubifs/super.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 4a2170dce0db..26d4c6173181 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -1177,7 +1177,7 @@ int dbg_check_synced_i_size(struct inode *inode) * Note, it is good idea to make sure the @dir->i_mutex is locked before * calling this function. 
*/ -int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir) +int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) { unsigned int nlink = 2; union ubifs_key key; diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index c6ad9ea15e3c..a51c20be655d 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -248,7 +248,7 @@ int dbg_check_ltab(struct ubifs_info *c); int dbg_chk_lpt_free_spc(struct ubifs_info *c); int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len); int dbg_check_synced_i_size(struct inode *inode); -int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir); +int dbg_check_dir(struct ubifs_info *c, const struct inode *dir); int dbg_check_tnc(struct ubifs_info *c, int extra); int dbg_check_idx_size(struct ubifs_info *c, long long idx_size); int dbg_check_filesystem(struct ubifs_info *c); @@ -417,7 +417,7 @@ static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; } static inline int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) { return 0; } static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; } -static inline int dbg_check_dir_size(struct ubifs_info *c, +static inline int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) { return 0; } static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; } static inline int dbg_check_idx_size(struct ubifs_info *c, diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 529be0582029..de82c5d04ab9 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -94,7 +94,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode) ubifs_compr_name(ui->compr_type)); } - err = dbg_check_dir_size(c, inode); + err = dbg_check_dir(c, inode); return err; } -- cgit v1.2.3 From a29fa9dfa4d5d1b962825c79f19d9b6f3f15843b Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Thu, 26 May 2011 09:43:34 +0300 Subject: UBIFS: minor cleanup: use S_ISREG helper Instead of using the long "(inode->i_mode & S_IFMT) != S_IFREG" expression, use the shorter "!S_ISREG(inode->i_mode)". Signed-off-by: Artem Bityutskiy --- fs/ubifs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index de82c5d04ab9..285038548da9 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -85,7 +85,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode) if (ui->data_len < 0 || ui->data_len > UBIFS_MAX_INO_DATA) return 4; - if (ui->xattr && (inode->i_mode & S_IFMT) != S_IFREG) + if (ui->xattr && !S_ISREG(inode->i_mode)) return 5; if (!ubifs_compr_present(ui->compr_type)) { -- cgit v1.2.3 From 12e776a0882def45e7ee50918016968b392ac7bd Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 27 May 2011 15:50:39 +0300 Subject: UBIFS: remove unnecessary brackets Remove unnecessary brackets in the "inode->i_flags |= (S_NOCMTIME)" statement to make the code not look silly. Signed-off-by: Artem Bityutskiy --- fs/ubifs/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ef5abd38f0bf..c6cbdd0f636c 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -102,7 +102,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, * UBIFS has to fully control "clean <-> dirty" transitions of inodes * to make budgeting work.
*/ - inode->i_flags |= (S_NOCMTIME); + inode->i_flags |= S_NOCMTIME; inode_init_owner(inode, dir, mode); inode->i_mtime = inode->i_atime = inode->i_ctime = -- cgit v1.2.3 From 1f42596ec0a7d3d50a494d95e3f1afbd117250f8 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 30 May 2011 14:30:51 +0300 Subject: UBIFS: remove dead code Remove dead pieces of code under "if (c->min_io_size == 1)" statement - we never execute it because in UBIFS 'c->min_io_size' is always at least 8. This are leftovers from old pre-mainline prototype. Signed-off-by: Artem Bityutskiy --- fs/ubifs/tnc_commit.c | 106 ++++++++++++++------------------------------------ 1 file changed, 30 insertions(+), 76 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 41920f357bbf..8315387be3fd 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -491,25 +491,6 @@ static int layout_in_empty_space(struct ubifs_info *c) else next_len = ubifs_idx_node_sz(c, cnext->child_cnt); - if (c->min_io_size == 1) { - buf_offs += ALIGN(len, 8); - if (next_len) { - if (buf_offs + next_len <= c->leb_size) - continue; - err = ubifs_update_one_lp(c, lnum, 0, - c->leb_size - buf_offs, 0, 0); - if (err) - return err; - lnum = -1; - continue; - } - err = ubifs_update_one_lp(c, lnum, - c->leb_size - buf_offs, 0, 0, 0); - if (err) - return err; - break; - } - /* Update buffer positions */ wlen = used + len; used += ALIGN(len, 8); @@ -830,7 +811,7 @@ static int write_index(struct ubifs_info *c) struct ubifs_idx_node *idx; struct ubifs_znode *znode, *cnext; int i, lnum, offs, len, next_len, buf_len, buf_offs, used; - int avail, wlen, err, lnum_pos = 0; + int avail, wlen, err, lnum_pos = 0, blen, nxt_offs; cnext = c->enext; if (!cnext) @@ -938,65 +919,38 @@ static int write_index(struct ubifs_info *c) else next_len = ubifs_idx_node_sz(c, cnext->child_cnt); - if (c->min_io_size == 1) { - /* - * Write the prepared index node immediately if there is - * no minimum IO size - */ - err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, - wlen, UBI_SHORTTERM); - if (err) - return err; - buf_offs += ALIGN(wlen, 8); - if (next_len) { - used = 0; - avail = buf_len; - if (buf_offs + next_len > c->leb_size) { - err = ubifs_update_one_lp(c, lnum, - LPROPS_NC, 0, 0, LPROPS_TAKEN); - if (err) - return err; - lnum = -1; - } + nxt_offs = buf_offs + used + next_len; + if (next_len && nxt_offs <= c->leb_size) { + if (avail > 0) continue; - } + else + blen = buf_len; } else { - int blen, nxt_offs = buf_offs + used + next_len; - - if (next_len && nxt_offs <= c->leb_size) { - if (avail > 0) - continue; - else - blen = buf_len; - } else { - wlen = ALIGN(wlen, 8); - blen = ALIGN(wlen, c->min_io_size); - ubifs_pad(c, c->cbuf + wlen, blen - wlen); - } - /* - * The buffer is full or there are no more znodes - * to do - */ - err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, - blen, UBI_SHORTTERM); - if (err) - return err; - buf_offs += blen; - if (next_len) { - if (nxt_offs > c->leb_size) { - err = ubifs_update_one_lp(c, lnum, - LPROPS_NC, 0, 0, LPROPS_TAKEN); - if (err) - return err; - lnum = -1; - } - used -= blen; - if (used < 0) - used = 0; - avail = buf_len - used; - memmove(c->cbuf, c->cbuf + blen, used); - continue; + wlen = ALIGN(wlen, 8); + blen = ALIGN(wlen, c->min_io_size); + ubifs_pad(c, c->cbuf + wlen, blen - wlen); + } + + /* The buffer is full or there are no more znodes to do */ + err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen, + UBI_SHORTTERM); + if (err) + return err; + buf_offs += blen; + if (next_len) { 
+ if (nxt_offs > c->leb_size) { + err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, + 0, LPROPS_TAKEN); + if (err) + return err; + lnum = -1; } + used -= blen; + if (used < 0) + used = 0; + avail = buf_len - used; + memmove(c->cbuf, c->cbuf + blen, used); + continue; } break; } -- cgit v1.2.3 From f42eed7cba7f83197b0ffbb023e7d89a0b2fd71d Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 30 May 2011 14:45:30 +0300 Subject: UBIFS: harmonize znode flag helpers We have 3 znode flags: cow, obsolete, dirty. For the last flag we have a 'ubifs_zn_dirty()' helper function, but for the other 2 flags we use 'test_bit()' directly. This patch makes the situation more consistent and introduces helpers for the other 2 flags: 'ubifs_zn_cow()' and 'ubifs_zn_obsolete()'. Signed-off-by: Artem Bityutskiy --- fs/ubifs/commit.c | 2 +- fs/ubifs/misc.h | 23 +++++++++++++++++++++++ fs/ubifs/tnc.c | 13 ++++++------- fs/ubifs/tnc_commit.c | 8 ++++---- 4 files changed, 34 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index 8ab03d12d5c0..637e07673c3a 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -78,7 +78,7 @@ static int nothing_to_commit(struct ubifs_info *c) * If the root TNC node is dirty, we definitely have something to * commit. */ - if (c->zroot.znode && test_bit(DIRTY_ZNODE, &c->zroot.znode->flags)) + if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode)) return 0; /* diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h index 0b5296a9a4c5..160cd909e957 100644 --- a/fs/ubifs/misc.h +++ b/fs/ubifs/misc.h @@ -38,6 +38,29 @@ static inline int ubifs_zn_dirty(const struct ubifs_znode *znode) return !!test_bit(DIRTY_ZNODE, &znode->flags); } +/** + * ubifs_zn_obsolete - check if znode is obsolete. + * @znode: znode to check + * + * This helper function returns %1 if @znode is obsolete and %0 otherwise. + */ +static inline int ubifs_zn_obsolete(const struct ubifs_znode *znode) +{ + return !!test_bit(OBSOLETE_ZNODE, &znode->flags); +} + +/** + * ubifs_zn_cow - check if znode has to be copied on write. + * @znode: znode to check + * + * This helper function returns %1 if @znode is has COW flag set and %0 + * otherwise. + */ +static inline int ubifs_zn_cow(const struct ubifs_znode *znode) +{ + return !!test_bit(COW_ZNODE, &znode->flags); +} + /** * ubifs_wake_up_bgt - wake up background thread. 
* @c: UBIFS file-system description object diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 48b6ee6fa848..b32a5376b7b4 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -223,7 +223,7 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c, __set_bit(DIRTY_ZNODE, &zn->flags); __clear_bit(COW_ZNODE, &zn->flags); - ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags)); + ubifs_assert(!ubifs_zn_obsolete(znode)); __set_bit(OBSOLETE_ZNODE, &znode->flags); if (znode->level != 0) { @@ -271,7 +271,7 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c, struct ubifs_znode *zn; int err; - if (!test_bit(COW_ZNODE, &znode->flags)) { + if (!ubifs_zn_cow(znode)) { /* znode is not being committed */ if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) { atomic_long_inc(&c->dirty_zn_cnt); @@ -2423,7 +2423,7 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n) */ do { - ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags)); + ubifs_assert(!ubifs_zn_obsolete(znode)); ubifs_assert(ubifs_zn_dirty(znode)); zp = znode->parent; @@ -2479,9 +2479,8 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n) c->zroot.offs = zbr->offs; c->zroot.len = zbr->len; c->zroot.znode = znode; - ubifs_assert(!test_bit(OBSOLETE_ZNODE, - &zp->flags)); - ubifs_assert(test_bit(DIRTY_ZNODE, &zp->flags)); + ubifs_assert(!ubifs_zn_obsolete(zp)); + ubifs_assert(ubifs_zn_dirty(zp)); atomic_long_dec(&c->dirty_zn_cnt); if (zp->cnext) { @@ -2865,7 +2864,7 @@ static void tnc_destroy_cnext(struct ubifs_info *c) struct ubifs_znode *znode = cnext; cnext = cnext->cnext; - if (test_bit(OBSOLETE_ZNODE, &znode->flags)) + if (ubifs_zn_obsolete(znode)) kfree(znode); } while (cnext && cnext != c->cnext); } diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 8315387be3fd..f50c3e5da263 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -87,7 +87,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, atomic_long_dec(&c->dirty_zn_cnt); ubifs_assert(ubifs_zn_dirty(znode)); - ubifs_assert(test_bit(COW_ZNODE, &znode->flags)); + ubifs_assert(ubifs_zn_cow(znode)); __clear_bit(DIRTY_ZNODE, &znode->flags); __clear_bit(COW_ZNODE, &znode->flags); @@ -639,7 +639,7 @@ static int get_znodes_to_commit(struct ubifs_info *c) } cnt += 1; while (1) { - ubifs_assert(!test_bit(COW_ZNODE, &znode->flags)); + ubifs_assert(!ubifs_zn_cow(znode)); __set_bit(COW_ZNODE, &znode->flags); znode->alt = 0; cnext = find_next_dirty(znode); @@ -888,7 +888,7 @@ static int write_index(struct ubifs_info *c) cnext = znode->cnext; ubifs_assert(ubifs_zn_dirty(znode)); - ubifs_assert(test_bit(COW_ZNODE, &znode->flags)); + ubifs_assert(ubifs_zn_cow(znode)); /* * It is important that other threads should see %DIRTY_ZNODE @@ -983,7 +983,7 @@ static void free_obsolete_znodes(struct ubifs_info *c) do { znode = cnext; cnext = znode->cnext; - if (test_bit(OBSOLETE_ZNODE, &znode->flags)) + if (ubifs_zn_obsolete(znode)) kfree(znode); else { znode->cnext = NULL; -- cgit v1.2.3 From 376624476921e43d8b87498161a2ffba6ab8d5aa Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 30 May 2011 14:51:20 +0300 Subject: UBIFS: use correct flags in lprops The UBIFS lpt tree is in many aspects similar to the TNC tree, and we have similar flags for these trees. And by mistake we use the COW_ZNODE flag for LPT in some places, instead of the right flag COW_CNODE. And this works only because these two constants have the same value. 
This patch makes all the LPT code to use COW_CNODE and also changes COW_CNODE constant value to make sure we do not misuse the flags any more. Signed-off-by: Artem Bityutskiy --- fs/ubifs/lprops.c | 2 +- fs/ubifs/lpt_commit.c | 6 +++--- fs/ubifs/ubifs.h | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 667884f4a615..98b8e73c454c 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -504,7 +504,7 @@ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops) pnode = (struct ubifs_pnode *)container_of(lprops - pos, struct ubifs_pnode, lprops[0]); - return !test_bit(COW_ZNODE, &pnode->flags) && + return !test_bit(COW_CNODE, &pnode->flags) && test_bit(DIRTY_CNODE, &pnode->flags); } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index dfcb5748a7dc..254e8d32fc67 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -116,8 +116,8 @@ static int get_cnodes_to_commit(struct ubifs_info *c) return 0; cnt += 1; while (1) { - ubifs_assert(!test_bit(COW_ZNODE, &cnode->flags)); - __set_bit(COW_ZNODE, &cnode->flags); + ubifs_assert(!test_bit(COW_CNODE, &cnode->flags)); + __set_bit(COW_CNODE, &cnode->flags); cnext = next_dirty_cnode(cnode); if (!cnext) { cnode->cnext = c->lpt_cnext; @@ -465,7 +465,7 @@ static int write_cnodes(struct ubifs_info *c) */ clear_bit(DIRTY_CNODE, &cnode->flags); smp_mb__before_clear_bit(); - clear_bit(COW_ZNODE, &cnode->flags); + clear_bit(COW_CNODE, &cnode->flags); smp_mb__after_clear_bit(); offs += len; dbg_chk_lpt_sz(c, 1, len); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index f79983d6f860..3304aad04885 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -230,14 +230,14 @@ enum { * LPT cnode flag bits. * * DIRTY_CNODE: cnode is dirty - * COW_CNODE: cnode is being committed and must be copied before writing * OBSOLETE_CNODE: cnode is being committed and has been copied (or deleted), - * so it can (and must) be freed when the commit is finished + * so it can (and must) be freed when the commit is finished + * COW_CNODE: cnode is being committed and must be copied before writing */ enum { DIRTY_CNODE = 0, - COW_CNODE = 1, - OBSOLETE_CNODE = 2, + OBSOLETE_CNODE = 1, + COW_CNODE = 2, }; /* -- cgit v1.2.3 From 06b282a4cc02f37414c14c94a2f154ca250cf73f Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Mon, 30 May 2011 18:19:34 +0300 Subject: UBIFS: add few commentaries about TNC Add a couple of comments - while looking into TNC I could not easily figure out few facts, so it is a good idea to document them in the code. Signed-off-by: Artem Bityutskiy --- fs/ubifs/tnc_commit.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index f50c3e5da263..d6fab1a9986c 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -89,6 +89,10 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, ubifs_assert(ubifs_zn_dirty(znode)); ubifs_assert(ubifs_zn_cow(znode)); + /* + * Note, unlike 'write_index()' we do not add memory barriers here + * because this function is called with @c->tnc_mutex locked. + */ __clear_bit(DIRTY_ZNODE, &znode->flags); __clear_bit(COW_ZNODE, &znode->flags); @@ -903,6 +907,28 @@ static int write_index(struct ubifs_info *c) clear_bit(COW_ZNODE, &znode->flags); smp_mb__after_clear_bit(); + /* + * We have marked the znode as clean but have not updated the + * @c->clean_zn_cnt counter. 
If this znode becomes dirty again + * before 'free_obsolete_znodes()' is called, then + * @c->clean_zn_cnt will be decremented before it gets + * incremented (resulting in 2 decrements for the same znode). + * This means that @c->clean_zn_cnt may become negative for a + * while. + * + * Q: why we cannot increment @c->clean_zn_cnt? + * A: because we do not have the @c->tnc_mutex locked, and the + * following code would be racy and buggy: + * + * if (!ubifs_zn_obsolete(znode)) { + * atomic_long_inc(&c->clean_zn_cnt); + * atomic_long_inc(&ubifs_clean_zn_cnt); + * } + * + * Thus, we just delay the @c->clean_zn_cnt update until we + * have the mutex locked. + */ + /* Do not access znode from this point on */ /* Update buffer positions */ -- cgit v1.2.3 From bb2615d4d14777fd37e2b91bd99b92c0354813d1 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 31 May 2011 17:47:53 +0300 Subject: UBIFS: amend debugging name check function prototype Add 'struct ubifs_info *c' parameter to the 'dbg_check_name()' debugging function - it will be needed in one of the following commits where we switch to debugfs. So this is just a preparation. Mark parameters as 'const' while on it. Signed-off-by: Artem Bityutskiy --- fs/ubifs/dir.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index c6cbdd0f636c..d1725a9914ac 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -172,7 +172,9 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, #ifdef CONFIG_UBIFS_FS_DEBUG -static int dbg_check_name(struct ubifs_dent_node *dent, struct qstr *nm) +static int dbg_check_name(const struct ubifs_info *c, + const struct ubifs_dent_node *dent, + const struct qstr *nm) { if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) return 0; @@ -185,7 +187,7 @@ static int dbg_check_name(struct ubifs_dent_node *dent, struct qstr *nm) #else -#define dbg_check_name(dent, nm) 0 +#define dbg_check_name(c, dent, nm) 0 #endif @@ -219,7 +221,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry, goto out; } - if (dbg_check_name(dent, &dentry->d_name)) { + if (dbg_check_name(c, dent, &dentry->d_name)) { err = -EINVAL; goto out; } -- cgit v1.2.3 From d808efb407e1a2cf83a8d21411157195f26bdef9 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 31 May 2011 18:14:38 +0300 Subject: UBIFS: amend debugging inode size check function prototype Add 'const struct ubifs_info *c' parameter to 'dbg_check_synced_i_size()' function because we'll need it in the next patch when we switch to debugfs. So this patch is just a preparation. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 3 ++- fs/ubifs/debug.h | 6 ++++-- fs/ubifs/dir.c | 4 ++-- fs/ubifs/file.c | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 26d4c6173181..7adf9fe91817 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -1132,6 +1132,7 @@ out: /** * dbg_check_synced_i_size - check synchronized inode size. + * @c: UBIFS file-system description object * @inode: inode to check * * If inode is clean, synchronized inode size has to be equivalent to current @@ -1139,7 +1140,7 @@ out: * has to be locked). Returns %0 if synchronized inode size if correct, and * %-EINVAL if not. 
*/ -int dbg_check_synced_i_size(struct inode *inode) +int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode) { int err = 0; struct ubifs_inode *ui = ubifs_inode(inode); diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index a51c20be655d..90805bd49456 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -247,7 +247,7 @@ int dbg_check_cats(struct ubifs_info *c); int dbg_check_ltab(struct ubifs_info *c); int dbg_chk_lpt_free_spc(struct ubifs_info *c); int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len); -int dbg_check_synced_i_size(struct inode *inode); +int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode); int dbg_check_dir(struct ubifs_info *c, const struct inode *dir); int dbg_check_tnc(struct ubifs_info *c, int extra); int dbg_check_idx_size(struct ubifs_info *c, long long idx_size); @@ -416,7 +416,9 @@ static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; } static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; } static inline int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) { return 0; } -static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; } +static inline int +dbg_check_synced_i_size(const struct ubifs_info *c, + struct inode *inode) { return 0; } static inline int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) { return 0; } static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index d1725a9914ac..98014bf1d8b4 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -524,7 +524,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir, ubifs_assert(mutex_is_locked(&dir->i_mutex)); ubifs_assert(mutex_is_locked(&inode->i_mutex)); - err = dbg_check_synced_i_size(inode); + err = dbg_check_synced_i_size(c, inode); if (err) return err; @@ -579,7 +579,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry) inode->i_nlink, dir->i_ino); ubifs_assert(mutex_is_locked(&dir->i_mutex)); ubifs_assert(mutex_is_locked(&inode->i_mutex)); - err = dbg_check_synced_i_size(inode); + err = dbg_check_synced_i_size(c, inode); if (err) return err; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 5e7fccfc4b29..7cf738a4544d 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1263,7 +1263,7 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr) if (err) return err; - err = dbg_check_synced_i_size(inode); + err = dbg_check_synced_i_size(c, inode); if (err) return err; -- cgit v1.2.3 From 2b1844a8c934723134ee1ff313e51d0d281cdef1 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 08:31:29 +0300 Subject: UBIFS: introduce helper functions for debugging checks and tests This patch introduces helper functions for all debugging checks, so instead of doing if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) we now do if (!dbg_is_chk_gen(c)) This is a preparation to further changes where the flags will go away, and we'll need to only change the helper functions, but the code which utilizes them won't be touched. At the same time this patch removes 'dbg_force_in_the_gaps()', 'dbg_force_in_the_gaps_enabled()', and dbg_failure_mode helpers for consistency. 
Signed-off-by: Artem Bityutskiy --- fs/ubifs/commit.c | 2 +- fs/ubifs/debug.c | 30 +++++++++-------------- fs/ubifs/debug.h | 66 +++++++++++++++++++++++++++++++++++---------------- fs/ubifs/dir.c | 2 +- fs/ubifs/log.c | 2 +- fs/ubifs/lprops.c | 6 ++--- fs/ubifs/lpt.c | 2 +- fs/ubifs/lpt_commit.c | 10 ++++---- fs/ubifs/orphan.c | 2 +- fs/ubifs/scan.c | 2 +- fs/ubifs/tnc.c | 2 +- fs/ubifs/tnc_commit.c | 5 ++-- 12 files changed, 75 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index 637e07673c3a..63c4e447f977 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -576,7 +576,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) struct idx_node *i; size_t sz; - if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX)) + if (!dbg_is_chk_old_idx(c)) return 0; INIT_LIST_HEAD(&list); diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 7adf9fe91817..79d8924aca5b 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -330,7 +330,7 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node) union ubifs_key key; const struct ubifs_ch *ch = node; - if (dbg_failure_mode) + if (dbg_is_tst_rcvry(c)) return; /* If the magic is incorrect, just hexdump the first bytes */ @@ -886,7 +886,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum) struct ubifs_scan_node *snod; void *buf; - if (dbg_failure_mode) + if (dbg_is_tst_rcvry(c)) return; printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n", @@ -1145,7 +1145,7 @@ int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode) int err = 0; struct ubifs_inode *ui = ubifs_inode(inode); - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; if (!S_ISREG(inode->i_mode)) return 0; @@ -1186,7 +1186,7 @@ int dbg_check_dir(struct ubifs_info *c, const struct inode *dir) struct qstr nm = { .name = NULL }; loff_t size = UBIFS_INO_NODE_SZ; - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; if (!S_ISDIR(dir->i_mode)) @@ -1544,7 +1544,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra) long clean_cnt = 0, dirty_cnt = 0; int err, last; - if (!(ubifs_chk_flags & UBIFS_CHK_TNC)) + if (!dbg_is_chk_tnc(c)) return 0; ubifs_assert(mutex_is_locked(&c->tnc_mutex)); @@ -1791,7 +1791,7 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size) int err; long long calc = 0; - if (!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ)) + if (!dbg_is_chk_idx_sz(c)) return 0; err = dbg_walk_index(c, NULL, add_size, &calc); @@ -2367,7 +2367,7 @@ int dbg_check_filesystem(struct ubifs_info *c) int err; struct fsck_data fsckd; - if (!(ubifs_chk_flags & UBIFS_CHK_FS)) + if (!dbg_is_chk_fs(c)) return 0; fsckd.inodes = RB_ROOT; @@ -2402,7 +2402,7 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head) struct list_head *cur; struct ubifs_scan_node *sa, *sb; - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; for (cur = head->next; cur->next != head; cur = cur->next) { @@ -2469,7 +2469,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) struct list_head *cur; struct ubifs_scan_node *sa, *sb; - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; for (cur = head->next; cur->next != head; cur = cur->next) { @@ -2546,14 +2546,6 @@ error_dump: return 0; } -int dbg_force_in_the_gaps(void) -{ - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) - return 0; - - return !(random32() & 7); -} - /* Failure mode for recovery testing */ #define chance(n, d) (simple_rand() <= 
(n) * 32768LL / (d)) @@ -2624,7 +2616,7 @@ static int in_failure_mode(struct ubi_volume_desc *desc) { struct ubifs_info *c = dbg_find_info(desc); - if (c && dbg_failure_mode) + if (c && dbg_is_tst_rcvry(c)) return c->dbg->failure_mode; return 0; } @@ -2634,7 +2626,7 @@ static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) struct ubifs_info *c = dbg_find_info(desc); struct ubifs_debug_info *d; - if (!c || !dbg_failure_mode) + if (!c || !dbg_is_tst_rcvry(c)) return 0; d = c->dbg; if (d->failure_mode) diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 90805bd49456..c3d1ffd0610a 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -31,8 +31,6 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c, #ifdef CONFIG_UBIFS_FS_DEBUG -#include - /* * The UBIFS debugfs directory name pattern and maximum name length (3 for "ubi" * + 1 for "_" and plus 2x2 for 2 UBI numbers and 1 for the trailing zero byte. @@ -169,6 +167,8 @@ const char *dbg_key_str1(const struct ubifs_info *c, /* Additional recovery messages */ #define dbg_rcvry(fmt, ...) ubifs_dbg_msg("rcvry", fmt, ##__VA_ARGS__) +extern spinlock_t dbg_lock; + /* * Debugging check flags. * @@ -199,12 +199,43 @@ enum { UBIFS_TST_RCVRY = 0x4, }; -extern spinlock_t dbg_lock; - extern unsigned int ubifs_msg_flags; extern unsigned int ubifs_chk_flags; extern unsigned int ubifs_tst_flags; +static inline int dbg_is_chk_gen(const struct ubifs_info *c) +{ + return !!(ubifs_chk_flags & UBIFS_CHK_GEN); +} +static inline int dbg_is_chk_tnc(const struct ubifs_info *c) +{ + return !!(ubifs_chk_flags & UBIFS_CHK_TNC); +} +static inline int dbg_is_chk_idx_sz(const struct ubifs_info *c) +{ + return !!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ); +} +static inline int dbg_is_chk_orph(const struct ubifs_info *c) +{ + return !!(ubifs_chk_flags & UBIFS_CHK_ORPH); +} +static inline int dbg_is_chk_old_idx(const struct ubifs_info *c) +{ + return !!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX); +} +static inline int dbg_is_chk_lprops(const struct ubifs_info *c) +{ + return !!(ubifs_chk_flags & UBIFS_CHK_LPROPS); +} +static inline int dbg_is_chk_fs(const struct ubifs_info *c) +{ + return !!(ubifs_chk_flags & UBIFS_CHK_FS); +} +static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) +{ + return !!(ubifs_tst_flags & UBIFS_TST_RCVRY); +} + int ubifs_debugging_init(struct ubifs_info *c); void ubifs_debugging_exit(struct ubifs_info *c); @@ -261,16 +292,6 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head); int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); -/* Force the use of in-the-gaps method for testing */ -static inline int dbg_force_in_the_gaps_enabled(void) -{ - return ubifs_chk_flags & UBIFS_CHK_GEN; -} -int dbg_force_in_the_gaps(void); - -/* Failure mode for recovery testing */ -#define dbg_failure_mode (ubifs_tst_flags & UBIFS_TST_RCVRY) - #ifndef UBIFS_DBG_PRESERVE_UBI #define ubi_leb_read dbg_leb_read #define ubi_leb_write dbg_leb_write @@ -330,6 +351,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); ubifs_err(fmt, ##__VA_ARGS__); \ } while (0) +#define DBGKEY(key) ((char *)(key)) +#define DBGKEY1(key) ((char *)(key)) + #define ubifs_dbg_msg(fmt, ...) do { \ if (0) \ pr_debug(fmt "\n", ##__VA_ARGS__); \ @@ -353,9 +377,6 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c); #define dbg_scan(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__) #define dbg_rcvry(fmt, ...) 
ubifs_dbg_msg(fmt, ##__VA_ARGS__) -#define DBGKEY(key) ((char *)(key)) -#define DBGKEY1(key) ((char *)(key)) - static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; } static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; } static inline const char *dbg_ntype(int type) { return ""; } @@ -440,9 +461,14 @@ static inline int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) { return 0; } -static inline int dbg_force_in_the_gaps(void) { return 0; } -#define dbg_force_in_the_gaps_enabled() 0 -#define dbg_failure_mode 0 +static inline int dbg_is_chk_gen(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_chk_tnc(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_chk_idx_sz(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_chk_orph(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_chk_old_idx(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_chk_fs(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { return 0; } static inline int dbg_debugfs_init(void) { return 0; } static inline void dbg_debugfs_exit(void) { return; } diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 98014bf1d8b4..683492043317 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -176,7 +176,7 @@ static int dbg_check_name(const struct ubifs_info *c, const struct ubifs_dent_node *dent, const struct qstr *nm) { - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; if (le16_to_cpu(dent->nlen) != nm->len) return -EINVAL; diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index affea9494ae2..fabfb53c4fee 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c @@ -752,7 +752,7 @@ static int dbg_check_bud_bytes(struct ubifs_info *c) struct ubifs_bud *bud; long long bud_bytes = 0; - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; spin_lock(&c->buds_lock); diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 98b8e73c454c..f8a181e647cc 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -860,7 +860,7 @@ int dbg_check_cats(struct ubifs_info *c) struct list_head *pos; int i, cat; - if (!(ubifs_chk_flags & (UBIFS_CHK_GEN | UBIFS_CHK_LPROPS))) + if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) return 0; list_for_each_entry(lprops, &c->empty_list, list) { @@ -958,7 +958,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat, { int i = 0, j, err = 0; - if (!(ubifs_chk_flags & (UBIFS_CHK_GEN | UBIFS_CHK_LPROPS))) + if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c)) return; for (i = 0; i < heap->cnt; i++) { @@ -1262,7 +1262,7 @@ int dbg_check_lprops(struct ubifs_info *c) int i, err; struct ubifs_lp_stats lst; - if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + if (!dbg_is_chk_lprops(c)) return 0; /* diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 04713cd22679..ab91ca628455 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -2226,7 +2226,7 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode, struct ubifs_cnode *cn; int num, iip = 0, err; - if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + if (!dbg_is_chk_lprops(c)) return 0; while (cnode) { diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index 254e8d32fc67..f8e286ba8095 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1640,7 +1640,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) int 
ret; void *buf, *p; - if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + if (!dbg_is_chk_lprops(c)) return 0; buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); @@ -1711,7 +1711,7 @@ int dbg_check_ltab(struct ubifs_info *c) { int lnum, err, i, cnt; - if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + if (!dbg_is_chk_lprops(c)) return 0; /* Bring the entire tree into memory */ @@ -1754,7 +1754,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c) long long free = 0; int i; - if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + if (!dbg_is_chk_lprops(c)) return 0; for (i = 0; i < c->lpt_lebs; i++) { @@ -1796,7 +1796,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len) long long chk_lpt_sz, lpt_sz; int err = 0; - if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS)) + if (!dbg_is_chk_lprops(c)) return 0; switch (action) { @@ -2019,7 +2019,7 @@ static int dbg_populate_lsave(struct ubifs_info *c) struct ubifs_lpt_heap *heap; int i; - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; if (random32() & 3) return 0; diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c index a5422fffbd69..c542c73cfa3c 100644 --- a/fs/ubifs/orphan.c +++ b/fs/ubifs/orphan.c @@ -929,7 +929,7 @@ static int dbg_check_orphans(struct ubifs_info *c) struct check_info ci; int err; - if (!(ubifs_chk_flags & UBIFS_CHK_ORPH)) + if (!dbg_is_chk_orph(c)) return 0; ci.last_ino = 0; diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index 36216b46f772..c7df916dce97 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -240,7 +240,7 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs, int len; ubifs_err("corruption at LEB %d:%d", lnum, offs); - if (dbg_failure_mode) + if (dbg_is_tst_rcvry(c)) return; len = c->leb_size - offs; if (len > 8192) diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index b32a5376b7b4..526b63cd3333 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -3300,7 +3300,7 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, if (!S_ISREG(inode->i_mode)) return 0; - if (!(ubifs_chk_flags & UBIFS_CHK_GEN)) + if (!dbg_is_chk_gen(c)) return 0; block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT; diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index d6fab1a9986c..8959c726b024 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -23,6 +23,7 @@ /* This file implements TNC functions for committing */ #include "ubifs.h" +#include /** * make_idx_node - make an index node for fill-the-gaps method of TNC commit. @@ -381,7 +382,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) c->gap_lebs = NULL; return err; } - if (dbg_force_in_the_gaps_enabled()) { + if (!dbg_is_chk_gen(c)) { /* * Do not print scary warnings if the debugging * option which forces in-the-gaps is enabled. @@ -689,7 +690,7 @@ static int alloc_idx_lebs(struct ubifs_info *c, int cnt) c->ilebs[c->ileb_cnt++] = lnum; dbg_cmt("LEB %d", lnum); } - if (dbg_force_in_the_gaps()) + if (dbg_is_chk_gen(c) && !(random32() & 7)) return -ENOSPC; return 0; } -- cgit v1.2.3 From 8d7819b4af697eec45339cc24db7c3fe45fea0e7 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 08:53:35 +0300 Subject: UBIFS: lessen amount of debugging check types We have too many different debugging checks - lessen the amount by merging all index-related checks into one. At the same time, move the "force in-the-gap" test to the "index checks" class, because it is too heavy for the "general" class. This patch merges the TNC, old index, and index size checks and calls this just "index checks".
Signed-off-by: Artem Bityutskiy --- Documentation/filesystems/ubifs.txt | 4 +--- fs/ubifs/commit.c | 2 +- fs/ubifs/debug.c | 4 ++-- fs/ubifs/debug.h | 24 +++++------------------- fs/ubifs/lpt_commit.c | 1 + fs/ubifs/tnc_commit.c | 6 +++--- 6 files changed, 13 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index 8e4fab639d9c..91ef07652cd2 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -123,10 +123,8 @@ debug_chks Selects extra checks that UBIFS can do while running: Check Flag value General checks 1 - Check Tree Node Cache (TNC) 2 - Check indexing tree size 4 + Check the index 2 Check orphan area 8 - Check old indexing tree 16 Check LEB properties (lprops) 32 Check leaf nodes and inodes 64 diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index 63c4e447f977..fb3b5c813a30 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -576,7 +576,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) struct idx_node *i; size_t sz; - if (!dbg_is_chk_old_idx(c)) + if (!dbg_is_chk_index(c)) return 0; INIT_LIST_HEAD(&list); diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 79d8924aca5b..a967d6800ead 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -1544,7 +1544,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra) long clean_cnt = 0, dirty_cnt = 0; int err, last; - if (!dbg_is_chk_tnc(c)) + if (!dbg_is_chk_index(c)) return 0; ubifs_assert(mutex_is_locked(&c->tnc_mutex)); @@ -1791,7 +1791,7 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size) int err; long long calc = 0; - if (!dbg_is_chk_idx_sz(c)) + if (!dbg_is_chk_index(c)) return 0; err = dbg_walk_index(c, NULL, add_size, &calc); diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index c3d1ffd0610a..44265a3a08ce 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -173,19 +173,15 @@ extern spinlock_t dbg_lock; * Debugging check flags. 
* * UBIFS_CHK_GEN: general checks - * UBIFS_CHK_TNC: check TNC - * UBIFS_CHK_IDX_SZ: check index size + * UBIFS_CHK_INDEX: check the index * UBIFS_CHK_ORPH: check orphans - * UBIFS_CHK_OLD_IDX: check the old index * UBIFS_CHK_LPROPS: check lprops * UBIFS_CHK_FS: check the file-system */ enum { UBIFS_CHK_GEN = 0x1, - UBIFS_CHK_TNC = 0x2, - UBIFS_CHK_IDX_SZ = 0x4, + UBIFS_CHK_INDEX = 0x2, UBIFS_CHK_ORPH = 0x8, - UBIFS_CHK_OLD_IDX = 0x10, UBIFS_CHK_LPROPS = 0x20, UBIFS_CHK_FS = 0x40, }; @@ -207,22 +203,14 @@ static inline int dbg_is_chk_gen(const struct ubifs_info *c) { return !!(ubifs_chk_flags & UBIFS_CHK_GEN); } -static inline int dbg_is_chk_tnc(const struct ubifs_info *c) +static inline int dbg_is_chk_index(const struct ubifs_info *c) { - return !!(ubifs_chk_flags & UBIFS_CHK_TNC); -} -static inline int dbg_is_chk_idx_sz(const struct ubifs_info *c) -{ - return !!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ); + return !!(ubifs_chk_flags & UBIFS_CHK_INDEX); } static inline int dbg_is_chk_orph(const struct ubifs_info *c) { return !!(ubifs_chk_flags & UBIFS_CHK_ORPH); } -static inline int dbg_is_chk_old_idx(const struct ubifs_info *c) -{ - return !!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX); -} static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { return !!(ubifs_chk_flags & UBIFS_CHK_LPROPS); @@ -462,10 +450,8 @@ dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) { return 0; } static inline int dbg_is_chk_gen(const struct ubifs_info *c) { return 0; } -static inline int dbg_is_chk_tnc(const struct ubifs_info *c) { return 0; } -static inline int dbg_is_chk_idx_sz(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_chk_index(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_orph(const struct ubifs_info *c) { return 0; } -static inline int dbg_is_chk_old_idx(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_fs(const struct ubifs_info *c) { return 0; } static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { return 0; } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index f8e286ba8095..f13addf72e89 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -27,6 +27,7 @@ #include #include +#include #include "ubifs.h" #ifdef CONFIG_UBIFS_FS_DEBUG diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 8959c726b024..4c15f07a8bb2 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -22,8 +22,8 @@ /* This file implements TNC functions for committing */ -#include "ubifs.h" #include +#include "ubifs.h" /** * make_idx_node - make an index node for fill-the-gaps method of TNC commit. @@ -382,7 +382,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt) c->gap_lebs = NULL; return err; } - if (!dbg_is_chk_gen(c)) { + if (!dbg_is_chk_index(c)) { /* * Do not print scary warnings if the debugging * option which forces in-the-gaps is enabled. 
@@ -690,7 +690,7 @@ static int alloc_idx_lebs(struct ubifs_info *c, int cnt) c->ilebs[c->ileb_cnt++] = lnum; dbg_cmt("LEB %d", lnum); } - if (dbg_is_chk_gen(c) && !(random32() & 7)) + if (dbg_is_chk_index(c) && !(random32() & 7)) return -ENOSPC; return 0; } -- cgit v1.2.3 From 81e79d38df5a17bb1c738a14f8e5f3412fb33afa Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 31 May 2011 18:16:34 +0300 Subject: UBIFS: switch self-check knobs to debugfs UBIFS has many built-in self-check functions which can be enabled using the debug_chks module parameter or the corresponding sysfs file (/sys/module/ubifs/parameters/debug_chks). However, this is not flexible enough because it is not per-filesystem. This patch moves this to debugfs interfaces. We already have debugfs support, so this patch just adds more debugfs files. While looking at debugfs support I've noticed that it is racy WRT file-system unmount, and added a TODO entry for that. This problem has been there for long time and it is quite standard debugfs PITA. The plan is to fix this later. This patch is simple, but it is large because it changes many places where we check if a particular type of checks is enabled or disabled. Signed-off-by: Artem Bityutskiy --- Documentation/filesystems/ubifs.txt | 26 ------- fs/ubifs/debug.c | 140 +++++++++++++++++++++++++++++++----- fs/ubifs/debug.h | 72 +++++++++---------- 3 files changed, 159 insertions(+), 79 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt index 91ef07652cd2..a0a61d2f389f 100644 --- a/Documentation/filesystems/ubifs.txt +++ b/Documentation/filesystems/ubifs.txt @@ -111,32 +111,6 @@ The following is an example of the kernel boot arguments to attach mtd0 to UBI and mount volume "rootfs": ubi.mtd=0 root=ubi0:rootfs rootfstype=ubifs - -Module Parameters for Debugging -=============================== - -When UBIFS has been compiled with debugging enabled, there are 2 module -parameters that are available to control aspects of testing and debugging. - -debug_chks Selects extra checks that UBIFS can do while running: - - Check Flag value - - General checks 1 - Check the index 2 - Check orphan area 8 - Check LEB properties (lprops) 32 - Check leaf nodes and inodes 64 - -debug_tsts Selects a mode of testing, as follows: - - Test mode Flag value - - Failure mode for recovery testing 4 - -For example, set debug_chks to 3 to enable general and TNC checks. 
- - References ========== diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index a967d6800ead..fdfa5dea5b95 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -31,9 +31,9 @@ #include "ubifs.h" #include -#include #include #include +#include #ifdef CONFIG_UBIFS_FS_DEBUG @@ -42,15 +42,6 @@ DEFINE_SPINLOCK(dbg_lock); static char dbg_key_buf0[128]; static char dbg_key_buf1[128]; -unsigned int ubifs_chk_flags; -unsigned int ubifs_tst_flags; - -module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR); -module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR); - -MODULE_PARM_DESC(debug_chks, "Debug check flags"); -MODULE_PARM_DESC(debug_tsts, "Debug special test flags"); - static const char *get_key_fmt(int fmt) { switch (fmt) { @@ -2886,21 +2877,93 @@ static int open_debugfs_file(struct inode *inode, struct file *file) return nonseekable_open(inode, file); } -static ssize_t write_debugfs_file(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count, + loff_t *ppos) +{ + struct dentry *dent = file->f_path.dentry; + struct ubifs_info *c = file->private_data; + struct ubifs_debug_info *d = c->dbg; + char buf[3]; + int val; + + if (dent == d->dfs_chk_gen) + val = d->chk_gen; + else if (dent == d->dfs_chk_index) + val = d->chk_index; + else if (dent == d->dfs_chk_orph) + val = d->chk_orph; + else if (dent == d->dfs_chk_lprops) + val = d->chk_lprops; + else if (dent == d->dfs_chk_fs) + val = d->chk_fs; + else if (dent == d->dfs_tst_rcvry) + val = d->tst_rcvry; + else + return -EINVAL; + + if (val) + buf[0] = '1'; + else + buf[0] = '0'; + buf[1] = '\n'; + buf[2] = 0x00; + + return simple_read_from_buffer(u, count, ppos, buf, 2); +} + +static ssize_t dfs_file_write(struct file *file, const char __user *u, + size_t count, loff_t *ppos) { struct ubifs_info *c = file->private_data; struct ubifs_debug_info *d = c->dbg; + struct dentry *dent = file->f_path.dentry; + size_t buf_size; + char buf[8]; + int val; - if (file->f_path.dentry == d->dfs_dump_lprops) + /* + * FIXME: this is racy - the file-system might have already been + * unmounted and we'd oops in this case. 
+ */ + if (file->f_path.dentry == d->dfs_dump_lprops) { dbg_dump_lprops(c); - else if (file->f_path.dentry == d->dfs_dump_budg) + return count; + } + if (file->f_path.dentry == d->dfs_dump_budg) { dbg_dump_budg(c, &c->bi); - else if (file->f_path.dentry == d->dfs_dump_tnc) { + return count; + } + if (file->f_path.dentry == d->dfs_dump_tnc) { mutex_lock(&c->tnc_mutex); dbg_dump_tnc(c); mutex_unlock(&c->tnc_mutex); - } else + return count; + } + + buf_size = min_t(size_t, count, (sizeof(buf) - 1)); + if (copy_from_user(buf, u, buf_size)) + return -EFAULT; + + if (buf[0] == '1') + val = 1; + else if (buf[0] == '0') + val = 0; + else + return -EINVAL; + + if (dent == d->dfs_chk_gen) + d->chk_gen = val; + else if (dent == d->dfs_chk_index) + d->chk_index = val; + else if (dent == d->dfs_chk_orph) + d->chk_orph = val; + else if (dent == d->dfs_chk_lprops) + d->chk_lprops = val; + else if (dent == d->dfs_chk_fs) + d->chk_fs = val; + else if (dent == d->dfs_tst_rcvry) + d->tst_rcvry = val; + else return -EINVAL; return count; @@ -2908,7 +2971,8 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf, static const struct file_operations dfs_fops = { .open = open_debugfs_file, - .write = write_debugfs_file, + .read = dfs_file_read, + .write = dfs_file_write, .owner = THIS_MODULE, .llseek = no_llseek, }; @@ -2965,6 +3029,48 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) goto out_remove; d->dfs_dump_tnc = dent; + fname = "chk_general"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c, + &dfs_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + d->dfs_chk_gen = dent; + + fname = "chk_index"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c, + &dfs_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + d->dfs_chk_index = dent; + + fname = "chk_orphans"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c, + &dfs_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + d->dfs_chk_orph = dent; + + fname = "chk_lprops"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c, + &dfs_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + d->dfs_chk_lprops = dent; + + fname = "chk_fs"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c, + &dfs_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + d->dfs_chk_fs = dent; + + fname = "tst_recovery"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c, + &dfs_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + d->dfs_tst_rcvry = dent; + return 0; out_remove: diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 44265a3a08ce..8c3bdd378037 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -43,11 +43,13 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c, * @old_zroot: old index root - used by 'dbg_check_old_index()' * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' + * * @failure_mode: failure mode for recovery testing * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls * @fail_timeout: time in jiffies when delay of failure mode expires * @fail_cnt: current number of calls to failure mode I/O functions * @fail_cnt_max: number of calls by which to delay failure mode + * * @chk_lpt_sz: used by LPT tree size checker * @chk_lpt_sz2: used by LPT tree size checker * @chk_lpt_wastage: used by LPT tree size checker @@ -61,21 +63,36 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c, * 
@saved_free: saved amount of free space * @saved_idx_gc_cnt: saved value of @c->idx_gc_cnt * + * @chk_gen: if general extra checks are enabled + * @chk_index: if index xtra checks are enabled + * @chk_orph: if orphans extra checks are enabled + * @chk_lprops: if lprops extra checks are enabled + * @chk_fs: if UBIFS contents extra checks are enabled + * @tst_rcvry: if UBIFS recovery testing mode enabled + * * @dfs_dir_name: name of debugfs directory containing this file-system's files * @dfs_dir: direntry object of the file-system debugfs directory * @dfs_dump_lprops: "dump lprops" debugfs knob * @dfs_dump_budg: "dump budgeting information" debugfs knob * @dfs_dump_tnc: "dump TNC" debugfs knob + * @dfs_chk_gen: debugfs knob to enable UBIFS general extra checks + * @dfs_chk_index: debugfs knob to enable UBIFS index extra checks + * @dfs_chk_orph: debugfs knob to enable UBIFS orphans extra checks + * @dfs_chk_lprops: debugfs knob to enable UBIFS LEP properties extra checks + * @dfs_chk_fs: debugfs knob to enable UBIFS contents extra checks + * @dfs_tst_rcvry: debugfs knob to enable UBIFS recovery testing */ struct ubifs_debug_info { struct ubifs_zbranch old_zroot; int old_zroot_level; unsigned long long old_zroot_sqnum; + int failure_mode; int fail_delay; unsigned long fail_timeout; unsigned int fail_cnt; unsigned int fail_cnt_max; + long long chk_lpt_sz; long long chk_lpt_sz2; long long chk_lpt_wastage; @@ -89,11 +106,24 @@ struct ubifs_debug_info { long long saved_free; int saved_idx_gc_cnt; + unsigned int chk_gen:1; + unsigned int chk_index:1; + unsigned int chk_orph:1; + unsigned int chk_lprops:1; + unsigned int chk_fs:1; + unsigned int tst_rcvry:1; + char dfs_dir_name[UBIFS_DFS_DIR_LEN + 1]; struct dentry *dfs_dir; struct dentry *dfs_dump_lprops; struct dentry *dfs_dump_budg; struct dentry *dfs_dump_tnc; + struct dentry *dfs_chk_gen; + struct dentry *dfs_chk_index; + struct dentry *dfs_chk_orph; + struct dentry *dfs_chk_lprops; + struct dentry *dfs_chk_fs; + struct dentry *dfs_tst_rcvry; }; #define ubifs_assert(expr) do { \ @@ -169,59 +199,29 @@ const char *dbg_key_str1(const struct ubifs_info *c, extern spinlock_t dbg_lock; -/* - * Debugging check flags. - * - * UBIFS_CHK_GEN: general checks - * UBIFS_CHK_INDEX: check the index - * UBIFS_CHK_ORPH: check orphans - * UBIFS_CHK_LPROPS: check lprops - * UBIFS_CHK_FS: check the file-system - */ -enum { - UBIFS_CHK_GEN = 0x1, - UBIFS_CHK_INDEX = 0x2, - UBIFS_CHK_ORPH = 0x8, - UBIFS_CHK_LPROPS = 0x20, - UBIFS_CHK_FS = 0x40, -}; - -/* - * Special testing flags. 
- * - * UBIFS_TST_RCVRY: failure mode for recovery testing - */ -enum { - UBIFS_TST_RCVRY = 0x4, -}; - -extern unsigned int ubifs_msg_flags; -extern unsigned int ubifs_chk_flags; -extern unsigned int ubifs_tst_flags; - static inline int dbg_is_chk_gen(const struct ubifs_info *c) { - return !!(ubifs_chk_flags & UBIFS_CHK_GEN); + return c->dbg->chk_gen; } static inline int dbg_is_chk_index(const struct ubifs_info *c) { - return !!(ubifs_chk_flags & UBIFS_CHK_INDEX); + return c->dbg->chk_index; } static inline int dbg_is_chk_orph(const struct ubifs_info *c) { - return !!(ubifs_chk_flags & UBIFS_CHK_ORPH); + return c->dbg->chk_orph; } static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { - return !!(ubifs_chk_flags & UBIFS_CHK_LPROPS); + return c->dbg->chk_lprops; } static inline int dbg_is_chk_fs(const struct ubifs_info *c) { - return !!(ubifs_chk_flags & UBIFS_CHK_FS); + return c->dbg->chk_fs; } static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { - return !!(ubifs_tst_flags & UBIFS_TST_RCVRY); + return c->dbg->tst_rcvry; } int ubifs_debugging_init(struct ubifs_info *c); -- cgit v1.2.3 From 24a4f8009ee9e259a412d86373e0d2aac3a80333 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 1 Jun 2011 15:23:25 +0300 Subject: UBIFS: be more informative in failure mode When we are testing UBIFS recovery, it is better to print in which eraseblock we are going to fail. Currently UBIFS prints it only if recovery debugging messages are enabled, but this is not very practical. So change 'dbg_rcvry()' messages to 'ubifs_warn()' messages. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index fdfa5dea5b95..e4a3630eb520 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2631,11 +2631,11 @@ static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) d->fail_delay = 1; d->fail_timeout = jiffies + msecs_to_jiffies(delay); - dbg_rcvry("failing after %ums", delay); + ubifs_warn("failing after %ums", delay); } else { d->fail_delay = 2; d->fail_cnt_max = delay; - dbg_rcvry("failing after %u calls", delay); + ubifs_warn("failing after %u calls", delay); } } d->fail_cnt += 1; @@ -2653,56 +2653,56 @@ static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) return 0; } else if (chance(19, 20)) return 0; - dbg_rcvry("failing in super block LEB %d", lnum); + ubifs_warn("failing in super block LEB %d", lnum); } else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) { if (chance(19, 20)) return 0; - dbg_rcvry("failing in master LEB %d", lnum); + ubifs_warn("failing in master LEB %d", lnum); } else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) { if (write) { if (chance(99, 100)) return 0; } else if (chance(399, 400)) return 0; - dbg_rcvry("failing in log LEB %d", lnum); + ubifs_warn("failing in log LEB %d", lnum); } else if (lnum >= c->lpt_first && lnum <= c->lpt_last) { if (write) { if (chance(7, 8)) return 0; } else if (chance(19, 20)) return 0; - dbg_rcvry("failing in LPT LEB %d", lnum); + ubifs_warn("failing in LPT LEB %d", lnum); } else if (lnum >= c->orph_first && lnum <= c->orph_last) { if (write) { if (chance(1, 2)) return 0; } else if (chance(9, 10)) return 0; - dbg_rcvry("failing in orphan LEB %d", lnum); + ubifs_warn("failing in orphan LEB %d", lnum); } else if (lnum == c->ihead_lnum) { if (chance(99, 100)) return 0; - dbg_rcvry("failing in index head LEB %d", lnum); + 
ubifs_warn("failing in index head LEB %d", lnum); } else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) { if (chance(9, 10)) return 0; - dbg_rcvry("failing in GC head LEB %d", lnum); + ubifs_warn("failing in GC head LEB %d", lnum); } else if (write && !RB_EMPTY_ROOT(&c->buds) && !ubifs_search_bud(c, lnum)) { if (chance(19, 20)) return 0; - dbg_rcvry("failing in non-bud LEB %d", lnum); + ubifs_warn("failing in non-bud LEB %d", lnum); } else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND || c->cmt_state == COMMIT_RUNNING_REQUIRED) { if (chance(999, 1000)) return 0; - dbg_rcvry("failing in bud LEB %d commit running", lnum); + ubifs_warn("failing in bud LEB %d commit running", lnum); } else { if (chance(9999, 10000)) return 0; - dbg_rcvry("failing in bud LEB %d commit not running", lnum); + ubifs_warn("failing in bud LEB %d commit not running", lnum); } - ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum); + d->failure_mode = 1; dump_stack(); return 1; @@ -2922,8 +2922,16 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u, int val; /* - * FIXME: this is racy - the file-system might have already been - * unmounted and we'd oops in this case. + * TODO: this is racy - the file-system might have already been + * unmounted and we'd oops in this case. The plan is to fix it with + * help of 'iterate_supers_type()' which we should have in v3.0: when + * a debugfs opened, we rember FS's UUID in file->private_data. Then + * whenever we access the FS via a debugfs file, we iterate all UBIFS + * superblocks and fine the one with the same UUID, and take the + * locking right. + * + * The other way to go suggested by Al Viro is to create a separate + * 'ubifs-debug' file-system instead. */ if (file->f_path.dentry == d->dfs_dump_lprops) { dbg_dump_lprops(c); -- cgit v1.2.3 From 7dae997de62bbd78f12305bf10019ec8f1103bd4 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 1 Jun 2011 15:44:14 +0300 Subject: UBIFS: re-arrange debugging code a bit Move 'dbg_debugfs_init()' and 'dbg_debugfs_exit()' functions which initialize debugfs for whole UBIFS subsystem below the code which initializes debugfs for a particular UBIFS instance. And do the same for 'ubifs_debugging_init()' and 'ubifs_debugging_exit()' functions. This layout is a bit better for the next patches, so this is just a preparation. Also, rename 'open_debugfs_file()' into 'dfs_file_open()' for consistency. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 118 +++++++++++++++++++++++++++---------------------------- 1 file changed, 59 insertions(+), 59 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index e4a3630eb520..24a85073ac6c 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2808,70 +2808,13 @@ int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) return 0; } -/** - * ubifs_debugging_init - initialize UBIFS debugging. - * @c: UBIFS file-system description object - * - * This function initializes debugging-related data for the file system. - * Returns zero in case of success and a negative error code in case of - * failure. - */ -int ubifs_debugging_init(struct ubifs_info *c) -{ - c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL); - if (!c->dbg) - return -ENOMEM; - - failure_mode_init(c); - return 0; -} - -/** - * ubifs_debugging_exit - free debugging data. 
- * @c: UBIFS file-system description object - */ -void ubifs_debugging_exit(struct ubifs_info *c) -{ - failure_mode_exit(c); - kfree(c->dbg); -} - /* * Root directory for UBIFS stuff in debugfs. Contains sub-directories which * contain the stuff specific to particular file-system mounts. */ static struct dentry *dfs_rootdir; -/** - * dbg_debugfs_init - initialize debugfs file-system. - * - * UBIFS uses debugfs file-system to expose various debugging knobs to - * user-space. This function creates "ubifs" directory in the debugfs - * file-system. Returns zero in case of success and a negative error code in - * case of failure. - */ -int dbg_debugfs_init(void) -{ - dfs_rootdir = debugfs_create_dir("ubifs", NULL); - if (IS_ERR_OR_NULL(dfs_rootdir)) { - int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV; - ubifs_err("cannot create \"ubifs\" debugfs directory, " - "error %d\n", err); - return err; - } - - return 0; -} - -/** - * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system. - */ -void dbg_debugfs_exit(void) -{ - debugfs_remove(dfs_rootdir); -} - -static int open_debugfs_file(struct inode *inode, struct file *file) +static int dfs_file_open(struct inode *inode, struct file *file) { file->private_data = inode->i_private; return nonseekable_open(inode, file); @@ -2978,7 +2921,7 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u, } static const struct file_operations dfs_fops = { - .open = open_debugfs_file, + .open = dfs_file_open, .read = dfs_file_read, .write = dfs_file_write, .owner = THIS_MODULE, @@ -3099,4 +3042,61 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c) debugfs_remove_recursive(c->dbg->dfs_dir); } +/** + * dbg_debugfs_init - initialize debugfs file-system. + * + * UBIFS uses debugfs file-system to expose various debugging knobs to + * user-space. This function creates "ubifs" directory in the debugfs + * file-system. Returns zero in case of success and a negative error code in + * case of failure. + */ +int dbg_debugfs_init(void) +{ + dfs_rootdir = debugfs_create_dir("ubifs", NULL); + if (IS_ERR_OR_NULL(dfs_rootdir)) { + int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV; + ubifs_err("cannot create \"ubifs\" debugfs directory, " + "error %d\n", err); + return err; + } + + return 0; +} + +/** + * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system. + */ +void dbg_debugfs_exit(void) +{ + debugfs_remove(dfs_rootdir); +} + +/** + * ubifs_debugging_init - initialize UBIFS debugging. + * @c: UBIFS file-system description object + * + * This function initializes debugging-related data for the file system. + * Returns zero in case of success and a negative error code in case of + * failure. + */ +int ubifs_debugging_init(struct ubifs_info *c) +{ + c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL); + if (!c->dbg) + return -ENOMEM; + + failure_mode_init(c); + return 0; +} + +/** + * ubifs_debugging_exit - free debugging data. + * @c: UBIFS file-system description object + */ +void ubifs_debugging_exit(struct ubifs_info *c) +{ + failure_mode_exit(c); + kfree(c->dbg); +} + #endif /* CONFIG_UBIFS_FS_DEBUG */ -- cgit v1.2.3 From 28488fc28aa39815b78c2cbeaaf25f33fef92ce8 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 09:58:23 +0300 Subject: UBIFS: introduce debugfs helpers Separate out pieces of code from the debugfs file read/write functions and create separate 'interpret_user_input()'/'provide_user_output()' helpers. 
These helpers will be needed in one of the following patches, so this is just a preparational change. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 76 +++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 56 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 24a85073ac6c..b98638eb0fcb 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2820,13 +2820,39 @@ static int dfs_file_open(struct inode *inode, struct file *file) return nonseekable_open(inode, file); } +/** + * provide_user_output - provide output to the user reading a debugfs file. + * @val: boolean value for the answer + * @u: the buffer to store the answer at + * @count: size of the buffer + * @ppos: position in the @u output buffer + * + * This is a simple helper function which stores @val boolean value in the user + * buffer when the user reads one of UBIFS debugfs files. Returns amount of + * bytes written to @u in case of success and a negative error code in case of + * failure. + */ +static int provide_user_output(int val, char __user *u, size_t count, + loff_t *ppos) +{ + char buf[3]; + + if (val) + buf[0] = '1'; + else + buf[0] = '0'; + buf[1] = '\n'; + buf[2] = 0x00; + + return simple_read_from_buffer(u, count, ppos, buf, 2); +} + static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count, loff_t *ppos) { struct dentry *dent = file->f_path.dentry; struct ubifs_info *c = file->private_data; struct ubifs_debug_info *d = c->dbg; - char buf[3]; int val; if (dent == d->dfs_chk_gen) @@ -2844,14 +2870,33 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count, else return -EINVAL; - if (val) - buf[0] = '1'; - else - buf[0] = '0'; - buf[1] = '\n'; - buf[2] = 0x00; + return provide_user_output(val, u, count, ppos); +} - return simple_read_from_buffer(u, count, ppos, buf, 2); +/** + * interpret_user_input - interpret user debugfs file input. + * @u: user-provided buffer with the input + * @count: buffer size + * + * This is a helper function which interpret user input to a boolean UBIFS + * debugfs file. Returns %0 or %1 in case of success and a negative error code + * in case of failure. 
+ */ +static int interpret_user_input(const char __user *u, size_t count) +{ + size_t buf_size; + char buf[8]; + + buf_size = min_t(size_t, count, (sizeof(buf) - 1)); + if (copy_from_user(buf, u, buf_size)) + return -EFAULT; + + if (buf[0] == '1') + return 1; + else if (buf[0] == '0') + return 0; + + return -EINVAL; } static ssize_t dfs_file_write(struct file *file, const char __user *u, @@ -2860,8 +2905,6 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u, struct ubifs_info *c = file->private_data; struct ubifs_debug_info *d = c->dbg; struct dentry *dent = file->f_path.dentry; - size_t buf_size; - char buf[8]; int val; /* @@ -2891,16 +2934,9 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u, return count; } - buf_size = min_t(size_t, count, (sizeof(buf) - 1)); - if (copy_from_user(buf, u, buf_size)) - return -EFAULT; - - if (buf[0] == '1') - val = 1; - else if (buf[0] == '0') - val = 0; - else - return -EINVAL; + val = interpret_user_input(u, count); + if (val < 0) + return val; if (dent == d->dfs_chk_gen) d->chk_gen = val; -- cgit v1.2.3 From e7717060ddd509e6c305ad7bf5a090a95e91c8cf Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Wed, 1 Jun 2011 17:43:43 +0300 Subject: UBIFS: add global debugfs knobs Now we have per-FS (superblock) debugfs knobs, but they have one drawback - you have to first mount the FS and only after this you can switch self-checks on/off. But often we want to have the checks enabled during the mount. Introduce global debugging knobs for this purpose. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 138 +++++++++++++++++++++++++++++++++++++++++++++++++++---- fs/ubifs/debug.h | 35 +++++++++++--- 2 files changed, 157 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index b98638eb0fcb..15bec635bf3e 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -3064,7 +3064,7 @@ out_remove: debugfs_remove_recursive(d->dfs_dir); out: err = dent ? 
PTR_ERR(dent) : -ENODEV; - ubifs_err("cannot create \"%s\" debugfs filr or directory, error %d\n", + ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n", fname, err); return err; } @@ -3078,6 +3078,74 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c) debugfs_remove_recursive(c->dbg->dfs_dir); } +struct ubifs_global_debug_info ubifs_dbg; + +static struct dentry *dfs_chk_gen; +static struct dentry *dfs_chk_index; +static struct dentry *dfs_chk_orph; +static struct dentry *dfs_chk_lprops; +static struct dentry *dfs_chk_fs; +static struct dentry *dfs_tst_rcvry; + +static ssize_t dfs_global_file_read(struct file *file, char __user *u, + size_t count, loff_t *ppos) +{ + struct dentry *dent = file->f_path.dentry; + int val; + + if (dent == dfs_chk_gen) + val = ubifs_dbg.chk_gen; + else if (dent == dfs_chk_index) + val = ubifs_dbg.chk_index; + else if (dent == dfs_chk_orph) + val = ubifs_dbg.chk_orph; + else if (dent == dfs_chk_lprops) + val = ubifs_dbg.chk_lprops; + else if (dent == dfs_chk_fs) + val = ubifs_dbg.chk_fs; + else if (dent == dfs_tst_rcvry) + val = ubifs_dbg.tst_rcvry; + else + return -EINVAL; + + return provide_user_output(val, u, count, ppos); +} + +static ssize_t dfs_global_file_write(struct file *file, const char __user *u, + size_t count, loff_t *ppos) +{ + struct dentry *dent = file->f_path.dentry; + int val; + + val = interpret_user_input(u, count); + if (val < 0) + return val; + + if (dent == dfs_chk_gen) + ubifs_dbg.chk_gen = val; + else if (dent == dfs_chk_index) + ubifs_dbg.chk_index = val; + else if (dent == dfs_chk_orph) + ubifs_dbg.chk_orph = val; + else if (dent == dfs_chk_lprops) + ubifs_dbg.chk_lprops = val; + else if (dent == dfs_chk_fs) + ubifs_dbg.chk_fs = val; + else if (dent == dfs_tst_rcvry) + ubifs_dbg.tst_rcvry = val; + else + return -EINVAL; + + return count; +} + +static const struct file_operations dfs_global_fops = { + .read = dfs_global_file_read, + .write = dfs_global_file_write, + .owner = THIS_MODULE, + .llseek = no_llseek, +}; + /** * dbg_debugfs_init - initialize debugfs file-system. * @@ -3088,15 +3156,67 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c) */ int dbg_debugfs_init(void) { - dfs_rootdir = debugfs_create_dir("ubifs", NULL); - if (IS_ERR_OR_NULL(dfs_rootdir)) { - int err = dfs_rootdir ? 
PTR_ERR(dfs_rootdir) : -ENODEV; - ubifs_err("cannot create \"ubifs\" debugfs directory, " - "error %d\n", err); - return err; - } + int err; + const char *fname; + struct dentry *dent; + + fname = "ubifs"; + dent = debugfs_create_dir(fname, NULL); + if (IS_ERR_OR_NULL(dent)) + goto out; + dfs_rootdir = dent; + + fname = "chk_general"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL, + &dfs_global_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + dfs_chk_gen = dent; + + fname = "chk_index"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL, + &dfs_global_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + dfs_chk_index = dent; + + fname = "chk_orphans"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL, + &dfs_global_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + dfs_chk_orph = dent; + + fname = "chk_lprops"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL, + &dfs_global_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + dfs_chk_lprops = dent; + + fname = "chk_fs"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL, + &dfs_global_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + dfs_chk_fs = dent; + + fname = "tst_recovery"; + dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL, + &dfs_global_fops); + if (IS_ERR_OR_NULL(dent)) + goto out_remove; + dfs_tst_rcvry = dent; return 0; + +out_remove: + debugfs_remove_recursive(dfs_rootdir); +out: + err = dent ? PTR_ERR(dent) : -ENODEV; + ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n", + fname, err); + return err; } /** @@ -3104,7 +3224,7 @@ int dbg_debugfs_init(void) */ void dbg_debugfs_exit(void) { - debugfs_remove(dfs_rootdir); + debugfs_remove_recursive(dfs_rootdir); } /** diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 8c3bdd378037..43ec5d120d7c 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -126,6 +126,25 @@ struct ubifs_debug_info { struct dentry *dfs_tst_rcvry; }; +/** + * ubifs_global_debug_info - global (not per-FS) UBIFS debugging information. + * + * @chk_gen: if general extra checks are enabled + * @chk_index: if index xtra checks are enabled + * @chk_orph: if orphans extra checks are enabled + * @chk_lprops: if lprops extra checks are enabled + * @chk_fs: if UBIFS contents extra checks are enabled + * @tst_rcvry: if UBIFS recovery testing mode enabled + */ +struct ubifs_global_debug_info { + unsigned int chk_gen:1; + unsigned int chk_index:1; + unsigned int chk_orph:1; + unsigned int chk_lprops:1; + unsigned int chk_fs:1; + unsigned int tst_rcvry:1; +}; + #define ubifs_assert(expr) do { \ if (unlikely(!(expr))) { \ printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \ @@ -162,6 +181,8 @@ const char *dbg_key_str1(const struct ubifs_info *c, #define DBGKEY(key) dbg_key_str0(c, (key)) #define DBGKEY1(key) dbg_key_str1(c, (key)) +extern spinlock_t dbg_lock; + #define ubifs_dbg_msg(type, fmt, ...) do { \ spin_lock(&dbg_lock); \ pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \ @@ -197,31 +218,31 @@ const char *dbg_key_str1(const struct ubifs_info *c, /* Additional recovery messages */ #define dbg_rcvry(fmt, ...) 
ubifs_dbg_msg("rcvry", fmt, ##__VA_ARGS__) -extern spinlock_t dbg_lock; +extern struct ubifs_global_debug_info ubifs_dbg; static inline int dbg_is_chk_gen(const struct ubifs_info *c) { - return c->dbg->chk_gen; + return !!(ubifs_dbg.chk_gen || c->dbg->chk_gen); } static inline int dbg_is_chk_index(const struct ubifs_info *c) { - return c->dbg->chk_index; + return !!(ubifs_dbg.chk_index || c->dbg->chk_index); } static inline int dbg_is_chk_orph(const struct ubifs_info *c) { - return c->dbg->chk_orph; + return !!(ubifs_dbg.chk_orph || c->dbg->chk_orph); } static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { - return c->dbg->chk_lprops; + return !!(ubifs_dbg.chk_lprops || c->dbg->chk_lprops); } static inline int dbg_is_chk_fs(const struct ubifs_info *c) { - return c->dbg->chk_fs; + return !!(ubifs_dbg.chk_fs || c->dbg->chk_fs); } static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { - return c->dbg->tst_rcvry; + return !!(ubifs_dbg.tst_rcvry || c->dbg->tst_rcvry); } int ubifs_debugging_init(struct ubifs_info *c); -- cgit v1.2.3 From 891a54a153646f9b16bffe5df6cb74cb3f1e9dc6 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 11:32:21 +0300 Subject: UBIFS: remove unused and unneeded debugging function This patch contains several minor clean-up and preparational cahnges. 1. Remove 'dbg_read()', 'dbg_write()', 'dbg_change()', and 'dbg_leb_erase()' functions as they are not used. 2. Remove 'dbg_leb_read()' and 'dbg_is_mapped()' as they are not really needed, it is fine to let reads go through in failure mode. 3. Rename 'offset' argument to 'offs' to be consistent with the rest of UBIFS code. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 33 ++------------------------------- fs/ubifs/debug.h | 27 +-------------------------- 2 files changed, 3 insertions(+), 57 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 15bec635bf3e..059c586b5ef8 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2718,16 +2718,8 @@ static void cut_data(const void *buf, int len) p[i] = 0xff; } -int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, - int len, int check) -{ - if (in_failure_mode(desc)) - return -EROFS; - return ubi_leb_read(desc, lnum, buf, offset, len, check); -} - int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, - int offset, int len, int dtype) + int offs, int len, int dtype) { int err, failing; @@ -2736,7 +2728,7 @@ int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, failing = do_fail(desc, lnum, 1); if (failing) cut_data(buf, len); - err = ubi_leb_write(desc, lnum, buf, offset, len, dtype); + err = ubi_leb_write(desc, lnum, buf, offs, len, dtype); if (err) return err; if (failing) @@ -2759,20 +2751,6 @@ int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, return 0; } -int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum) -{ - int err; - - if (do_fail(desc, lnum, 0)) - return -EROFS; - err = ubi_leb_erase(desc, lnum); - if (err) - return err; - if (do_fail(desc, lnum, 0)) - return -EROFS; - return 0; -} - int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum) { int err; @@ -2787,13 +2765,6 @@ int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum) return 0; } -int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum) -{ - if (in_failure_mode(desc)) - return -EROFS; - return ubi_is_mapped(desc, lnum); -} - int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) { int err; diff --git a/fs/ubifs/debug.h 
b/fs/ubifs/debug.h index 43ec5d120d7c..e1dcfd29c9aa 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -302,44 +302,19 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head); int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); #ifndef UBIFS_DBG_PRESERVE_UBI -#define ubi_leb_read dbg_leb_read #define ubi_leb_write dbg_leb_write #define ubi_leb_change dbg_leb_change -#define ubi_leb_erase dbg_leb_erase #define ubi_leb_unmap dbg_leb_unmap -#define ubi_is_mapped dbg_is_mapped #define ubi_leb_map dbg_leb_map #endif -int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, - int len, int check); int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, - int offset, int len, int dtype); + int offs, int len, int dtype); int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, int len, int dtype); -int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum); int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum); -int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum); int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); -static inline int dbg_read(struct ubi_volume_desc *desc, int lnum, char *buf, - int offset, int len) -{ - return dbg_leb_read(desc, lnum, buf, offset, len, 0); -} - -static inline int dbg_write(struct ubi_volume_desc *desc, int lnum, - const void *buf, int offset, int len) -{ - return dbg_leb_write(desc, lnum, buf, offset, len, UBI_UNKNOWN); -} - -static inline int dbg_change(struct ubi_volume_desc *desc, int lnum, - const void *buf, int len) -{ - return dbg_leb_change(desc, lnum, buf, len, UBI_UNKNOWN); -} - /* Debugfs-related stuff */ int dbg_debugfs_init(void); void dbg_debugfs_exit(void); -- cgit v1.2.3 From d033c98b17ecf30d64d83d96938ce7bfb47f7520 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 13:16:08 +0300 Subject: UBIFS: always print stacktrace when switching to R/O mode When switching to R/O mode due to an I/O error, always dump the stack, not only when debugging is enabled. Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 3be645e012c9..48e16804ca57 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -86,7 +86,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) c->no_chk_data_crc = 0; c->vfs_sb->s_flags |= MS_RDONLY; ubifs_warn("switched to read-only mode, error %d", err); - dbg_dump_stack(); + dump_stack(); } } -- cgit v1.2.3 From 83cef708c606f46a2b527af025acb3d24555f0c4 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 13:45:09 +0300 Subject: UBIFS: introduce more I/O helpers Introduce the following I/O helper functions: 'ubifs_leb_read()', 'ubifs_leb_write()', 'ubifs_leb_change()', 'ubifs_leb_unmap()', 'ubifs_leb_map()', 'ubifs_is_mapped(). The idea is to wrap all UBI I/O functions in order to encapsulate various assertions and error path handling (error message, stack dump, switching to R/O mode). And there are some other benefits of this which will be used in the following patches. This patch does not switch whole UBIFS to use these functions yet. 
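Before the full diff, it may help to see the shape that all of these wrappers share. The sketch below simply restates the 'ubifs_leb_write()' implementation from the io.c hunk further down, with explanatory comments added:

        int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
                            int len, int dtype)
        {
                int err;

                /* Writing must never happen on R/O media or an R/O mounted FS */
                ubifs_assert(!c->ro_media && !c->ro_mount);
                if (c->ro_error)
                        return -EROFS;

                /* In recovery-testing mode, route the I/O via the debugging version */
                if (!dbg_is_tst_rcvry(c))
                        err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
                else
                        err = dbg_leb_write(c->ubi, lnum, buf, offs, len, dtype);

                if (err) {
                        /* Centralized error path: message, switch to R/O mode, stack dump */
                        ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
                                  len, lnum, offs, err);
                        ubifs_ro_mode(c, err);
                        dbg_dump_stack();
                }
                return err;
        }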
Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.h | 16 +++++++ fs/ubifs/io.c | 117 ++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ubifs/misc.h | 80 ----------------------------------- fs/ubifs/recovery.c | 9 ++-- fs/ubifs/ubifs.h | 13 +++++- 5 files changed, 148 insertions(+), 87 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index e1dcfd29c9aa..b0b005b97279 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -244,6 +244,10 @@ static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { return !!(ubifs_dbg.tst_rcvry || c->dbg->tst_rcvry); } +static inline int dbg_is_power_cut(const struct ubifs_info *c) +{ + return !!c->dbg->failure_mode; +} int ubifs_debugging_init(struct ubifs_info *c); void ubifs_debugging_exit(struct ubifs_info *c); @@ -445,12 +449,24 @@ static inline int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head) { return 0; } +static inline int dbg_leb_write(struct ubi_volume_desc *desc, + int lnum, const void *buf, + int offset, int len, int dtype) { return 0; } +static inline int dbg_leb_change(struct ubi_volume_desc *desc, + int lnum, const void *buf, + int len, int dtype) { return 0; } +static inline int dbg_leb_unmap(struct ubi_volume_desc *desc, + int lnum) { return 0; } +static inline int dbg_leb_map(struct ubi_volume_desc *desc, + int lnum, int dtype) { return 0; } + static inline int dbg_is_chk_gen(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_index(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_orph(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_fs(const struct ubifs_info *c) { return 0; } static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { return 0; } +static inline int dbg_is_power_cut(const struct ubifs_info *c) { return 0; } static inline int dbg_debugfs_init(void) { return 0; } static inline void dbg_debugfs_exit(void) { return; } diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 48e16804ca57..239899d7bf3f 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -90,6 +90,123 @@ void ubifs_ro_mode(struct ubifs_info *c, int err) } } +/* + * Below are simple wrappers over UBI I/O functions which include some + * additional checks and UBIFS debugging stuff. See corresponding UBI function + * for more information. + */ + +int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, + int len, int even_ebadmsg) +{ + int err; + + err = ubi_read(c->ubi, lnum, buf, offs, len); + /* + * In case of %-EBADMSG print the error message only if the + * @even_ebadmsg is true. 
+ */ + if (err && (err != -EBADMSG || even_ebadmsg)) { + ubifs_err("reading %d bytes from LEB %d:%d failed, error %d", + len, lnum, offs, err); + dbg_dump_stack(); + } + return err; +} + +int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, + int len, int dtype) +{ + int err; + + ubifs_assert(!c->ro_media && !c->ro_mount); + if (c->ro_error) + return -EROFS; + if (!dbg_is_tst_rcvry(c)) + err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); + else + err = dbg_leb_write(c->ubi, lnum, buf, offs, len, dtype); + if (err) { + ubifs_err("writing %d bytes to LEB %d:%d failed, error %d", + len, lnum, offs, err); + ubifs_ro_mode(c, err); + dbg_dump_stack(); + } + return err; +} + +int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, + int dtype) +{ + int err; + + ubifs_assert(!c->ro_media && !c->ro_mount); + if (c->ro_error) + return -EROFS; + if (!dbg_is_tst_rcvry(c)) + err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); + else + err = dbg_leb_change(c->ubi, lnum, buf, len, dtype); + if (err) { + ubifs_err("changing %d bytes in LEB %d failed, error %d", + len, lnum, err); + ubifs_ro_mode(c, err); + dbg_dump_stack(); + } + return err; +} + +int ubifs_leb_unmap(struct ubifs_info *c, int lnum) +{ + int err; + + ubifs_assert(!c->ro_media && !c->ro_mount); + if (c->ro_error) + return -EROFS; + if (!dbg_is_tst_rcvry(c)) + err = ubi_leb_unmap(c->ubi, lnum); + else + err = dbg_leb_unmap(c->ubi, lnum); + if (err) { + ubifs_err("unmap LEB %d failed, error %d", lnum, err); + ubifs_ro_mode(c, err); + dbg_dump_stack(); + } + return err; +} + +int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype) +{ + int err; + + ubifs_assert(!c->ro_media && !c->ro_mount); + if (c->ro_error) + return -EROFS; + if (!dbg_is_tst_rcvry(c)) + err = ubi_leb_map(c->ubi, lnum, dtype); + else + err = dbg_leb_map(c->ubi, lnum, dtype); + if (err) { + ubifs_err("mapping LEB %d failed, error %d", lnum, err); + ubifs_ro_mode(c, err); + dbg_dump_stack(); + } + return err; +} + +int ubifs_is_mapped(const struct ubifs_info *c, int lnum) +{ + int err; + + err = ubi_is_mapped(c->ubi, lnum); + if (err < 0) { + ubifs_err("ubi_is_mapped failed for LEB %d, error %d", + lnum, err); + dbg_dump_stack(); + } + return err; +} + /** * ubifs_check_node - check node. * @c: UBIFS file-system description object diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h index 160cd909e957..ee7cb5ebb6e8 100644 --- a/fs/ubifs/misc.h +++ b/fs/ubifs/misc.h @@ -144,86 +144,6 @@ static inline int ubifs_wbuf_sync(struct ubifs_wbuf *wbuf) return err; } -/** - * ubifs_leb_unmap - unmap an LEB. - * @c: UBIFS file-system description object - * @lnum: LEB number to unmap - * - * This function returns %0 on success and a negative error code on failure. - */ -static inline int ubifs_leb_unmap(const struct ubifs_info *c, int lnum) -{ - int err; - - ubifs_assert(!c->ro_media && !c->ro_mount); - if (c->ro_error) - return -EROFS; - err = ubi_leb_unmap(c->ubi, lnum); - if (err) { - ubifs_err("unmap LEB %d failed, error %d", lnum, err); - return err; - } - - return 0; -} - -/** - * ubifs_leb_write - write to a LEB. - * @c: UBIFS file-system description object - * @lnum: LEB number to write - * @buf: buffer to write from - * @offs: offset within LEB to write to - * @len: length to write - * @dtype: data type - * - * This function returns %0 on success and a negative error code on failure. 
- */ -static inline int ubifs_leb_write(const struct ubifs_info *c, int lnum, - const void *buf, int offs, int len, int dtype) -{ - int err; - - ubifs_assert(!c->ro_media && !c->ro_mount); - if (c->ro_error) - return -EROFS; - err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); - if (err) { - ubifs_err("writing %d bytes at %d:%d, error %d", - len, lnum, offs, err); - return err; - } - - return 0; -} - -/** - * ubifs_leb_change - atomic LEB change. - * @c: UBIFS file-system description object - * @lnum: LEB number to write - * @buf: buffer to write from - * @len: length to write - * @dtype: data type - * - * This function returns %0 on success and a negative error code on failure. - */ -static inline int ubifs_leb_change(const struct ubifs_info *c, int lnum, - const void *buf, int len, int dtype) -{ - int err; - - ubifs_assert(!c->ro_media && !c->ro_mount); - if (c->ro_error) - return -EROFS; - err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); - if (err) { - ubifs_err("changing %d bytes in LEB %d, error %d", - len, lnum, err); - return err; - } - - return 0; -} - /** * ubifs_encode_dev - encode device node IDs. * @dev: UBIFS device node information diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 783d8e0beb76..c59154980719 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -919,8 +919,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, * * This function returns %0 on success and a negative error code on failure. */ -static int recover_head(const struct ubifs_info *c, int lnum, int offs, - void *sbuf) +static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf) { int len = c->max_write_size, err; @@ -962,7 +961,7 @@ static int recover_head(const struct ubifs_info *c, int lnum, int offs, * * This function returns %0 on success and a negative error code on failure. */ -int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf) +int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf) { int err; @@ -993,7 +992,7 @@ int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf) * * This function returns %0 on success and a negative error code on failure. */ -static int clean_an_unclean_leb(const struct ubifs_info *c, +static int clean_an_unclean_leb(struct ubifs_info *c, struct ubifs_unclean_leb *ucleb, void *sbuf) { int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1; @@ -1089,7 +1088,7 @@ static int clean_an_unclean_leb(const struct ubifs_info *c, * * This function returns %0 on success and a negative error code on failure. 
*/ -int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf) +int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf) { dbg_rcvry("recovery"); while (!list_empty(&c->unclean_leb_list)) { diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 3304aad04885..702b79258e30 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1468,6 +1468,15 @@ extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; /* io.c */ void ubifs_ro_mode(struct ubifs_info *c, int err); +int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs, + int len, int even_ebadmsg); +int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, + int len, int dtype); +int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, + int dtype); +int ubifs_leb_unmap(struct ubifs_info *c, int lnum); +int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype); +int ubifs_is_mapped(const struct ubifs_info *c, int lnum); int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len); int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, int dtype); @@ -1747,8 +1756,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf, int jhead); struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf); -int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf); -int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf); +int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf); +int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf); int ubifs_rcvry_gc_commit(struct ubifs_info *c); int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key, int deletion, loff_t new_size); -- cgit v1.2.3 From d304820a1f6cdacab691bbcb7faa35ec631c6398 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 14:03:25 +0300 Subject: UBIFS: switch to ubifs_leb_read Instead of using the 'ubi_read()' function directly, use the 'ubifs_leb_read()' helper function. This allows us to get rid of several redundant error messages and makes sure that we always have a stack dump on read errors.
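Each call-site conversion follows the same pattern; roughly, using the 'ubifs_read_node()' hunk below as the model (context abbreviated, comments added):

        /* Before: every caller printed its own message on a read failure */
        err = ubi_read(c->ubi, lnum, buf, offs, len);
        if (err && err != -EBADMSG) {
                ubifs_err("cannot read node %d from LEB %d:%d, error %d",
                          type, lnum, offs, err);
                return err;
        }

        /*
         * After: ubifs_leb_read() reports the error and dumps the stack itself;
         * the last argument controls whether -EBADMSG is reported as well.
         */
        err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
        if (err && err != -EBADMSG)
                return err;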
Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 15 ++++----------- fs/ubifs/lpt.c | 17 +++++++++-------- fs/ubifs/lpt_commit.c | 23 +++++++++++------------ fs/ubifs/recovery.c | 17 +++++++++-------- fs/ubifs/replay.c | 3 +-- fs/ubifs/sb.c | 2 +- fs/ubifs/scan.c | 2 +- fs/ubifs/tnc.c | 8 ++++---- 8 files changed, 40 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index 239899d7bf3f..f58f11bed387 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -941,13 +941,9 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, if (rlen > 0) { /* Read everything that goes before write-buffer */ - err = ubi_read(c->ubi, lnum, buf, offs, rlen); - if (err && err != -EBADMSG) { - ubifs_err("failed to read node %d from LEB %d:%d, " - "error %d", type, lnum, offs, err); - dbg_dump_stack(); + err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0); + if (err && err != -EBADMSG) return err; - } } if (type != ch->node_type) { @@ -1002,12 +998,9 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); - err = ubi_read(c->ubi, lnum, buf, offs, len); - if (err && err != -EBADMSG) { - ubifs_err("cannot read node %d from LEB %d:%d, error %d", - type, lnum, offs, err); + err = ubifs_leb_read(c, lnum, buf, offs, len, 0); + if (err && err != -EBADMSG) return err; - } if (type != ch->node_type) { ubifs_err("bad node type (%d but expected %d)", diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index ab91ca628455..8b9ee9fe0a41 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -1222,7 +1222,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) if (c->big_lpt) nnode->num = calc_nnode_num_from_parent(c, parent, iip); } else { - err = ubi_read(c->ubi, lnum, buf, offs, c->nnode_sz); + err = ubifs_leb_read(c, lnum, buf, offs, c->nnode_sz, 1); if (err) goto out; err = ubifs_unpack_nnode(c, buf, nnode); @@ -1291,7 +1291,7 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) lprops->flags = ubifs_categorize_lprops(c, lprops); } } else { - err = ubi_read(c->ubi, lnum, buf, offs, c->pnode_sz); + err = ubifs_leb_read(c, lnum, buf, offs, c->pnode_sz, 1); if (err) goto out; err = unpack_pnode(c, buf, pnode); @@ -1333,7 +1333,7 @@ static int read_ltab(struct ubifs_info *c) buf = vmalloc(c->ltab_sz); if (!buf) return -ENOMEM; - err = ubi_read(c->ubi, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz); + err = ubifs_leb_read(c, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz, 1); if (err) goto out; err = unpack_ltab(c, buf); @@ -1356,7 +1356,8 @@ static int read_lsave(struct ubifs_info *c) buf = vmalloc(c->lsave_sz); if (!buf) return -ENOMEM; - err = ubi_read(c->ubi, c->lsave_lnum, buf, c->lsave_offs, c->lsave_sz); + err = ubifs_leb_read(c, c->lsave_lnum, buf, c->lsave_offs, + c->lsave_sz, 1); if (err) goto out; err = unpack_lsave(c, buf); @@ -1816,8 +1817,8 @@ static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c, if (c->big_lpt) nnode->num = calc_nnode_num_from_parent(c, parent, iip); } else { - err = ubi_read(c->ubi, branch->lnum, buf, branch->offs, - c->nnode_sz); + err = ubifs_leb_read(c, branch->lnum, buf, branch->offs, + c->nnode_sz, 1); if (err) return ERR_PTR(err); err = ubifs_unpack_nnode(c, buf, nnode); @@ -1885,8 +1886,8 @@ static struct ubifs_pnode *scan_get_pnode(struct ubifs_info *c, ubifs_assert(branch->lnum >= c->lpt_first && branch->lnum <= c->lpt_last); 
ubifs_assert(branch->offs >= 0 && branch->offs < c->leb_size); - err = ubi_read(c->ubi, branch->lnum, buf, branch->offs, - c->pnode_sz); + err = ubifs_leb_read(c, branch->lnum, buf, branch->offs, + c->pnode_sz, 1); if (err) return ERR_PTR(err); err = unpack_pnode(c, buf, pnode); diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index f13addf72e89..cddd6bd214f4 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1161,11 +1161,11 @@ static int lpt_gc_lnum(struct ubifs_info *c, int lnum) void *buf = c->lpt_buf; dbg_lp("LEB %d", lnum); - err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); - if (err) { - ubifs_err("cannot read LEB %d, error %d", lnum, err); + + err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); + if (err) return err; - } + while (1) { if (!is_a_node(c, buf, len)) { int pad_len; @@ -1651,11 +1651,11 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum) } dbg_lp("LEB %d", lnum); - err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); - if (err) { - dbg_msg("ubi_read failed, LEB %d, error %d", lnum, err); + + err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); + if (err) goto out; - } + while (1) { if (!is_a_node(c, p, len)) { int i, pad_len; @@ -1902,11 +1902,10 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) return; } - err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size); - if (err) { - ubifs_err("cannot read LEB %d, error %d", lnum, err); + err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1); + if (err) goto out; - } + while (1) { offs = c->leb_size - len; if (!is_a_node(c, p, len)) { diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index c59154980719..f28070cb00c6 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -117,7 +117,7 @@ static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf, if (!sbuf) return -ENOMEM; - err = ubi_read(c->ubi, lnum, sbuf, 0, c->leb_size); + err = ubifs_leb_read(c, lnum, sbuf, 0, c->leb_size, 0); if (err && err != -EBADMSG) goto out_free; @@ -539,8 +539,8 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, int len = ALIGN(endpt, c->min_io_size); if (start) { - err = ubi_read(c->ubi, lnum, sleb->buf, 0, - start); + err = ubifs_leb_read(c, lnum, sleb->buf, 0, + start, 1); if (err) return err; } @@ -819,7 +819,8 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs, return -ENOMEM; if (c->leb_size - offs < UBIFS_CS_NODE_SZ) goto out_err; - err = ubi_read(c->ubi, lnum, (void *)cs_node, offs, UBIFS_CS_NODE_SZ); + err = ubifs_leb_read(c, lnum, (void *)cs_node, offs, + UBIFS_CS_NODE_SZ, 0); if (err && err != -EBADMSG) goto out_free; ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0); @@ -930,12 +931,12 @@ static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf) return 0; /* Read at the head location and check it is empty flash */ - err = ubi_read(c->ubi, lnum, sbuf, offs, len); + err = ubifs_leb_read(c, lnum, sbuf, offs, len, 1); if (err || !is_empty(sbuf, len)) { dbg_rcvry("cleaning head at %d:%d", lnum, offs); if (offs == 0) return ubifs_leb_unmap(c, lnum); - err = ubi_read(c->ubi, lnum, sbuf, 0, offs); + err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1); if (err) return err; return ubi_leb_change(c->ubi, lnum, sbuf, offs, UBI_UNKNOWN); @@ -1008,7 +1009,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c, return 0; } - err = ubi_read(c->ubi, lnum, buf, offs, len); + err = ubifs_leb_read(c, lnum, buf, offs, len, 0); if (err && err != -EBADMSG) return err; @@ -1453,7 +1454,7 @@ 
static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e) if (i_size >= e->d_size) return 0; /* Read the LEB */ - err = ubi_read(c->ubi, lnum, c->sbuf, 0, c->leb_size); + err = ubifs_leb_read(c, lnum, c->sbuf, 0, c->leb_size, 1); if (err) goto out; /* Change the size field and recalculate the CRC */ diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 5e97161ce4d3..ccabaf1164b3 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -523,8 +523,7 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud) if (!list_is_last(&next->list, &jh->buds_list)) return 0; - err = ubi_read(c->ubi, next->lnum, (char *)&data, - next->start, 4); + err = ubifs_leb_read(c, next->lnum, (char *)&data, next->start, 4, 1); if (err) return 0; diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index c606f010e8df..701dccc1d6de 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -678,7 +678,7 @@ static int fixup_leb(struct ubifs_info *c, int lnum, int len) } dbg_mnt("fixup LEB %d, data len %d", lnum, len); - err = ubi_read(c->ubi, lnum, c->sbuf, 0, len); + err = ubifs_leb_read(c, lnum, c->sbuf, 0, len, 1); if (err) return err; diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c index c7df916dce97..37383e8011b1 100644 --- a/fs/ubifs/scan.c +++ b/fs/ubifs/scan.c @@ -148,7 +148,7 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, INIT_LIST_HEAD(&sleb->nodes); sleb->buf = sbuf; - err = ubi_read(c->ubi, lnum, sbuf + offs, offs, c->leb_size - offs); + err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0); if (err && err != -EBADMSG) { ubifs_err("cannot read %d bytes from LEB %d:%d," " error %d", c->leb_size - offs, lnum, offs, err); diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 526b63cd3333..066738647685 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -462,7 +462,7 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type, dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); - err = ubi_read(c->ubi, lnum, buf, offs, len); + err = ubifs_leb_read(c, lnum, buf, offs, len, 1); if (err) { ubifs_err("cannot read node type %d from LEB %d:%d, error %d", type, lnum, offs, err); @@ -1666,7 +1666,7 @@ static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum, if (!overlap) { /* We may safely unlock the write-buffer and read the data */ spin_unlock(&wbuf->lock); - return ubi_read(c->ubi, lnum, buf, offs, len); + return ubifs_leb_read(c, lnum, buf, offs, len, 0); } /* Don't read under wbuf */ @@ -1680,7 +1680,7 @@ static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum, if (rlen > 0) /* Read everything that goes before write-buffer */ - return ubi_read(c->ubi, lnum, buf, offs, rlen); + return ubifs_leb_read(c, lnum, buf, offs, rlen, 0); return 0; } @@ -1767,7 +1767,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu) if (wbuf) err = read_wbuf(wbuf, bu->buf, len, lnum, offs); else - err = ubi_read(c->ubi, lnum, bu->buf, offs, len); + err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0); /* Check for a race with GC */ if (maybe_leb_gced(c, lnum, bu->gc_seq)) -- cgit v1.2.3 From 987226a5d3a356792650f8e9028132a79815f6ef Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 14:12:10 +0300 Subject: UBIFS: switch to ubifs_leb_write Stop using 'ubi_leb_write()' directly and switch to the 'ubifs_leb_write()' helper. 
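The point of the helper is that error reporting, the switch to read-only mode and the debugging hook all live in one place instead of being repeated at every call site. A rough sketch of the wrapper's shape, pieced together from the fs/ubifs/io.c hunks further down this series (the exact body here is an assumption, not the verbatim implementation); the same pattern applies to the ubifs_leb_change/map/unmap helpers used by the follow-up patches:

int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf,
		    int offs, int len, int dtype)
{
	int err;

	/* Normal path goes to UBI; the debug path emulates power cuts. */
	if (!dbg_is_tst_rcvry(c))
		err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
	else
		err = dbg_leb_write(c, lnum, buf, offs, len, dtype);
	if (err) {
		ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
			  len, lnum, offs, err);
		ubifs_ro_mode(c, err);	/* assumed: switch to read-only on failure */
		dump_stack();		/* assumed */
	}
	return err;
}

With the failure handling owned by the wrapper, the call sites below can drop their own ubifs_err()/dbg_dump_stack() blocks and simply return the error.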
Signed-off-by: Artem Bityutskiy --- fs/ubifs/io.c | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index f58f11bed387..a0c5fb4b6fa8 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -523,14 +523,10 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) dirt = sync_len - wbuf->used; if (dirt) ubifs_pad(c, wbuf->buf + wbuf->used, dirt); - err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, - sync_len, wbuf->dtype); - if (err) { - ubifs_err("cannot write %d bytes to LEB %d:%d", - sync_len, wbuf->lnum, wbuf->offs); - dbg_dump_stack(); + err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len, + wbuf->dtype); + if (err) return err; - } spin_lock(&wbuf->lock); wbuf->offs += sync_len; @@ -722,9 +718,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) if (aligned_len == wbuf->avail) { dbg_io("flush jhead %s wbuf to LEB %d:%d", dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); - err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, - wbuf->offs, wbuf->size, - wbuf->dtype); + err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, + wbuf->offs, wbuf->size, + wbuf->dtype); if (err) goto out; @@ -759,8 +755,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) dbg_io("flush jhead %s wbuf to LEB %d:%d", dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); - err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, - wbuf->size, wbuf->dtype); + err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, + wbuf->size, wbuf->dtype); if (err) goto out; @@ -778,8 +774,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) */ dbg_io("write %d bytes to LEB %d:%d", wbuf->size, wbuf->lnum, wbuf->offs); - err = ubi_leb_write(c->ubi, wbuf->lnum, buf, wbuf->offs, - wbuf->size, wbuf->dtype); + err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs, + wbuf->size, wbuf->dtype); if (err) goto out; @@ -800,8 +796,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) n <<= c->max_write_shift; dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, wbuf->offs); - err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, - wbuf->offs, n, wbuf->dtype); + err = ubifs_leb_write(c, wbuf->lnum, buf + written, + wbuf->offs, n, wbuf->dtype); if (err) goto out; wbuf->offs += n; @@ -883,13 +879,9 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, return -EROFS; ubifs_prepare_node(c, buf, len, 1); - err = ubi_leb_write(c->ubi, lnum, buf, offs, buf_len, dtype); - if (err) { - ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", - buf_len, lnum, offs, err); + err = ubifs_leb_write(c, lnum, buf, offs, buf_len, dtype); + if (err) dbg_dump_node(c, buf); - dbg_dump_stack(); - } return err; } -- cgit v1.2.3 From d3b2578f56e0a77b9e261d83e9b5a0a666b82980 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 14:22:05 +0300 Subject: UBIFS: switch to I/O helpers Switch the rest of direct UBI calls to UBIFS helper functions. 
Signed-off-by: Artem Bityutskiy --- fs/ubifs/log.c | 4 +--- fs/ubifs/lpt.c | 16 ++++++++-------- fs/ubifs/recovery.c | 14 +++++++------- fs/ubifs/sb.c | 4 ++-- fs/ubifs/super.c | 2 +- 5 files changed, 19 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c index fabfb53c4fee..f9fd068d1ae0 100644 --- a/fs/ubifs/log.c +++ b/fs/ubifs/log.c @@ -262,7 +262,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) * an unclean reboot, because the target LEB might have been * unmapped, but not yet physically erased. */ - err = ubi_leb_map(c->ubi, bud->lnum, UBI_SHORTTERM); + err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM); if (err) goto out_unlock; } @@ -283,8 +283,6 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs) return 0; out_unlock: - if (err != -EAGAIN) - ubifs_ro_mode(c, err); mutex_unlock(&c->log_mutex); kfree(ref); kfree(bud); diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index 8b9ee9fe0a41..6189c74d97f0 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -701,8 +701,8 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); - err = ubi_leb_change(c->ubi, lnum++, buf, alen, - UBI_SHORTTERM); + err = ubifs_leb_change(c, lnum++, buf, alen, + UBI_SHORTTERM); if (err) goto out; p = buf; @@ -732,8 +732,8 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); - err = ubi_leb_change(c->ubi, lnum++, buf, alen, - UBI_SHORTTERM); + err = ubifs_leb_change(c, lnum++, buf, alen, + UBI_SHORTTERM); if (err) goto out; p = buf; @@ -780,8 +780,8 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); - err = ubi_leb_change(c->ubi, lnum++, buf, alen, - UBI_SHORTTERM); + err = ubifs_leb_change(c, lnum++, buf, alen, + UBI_SHORTTERM); if (err) goto out; p = buf; @@ -806,7 +806,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, alen = ALIGN(len, c->min_io_size); set_ltab(c, lnum, c->leb_size - alen, alen - len); memset(p, 0xff, alen - len); - err = ubi_leb_change(c->ubi, lnum++, buf, alen, UBI_SHORTTERM); + err = ubifs_leb_change(c, lnum++, buf, alen, UBI_SHORTTERM); if (err) goto out; p = buf; @@ -826,7 +826,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first, /* Write remaining buffer */ memset(p, 0xff, alen - len); - err = ubi_leb_change(c->ubi, lnum, buf, alen, UBI_SHORTTERM); + err = ubifs_leb_change(c, lnum, buf, alen, UBI_SHORTTERM); if (err) goto out; diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index f28070cb00c6..51bcf4227b29 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -213,10 +213,10 @@ static int write_rcvrd_mst_node(struct ubifs_info *c, mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY); ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); - err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM); + err = ubifs_leb_change(c, lnum, mst, sz, UBI_SHORTTERM); if (err) goto out; - err = ubi_leb_change(c->ubi, lnum + 1, mst, sz, UBI_SHORTTERM); + err = ubifs_leb_change(c, lnum + 1, mst, sz, UBI_SHORTTERM); if (err) goto out; out: @@ -554,8 +554,8 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb, ubifs_pad(c, buf, 
pad_len); } } - err = ubi_leb_change(c->ubi, lnum, sleb->buf, len, - UBI_UNKNOWN); + err = ubifs_leb_change(c, lnum, sleb->buf, len, + UBI_UNKNOWN); if (err) return err; } @@ -939,7 +939,7 @@ static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf) err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1); if (err) return err; - return ubi_leb_change(c->ubi, lnum, sbuf, offs, UBI_UNKNOWN); + return ubifs_leb_change(c, lnum, sbuf, offs, UBI_UNKNOWN); } return 0; @@ -1069,7 +1069,7 @@ static int clean_an_unclean_leb(struct ubifs_info *c, } /* Write back the LEB atomically */ - err = ubi_leb_change(c->ubi, lnum, sbuf, len, UBI_UNKNOWN); + err = ubifs_leb_change(c, lnum, sbuf, len, UBI_UNKNOWN); if (err) return err; @@ -1470,7 +1470,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e) len -= 1; len = ALIGN(len + 1, c->min_io_size); /* Atomically write the fixed LEB back again */ - err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); + err = ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN); if (err) goto out; dbg_rcvry("inode %lu at %d:%d size %lld -> %lld", diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 701dccc1d6de..93d938ad3d2a 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -674,7 +674,7 @@ static int fixup_leb(struct ubifs_info *c, int lnum, int len) if (len == 0) { dbg_mnt("unmap empty LEB %d", lnum); - return ubi_leb_unmap(c->ubi, lnum); + return ubifs_leb_unmap(c, lnum); } dbg_mnt("fixup LEB %d, data len %d", lnum, len); @@ -682,7 +682,7 @@ static int fixup_leb(struct ubifs_info *c, int lnum, int len) if (err) return err; - return ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); + return ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN); } /** diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 285038548da9..b28121278d46 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -914,7 +914,7 @@ static int check_volume_empty(struct ubifs_info *c) c->empty = 1; for (lnum = 0; lnum < c->leb_cnt; lnum++) { - err = ubi_is_mapped(c->ubi, lnum); + err = ubifs_is_mapped(c, lnum); if (unlikely(err < 0)) return err; if (err == 1) { -- cgit v1.2.3 From 0a541b14e819f972d14f29d17cb9fd8b4b71222e Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 14:38:50 +0300 Subject: UBIFS: stop re-defining UBI operations Now when we use UBIFS helpers for all the I/O, we can remove the horrible hack of re-defining UBI I/O functions. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 2 -- fs/ubifs/debug.h | 7 ------- 2 files changed, 9 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 059c586b5ef8..3a7344124f30 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -27,8 +27,6 @@ * various local functions of those subsystems. 
*/ -#define UBIFS_DBG_PRESERVE_UBI - #include "ubifs.h" #include #include diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index b0b005b97279..b5bc09deeb32 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -305,13 +305,6 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head); int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); -#ifndef UBIFS_DBG_PRESERVE_UBI -#define ubi_leb_write dbg_leb_write -#define ubi_leb_change dbg_leb_change -#define ubi_leb_unmap dbg_leb_unmap -#define ubi_leb_map dbg_leb_map -#endif - int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, int offs, int len, int dtype); int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, -- cgit v1.2.3 From f57cb188ccd9c0242111d99b7283eda7827746c4 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 14:51:41 +0300 Subject: UBIFS: remove custom list of superblocks This is a clean-up of the power-cut emulation code - remove the custom list of superblocks which we maintained to find the superblock by the UBI volume descriptor. We do not need that crud any longer, because now we can get the superblock as a function argument. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 105 +++++++++++-------------------------------------------- fs/ubifs/debug.h | 31 ++++++++-------- fs/ubifs/io.c | 8 ++--- 3 files changed, 39 insertions(+), 105 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 3a7344124f30..1e880cedefa4 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2539,16 +2539,7 @@ error_dump: #define chance(n, d) (simple_rand() <= (n) * 32768LL / (d)) -struct failure_mode_info { - struct list_head list; - struct ubifs_info *c; -}; - -static LIST_HEAD(fmi_list); -static DEFINE_SPINLOCK(fmi_lock); - static unsigned int next; - static int simple_rand(void) { if (next == 0) @@ -2557,69 +2548,15 @@ static int simple_rand(void) return (next >> 16) & 32767; } -static void failure_mode_init(struct ubifs_info *c) -{ - struct failure_mode_info *fmi; - - fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS); - if (!fmi) { - ubifs_err("Failed to register failure mode - no memory"); - return; - } - fmi->c = c; - spin_lock(&fmi_lock); - list_add_tail(&fmi->list, &fmi_list); - spin_unlock(&fmi_lock); -} - -static void failure_mode_exit(struct ubifs_info *c) -{ - struct failure_mode_info *fmi, *tmp; - - spin_lock(&fmi_lock); - list_for_each_entry_safe(fmi, tmp, &fmi_list, list) - if (fmi->c == c) { - list_del(&fmi->list); - kfree(fmi); - } - spin_unlock(&fmi_lock); -} - -static struct ubifs_info *dbg_find_info(struct ubi_volume_desc *desc) +static int do_fail(struct ubifs_info *c, int lnum, int write) { - struct failure_mode_info *fmi; - - spin_lock(&fmi_lock); - list_for_each_entry(fmi, &fmi_list, list) - if (fmi->c->ubi == desc) { - struct ubifs_info *c = fmi->c; - - spin_unlock(&fmi_lock); - return c; - } - spin_unlock(&fmi_lock); - return NULL; -} - -static int in_failure_mode(struct ubi_volume_desc *desc) -{ - struct ubifs_info *c = dbg_find_info(desc); + struct ubifs_debug_info *d = c->dbg; - if (c && dbg_is_tst_rcvry(c)) - return c->dbg->failure_mode; - return 0; -} + ubifs_assert(dbg_is_tst_rcvry(c)); -static int do_fail(struct ubi_volume_desc *desc, int lnum, int write) -{ - struct ubifs_info *c = dbg_find_info(desc); - struct ubifs_debug_info *d; - - if (!c || !dbg_is_tst_rcvry(c)) - return 0; - d = 
c->dbg; if (d->failure_mode) return 1; + if (!d->fail_cnt) { /* First call - decide delay to failure */ if (chance(1, 2)) { @@ -2716,17 +2653,17 @@ static void cut_data(const void *buf, int len) p[i] = 0xff; } -int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, +int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, int len, int dtype) { int err, failing; - if (in_failure_mode(desc)) + if (c->dbg->failure_mode) return -EROFS; - failing = do_fail(desc, lnum, 1); + failing = do_fail(c, lnum, 1); if (failing) cut_data(buf, len); - err = ubi_leb_write(desc, lnum, buf, offs, len, dtype); + err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); if (err) return err; if (failing) @@ -2734,45 +2671,45 @@ int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, return 0; } -int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, +int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, int dtype) { int err; - if (do_fail(desc, lnum, 1)) + if (do_fail(c, lnum, 1)) return -EROFS; - err = ubi_leb_change(desc, lnum, buf, len, dtype); + err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); if (err) return err; - if (do_fail(desc, lnum, 1)) + if (do_fail(c, lnum, 1)) return -EROFS; return 0; } -int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum) +int dbg_leb_unmap(struct ubifs_info *c, int lnum) { int err; - if (do_fail(desc, lnum, 0)) + if (do_fail(c, lnum, 0)) return -EROFS; - err = ubi_leb_unmap(desc, lnum); + err = ubi_leb_unmap(c->ubi, lnum); if (err) return err; - if (do_fail(desc, lnum, 0)) + if (do_fail(c, lnum, 0)) return -EROFS; return 0; } -int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) +int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype) { int err; - if (do_fail(desc, lnum, 0)) + if (do_fail(c, lnum, 0)) return -EROFS; - err = ubi_leb_map(desc, lnum, dtype); + err = ubi_leb_map(c->ubi, lnum, dtype); if (err) return err; - if (do_fail(desc, lnum, 0)) + if (do_fail(c, lnum, 0)) return -EROFS; return 0; } @@ -3210,7 +3147,6 @@ int ubifs_debugging_init(struct ubifs_info *c) if (!c->dbg) return -ENOMEM; - failure_mode_init(c); return 0; } @@ -3220,7 +3156,6 @@ int ubifs_debugging_init(struct ubifs_info *c) */ void ubifs_debugging_exit(struct ubifs_info *c) { - failure_mode_exit(c); kfree(c->dbg); } diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index b5bc09deeb32..0ab3757ef0e3 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -305,12 +305,12 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode, int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head); int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head); -int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, - int offs, int len, int dtype); -int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, - int len, int dtype); -int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum); -int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); +int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, + int len, int dtype); +int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, + int dtype); +int dbg_leb_unmap(struct ubifs_info *c, int lnum); +int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype); /* Debugfs-related stuff */ int dbg_debugfs_init(void); @@ -442,16 +442,15 @@ static inline int dbg_check_nondata_nodes_order(struct ubifs_info *c, 
struct list_head *head) { return 0; } -static inline int dbg_leb_write(struct ubi_volume_desc *desc, - int lnum, const void *buf, - int offset, int len, int dtype) { return 0; } -static inline int dbg_leb_change(struct ubi_volume_desc *desc, - int lnum, const void *buf, - int len, int dtype) { return 0; } -static inline int dbg_leb_unmap(struct ubi_volume_desc *desc, - int lnum) { return 0; } -static inline int dbg_leb_map(struct ubi_volume_desc *desc, - int lnum, int dtype) { return 0; } +static inline int dbg_leb_write(struct ubifs_info *c, int lnum, + const void *buf, int offset, + int len, int dtype) { return 0; } +static inline int dbg_leb_change(struct ubifs_info *c, int lnum, + const void *buf, int len, + int dtype) { return 0; } +static inline int dbg_leb_unmap(struct ubifs_info *c, int lnum) { return 0; } +static inline int dbg_leb_map(struct ubifs_info *c, int lnum, + int dtype) { return 0; } static inline int dbg_is_chk_gen(const struct ubifs_info *c) { return 0; } static inline int dbg_is_chk_index(const struct ubifs_info *c) { return 0; } diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c index a0c5fb4b6fa8..9228950a658f 100644 --- a/fs/ubifs/io.c +++ b/fs/ubifs/io.c @@ -125,7 +125,7 @@ int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs, if (!dbg_is_tst_rcvry(c)) err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); else - err = dbg_leb_write(c->ubi, lnum, buf, offs, len, dtype); + err = dbg_leb_write(c, lnum, buf, offs, len, dtype); if (err) { ubifs_err("writing %d bytes to LEB %d:%d failed, error %d", len, lnum, offs, err); @@ -146,7 +146,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len, if (!dbg_is_tst_rcvry(c)) err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); else - err = dbg_leb_change(c->ubi, lnum, buf, len, dtype); + err = dbg_leb_change(c, lnum, buf, len, dtype); if (err) { ubifs_err("changing %d bytes in LEB %d failed, error %d", len, lnum, err); @@ -166,7 +166,7 @@ int ubifs_leb_unmap(struct ubifs_info *c, int lnum) if (!dbg_is_tst_rcvry(c)) err = ubi_leb_unmap(c->ubi, lnum); else - err = dbg_leb_unmap(c->ubi, lnum); + err = dbg_leb_unmap(c, lnum); if (err) { ubifs_err("unmap LEB %d failed, error %d", lnum, err); ubifs_ro_mode(c, err); @@ -185,7 +185,7 @@ int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype) if (!dbg_is_tst_rcvry(c)) err = ubi_leb_map(c->ubi, lnum, dtype); else - err = dbg_leb_map(c->ubi, lnum, dtype); + err = dbg_leb_map(c, lnum, dtype); if (err) { ubifs_err("mapping LEB %d failed, error %d", lnum, err); ubifs_ro_mode(c, err); -- cgit v1.2.3 From d27462a518c31a4b1093ad866229f85b2b765e7e Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 15:10:33 +0300 Subject: UBIFS: rename recovery testing variables Since the recovery testing is effectively about emulating power cuts by UBIFS, use "power cut" as the base term for all the related variables and name them correspondingly. This is just a minor clean-up for the sake of readability. 
Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 52 ++++++++++++++++++++++++++++------------------------ fs/ubifs/debug.h | 22 +++++++++++----------- 2 files changed, 39 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 1e880cedefa4..b801af7837e9 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2548,39 +2548,36 @@ static int simple_rand(void) return (next >> 16) & 32767; } -static int do_fail(struct ubifs_info *c, int lnum, int write) +static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) { struct ubifs_debug_info *d = c->dbg; ubifs_assert(dbg_is_tst_rcvry(c)); - if (d->failure_mode) - return 1; - - if (!d->fail_cnt) { - /* First call - decide delay to failure */ + if (!d->pc_cnt) { + /* First call - decide delay to the power cut */ if (chance(1, 2)) { unsigned int delay = 1 << (simple_rand() >> 11); if (chance(1, 2)) { - d->fail_delay = 1; - d->fail_timeout = jiffies + + d->pc_delay = 1; + d->pc_timeout = jiffies + msecs_to_jiffies(delay); ubifs_warn("failing after %ums", delay); } else { - d->fail_delay = 2; - d->fail_cnt_max = delay; + d->pc_delay = 2; + d->pc_cnt_max = delay; ubifs_warn("failing after %u calls", delay); } } - d->fail_cnt += 1; + d->pc_cnt += 1; } /* Determine if failure delay has expired */ - if (d->fail_delay == 1) { - if (time_before(jiffies, d->fail_timeout)) + if (d->pc_delay == 1) { + if (time_before(jiffies, d->pc_timeout)) return 0; - } else if (d->fail_delay == 2) - if (d->fail_cnt++ < d->fail_cnt_max) + } else if (d->pc_delay == 2) + if (d->pc_cnt++ < d->pc_cnt_max) return 0; if (lnum == UBIFS_SB_LNUM) { if (write) { @@ -2638,7 +2635,7 @@ static int do_fail(struct ubifs_info *c, int lnum, int write) ubifs_warn("failing in bud LEB %d commit not running", lnum); } - d->failure_mode = 1; + d->pc_happened = 1; dump_stack(); return 1; } @@ -2658,9 +2655,10 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, { int err, failing; - if (c->dbg->failure_mode) + if (c->dbg->pc_happened) return -EROFS; - failing = do_fail(c, lnum, 1); + + failing = power_cut_emulated(c, lnum, 1); if (failing) cut_data(buf, len); err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype); @@ -2676,12 +2674,14 @@ int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, { int err; - if (do_fail(c, lnum, 1)) + if (c->dbg->pc_happened) + return -EROFS; + if (power_cut_emulated(c, lnum, 1)) return -EROFS; err = ubi_leb_change(c->ubi, lnum, buf, len, dtype); if (err) return err; - if (do_fail(c, lnum, 1)) + if (power_cut_emulated(c, lnum, 1)) return -EROFS; return 0; } @@ -2690,12 +2690,14 @@ int dbg_leb_unmap(struct ubifs_info *c, int lnum) { int err; - if (do_fail(c, lnum, 0)) + if (c->dbg->pc_happened) + return -EROFS; + if (power_cut_emulated(c, lnum, 0)) return -EROFS; err = ubi_leb_unmap(c->ubi, lnum); if (err) return err; - if (do_fail(c, lnum, 0)) + if (power_cut_emulated(c, lnum, 0)) return -EROFS; return 0; } @@ -2704,12 +2706,14 @@ int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype) { int err; - if (do_fail(c, lnum, 0)) + if (c->dbg->pc_happened) + return -EROFS; + if (power_cut_emulated(c, lnum, 0)) return -EROFS; err = ubi_leb_map(c->ubi, lnum, dtype); if (err) return err; - if (do_fail(c, lnum, 0)) + if (power_cut_emulated(c, lnum, 0)) return -EROFS; return 0; } diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h index 0ab3757ef0e3..45174b534377 100644 --- a/fs/ubifs/debug.h +++ b/fs/ubifs/debug.h @@ -44,11 +44,11 @@ typedef int (*dbg_znode_callback)(struct 
ubifs_info *c, * @old_zroot_level: old index root level - used by 'dbg_check_old_index()' * @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()' * - * @failure_mode: failure mode for recovery testing - * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls - * @fail_timeout: time in jiffies when delay of failure mode expires - * @fail_cnt: current number of calls to failure mode I/O functions - * @fail_cnt_max: number of calls by which to delay failure mode + * @pc_happened: non-zero if an emulated power cut happened + * @pc_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls + * @pc_timeout: time in jiffies when delay of failure mode expires + * @pc_cnt: current number of calls to failure mode I/O functions + * @pc_cnt_max: number of calls by which to delay failure mode * * @chk_lpt_sz: used by LPT tree size checker * @chk_lpt_sz2: used by LPT tree size checker @@ -87,11 +87,11 @@ struct ubifs_debug_info { int old_zroot_level; unsigned long long old_zroot_sqnum; - int failure_mode; - int fail_delay; - unsigned long fail_timeout; - unsigned int fail_cnt; - unsigned int fail_cnt_max; + int pc_happened; + int pc_delay; + unsigned long pc_timeout; + unsigned int pc_cnt; + unsigned int pc_cnt_max; long long chk_lpt_sz; long long chk_lpt_sz2; @@ -246,7 +246,7 @@ static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) } static inline int dbg_is_power_cut(const struct ubifs_info *c) { - return !!c->dbg->failure_mode; + return !!c->dbg->pc_happened; } int ubifs_debugging_init(struct ubifs_info *c); -- cgit v1.2.3 From a7fa94a9fe26a4925914083a31b5387bca530eb1 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Fri, 3 Jun 2011 16:20:03 +0300 Subject: UBIFS: improve power cut emulation testing This patch cleans-up and improves the power cut testing: 1. Kill custom 'simple_random()' function and use 'random32()' instead. 2. Make timeout larger 3. When cutting the buffer - fill the end with random data sometimes, not only with 0xFFs. 4. Some times cut in the middle of the buffer, not always at the end. Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 91 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 50 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index b801af7837e9..eef109a1a927 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -27,11 +27,12 @@ * various local functions of those subsystems. 
*/ -#include "ubifs.h" #include #include #include #include +#include +#include "ubifs.h" #ifdef CONFIG_UBIFS_FS_DEBUG @@ -2535,17 +2536,10 @@ error_dump: return 0; } -/* Failure mode for recovery testing */ - -#define chance(n, d) (simple_rand() <= (n) * 32768LL / (d)) - -static unsigned int next; -static int simple_rand(void) +static inline int chance(unsigned int n, unsigned int out_of) { - if (next == 0) - next = current->pid; - next = next * 1103515245 + 12345; - return (next >> 16) & 32767; + return !!((random32() % out_of) + 1 <= n); + } static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) @@ -2557,33 +2551,37 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) if (!d->pc_cnt) { /* First call - decide delay to the power cut */ if (chance(1, 2)) { - unsigned int delay = 1 << (simple_rand() >> 11); + unsigned long delay; if (chance(1, 2)) { d->pc_delay = 1; - d->pc_timeout = jiffies + - msecs_to_jiffies(delay); - ubifs_warn("failing after %ums", delay); + /* Fail withing 1 minute */ + delay = random32() % 60000; + d->pc_timeout = jiffies; + d->pc_timeout += msecs_to_jiffies(delay); + ubifs_warn("failing after %lums", delay); } else { d->pc_delay = 2; + delay = random32() % 10000; + /* Fail within 10000 operations */ d->pc_cnt_max = delay; - ubifs_warn("failing after %u calls", delay); + ubifs_warn("failing after %lu calls", delay); } } + d->pc_cnt += 1; } + /* Determine if failure delay has expired */ - if (d->pc_delay == 1) { - if (time_before(jiffies, d->pc_timeout)) + if (d->pc_delay == 1 && time_before(jiffies, d->pc_timeout)) return 0; - } else if (d->pc_delay == 2) - if (d->pc_cnt++ < d->pc_cnt_max) + if (d->pc_delay == 2 && d->pc_cnt++ < d->pc_cnt_max) return 0; + if (lnum == UBIFS_SB_LNUM) { - if (write) { - if (chance(1, 2)) - return 0; - } else if (chance(19, 20)) + if (write && chance(1, 2)) + return 0; + if (chance(19, 20)) return 0; ubifs_warn("failing in super block LEB %d", lnum); } else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) { @@ -2591,24 +2589,21 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) return 0; ubifs_warn("failing in master LEB %d", lnum); } else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) { - if (write) { - if (chance(99, 100)) - return 0; - } else if (chance(399, 400)) + if (write && chance(99, 100)) + return 0; + if (chance(399, 400)) return 0; ubifs_warn("failing in log LEB %d", lnum); } else if (lnum >= c->lpt_first && lnum <= c->lpt_last) { - if (write) { - if (chance(7, 8)) - return 0; - } else if (chance(19, 20)) + if (write && chance(7, 8)) + return 0; + if (chance(19, 20)) return 0; ubifs_warn("failing in LPT LEB %d", lnum); } else if (lnum >= c->orph_first && lnum <= c->orph_last) { - if (write) { - if (chance(1, 2)) - return 0; - } else if (chance(9, 10)) + if (write && chance(1, 2)) + return 0; + if (chance(9, 10)) return 0; ubifs_warn("failing in orphan LEB %d", lnum); } else if (lnum == c->ihead_lnum) { @@ -2636,18 +2631,32 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write) } d->pc_happened = 1; + ubifs_warn("========== Power cut emulated =========="); dump_stack(); return 1; } -static void cut_data(const void *buf, int len) +static void cut_data(const void *buf, unsigned int len) { - int flen, i; + unsigned int from, to, i, ffs = chance(1, 2); unsigned char *p = (void *)buf; - flen = (len * (long long)simple_rand()) >> 15; - for (i = flen; i < len; i++) - p[i] = 0xff; + from = random32() % (len + 1); + if (chance(1, 2)) + 
to = random32() % (len - from + 1); + else + to = len; + + if (from < to) + ubifs_warn("filled bytes %u-%u with %s", from, to - 1, + ffs ? "0xFFs" : "random data"); + + if (ffs) + for (i = from; i < to; i++) + p[i] = 0xFF; + else + for (i = from; i < to; i++) + p[i] = random32() % 0x100; } int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, -- cgit v1.2.3 From a51cb91d81f8e6fc4e5e08b772cc3ceb13ac9d37 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 6 Jul 2011 12:33:55 +0200 Subject: fs: fix lock initialization locks_alloc_lock() assumed that the allocated struct file_lock is already initialized to zero members. This is only true for the first allocation of the structure, after reuse some of the members will have random values. This will for example result in passing random fl_start values to userspace in fuse for FL_FLOCK locks, which is an information leak at best. Fix by reinitializing those members which may be non-zero after freeing. Signed-off-by: Miklos Szeredi CC: stable@kernel.org Signed-off-by: Linus Torvalds --- fs/locks.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 0a4f50dfadfb..b286539d547a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -160,10 +160,28 @@ EXPORT_SYMBOL_GPL(unlock_flocks); static struct kmem_cache *filelock_cache __read_mostly; +static void locks_init_lock_always(struct file_lock *fl) +{ + fl->fl_next = NULL; + fl->fl_fasync = NULL; + fl->fl_owner = NULL; + fl->fl_pid = 0; + fl->fl_nspid = NULL; + fl->fl_file = NULL; + fl->fl_flags = 0; + fl->fl_type = 0; + fl->fl_start = fl->fl_end = 0; +} + /* Allocate an empty lock structure. */ struct file_lock *locks_alloc_lock(void) { - return kmem_cache_alloc(filelock_cache, GFP_KERNEL); + struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL); + + if (fl) + locks_init_lock_always(fl); + + return fl; } EXPORT_SYMBOL_GPL(locks_alloc_lock); @@ -200,17 +218,9 @@ void locks_init_lock(struct file_lock *fl) INIT_LIST_HEAD(&fl->fl_link); INIT_LIST_HEAD(&fl->fl_block); init_waitqueue_head(&fl->fl_wait); - fl->fl_next = NULL; - fl->fl_fasync = NULL; - fl->fl_owner = NULL; - fl->fl_pid = 0; - fl->fl_nspid = NULL; - fl->fl_file = NULL; - fl->fl_flags = 0; - fl->fl_type = 0; - fl->fl_start = fl->fl_end = 0; fl->fl_ops = NULL; fl->fl_lmops = NULL; + locks_init_lock_always(fl); } EXPORT_SYMBOL(locks_init_lock); -- cgit v1.2.3 From bcb65a797eb7c51e4f227dd19295f14d38738fee Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Wed, 6 Jul 2011 12:26:05 +0100 Subject: FDPIC: Fix memory leak The shdr4extnum variable isn't being freed in the cleanup process of elf_fdpic_core_dump(). Signed-off-by: Davidlohr Bueso Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/binfmt_elf_fdpic.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 63039ed9576f..2bc5dc644b4c 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1864,6 +1864,7 @@ cleanup: kfree(psinfo); kfree(notes); kfree(fpu); + kfree(shdr4extnum); #ifdef ELF_CORE_COPY_XFPREGS kfree(xfpu); #endif -- cgit v1.2.3 From 677d8537d875832019fa989186f084ba47ecd93d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 5 Jul 2011 17:37:37 -0400 Subject: cifs: remove bogus call to cifs_cleanup_volume_info This call to cifs_cleanup_volume_info is clearly wrong. 
As soon as it's called the following call to cifs_get_tcp_session will oops as the volume_info pointer will then be NULL. The caller of cifs_mount should clean up this data since it passed it in. There's no need for us to call this here. Regression introduced by commit 724d9f1cfba. Reported-by: Adam Williamson Cc: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index c8cb83ef6f6f..545e85465747 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3012,7 +3012,6 @@ try_mount_again: else if (pSesInfo) cifs_put_smb_ses(pSesInfo); - cifs_cleanup_volume_info(&volume_info); FreeXid(xid); } #endif -- cgit v1.2.3 From b2a0fa152072f0085fa8d8eb0dbf9b3b0c5952fc Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 6 Jul 2011 08:10:36 -0400 Subject: cifs: fix build_unc_path_to_root to account for a prefixpath Regression introduced by commit f87d39d9513. Signed-off-by: Jeff Layton Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/connect.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 545e85465747..44376ce41e42 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2855,19 +2855,28 @@ cifs_cleanup_volume_info(struct smb_vol **pvolume_info) /* build_path_to_root returns full path to root when * we do not have an exiting connection (tcon) */ static char * -build_unc_path_to_root(const struct smb_vol *volume_info, +build_unc_path_to_root(const struct smb_vol *vol, const struct cifs_sb_info *cifs_sb) { - char *full_path; + char *full_path, *pos; + unsigned int pplen = vol->prepath ? strlen(vol->prepath) : 0; + unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1); - int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1); - full_path = kmalloc(unc_len + 1, GFP_KERNEL); + full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL); if (full_path == NULL) return ERR_PTR(-ENOMEM); - strncpy(full_path, volume_info->UNC, unc_len); - full_path[unc_len] = 0; /* add trailing null */ + strncpy(full_path, vol->UNC, unc_len); + pos = full_path + unc_len; + + if (pplen) { + strncpy(pos, vol->prepath, pplen); + pos += pplen; + } + + *pos = '\0'; /* add trailing null */ convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); + cFYI(1, "%s: full_path=%s", __func__, full_path); return full_path; } -- cgit v1.2.3 From f9e59bcba2cff580a3ccf62e89460f9eed295d89 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 6 Jul 2011 08:10:37 -0400 Subject: cifs: have cifs_cleanup_volume_info not take a double pointer ...as that makes for a cumbersome interface. Make it take a regular smb_vol pointer and rely on the caller to zero it out if needed. 
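For illustration, the interface change amounts to moving responsibility for clearing the pointer from the callee to the caller. A simplified sketch of the before/after shapes (the per-member kfree()s are omitted and the names shortened, so this is not the patch verbatim):

/* Before: double pointer, so the helper frees and also NULLs the
 * caller's pointer. */
static void cleanup_volume_info_old(struct smb_vol **pvolume_info)
{
	if (!pvolume_info || !*pvolume_info)
		return;
	kfree(*pvolume_info);		/* member kfree()s omitted here */
	*pvolume_info = NULL;
}

/* After: plain pointer; the helper only frees, and a caller that keeps
 * the pointer around zeroes it itself if needed. */
static void cleanup_volume_info_new(struct smb_vol *volume_info)
{
	if (!volume_info)
		return;
	kfree(volume_info);		/* member kfree()s omitted here */
}

The single-pointer form also lets callers such as cifs_do_mount() in the hunk below pass the value they already hold without taking its address.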
Signed-off-by: Jeff Layton Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 2 +- fs/cifs/cifsproto.h | 2 +- fs/cifs/connect.c | 10 +++------- 3 files changed, 5 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 35f9154615fa..e11b83149833 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -713,7 +713,7 @@ cifs_do_mount(struct file_system_type *fs_type, out_super: deactivate_locked_super(sb); out: - cifs_cleanup_volume_info(&volume_info); + cifs_cleanup_volume_info(volume_info); return root; out_mountdata: diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 257f312ede42..53b1b13581c6 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -154,7 +154,7 @@ extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb); extern int cifs_match_super(struct super_block *, void *); -extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); +extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info); extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, const char *devname); extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 44376ce41e42..dd064075ddfb 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2831,14 +2831,11 @@ is_path_accessible(int xid, struct cifs_tcon *tcon, } void -cifs_cleanup_volume_info(struct smb_vol **pvolume_info) +cifs_cleanup_volume_info(struct smb_vol *volume_info) { - struct smb_vol *volume_info; - - if (!pvolume_info || !*pvolume_info) + if (!volume_info) return; - volume_info = *pvolume_info; kfree(volume_info->username); kzfree(volume_info->password); kfree(volume_info->UNC); @@ -2847,7 +2844,6 @@ cifs_cleanup_volume_info(struct smb_vol **pvolume_info) kfree(volume_info->iocharset); kfree(volume_info->prepath); kfree(volume_info); - *pvolume_info = NULL; return; } @@ -2990,7 +2986,7 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, *pvolume_info = volume_info; return rc; out: - cifs_cleanup_volume_info(&volume_info); + cifs_cleanup_volume_info(volume_info); return rc; } -- cgit v1.2.3 From 1316d4da3f632d5843d5a446203e73067dc40f09 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 4 Jul 2011 05:27:36 +0000 Subject: xfs: unpin stale inodes directly in IOP_COMMITTED When inodes are marked stale in a transaction, they are treated specially when the inode log item is being inserted into the AIL. It tries to avoid moving the log item forward in the AIL due to a race condition with the writing the underlying buffer back to disk. The was "fixed" in commit de25c18 ("xfs: avoid moving stale inodes in the AIL"). To avoid moving the item forward, we return a LSN smaller than the commit_lsn of the completing transaction, thereby trying to trick the commit code into not moving the inode forward at all. I'm not sure this ever worked as intended - it assumes the inode is already in the AIL, but I don't think the returned LSN would have been small enough to prevent moving the inode. It appears that the reason it worked is that the lower LSN of the inodes meant they were inserted into the AIL and flushed before the inode buffer (which was moved to the commit_lsn of the transaction). The big problem is that with delayed logging, the returning of the different LSN means insertion takes the slow, non-bulk path. 
Worse yet is that insertion is to a position -before- the commit_lsn so it is doing a AIL traversal on every insertion, and has to walk over all the items that have already been inserted into the AIL. It's expensive. To compound the matter further, with delayed logging inodes are likely to go from clean to stale in a single checkpoint, which means they aren't even in the AIL at all when we come across them at AIL insertion time. Hence these were all getting inserted into the AIL when they simply do not need to be as inodes marked XFS_ISTALE are never written back. Transactional/recovery integrity is maintained in this case by the other items in the unlink transaction that were modified (e.g. the AGI btree blocks) and committed in the same checkpoint. So to fix this, simply unpin the stale inodes directly in xfs_inode_item_committed() and return -1 to indicate that the AIL insertion code does not need to do any further processing of these inodes. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_inode_item.c | 14 ++++++++------ fs/xfs/xfs_trans.c | 4 ++-- 2 files changed, 10 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 09983a3344a5..b1e88d56069c 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -681,15 +681,15 @@ xfs_inode_item_unlock( * where the cluster buffer may be unpinned before the inode is inserted into * the AIL during transaction committed processing. If the buffer is unpinned * before the inode item has been committed and inserted, then it is possible - * for the buffer to be written and IO completions before the inode is inserted + * for the buffer to be written and IO completes before the inode is inserted * into the AIL. In that case, we'd be inserting a clean, stale inode into the * AIL which will never get removed. It will, however, get reclaimed which * triggers an assert in xfs_inode_free() complaining about freein an inode * still in the AIL. * - * To avoid this, return a lower LSN than the one passed in so that the - * transaction committed code will not move the inode forward in the AIL but - * will still unpin it properly. + * To avoid this, just unpin the inode directly and return a LSN of -1 so the + * transaction committed code knows that it does not need to do any further + * processing on the item. */ STATIC xfs_lsn_t xfs_inode_item_committed( @@ -699,8 +699,10 @@ xfs_inode_item_committed( struct xfs_inode_log_item *iip = INODE_ITEM(lip); struct xfs_inode *ip = iip->ili_inode; - if (xfs_iflags_test(ip, XFS_ISTALE)) - return lsn - 1; + if (xfs_iflags_test(ip, XFS_ISTALE)) { + xfs_inode_item_unpin(lip, 0); + return -1; + } return lsn; } diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 7c7bc2b786bd..c83f63b33aae 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1361,7 +1361,7 @@ xfs_trans_item_committed( lip->li_flags |= XFS_LI_ABORTED; item_lsn = IOP_COMMITTED(lip, commit_lsn); - /* If the committed routine returns -1, item has been freed. 
*/ + /* item_lsn of -1 means the item needs no further processing */ if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) return; @@ -1474,7 +1474,7 @@ xfs_trans_committed_bulk( lip->li_flags |= XFS_LI_ABORTED; item_lsn = IOP_COMMITTED(lip, commit_lsn); - /* item_lsn of -1 means the item was freed */ + /* item_lsn of -1 means the item needs no further processing */ if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) continue; -- cgit v1.2.3 From bcaadf5c1ac4ff84b52174a84adb86a1e3e806dd Mon Sep 17 00:00:00 2001 From: Masatake YAMATO Date: Mon, 4 Jul 2011 12:25:51 +0900 Subject: dlm: dump address of unknown node When the dlm fails to make a network connection to another node, include the address of the node in the error message. Signed-off-by: Masatake YAMATO Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 5e2c71f05e46..990626e7da80 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -512,12 +512,10 @@ static void process_sctp_notification(struct connection *con, } make_sockaddr(&prim.ssp_addr, 0, &addr_len); if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) { - int i; unsigned char *b=(unsigned char *)&prim.ssp_addr; log_print("reject connect from unknown addr"); - for (i=0; isock_mutex); return -1; -- cgit v1.2.3 From 0942caa373c676dca614ea8352ac77e0270aba73 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 28 Jun 2011 15:10:37 +0000 Subject: btrfs: add missing options displayed in mount output There are three missed mount options settable by user which are not currently displayed in mount output. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 5 +++++ fs/btrfs/super.c | 6 ++++++ 2 files changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8e948ec1ee6b..60e13ef23a5e 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1336,6 +1336,11 @@ struct btrfs_ioctl_defrag_range_args { */ #define BTRFS_STRING_ITEM_KEY 253 +/* + * Flags for mount options. + * + * Note: don't forget to add new options to btrfs_show_options() + */ #define BTRFS_MOUNT_NODATASUM (1 << 0) #define BTRFS_MOUNT_NODATACOW (1 << 1) #define BTRFS_MOUNT_NOBARRIER (1 << 2) diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 3559d0b3518a..5746081199ee 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -721,6 +721,12 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs) seq_puts(seq, ",clear_cache"); if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED)) seq_puts(seq, ",user_subvol_rm_allowed"); + if (btrfs_test_opt(root, ENOSPC_DEBUG)) + seq_puts(seq, ",enospc_debug"); + if (btrfs_test_opt(root, AUTO_DEFRAG)) + seq_puts(seq, ",autodefrag"); + if (btrfs_test_opt(root, INODE_MAP_CACHE)) + seq_puts(seq, ",inode_cache"); return 0; } -- cgit v1.2.3 From 508794eb5ec2a2b832742e78c6766844b10c0c94 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Sat, 2 Jul 2011 21:24:41 +0000 Subject: Btrfs: don't panic if we get an error while balancing V2 A user reported an error where if we try to balance an fs after a device has been removed it will blow up. This is because we get an EIO back and this is where BUG_ON(ret) bites us in the ass. To fix we just exit. 
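The shape of the fix, as a hedged sketch (try_relocate_chunk() is an illustrative wrapper, not a function from the patch): treat the error as an expected runtime condition to unwind from rather than a logic bug worth a BUG_ON().

static int try_relocate_chunk(struct btrfs_root *chunk_root, u64 chunk_tree,
			      u64 chunk_objectid, u64 chunk_offset)
{
	int ret;

	ret = btrfs_relocate_chunk(chunk_root, chunk_tree, chunk_objectid,
				   chunk_offset);
	/* -ENOSPC is tolerated during balance; anything else (e.g. -EIO
	 * after a device was removed) is handed back to the caller
	 * instead of panicking. */
	if (ret && ret != -ENOSPC)
		return ret;
	return 0;
}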
Thanks, Reported-by: Anand Jain Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 1efa56e18f9b..19450bc53632 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2098,7 +2098,8 @@ int btrfs_balance(struct btrfs_root *dev_root) chunk_root->root_key.objectid, found_key.objectid, found_key.offset); - BUG_ON(ret && ret != -ENOSPC); + if (ret && ret != -ENOSPC) + goto error; key.offset = found_key.offset - 1; } ret = 0; -- cgit v1.2.3 From 149e2d76b4886c4c7ff5e077646a8ba3563c8026 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 6 Jul 2011 18:51:53 -0400 Subject: btrfs: fix oops when doing space balance We need to make sure the data relocation inode doesn't go through the delayed metadata updates, otherwise we get an oops during balance: kernel BUG at fs/btrfs/relocation.c:4303! [SNIP] Call Trace: [] ? update_ref_for_cow+0x22d/0x330 [btrfs] [] __btrfs_cow_block+0x451/0x5e0 [btrfs] [] ? read_block_for_search+0x14d/0x4d0 [btrfs] [] btrfs_cow_block+0x10b/0x240 [btrfs] [] btrfs_search_slot+0x49e/0x7a0 [btrfs] [] btrfs_lookup_inode+0x2f/0xa0 [btrfs] [] ? mutex_lock+0x1e/0x50 [] btrfs_update_delayed_inode+0x71/0x160 [btrfs] [] ? __btrfs_release_delayed_node+0x67/0x190 [btrfs] [] btrfs_run_delayed_items+0xe8/0x120 [btrfs] [] btrfs_commit_transaction+0x250/0x850 [btrfs] [] ? find_get_pages+0x39/0x130 [] ? join_transaction+0x25/0x250 [btrfs] [] ? wake_up_bit+0x40/0x40 [] prepare_to_relocate+0xda/0xf0 [btrfs] [] relocate_block_group+0x4b/0x620 [btrfs] [] ? btrfs_clean_old_snapshots+0x35/0x150 [btrfs] [] btrfs_relocate_block_group+0x1b3/0x2e0 [btrfs] [] ? btrfs_tree_unlock+0x50/0x50 [btrfs] [] btrfs_relocate_chunk+0x8b/0x670 [btrfs] [] ? btrfs_set_path_blocking+0x3d/0x50 [btrfs] [] ? read_extent_buffer+0xd8/0x1d0 [btrfs] [] ? btrfs_previous_item+0xb1/0x150 [btrfs] [] ? read_extent_buffer+0xd8/0x1d0 [btrfs] [] btrfs_balance+0x21a/0x2b0 [btrfs] [] btrfs_ioctl+0x798/0xd20 [btrfs] [] ? handle_mm_fault+0x148/0x270 [] ? do_page_fault+0x1d8/0x4b0 [] do_vfs_ioctl+0x9a/0x540 [] sys_ioctl+0xa1/0xb0 [] system_call_fastpath+0x16/0x1b [SNIP] RIP [] btrfs_reloc_cow_block+0x22c/0x270 [btrfs] Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 447612d3a16a..4a1373083747 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2678,12 +2678,14 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, int ret; /* - * If root is tree root, it means this inode is used to - * store free space information. And these inodes are updated - * when committing the transaction, so they needn't delaye to - * be updated, or deadlock will occured. + * If the inode is a free space inode, we can deadlock during commit + * if we put it into the delayed code. 
+ * + * The data relocation inode should also be directly updated + * without delay */ - if (!is_free_space_inode(root, inode)) { + if (!is_free_space_inode(root, inode) + && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) { ret = btrfs_delayed_update_inode(trans, root, inode); if (!ret) btrfs_set_inode_last_trans(trans, inode); -- cgit v1.2.3 From 5bd9d99d107c56ff7b35a29e930d85f91a07b2fd Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Wed, 6 Jul 2011 02:29:59 +0400 Subject: hfsplus: add error checking for hfs_find_init() hfs_find_init() may fail with ENOMEM, but there are places, where the returned value is not checked. The consequences can be very unpleasant, e.g. kfree uninitialized pointer and inappropriate mutex unlocking. The patch adds checks for errors in hfs_find_init(). Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: Christoph Hellwig --- fs/hfsplus/catalog.c | 14 ++++++++++---- fs/hfsplus/dir.c | 8 ++++++-- fs/hfsplus/extents.c | 27 ++++++++++++++++++--------- fs/hfsplus/inode.c | 12 +++++++----- fs/hfsplus/super.c | 16 ++++++++++------ 5 files changed, 51 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index b4ba1b319333..4dfbfec357e8 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c @@ -212,7 +212,9 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink); - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + if (err) + return err; hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); entry_size = hfsplus_fill_cat_thread(sb, &entry, @@ -269,7 +271,9 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? 
str->name : NULL, cnid); - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + if (err) + return err; if (!str) { int len; @@ -347,12 +351,14 @@ int hfsplus_rename_cat(u32 cnid, struct hfs_find_data src_fd, dst_fd; hfsplus_cat_entry entry; int entry_size, type; - int err = 0; + int err; dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, dst_dir->i_ino, dst_name->name); - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd); + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd); + if (err) + return err; dst_fd = src_fd; /* find the old dir entry and read the data */ diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 4df5059c25da..25b2443a004c 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -38,7 +38,9 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, sb = dir->i_sb; dentry->d_fsdata = NULL; - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + if (err) + return ERR_PTR(err); hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); again: err = hfs_brec_read(&fd, &entry, sizeof(entry)); @@ -132,7 +134,9 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) if (filp->f_pos >= inode->i_size) return 0; - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + if (err) + return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd); if (err) diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index b9c1a4b5ba89..95065a89e7e2 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -124,9 +124,10 @@ static void hfsplus_ext_write_extent_locked(struct inode *inode) if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) { struct hfs_find_data fd; - hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); - __hfsplus_ext_write_extent(inode, &fd); - hfs_find_exit(&fd); + if (!hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd)) { + __hfsplus_ext_write_extent(inode, &fd); + hfs_find_exit(&fd); + } } } @@ -194,9 +195,11 @@ static int hfsplus_ext_read_extent(struct inode *inode, u32 block) block < hip->cached_start + hip->cached_blocks) return 0; - hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); - res = __hfsplus_ext_cache_extent(&fd, inode, block); - hfs_find_exit(&fd); + res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); + if (!res) { + res = __hfsplus_ext_cache_extent(&fd, inode, block); + hfs_find_exit(&fd); + } return res; } @@ -374,7 +377,9 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid, if (total_blocks == blocks) return 0; - hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); + res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); + if (res) + return res; do { res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid, total_blocks, type); @@ -503,7 +508,6 @@ void hfsplus_file_truncate(struct inode *inode) struct page *page; void *fsdata; u32 size = inode->i_size; - int res; res = pagecache_write_begin(NULL, mapping, size, 0, AOP_FLAG_UNINTERRUPTIBLE, @@ -526,7 +530,12 @@ void hfsplus_file_truncate(struct inode *inode) goto out; mutex_lock(&hip->extents_lock); - hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); + res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); + if (res) { + mutex_unlock(&hip->extents_lock); + /* XXX: We lack error handling of hfsplus_file_truncate() */ + return; + } while (1) { if (alloc_cnt == hip->first_blocks) { hfsplus_free_extents(sb, 
hip->first_extents, diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index b248a6cfcad9..010cd363d085 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -195,11 +195,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir, hip->flags = 0; set_bit(HFSPLUS_I_RSRC, &hip->flags); - hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); - err = hfsplus_find_cat(sb, dir->i_ino, &fd); - if (!err) - err = hfsplus_cat_read_inode(inode, &fd); - hfs_find_exit(&fd); + err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); + if (!err) { + err = hfsplus_find_cat(sb, dir->i_ino, &fd); + if (!err) + err = hfsplus_cat_read_inode(inode, &fd); + hfs_find_exit(&fd); + } if (err) { iput(inode); return ERR_PTR(err); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index acaef57e3606..2c1a72287fb5 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -73,11 +73,13 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID || inode->i_ino == HFSPLUS_ROOT_CNID) { - hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); - err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); - if (!err) - err = hfsplus_cat_read_inode(inode, &fd); - hfs_find_exit(&fd); + err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); + if (!err) { + err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); + if (!err) + err = hfsplus_cat_read_inode(inode, &fd); + hfs_find_exit(&fd); + } } else { err = hfsplus_system_read_inode(inode); } @@ -456,7 +458,9 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; str.name = HFSP_HIDDENDIR_NAME; - hfs_find_init(sbi->cat_tree, &fd); + err = hfs_find_init(sbi->cat_tree, &fd); + if (err) + goto out_put_root; hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str); if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { hfs_find_exit(&fd); -- cgit v1.2.3 From dd7f3d5458e5c0eded620fe8192abe7e418fc94c Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Wed, 6 Jul 2011 02:30:00 +0400 Subject: hfsplus: Add error propagation for hfsplus_ext_write_extent_locked Implement error propagation through the callers of hfsplus_ext_write_extent_locked(). 
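For reference, a rough sketch of the two helpers as they look after this change (reconstructed from the hunks below, other details omitted): hfsplus_ext_write_extent_locked() now returns the hfs_find_init() result instead of silently ignoring an allocation failure, and hfsplus_ext_write_extent() hands that result back to its callers.

static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		/* hfs_find_init() may fail (e.g. ENOMEM); propagate the error */
		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		__hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return 0;
}

int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

	return res;
}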
Signed-off-by: Alexey Khoroshilov Signed-off-by: Christoph Hellwig --- fs/hfsplus/extents.c | 26 ++++++++++++++++++-------- fs/hfsplus/hfsplus_fs.h | 2 +- fs/hfsplus/super.c | 6 +++++- 3 files changed, 24 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index 95065a89e7e2..5849e3ef35cc 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -119,23 +119,31 @@ static void __hfsplus_ext_write_extent(struct inode *inode, set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags); } -static void hfsplus_ext_write_extent_locked(struct inode *inode) +static int hfsplus_ext_write_extent_locked(struct inode *inode) { + int res; + if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) { struct hfs_find_data fd; - if (!hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd)) { - __hfsplus_ext_write_extent(inode, &fd); - hfs_find_exit(&fd); - } + res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); + if (res) + return res; + __hfsplus_ext_write_extent(inode, &fd); + hfs_find_exit(&fd); } + return 0; } -void hfsplus_ext_write_extent(struct inode *inode) +int hfsplus_ext_write_extent(struct inode *inode) { + int res; + mutex_lock(&HFSPLUS_I(inode)->extents_lock); - hfsplus_ext_write_extent_locked(inode); + res = hfsplus_ext_write_extent_locked(inode); mutex_unlock(&HFSPLUS_I(inode)->extents_lock); + + return res; } static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd, @@ -477,7 +485,9 @@ out: insert_extent: dprint(DBG_EXTENT, "insert new extent\n"); - hfsplus_ext_write_extent_locked(inode); + res = hfsplus_ext_write_extent_locked(inode); + if (res) + goto out; memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); hip->cached_extents[0].start_block = cpu_to_be32(start); diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index d6857523336d..0bebf7488feb 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -374,7 +374,7 @@ extern const struct file_operations hfsplus_dir_operations; /* extents.c */ int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *); -void hfsplus_ext_write_extent(struct inode *); +int hfsplus_ext_write_extent(struct inode *); int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int); int hfsplus_free_fork(struct super_block *, u32, struct hfsplus_fork_raw *, int); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 2c1a72287fb5..84f56e1f6dac 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -135,9 +135,13 @@ static int hfsplus_system_write_inode(struct inode *inode) static int hfsplus_write_inode(struct inode *inode, struct writeback_control *wbc) { + int err; + dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino); - hfsplus_ext_write_extent(inode); + err = hfsplus_ext_write_extent(inode); + if (err) + return err; if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID || inode->i_ino == HFSPLUS_ROOT_CNID) -- cgit v1.2.3 From c902ce1bfb40d8b049bd2319b388b4b68b04bc27 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 7 Jul 2011 12:19:48 +0100 Subject: FS-Cache: Add a helper to bulk uncache pages on an inode Add an FS-Cache helper to bulk uncache pages on an inode. This will only work for the circumstance where the pages in the cache correspond 1:1 with the pages attached to an inode's page cache. This is required for CIFS and NFS: When disabling inode cookie, we were returning the cookie and setting cifsi->fscache to NULL but failed to invalidate any previously mapped pages. 
This resulted in "Bad page state" errors and manifested in other kind of errors when running fsstress. Fix it by uncaching mapped pages when we disable the inode cookie. This patch should fix the following oops and "Bad page state" errors seen during fsstress testing. ------------[ cut here ]------------ kernel BUG at fs/cachefiles/namei.c:201! invalid opcode: 0000 [#1] SMP Pid: 5, comm: kworker/u:0 Not tainted 2.6.38.7-30.fc15.x86_64 #1 Bochs Bochs RIP: 0010: cachefiles_walk_to_object+0x436/0x745 [cachefiles] RSP: 0018:ffff88002ce6dd00 EFLAGS: 00010282 RAX: ffff88002ef165f0 RBX: ffff88001811f500 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000100 RDI: 0000000000000282 RBP: ffff88002ce6dda0 R08: 0000000000000100 R09: ffffffff81b3a300 R10: 0000ffff00066c0a R11: 0000000000000003 R12: ffff88002ae54840 R13: ffff88002ae54840 R14: ffff880029c29c00 R15: ffff88001811f4b0 FS: 00007f394dd32720(0000) GS:ffff88002ef00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00007fffcb62ddf8 CR3: 000000001825f000 CR4: 00000000000006e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process kworker/u:0 (pid: 5, threadinfo ffff88002ce6c000, task ffff88002ce55cc0) Stack: 0000000000000246 ffff88002ce55cc0 ffff88002ce6dd58 ffff88001815dc00 ffff8800185246c0 ffff88001811f618 ffff880029c29d18 ffff88001811f380 ffff88002ce6dd50 ffffffff814757e4 ffff88002ce6dda0 ffffffff8106ac56 Call Trace: cachefiles_lookup_object+0x78/0xd4 [cachefiles] fscache_lookup_object+0x131/0x16d [fscache] fscache_object_work_func+0x1bc/0x669 [fscache] process_one_work+0x186/0x298 worker_thread+0xda/0x15d kthread+0x84/0x8c kernel_thread_helper+0x4/0x10 RIP cachefiles_walk_to_object+0x436/0x745 [cachefiles] ---[ end trace 1d481c9af1804caa ]--- I tested the uncaching by the following means: (1) Create a big file on my NFS server (104857600 bytes). (2) Read the file into the cache with md5sum on the NFS client. Look in /proc/fs/fscache/stats: Pages : mrk=25601 unc=0 (3) Open the file for read/write ("bash 5<>/warthog/bigfile"). Look in proc again: Pages : mrk=25601 unc=25601 Reported-by: Jeff Layton Signed-off-by: David Howells Reviewed-and-Tested-by: Suresh Jayaraman cc: stable@kernel.org Signed-off-by: Linus Torvalds --- Documentation/filesystems/caching/netfs-api.txt | 16 +++++++++ fs/cifs/fscache.c | 1 + fs/fscache/page.c | 44 +++++++++++++++++++++++++ fs/nfs/fscache.c | 8 ++--- include/linux/fscache.h | 21 ++++++++++++ 5 files changed, 85 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/caching/netfs-api.txt b/Documentation/filesystems/caching/netfs-api.txt index a167ab876c35..7cc6bf2871eb 100644 --- a/Documentation/filesystems/caching/netfs-api.txt +++ b/Documentation/filesystems/caching/netfs-api.txt @@ -673,6 +673,22 @@ storage request to complete, or it may attempt to cancel the storage request - in which case the page will not be stored in the cache this time. +BULK INODE PAGE UNCACHE +----------------------- + +A convenience routine is provided to perform an uncache on all the pages +attached to an inode. This assumes that the pages on the inode correspond on a +1:1 basis with the pages in the cache. + + void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, + struct inode *inode); + +This takes the netfs cookie that the pages were cached with and the inode that +the pages are attached to. 
This function will wait for pages to finish being +written to the cache and for the cache to finish with the page generally. No +error is returned. + + ========================== INDEX AND DATA FILE UPDATE ========================== diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index 816696621ec9..42e5363b4102 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -92,6 +92,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode) if (cifsi->fscache) { cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache); + fscache_uncache_all_inode_pages(cifsi->fscache, inode); fscache_relinquish_cookie(cifsi->fscache, 1); cifsi->fscache = NULL; } diff --git a/fs/fscache/page.c b/fs/fscache/page.c index a2a5d19ece6a..2f343b4d7a7d 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -954,3 +954,47 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op, pagevec_reinit(pagevec); } EXPORT_SYMBOL(fscache_mark_pages_cached); + +/* + * Uncache all the pages in an inode that are marked PG_fscache, assuming them + * to be associated with the given cookie. + */ +void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, + struct inode *inode) +{ + struct address_space *mapping = inode->i_mapping; + struct pagevec pvec; + pgoff_t next; + int i; + + _enter("%p,%p", cookie, inode); + + if (!mapping || mapping->nrpages == 0) { + _leave(" [no pages]"); + return; + } + + pagevec_init(&pvec, 0); + next = 0; + while (next <= (loff_t)-1 && + pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE) + ) { + for (i = 0; i < pagevec_count(&pvec); i++) { + struct page *page = pvec.pages[i]; + pgoff_t page_index = page->index; + + ASSERTCMP(page_index, >=, next); + next = page_index + 1; + + if (PageFsCache(page)) { + __fscache_wait_on_page_write(cookie, page); + __fscache_uncache_page(cookie, page); + } + } + pagevec_release(&pvec); + cond_resched(); + } + + _leave(""); +} +EXPORT_SYMBOL(__fscache_uncache_all_inode_pages); diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index ce153a6b3aec..419119c371bf 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -259,12 +259,10 @@ static void nfs_fscache_disable_inode_cookie(struct inode *inode) dfprintk(FSCACHE, "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode)); - /* Need to invalidate any mapped pages that were read in before - * turning off the cache. + /* Need to uncache any pages attached to this inode that + * fscache knows about before turning off the cache. */ - if (inode->i_mapping && inode->i_mapping->nrpages) - invalidate_inode_pages2(inode->i_mapping); - + fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode); nfs_fscache_zap_inode_cookie(inode); } } diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 7c4d72f5581f..9ec20dec3353 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -204,6 +204,8 @@ extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *, gfp_t); +extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *, + struct inode *); /** * fscache_register_netfs - Register a filesystem as desiring caching services @@ -643,4 +645,23 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie, return false; } +/** + * fscache_uncache_all_inode_pages - Uncache all an inode's pages + * @cookie: The cookie representing the inode's cache object. + * @inode: The inode to uncache pages from. 
+ * + * Uncache all the pages in an inode that are marked PG_fscache, assuming them + * to be associated with the given cookie. + * + * This function may sleep. It will wait for pages that are being written out + * and will wait whilst the PG_fscache mark is removed by the cache. + */ +static inline +void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, + struct inode *inode) +{ + if (fscache_cookie_valid(cookie)) + __fscache_uncache_all_inode_pages(cookie, inode); +} + #endif /* _LINUX_FSCACHE_H */ -- cgit v1.2.3 From 04db79b015dafcb79371fda6b5c32ffdbd31a2ff Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 6 Jul 2011 08:10:38 -0400 Subject: cifs: factor smb_vol allocation out of cifs_setup_volume_info Signed-off-by: Jeff Layton Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 6 +++--- fs/cifs/cifsproto.h | 4 ++-- fs/cifs/connect.c | 56 ++++++++++++++++++++++++++--------------------------- 3 files changed, 33 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index e11b83149833..3e2989976297 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -649,9 +649,9 @@ cifs_do_mount(struct file_system_type *fs_type, cFYI(1, "Devname: %s flags: %d ", dev_name, flags); - rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name); - if (rc) - return ERR_PTR(rc); + volume_info = cifs_get_volume_info((char *)data, dev_name); + if (IS_ERR(volume_info)) + return ERR_CAST(volume_info); cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); if (cifs_sb == NULL) { diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 53b1b13581c6..8df28e925e5b 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -155,8 +155,8 @@ extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb); extern int cifs_match_super(struct super_block *, void *); extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info); -extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, - char *mount_data, const char *devname); +extern struct smb_vol *cifs_get_volume_info(char *mount_data, + const char *devname); extern int cifs_mount(struct cifs_sb_info *, struct smb_vol *); extern void cifs_umount(struct cifs_sb_info *); extern void cifs_dfs_release_automount_timer(void); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index dd064075ddfb..46cc0ad03487 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2931,33 +2931,20 @@ expand_dfs_referral(int xid, struct cifs_ses *pSesInfo, } #endif -int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, - const char *devname) +static int +cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data, + const char *devname) { - struct smb_vol *volume_info; int rc = 0; - *pvolume_info = NULL; - - volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); - if (!volume_info) { - rc = -ENOMEM; - goto out; - } - - if (cifs_parse_mount_options(mount_data, devname, - volume_info)) { - rc = -EINVAL; - goto out; - } + if (cifs_parse_mount_options(mount_data, devname, volume_info)) + return -EINVAL; if (volume_info->nullauth) { cFYI(1, "null user"); volume_info->username = kzalloc(1, GFP_KERNEL); - if (volume_info->username == NULL) { - rc = -ENOMEM; - goto out; - } + if (volume_info->username == NULL) + return -ENOMEM; } else if (volume_info->username) { /* BB fixme parse for domain name here */ cFYI(1, "Username: %s", volume_info->username); @@ -2965,8 +2952,7 @@ int cifs_setup_volume_info(struct smb_vol 
**pvolume_info, char *mount_data, cifserror("No username specified"); /* In userspace mount helper we can get user name from alternate locations such as env variables and files on disk */ - rc = -EINVAL; - goto out; + return -EINVAL; } /* this is needed for ASCII cp to Unicode converts */ @@ -2978,18 +2964,32 @@ int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, if (volume_info->local_nls == NULL) { cERROR(1, "CIFS mount error: iocharset %s not found", volume_info->iocharset); - rc = -ELIBACC; - goto out; + return -ELIBACC; } } - *pvolume_info = volume_info; - return rc; -out: - cifs_cleanup_volume_info(volume_info); return rc; } +struct smb_vol * +cifs_get_volume_info(char *mount_data, const char *devname) +{ + int rc; + struct smb_vol *volume_info; + + volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); + if (!volume_info) + return ERR_PTR(-ENOMEM); + + rc = cifs_setup_volume_info(volume_info, mount_data, devname); + if (rc) { + cifs_cleanup_volume_info(volume_info); + volume_info = ERR_PTR(rc); + } + + return volume_info; +} + int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) { -- cgit v1.2.3 From 19495f70d1ebcdc732dd3c94f5968a4bff198ae5 Mon Sep 17 00:00:00 2001 From: Anatolij Gustschin Date: Thu, 7 Jul 2011 12:25:02 +0200 Subject: UBIFS: fix master node recovery When the 1st LEB was unmapped and written but 2nd LEB not, the master node recovery doesn't succeed after power cut. We see following error when mounting UBIFS partition on NOR flash: UBIFS error (pid 1137): ubifs_recover_master_node: failed to recover master node Correct 2nd master node offset check is needed to fix the problem. If the 2nd master node is at the end in the 2nd LEB, first master node is used for recovery. When checking for this condition we should check whether the master node is exactly at the end of the LEB (without remaining empty space) or whether it is followed by an empty space less than the master node size. Artem: when the error happened, offs2 = 261120, sz = 512, c->leb_size = 262016. Signed-off-by: Anatolij Gustschin Signed-off-by: Artem Bityutskiy --- fs/ubifs/recovery.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 51bcf4227b29..af02790d9328 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -274,7 +274,8 @@ int ubifs_recover_master_node(struct ubifs_info *c) if (cor1) goto out_err; mst = mst1; - } else if (offs1 == 0 && offs2 + sz >= c->leb_size) { + } else if (offs1 == 0 && + c->leb_size - offs2 - sz < sz) { /* 1st LEB was unmapped and written, 2nd not */ if (cor1) goto out_err; -- cgit v1.2.3 From 680a647b49fa52de0b37b8bc58a3a8c4bc76b262 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:05 +0200 Subject: xfs: PF_FSTRANS should never be set in ->writepage Now that we reject direct reclaim in addition to always using GFP_NOFS allocation there's no chance we'll ever end up in ->writepage with PF_FSTRANS set. Add a WARN_ON if we hit this case, and stop checking if we'd actually need to start a transaction. 
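With this change the transaction check at the top of xfs_vm_writepage() reduces to a single assertion-style test, roughly (as in the hunk below):

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;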
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_aops.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 79ce38be15a1..60e1364a191f 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -894,11 +894,6 @@ out_invalidate: * For unwritten space on the page we need to start the conversion to * regular allocated space. * For any other dirty buffer heads on the page we should flush them. - * - * If we detect that a transaction would be required to flush the page, we - * have to check the process flags first, if we are already in a transaction - * or disk I/O during allocations is off, we need to fail the writepage and - * redirty the page. */ STATIC int xfs_vm_writepage( @@ -906,7 +901,6 @@ xfs_vm_writepage( struct writeback_control *wbc) { struct inode *inode = page->mapping->host; - int delalloc, unwritten; struct buffer_head *bh, *head; struct xfs_bmbt_irec imap; xfs_ioend_t *ioend = NULL, *iohead = NULL; @@ -938,15 +932,10 @@ xfs_vm_writepage( goto redirty; /* - * We need a transaction if there are delalloc or unwritten buffers - * on the page. - * - * If we need a transaction and the process flags say we are already - * in a transaction, or no IO is allowed then mark the page dirty - * again and leave the page as is. + * Given that we do not allow direct reclaim to call us, we should + * never be called while in a filesystem transaction. */ - xfs_count_page_state(page, &delalloc, &unwritten); - if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) + if (WARN_ON(current->flags & PF_FSTRANS)) goto redirty; /* Is this page beyond the end of the file? */ -- cgit v1.2.3 From dbcdde3e76f45d56c4a30ca6c5d69b6d473d3fd1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:14 +0200 Subject: xfs: re-enable non-blocking behaviour in xfs_map_blocks The non-blocking behaviour in xfs_vm_writepage currently is conditional on having both the WB_SYNC_NONE sync_mode and the nonblocking flag set. The latter used to be used by pdflush, kswapd and a few other places in older kernels, but has been fading out starting with the introduction of the per-bdi flusher threads. Enable the non-blocking behaviour for all WB_SYNC_NONE calls to get back the behaviour we want. Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_aops.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 60e1364a191f..3447733fa897 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -959,7 +959,7 @@ xfs_vm_writepage( offset = page_offset(page); type = IO_OVERWRITE; - if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking) + if (wbc->sync_mode == WB_SYNC_NONE) nonblocking = 1; do { -- cgit v1.2.3 From dec58f1dfd30a3c3e9dadc808692f4e5cd922745 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:18 +0200 Subject: xfs: work around bogus gcc warning in xfs_allocbt_init_cursor GCC 4.6 complains that an array subscript is above array bounds when using the btree index to index into the agf_levels array. The only two indices passed in are 0 and 1, and we have an assert ensuring that.
Replace the trick of using the array index directly with using constants in the already existing branch for assigning the XFS_BTREE_LASTREC_UPDATE flag. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_alloc_btree.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 2b3518826a69..e277777a7cf9 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -427,13 +427,16 @@ xfs_allocbt_init_cursor( cur->bc_tp = tp; cur->bc_mp = mp; - cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]); cur->bc_btnum = btnum; cur->bc_blocklog = mp->m_sb.sb_blocklog; - cur->bc_ops = &xfs_allocbt_ops; - if (btnum == XFS_BTNUM_CNT) + + if (btnum == XFS_BTNUM_CNT) { + cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]); cur->bc_flags = XFS_BTREE_LASTREC_UPDATE; + } else { + cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]); + } cur->bc_private.a.agbp = agbp; cur->bc_private.a.agno = agno; -- cgit v1.2.3 From c4ed4243c40f97ed5b7b121777bbbc6aeaa722f0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:23 +0200 Subject: xfs: split xfs_setattr Split up xfs_setattr into two functions, one for the complex truncate handling, and one for the trivial attribute updates. Also move both new routines to xfs_iops.c as they are fairly Linux-specific. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_acl.c | 2 +- fs/xfs/linux-2.6/xfs_file.c | 2 +- fs/xfs/linux-2.6/xfs_iops.c | 440 +++++++++++++++++++++++++++++++++++++++++++- fs/xfs/xfs_vnodeops.c | 426 +----------------------------------------- fs/xfs/xfs_vnodeops.h | 3 +- 5 files changed, 444 insertions(+), 429 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c index 39f4f809bb68..115ac6919533 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/linux-2.6/xfs_acl.c @@ -264,7 +264,7 @@ xfs_set_mode(struct inode *inode, mode_t mode) iattr.ia_mode = mode; iattr.ia_ctime = current_fs_time(inode->i_sb); - error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL); + error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); } return error; diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 7f782af286bf..8073f61efb8e 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -944,7 +944,7 @@ xfs_file_fallocate( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = new_size; - error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK); + error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK); } out_unlock: diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index d44d92cd12b1..65d3bf8266e7 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -39,6 +39,7 @@ #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_vnodeops.h" +#include "xfs_inode_item.h" #include "xfs_trace.h" #include @@ -497,12 +498,449 @@ xfs_vn_getattr( return 0; } +int +xfs_setattr_nonsize( + struct xfs_inode *ip, + struct iattr *iattr, + int flags) +{ + xfs_mount_t *mp = ip->i_mount; + struct inode *inode = VFS_I(ip); + int mask = iattr->ia_valid; + xfs_trans_t *tp; + int error; + uid_t uid = 0, iuid = 0; + gid_t gid = 0, igid = 0; + struct xfs_dquot *udqp = NULL, *gdqp = NULL; + struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL; + + trace_xfs_setattr(ip); + + if (mp->m_flags & XFS_MOUNT_RDONLY) + return 
XFS_ERROR(EROFS); + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + error = -inode_change_ok(inode, iattr); + if (error) + return XFS_ERROR(error); + + ASSERT((mask & ATTR_SIZE) == 0); + + /* + * If disk quotas is on, we make sure that the dquots do exist on disk, + * before we start any other transactions. Trying to do this later + * is messy. We don't care to take a readlock to look at the ids + * in inode here, because we can't hold it across the trans_reserve. + * If the IDs do change before we take the ilock, we're covered + * because the i_*dquot fields will get updated anyway. + */ + if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { + uint qflags = 0; + + if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { + uid = iattr->ia_uid; + qflags |= XFS_QMOPT_UQUOTA; + } else { + uid = ip->i_d.di_uid; + } + if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { + gid = iattr->ia_gid; + qflags |= XFS_QMOPT_GQUOTA; + } else { + gid = ip->i_d.di_gid; + } + + /* + * We take a reference when we initialize udqp and gdqp, + * so it is important that we never blindly double trip on + * the same variable. See xfs_create() for an example. + */ + ASSERT(udqp == NULL); + ASSERT(gdqp == NULL); + error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), + qflags, &udqp, &gdqp); + if (error) + return error; + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0); + if (error) + goto out_dqrele; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + /* + * Change file ownership. Must be the owner or privileged. + */ + if (mask & (ATTR_UID|ATTR_GID)) { + /* + * These IDs could have changed since we last looked at them. + * But, we're assured that if the ownership did change + * while we didn't have the inode locked, inode's dquot(s) + * would have changed also. + */ + iuid = ip->i_d.di_uid; + igid = ip->i_d.di_gid; + gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; + uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; + + /* + * Do a quota reservation only if uid/gid is actually + * going to change. + */ + if (XFS_IS_QUOTA_RUNNING(mp) && + ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || + (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { + ASSERT(tp); + error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, + capable(CAP_FOWNER) ? + XFS_QMOPT_FORCE_RES : 0); + if (error) /* out of quota */ + goto out_trans_cancel; + } + } + + xfs_trans_ijoin(tp, ip); + + /* + * Change file ownership. Must be the owner or privileged. + */ + if (mask & (ATTR_UID|ATTR_GID)) { + /* + * CAP_FSETID overrides the following restrictions: + * + * The set-user-ID and set-group-ID bits of a file will be + * cleared upon successful return from chown() + */ + if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && + !capable(CAP_FSETID)) + ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); + + /* + * Change the ownerships and register quota modifications + * in the transaction. + */ + if (iuid != uid) { + if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { + ASSERT(mask & ATTR_UID); + ASSERT(udqp); + olddquot1 = xfs_qm_vop_chown(tp, ip, + &ip->i_udquot, udqp); + } + ip->i_d.di_uid = uid; + inode->i_uid = uid; + } + if (igid != gid) { + if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { + ASSERT(!XFS_IS_PQUOTA_ON(mp)); + ASSERT(mask & ATTR_GID); + ASSERT(gdqp); + olddquot2 = xfs_qm_vop_chown(tp, ip, + &ip->i_gdquot, gdqp); + } + ip->i_d.di_gid = gid; + inode->i_gid = gid; + } + } + + /* + * Change file access modes. 
+ */ + if (mask & ATTR_MODE) { + umode_t mode = iattr->ia_mode; + + if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) + mode &= ~S_ISGID; + + ip->i_d.di_mode &= S_IFMT; + ip->i_d.di_mode |= mode & ~S_IFMT; + + inode->i_mode &= S_IFMT; + inode->i_mode |= mode & ~S_IFMT; + } + + /* + * Change file access or modified times. + */ + if (mask & ATTR_ATIME) { + inode->i_atime = iattr->ia_atime; + ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; + ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; + ip->i_update_core = 1; + } + if (mask & ATTR_CTIME) { + inode->i_ctime = iattr->ia_ctime; + ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; + ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; + ip->i_update_core = 1; + } + if (mask & ATTR_MTIME) { + inode->i_mtime = iattr->ia_mtime; + ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; + ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; + ip->i_update_core = 1; + } + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + XFS_STATS_INC(xs_ig_attrchg); + + if (mp->m_flags & XFS_MOUNT_WSYNC) + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0); + + xfs_iunlock(ip, XFS_ILOCK_EXCL); + + /* + * Release any dquot(s) the inode had kept before chown. + */ + xfs_qm_dqrele(olddquot1); + xfs_qm_dqrele(olddquot2); + xfs_qm_dqrele(udqp); + xfs_qm_dqrele(gdqp); + + if (error) + return XFS_ERROR(error); + + /* + * XXX(hch): Updating the ACL entries is not atomic vs the i_mode + * update. We could avoid this with linked transactions + * and passing down the transaction pointer all the way + * to attr_set. No previous user of the generic + * Posix ACL code seems to care about this issue either. + */ + if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { + error = -xfs_acl_chmod(inode); + if (error) + return XFS_ERROR(error); + } + + return 0; + +out_trans_cancel: + xfs_trans_cancel(tp, 0); + xfs_iunlock(ip, XFS_ILOCK_EXCL); +out_dqrele: + xfs_qm_dqrele(udqp); + xfs_qm_dqrele(gdqp); + return error; +} + +/* + * Truncate file. Must have write permission and not be a directory. + */ +int +xfs_setattr_size( + struct xfs_inode *ip, + struct iattr *iattr, + int flags) +{ + struct xfs_mount *mp = ip->i_mount; + struct inode *inode = VFS_I(ip); + int mask = iattr->ia_valid; + struct xfs_trans *tp; + int error; + uint lock_flags; + uint commit_flags = 0; + + trace_xfs_setattr(ip); + + if (mp->m_flags & XFS_MOUNT_RDONLY) + return XFS_ERROR(EROFS); + + if (XFS_FORCED_SHUTDOWN(mp)) + return XFS_ERROR(EIO); + + error = -inode_change_ok(inode, iattr); + if (error) + return XFS_ERROR(error); + + ASSERT(S_ISREG(ip->i_d.di_mode)); + ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET| + ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID| + ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0); + + lock_flags = XFS_ILOCK_EXCL; + if (!(flags & XFS_ATTR_NOLOCK)) + lock_flags |= XFS_IOLOCK_EXCL; + xfs_ilock(ip, lock_flags); + + /* + * Short circuit the truncate case for zero length files. + */ + if (iattr->ia_size == 0 && + ip->i_size == 0 && ip->i_d.di_nextents == 0) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + lock_flags &= ~XFS_ILOCK_EXCL; + if (mask & ATTR_CTIME) { + inode->i_mtime = inode->i_ctime = + current_fs_time(inode->i_sb); + xfs_mark_inode_dirty_sync(ip); + } + goto out_unlock; + } + + /* + * Make sure that the dquots are attached to the inode. + */ + error = xfs_qm_dqattach_locked(ip, 0); + if (error) + goto out_unlock; + + /* + * Now we can make the changes. 
Before we join the inode to the + * transaction, take care of the part of the truncation that must be + * done without the inode lock. This needs to be done before joining + * the inode to the transaction, because the inode cannot be unlocked + * once it is a part of the transaction. + */ + if (iattr->ia_size > ip->i_size) { + /* + * Do the first part of growing a file: zero any data in the + * last block that is beyond the old EOF. We need to do this + * before the inode is joined to the transaction to modify + * i_size. + */ + error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size); + if (error) + goto out_unlock; + } + xfs_iunlock(ip, XFS_ILOCK_EXCL); + lock_flags &= ~XFS_ILOCK_EXCL; + + /* + * We are going to log the inode size change in this transaction so + * any previous writes that are beyond the on disk EOF and the new + * EOF that have not been written out need to be written here. If we + * do not write the data out, we expose ourselves to the null files + * problem. + * + * Only flush from the on disk size to the smaller of the in memory + * file size or the new size as that's the range we really care about + * here and prevents waiting for other data not within the range we + * care about here. + */ + if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) { + error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, + XBF_ASYNC, FI_NONE); + if (error) + goto out_unlock; + } + + /* + * Wait for all I/O to complete. + */ + xfs_ioend_wait(ip); + + error = -block_truncate_page(inode->i_mapping, iattr->ia_size, + xfs_get_blocks); + if (error) + goto out_unlock; + + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); + error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, + XFS_TRANS_PERM_LOG_RES, + XFS_ITRUNCATE_LOG_COUNT); + if (error) + goto out_trans_cancel; + + truncate_setsize(inode, iattr->ia_size); + + commit_flags = XFS_TRANS_RELEASE_LOG_RES; + lock_flags |= XFS_ILOCK_EXCL; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + + xfs_trans_ijoin(tp, ip); + + /* + * Only change the c/mtime if we are changing the size or we are + * explicitly asked to change it. This handles the semantic difference + * between truncate() and ftruncate() as implemented in the VFS. + * + * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a + * special case where we need to update the times despite not having + * these flags set. For all other operations the VFS set these flags + * explicitly if it wants a timestamp update. + */ + if (iattr->ia_size != ip->i_size && + (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { + iattr->ia_ctime = iattr->ia_mtime = + current_fs_time(inode->i_sb); + mask |= ATTR_CTIME | ATTR_MTIME; + } + + if (iattr->ia_size > ip->i_size) { + ip->i_d.di_size = iattr->ia_size; + ip->i_size = iattr->ia_size; + } else if (iattr->ia_size <= ip->i_size || + (iattr->ia_size == 0 && ip->i_d.di_nextents)) { + /* + * Signal a sync transaction unless we are truncating an + * already unlinked file on a wsync filesystem. + */ + error = xfs_itruncate_finish(&tp, ip, iattr->ia_size, + XFS_DATA_FORK, + ((ip->i_d.di_nlink != 0 || + !(mp->m_flags & XFS_MOUNT_WSYNC)) + ? 1 : 0)); + if (error) + goto out_trans_abort; + + /* + * Truncated "down", so we're removing references to old data + * here - if we delay flushing for a long time, we expose + * ourselves unduly to the notorious NULL files problem. So, + * we mark this inode and flush it when the file is closed, + * and do not wait the usual (long) time for writeout. 
+ */ + xfs_iflags_set(ip, XFS_ITRUNCATED); + } + + if (mask & ATTR_CTIME) { + inode->i_ctime = iattr->ia_ctime; + ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; + ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; + ip->i_update_core = 1; + } + if (mask & ATTR_MTIME) { + inode->i_mtime = iattr->ia_mtime; + ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; + ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; + ip->i_update_core = 1; + } + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + XFS_STATS_INC(xs_ig_attrchg); + + if (mp->m_flags & XFS_MOUNT_WSYNC) + xfs_trans_set_sync(tp); + + error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); +out_unlock: + if (lock_flags) + xfs_iunlock(ip, lock_flags); + return error; + +out_trans_abort: + commit_flags |= XFS_TRANS_ABORT; +out_trans_cancel: + xfs_trans_cancel(tp, commit_flags); + goto out_unlock; +} + STATIC int xfs_vn_setattr( struct dentry *dentry, struct iattr *iattr) { - return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0); + if (iattr->ia_valid & ATTR_SIZE) + return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0); + return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0); } #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 619720705bc6..a4f56a42ef90 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -50,430 +50,6 @@ #include "xfs_vnodeops.h" #include "xfs_trace.h" -int -xfs_setattr( - struct xfs_inode *ip, - struct iattr *iattr, - int flags) -{ - xfs_mount_t *mp = ip->i_mount; - struct inode *inode = VFS_I(ip); - int mask = iattr->ia_valid; - xfs_trans_t *tp; - int code; - uint lock_flags; - uint commit_flags=0; - uid_t uid=0, iuid=0; - gid_t gid=0, igid=0; - struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; - int need_iolock = 1; - - trace_xfs_setattr(ip); - - if (mp->m_flags & XFS_MOUNT_RDONLY) - return XFS_ERROR(EROFS); - - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - code = -inode_change_ok(inode, iattr); - if (code) - return code; - - olddquot1 = olddquot2 = NULL; - udqp = gdqp = NULL; - - /* - * If disk quotas is on, we make sure that the dquots do exist on disk, - * before we start any other transactions. Trying to do this later - * is messy. We don't care to take a readlock to look at the ids - * in inode here, because we can't hold it across the trans_reserve. - * If the IDs do change before we take the ilock, we're covered - * because the i_*dquot fields will get updated anyway. - */ - if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) { - uint qflags = 0; - - if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) { - uid = iattr->ia_uid; - qflags |= XFS_QMOPT_UQUOTA; - } else { - uid = ip->i_d.di_uid; - } - if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) { - gid = iattr->ia_gid; - qflags |= XFS_QMOPT_GQUOTA; - } else { - gid = ip->i_d.di_gid; - } - - /* - * We take a reference when we initialize udqp and gdqp, - * so it is important that we never blindly double trip on - * the same variable. See xfs_create() for an example. - */ - ASSERT(udqp == NULL); - ASSERT(gdqp == NULL); - code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip), - qflags, &udqp, &gdqp); - if (code) - return code; - } - - /* - * For the other attributes, we acquire the inode lock and - * first do an error checking pass. 
- */ - tp = NULL; - lock_flags = XFS_ILOCK_EXCL; - if (flags & XFS_ATTR_NOLOCK) - need_iolock = 0; - if (!(mask & ATTR_SIZE)) { - tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); - commit_flags = 0; - code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), - 0, 0, 0); - if (code) { - lock_flags = 0; - goto error_return; - } - } else { - if (need_iolock) - lock_flags |= XFS_IOLOCK_EXCL; - } - - xfs_ilock(ip, lock_flags); - - /* - * Change file ownership. Must be the owner or privileged. - */ - if (mask & (ATTR_UID|ATTR_GID)) { - /* - * These IDs could have changed since we last looked at them. - * But, we're assured that if the ownership did change - * while we didn't have the inode locked, inode's dquot(s) - * would have changed also. - */ - iuid = ip->i_d.di_uid; - igid = ip->i_d.di_gid; - gid = (mask & ATTR_GID) ? iattr->ia_gid : igid; - uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid; - - /* - * Do a quota reservation only if uid/gid is actually - * going to change. - */ - if (XFS_IS_QUOTA_RUNNING(mp) && - ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) || - (XFS_IS_GQUOTA_ON(mp) && igid != gid))) { - ASSERT(tp); - code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp, - capable(CAP_FOWNER) ? - XFS_QMOPT_FORCE_RES : 0); - if (code) /* out of quota */ - goto error_return; - } - } - - /* - * Truncate file. Must have write permission and not be a directory. - */ - if (mask & ATTR_SIZE) { - /* Short circuit the truncate case for zero length files */ - if (iattr->ia_size == 0 && - ip->i_size == 0 && ip->i_d.di_nextents == 0) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - lock_flags &= ~XFS_ILOCK_EXCL; - if (mask & ATTR_CTIME) { - inode->i_mtime = inode->i_ctime = - current_fs_time(inode->i_sb); - xfs_mark_inode_dirty_sync(ip); - } - code = 0; - goto error_return; - } - - if (S_ISDIR(ip->i_d.di_mode)) { - code = XFS_ERROR(EISDIR); - goto error_return; - } else if (!S_ISREG(ip->i_d.di_mode)) { - code = XFS_ERROR(EINVAL); - goto error_return; - } - - /* - * Make sure that the dquots are attached to the inode. - */ - code = xfs_qm_dqattach_locked(ip, 0); - if (code) - goto error_return; - - /* - * Now we can make the changes. Before we join the inode - * to the transaction, if ATTR_SIZE is set then take care of - * the part of the truncation that must be done without the - * inode lock. This needs to be done before joining the inode - * to the transaction, because the inode cannot be unlocked - * once it is a part of the transaction. - */ - if (iattr->ia_size > ip->i_size) { - /* - * Do the first part of growing a file: zero any data - * in the last block that is beyond the old EOF. We - * need to do this before the inode is joined to the - * transaction to modify the i_size. - */ - code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size); - if (code) - goto error_return; - } - xfs_iunlock(ip, XFS_ILOCK_EXCL); - lock_flags &= ~XFS_ILOCK_EXCL; - - /* - * We are going to log the inode size change in this - * transaction so any previous writes that are beyond the on - * disk EOF and the new EOF that have not been written out need - * to be written here. If we do not write the data out, we - * expose ourselves to the null files problem. - * - * Only flush from the on disk size to the smaller of the in - * memory file size or the new size as that's the range we - * really care about here and prevents waiting for other data - * not within the range we care about here. 
- */ - if (ip->i_size != ip->i_d.di_size && - iattr->ia_size > ip->i_d.di_size) { - code = xfs_flush_pages(ip, - ip->i_d.di_size, iattr->ia_size, - XBF_ASYNC, FI_NONE); - if (code) - goto error_return; - } - - /* wait for all I/O to complete */ - xfs_ioend_wait(ip); - - code = -block_truncate_page(inode->i_mapping, iattr->ia_size, - xfs_get_blocks); - if (code) - goto error_return; - - tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE); - code = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, - XFS_TRANS_PERM_LOG_RES, - XFS_ITRUNCATE_LOG_COUNT); - if (code) - goto error_return; - - truncate_setsize(inode, iattr->ia_size); - - commit_flags = XFS_TRANS_RELEASE_LOG_RES; - lock_flags |= XFS_ILOCK_EXCL; - - xfs_ilock(ip, XFS_ILOCK_EXCL); - - xfs_trans_ijoin(tp, ip); - - /* - * Only change the c/mtime if we are changing the size - * or we are explicitly asked to change it. This handles - * the semantic difference between truncate() and ftruncate() - * as implemented in the VFS. - * - * The regular truncate() case without ATTR_CTIME and ATTR_MTIME - * is a special case where we need to update the times despite - * not having these flags set. For all other operations the - * VFS set these flags explicitly if it wants a timestamp - * update. - */ - if (iattr->ia_size != ip->i_size && - (!(mask & (ATTR_CTIME | ATTR_MTIME)))) { - iattr->ia_ctime = iattr->ia_mtime = - current_fs_time(inode->i_sb); - mask |= ATTR_CTIME | ATTR_MTIME; - } - - if (iattr->ia_size > ip->i_size) { - ip->i_d.di_size = iattr->ia_size; - ip->i_size = iattr->ia_size; - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - } else if (iattr->ia_size <= ip->i_size || - (iattr->ia_size == 0 && ip->i_d.di_nextents)) { - /* - * signal a sync transaction unless - * we're truncating an already unlinked - * file on a wsync filesystem - */ - code = xfs_itruncate_finish(&tp, ip, iattr->ia_size, - XFS_DATA_FORK, - ((ip->i_d.di_nlink != 0 || - !(mp->m_flags & XFS_MOUNT_WSYNC)) - ? 1 : 0)); - if (code) - goto abort_return; - /* - * Truncated "down", so we're removing references - * to old data here - if we now delay flushing for - * a long time, we expose ourselves unduly to the - * notorious NULL files problem. So, we mark this - * vnode and flush it when the file is closed, and - * do not wait the usual (long) time for writeout. - */ - xfs_iflags_set(ip, XFS_ITRUNCATED); - } - } else if (tp) { - xfs_trans_ijoin(tp, ip); - } - - /* - * Change file ownership. Must be the owner or privileged. - */ - if (mask & (ATTR_UID|ATTR_GID)) { - /* - * CAP_FSETID overrides the following restrictions: - * - * The set-user-ID and set-group-ID bits of a file will be - * cleared upon successful return from chown() - */ - if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) && - !capable(CAP_FSETID)) { - ip->i_d.di_mode &= ~(S_ISUID|S_ISGID); - } - - /* - * Change the ownerships and register quota modifications - * in the transaction. - */ - if (iuid != uid) { - if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) { - ASSERT(mask & ATTR_UID); - ASSERT(udqp); - olddquot1 = xfs_qm_vop_chown(tp, ip, - &ip->i_udquot, udqp); - } - ip->i_d.di_uid = uid; - inode->i_uid = uid; - } - if (igid != gid) { - if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) { - ASSERT(!XFS_IS_PQUOTA_ON(mp)); - ASSERT(mask & ATTR_GID); - ASSERT(gdqp); - olddquot2 = xfs_qm_vop_chown(tp, ip, - &ip->i_gdquot, gdqp); - } - ip->i_d.di_gid = gid; - inode->i_gid = gid; - } - } - - /* - * Change file access modes. 
- */ - if (mask & ATTR_MODE) { - umode_t mode = iattr->ia_mode; - - if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) - mode &= ~S_ISGID; - - ip->i_d.di_mode &= S_IFMT; - ip->i_d.di_mode |= mode & ~S_IFMT; - - inode->i_mode &= S_IFMT; - inode->i_mode |= mode & ~S_IFMT; - } - - /* - * Change file access or modified times. - */ - if (mask & ATTR_ATIME) { - inode->i_atime = iattr->ia_atime; - ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec; - ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec; - ip->i_update_core = 1; - } - if (mask & ATTR_CTIME) { - inode->i_ctime = iattr->ia_ctime; - ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec; - ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec; - ip->i_update_core = 1; - } - if (mask & ATTR_MTIME) { - inode->i_mtime = iattr->ia_mtime; - ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec; - ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec; - ip->i_update_core = 1; - } - - /* - * And finally, log the inode core if any attribute in it - * has been changed. - */ - if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE| - ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)) - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - - XFS_STATS_INC(xs_ig_attrchg); - - /* - * If this is a synchronous mount, make sure that the - * transaction goes to disk before returning to the user. - * This is slightly sub-optimal in that truncates require - * two sync transactions instead of one for wsync filesystems. - * One for the truncate and one for the timestamps since we - * don't want to change the timestamps unless we're sure the - * truncate worked. Truncates are less than 1% of the laddis - * mix so this probably isn't worth the trouble to optimize. - */ - code = 0; - if (mp->m_flags & XFS_MOUNT_WSYNC) - xfs_trans_set_sync(tp); - - code = xfs_trans_commit(tp, commit_flags); - - xfs_iunlock(ip, lock_flags); - - /* - * Release any dquot(s) the inode had kept before chown. - */ - xfs_qm_dqrele(olddquot1); - xfs_qm_dqrele(olddquot2); - xfs_qm_dqrele(udqp); - xfs_qm_dqrele(gdqp); - - if (code) - return code; - - /* - * XXX(hch): Updating the ACL entries is not atomic vs the i_mode - * update. We could avoid this with linked transactions - * and passing down the transaction pointer all the way - * to attr_set. No previous user of the generic - * Posix ACL code seems to care about this issue either. - */ - if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { - code = -xfs_acl_chmod(inode); - if (code) - return XFS_ERROR(code); - } - - return 0; - - abort_return: - commit_flags |= XFS_TRANS_ABORT; - error_return: - xfs_qm_dqrele(udqp); - xfs_qm_dqrele(gdqp); - if (tp) { - xfs_trans_cancel(tp, commit_flags); - } - if (lock_flags != 0) { - xfs_iunlock(ip, lock_flags); - } - return code; -} - /* * The maximum pathlen is 1024 bytes. 
Since the minimum file system * blocksize is 512 bytes, we can get a max of 2 extents back from @@ -2784,7 +2360,7 @@ xfs_change_file_space( iattr.ia_valid = ATTR_SIZE; iattr.ia_size = startoffset; - error = xfs_setattr(ip, &iattr, attr_flags); + error = xfs_setattr_size(ip, &iattr, attr_flags); if (error) return error; diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 3bcd23353d6c..35d3d513e1e9 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -13,7 +13,8 @@ struct xfs_inode; struct xfs_iomap; -int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags); +int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, int flags); +int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags); #define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */ #define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */ #define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */ -- cgit v1.2.3 From 681b120018e3c7e2680c93e8188c5ee34215df2f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:26 +0200 Subject: xfs: always log timestamp updates in xfs_setattr_size Get rid of the special case where we use unlogged timestamp updates for a truncate to the current inode size, and just call xfs_setattr_nonsize for it to treat it like a utimes calls. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_iops.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 65d3bf8266e7..5a0fcb09fc7e 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -773,14 +773,15 @@ xfs_setattr_size( */ if (iattr->ia_size == 0 && ip->i_size == 0 && ip->i_d.di_nextents == 0) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - lock_flags &= ~XFS_ILOCK_EXCL; - if (mask & ATTR_CTIME) { - inode->i_mtime = inode->i_ctime = - current_fs_time(inode->i_sb); - xfs_mark_inode_dirty_sync(ip); - } - goto out_unlock; + if (!(mask & (ATTR_CTIME|ATTR_MTIME))) + goto out_unlock; + + /* + * Use the regular setattr path to update the timestamps. + */ + xfs_iunlock(ip, lock_flags); + iattr->ia_valid &= ~ATTR_SIZE; + return xfs_setattr_nonsize(ip, iattr, 0); } /* -- cgit v1.2.3 From 857b9778d86ccba7d7b42c9d8aeecde794ec8a6b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:30 +0200 Subject: xfs: kill xfs_itruncate_start xfs_itruncate_start is a rather length wrapper that evaluates to a call to xfs_ioend_wait and xfs_tosspages, and only has two callers. Instead of using the complicated checks left over from IRIX where we can to truncate the pagecache just call xfs_tosspages (aka truncate_inode_pages) directly as we want to get rid of all data after i_size, and truncate_inode_pages handles incorrect alignments and too large offsets just fine. 
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_trace.h | 34 --------- fs/xfs/xfs_inode.c | 163 +------------------------------------------ fs/xfs/xfs_inode.h | 11 --- fs/xfs/xfs_vnodeops.c | 27 +------ 4 files changed, 4 insertions(+), 231 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index d48b7a579ae1..cac41e423451 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -1029,40 +1029,6 @@ DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound); - -TRACE_EVENT(xfs_itruncate_start, - TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag, - xfs_off_t toss_start, xfs_off_t toss_finish), - TP_ARGS(ip, new_size, flag, toss_start, toss_finish), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fsize_t, size) - __field(xfs_fsize_t, new_size) - __field(xfs_off_t, toss_start) - __field(xfs_off_t, toss_finish) - __field(int, flag) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->size = ip->i_d.di_size; - __entry->new_size = new_size; - __entry->toss_start = toss_start; - __entry->toss_finish = toss_finish; - __entry->flag = flag; - ), - TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx " - "toss start 0x%llx toss finish 0x%llx", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS), - __entry->size, - __entry->new_size, - __entry->toss_start, - __entry->toss_finish) -); - DECLARE_EVENT_CLASS(xfs_itrunc_class, TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), TP_ARGS(ip, new_size), diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index a098a20ca63e..82a282ab63dc 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1217,165 +1217,8 @@ xfs_isize_check( #endif /* DEBUG */ /* - * Calculate the last possible buffered byte in a file. This must - * include data that was buffered beyond the EOF by the write code. - * This also needs to deal with overflowing the xfs_fsize_t type - * which can happen for sizes near the limit. - * - * We also need to take into account any blocks beyond the EOF. It - * may be the case that they were buffered by a write which failed. - * In that case the pages will still be in memory, but the inode size - * will never have been updated. - */ -STATIC xfs_fsize_t -xfs_file_last_byte( - xfs_inode_t *ip) -{ - xfs_mount_t *mp; - xfs_fsize_t last_byte; - xfs_fileoff_t last_block; - xfs_fileoff_t size_last_block; - int error; - - ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)); - - mp = ip->i_mount; - /* - * Only check for blocks beyond the EOF if the extents have - * been read in. This eliminates the need for the inode lock, - * and it also saves us from looking when it really isn't - * necessary. 
- */ - if (ip->i_df.if_flags & XFS_IFEXTENTS) { - xfs_ilock(ip, XFS_ILOCK_SHARED); - error = xfs_bmap_last_offset(NULL, ip, &last_block, - XFS_DATA_FORK); - xfs_iunlock(ip, XFS_ILOCK_SHARED); - if (error) { - last_block = 0; - } - } else { - last_block = 0; - } - size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size); - last_block = XFS_FILEOFF_MAX(last_block, size_last_block); - - last_byte = XFS_FSB_TO_B(mp, last_block); - if (last_byte < 0) { - return XFS_MAXIOFFSET(mp); - } - last_byte += (1 << mp->m_writeio_log); - if (last_byte < 0) { - return XFS_MAXIOFFSET(mp); - } - return last_byte; -} - -/* - * Start the truncation of the file to new_size. The new size - * must be smaller than the current size. This routine will - * clear the buffer and page caches of file data in the removed - * range, and xfs_itruncate_finish() will remove the underlying - * disk blocks. - * - * The inode must have its I/O lock locked EXCLUSIVELY, and it - * must NOT have the inode lock held at all. This is because we're - * calling into the buffer/page cache code and we can't hold the - * inode lock when we do so. - * - * We need to wait for any direct I/Os in flight to complete before we - * proceed with the truncate. This is needed to prevent the extents - * being read or written by the direct I/Os from being removed while the - * I/O is in flight as there is no other method of synchronising - * direct I/O with the truncate operation. Also, because we hold - * the IOLOCK in exclusive mode, we prevent new direct I/Os from being - * started until the truncate completes and drops the lock. Essentially, - * the xfs_ioend_wait() call forms an I/O barrier that provides strict - * ordering between direct I/Os and the truncate operation. - * - * The flags parameter can have either the value XFS_ITRUNC_DEFINITE - * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used - * in the case that the caller is locking things out of order and - * may not be able to call xfs_itruncate_finish() with the inode lock - * held without dropping the I/O lock. If the caller must drop the - * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start() - * must be called again with all the same restrictions as the initial - * call. - */ -int -xfs_itruncate_start( - xfs_inode_t *ip, - uint flags, - xfs_fsize_t new_size) -{ - xfs_fsize_t last_byte; - xfs_off_t toss_start; - xfs_mount_t *mp; - int error = 0; - - ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); - ASSERT((new_size == 0) || (new_size <= ip->i_size)); - ASSERT((flags == XFS_ITRUNC_DEFINITE) || - (flags == XFS_ITRUNC_MAYBE)); - - mp = ip->i_mount; - - /* wait for the completion of any pending DIOs */ - if (new_size == 0 || new_size < ip->i_size) - xfs_ioend_wait(ip); - - /* - * Call toss_pages or flushinval_pages to get rid of pages - * overlapping the region being removed. We have to use - * the less efficient flushinval_pages in the case that the - * caller may not be able to finish the truncate without - * dropping the inode's I/O lock. Make sure - * to catch any pages brought in by buffers overlapping - * the EOF by searching out beyond the isize by our - * block size. We round new_size up to a block boundary - * so that we don't toss things on the same block as - * new_size but before it. - * - * Before calling toss_page or flushinval_pages, make sure to - * call remapf() over the same region if the file is mapped. 
- * This frees up mapped file references to the pages in the - * given range and for the flushinval_pages case it ensures - * that we get the latest mapped changes flushed out. - */ - toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); - toss_start = XFS_FSB_TO_B(mp, toss_start); - if (toss_start < 0) { - /* - * The place to start tossing is beyond our maximum - * file size, so there is no way that the data extended - * out there. - */ - return 0; - } - last_byte = xfs_file_last_byte(ip); - trace_xfs_itruncate_start(ip, new_size, flags, toss_start, last_byte); - if (last_byte > toss_start) { - if (flags & XFS_ITRUNC_DEFINITE) { - xfs_tosspages(ip, toss_start, - -1, FI_REMAPF_LOCKED); - } else { - error = xfs_flushinval_pages(ip, toss_start, - -1, FI_REMAPF_LOCKED); - } - } - -#ifdef DEBUG - if (new_size == 0) { - ASSERT(VN_CACHED(VFS_I(ip)) == 0); - } -#endif - return error; -} - -/* - * Shrink the file to the given new_size. The new size must be smaller than - * the current size. This will free up the underlying blocks in the removed - * range after a call to xfs_itruncate_start() or xfs_atruncate_start(). + * Free up the underlying blocks past new_size. The new size must be + * smaller than the current size. * * The transaction passed to this routine must have made a permanent log * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the @@ -1387,7 +1230,7 @@ xfs_itruncate_start( * will be "held" within the returned transaction. This routine does NOT * require any disk space to be reserved for it within the transaction. * - * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it + * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK, and it * indicates the fork which is to be truncated. For the attribute fork we only * support truncation to size 0. * diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 964cfea77686..6efd471c8724 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -457,16 +457,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) extern struct lock_class_key xfs_iolock_reclaimable; -/* - * Flags for xfs_itruncate_start(). - */ -#define XFS_ITRUNC_DEFINITE 0x1 -#define XFS_ITRUNC_MAYBE 0x2 - -#define XFS_ITRUNC_FLAGS \ - { XFS_ITRUNC_DEFINITE, "DEFINITE" }, \ - { XFS_ITRUNC_MAYBE, "MAYBE" } - /* * For multiple groups support: if S_ISGID bit is set in the parent * directory, group of new file is set to that of the parent, and @@ -501,7 +491,6 @@ uint xfs_ip2xflags(struct xfs_inode *); uint xfs_dic2xflags(struct xfs_dinode *); int xfs_ifree(struct xfs_trans *, xfs_inode_t *, struct xfs_bmap_free *); -int xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t); int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, xfs_fsize_t, int, int); int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index a4f56a42ef90..45b8ac662aee 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -197,13 +197,6 @@ xfs_free_eofblocks( */ tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); - /* - * Do the xfs_itruncate_start() call before - * reserving any log space because - * itruncate_start will call into the buffer - * cache and we can't - * do that within a transaction. 
- */ if (flags & XFS_FREE_EOF_TRYLOCK) { if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { xfs_trans_cancel(tp, 0); @@ -212,13 +205,6 @@ xfs_free_eofblocks( } else { xfs_ilock(ip, XFS_IOLOCK_EXCL); } - error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, - ip->i_size); - if (error) { - xfs_trans_cancel(tp, 0); - xfs_iunlock(ip, XFS_IOLOCK_EXCL); - return error; - } error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), @@ -660,20 +646,9 @@ xfs_inactive( tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE); if (truncate) { - /* - * Do the xfs_itruncate_start() call before - * reserving any log space because itruncate_start - * will call into the buffer cache and we can't - * do that within a transaction. - */ xfs_ilock(ip, XFS_IOLOCK_EXCL); - error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0); - if (error) { - xfs_trans_cancel(tp, 0); - xfs_iunlock(ip, XFS_IOLOCK_EXCL); - return VN_INACTIVE_CACHE; - } + xfs_ioend_wait(ip); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), -- cgit v1.2.3 From 8f04c47aa9712874af2c8816c2ca2a332cba80e4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:34 +0200 Subject: xfs: split xfs_itruncate_finish Split the guts of xfs_itruncate_finish that loop over the existing extents and call xfs_bunmapi on them into a new helper, xfs_itruncate_extents. Make xfs_attr_inactive call it directly instead of xfs_itruncate_finish, which allows us to simplify the latter a lot, by only letting it deal with the data fork. As a result xfs_itruncate_finish is renamed to xfs_itruncate_data to make its use case more obvious. Also remove the sync parameter from xfs_itruncate_data, which has been unnecessary since the introduction of the busy extent list in 2002, and completely dead code since 2003 when the XFS_BMAPI_ASYNC parameter was made a no-op. I can't actually see why xfs_attr_inactive needs to set the transaction sync, but let's keep this patch simple and without changes in behaviour. Also avoid passing a useless argument to xfs_isize_check, and make it private to xfs_inode.c. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_iops.c | 10 +- fs/xfs/linux-2.6/xfs_trace.h | 4 +- fs/xfs/quota/xfs_qm_syscalls.c | 2 +- fs/xfs/xfs_attr.c | 22 +-- fs/xfs/xfs_inode.c | 357 +++++++++++++++-------------------------- fs/xfs/xfs_inode.h | 13 +- fs/xfs/xfs_vnodeops.c | 24 +-- 7 files changed, 155 insertions(+), 277 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 5a0fcb09fc7e..501e4f630548 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -879,15 +879,7 @@ xfs_setattr_size( ip->i_size = iattr->ia_size; } else if (iattr->ia_size <= ip->i_size || (iattr->ia_size == 0 && ip->i_d.di_nextents)) { - /* - * Signal a sync transaction unless we are truncating an - * already unlinked file on a wsync filesystem. - */ - error = xfs_itruncate_finish(&tp, ip, iattr->ia_size, - XFS_DATA_FORK, - ((ip->i_d.di_nlink != 0 || - !(mp->m_flags & XFS_MOUNT_WSYNC)) - ?
1 : 0)); + error = xfs_itruncate_data(&tp, ip, iattr->ia_size); if (error) goto out_trans_abort; diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index cac41e423451..4fe53f9f0477 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -1055,8 +1055,8 @@ DECLARE_EVENT_CLASS(xfs_itrunc_class, DEFINE_EVENT(xfs_itrunc_class, name, \ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ TP_ARGS(ip, new_size)) -DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start); -DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end); +DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start); +DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end); TRACE_EVENT(xfs_pagecache_inval, TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 2dadb15d5ca9..f2dfc74ccf34 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -263,7 +263,7 @@ xfs_qm_scall_trunc_qfile( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); - error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1); + error = xfs_itruncate_data(&tp, ip, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 01d2072fb6d4..795d5aac7042 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -822,17 +822,21 @@ xfs_attr_inactive(xfs_inode_t *dp) error = xfs_attr_root_inactive(&trans, dp); if (error) goto out; + /* - * signal synchronous inactive transactions unless this - * is a synchronous mount filesystem in which case we - * know that we're here because we've been called out of - * xfs_inactive which means that the last reference is gone - * and the unlink transaction has already hit the disk so - * async inactive transactions are safe. + * Signal synchronous inactive transactions unless this is a + * synchronous mount filesystem in which case we know that we're here + * because we've been called out of xfs_inactive which means that the + * last reference is gone and the unlink transaction has already hit + * the disk so async inactive transactions are safe. */ - if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK, - (!(mp->m_flags & XFS_MOUNT_WSYNC) - ? 1 : 0)))) + if (!(mp->m_flags & XFS_MOUNT_WSYNC)) { + if (dp->i_d.di_anextents > 0) + xfs_trans_set_sync(trans); + } + + error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); + if (error) goto out; /* diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 82a282ab63dc..aa143b870afb 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -52,7 +52,7 @@ kmem_zone_t *xfs_ifork_zone; kmem_zone_t *xfs_inode_zone; /* - * Used in xfs_itruncate(). This is the maximum number of extents + * Used in xfs_itruncate_extents(). This is the maximum number of extents * freed from a file in a single transaction. */ #define XFS_ITRUNC_MAX_EXTENTS 2 @@ -1179,15 +1179,15 @@ xfs_ialloc( * at least do it for regular files. 
*/ #ifdef DEBUG -void +STATIC void xfs_isize_check( - xfs_mount_t *mp, - xfs_inode_t *ip, - xfs_fsize_t isize) + struct xfs_inode *ip, + xfs_fsize_t isize) { - xfs_fileoff_t map_first; - int nimaps; - xfs_bmbt_irec_t imaps[2]; + struct xfs_mount *mp = ip->i_mount; + xfs_fileoff_t map_first; + int nimaps; + xfs_bmbt_irec_t imaps[2]; if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) return; @@ -1214,11 +1214,14 @@ xfs_isize_check( ASSERT(nimaps == 1); ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK); } +#else /* DEBUG */ +#define xfs_isize_check(ip, isize) #endif /* DEBUG */ /* - * Free up the underlying blocks past new_size. The new size must be - * smaller than the current size. + * Free up the underlying blocks past new_size. The new size must be smaller + * than the current size. This routine can be used both for the attribute and + * data fork, and does not modify the inode size, which is left to the caller. * * The transaction passed to this routine must have made a permanent log * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the @@ -1230,31 +1233,6 @@ xfs_isize_check( * will be "held" within the returned transaction. This routine does NOT * require any disk space to be reserved for it within the transaction. * - * The fork parameter must be either XFS_ATTR_FORK or XFS_DATA_FORK, and it - * indicates the fork which is to be truncated. For the attribute fork we only - * support truncation to size 0. - * - * We use the sync parameter to indicate whether or not the first transaction - * we perform might have to be synchronous. For the attr fork, it needs to be - * so if the unlink of the inode is not yet known to be permanent in the log. - * This keeps us from freeing and reusing the blocks of the attribute fork - * before the unlink of the inode becomes permanent. - * - * For the data fork, we normally have to run synchronously if we're being - * called out of the inactive path or we're being called out of the create path - * where we're truncating an existing file. Either way, the truncate needs to - * be sync so blocks don't reappear in the file with altered data in case of a - * crash. wsync filesystems can run the first case async because anything that - * shrinks the inode has to run sync so by the time we're called here from - * inactive, the inode size is permanently set to 0. - * - * Calls from the truncate path always need to be sync unless we're in a wsync - * filesystem and the file has already been unlinked. - * - * The caller is responsible for correctly setting the sync parameter. It gets - * too hard for us to guess here which path we're being called out of just - * based on inode state. - * * If we get an error, we must return with the inode locked and linked into the * current transaction. This keeps things simple for the higher level code, * because it always knows that the inode is locked and held in the transaction @@ -1262,124 +1240,31 @@ xfs_isize_check( * dirty on error so that transactions can be easily aborted if possible. 
*/ int -xfs_itruncate_finish( - xfs_trans_t **tp, - xfs_inode_t *ip, - xfs_fsize_t new_size, - int fork, - int sync) +xfs_itruncate_extents( + struct xfs_trans **tpp, + struct xfs_inode *ip, + int whichfork, + xfs_fsize_t new_size) { - xfs_fsblock_t first_block; - xfs_fileoff_t first_unmap_block; - xfs_fileoff_t last_block; - xfs_filblks_t unmap_len=0; - xfs_mount_t *mp; - xfs_trans_t *ntp; - int done; - int committed; - xfs_bmap_free_t free_list; - int error; + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp = *tpp; + struct xfs_trans *ntp; + xfs_bmap_free_t free_list; + xfs_fsblock_t first_block; + xfs_fileoff_t first_unmap_block; + xfs_fileoff_t last_block; + xfs_filblks_t unmap_len; + int committed; + int error = 0; + int done = 0; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); - ASSERT((new_size == 0) || (new_size <= ip->i_size)); - ASSERT(*tp != NULL); - ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); - ASSERT(ip->i_transp == *tp); + ASSERT(new_size <= ip->i_size); + ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); + ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); ASSERT(ip->i_itemp->ili_lock_flags == 0); - - - ntp = *tp; - mp = (ntp)->t_mountp; - ASSERT(! XFS_NOT_DQATTACHED(mp, ip)); - - /* - * We only support truncating the entire attribute fork. - */ - if (fork == XFS_ATTR_FORK) { - new_size = 0LL; - } - first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); - trace_xfs_itruncate_finish_start(ip, new_size); - - /* - * The first thing we do is set the size to new_size permanently - * on disk. This way we don't have to worry about anyone ever - * being able to look at the data being freed even in the face - * of a crash. What we're getting around here is the case where - * we free a block, it is allocated to another file, it is written - * to, and then we crash. If the new data gets written to the - * file but the log buffers containing the free and reallocation - * don't, then we'd end up with garbage in the blocks being freed. - * As long as we make the new_size permanent before actually - * freeing any blocks it doesn't matter if they get written to. - * - * The callers must signal into us whether or not the size - * setting here must be synchronous. There are a few cases - * where it doesn't have to be synchronous. Those cases - * occur if the file is unlinked and we know the unlink is - * permanent or if the blocks being truncated are guaranteed - * to be beyond the inode eof (regardless of the link count) - * and the eof value is permanent. Both of these cases occur - * only on wsync-mounted filesystems. In those cases, we're - * guaranteed that no user will ever see the data in the blocks - * that are being truncated so the truncate can run async. - * In the free beyond eof case, the file may wind up with - * more blocks allocated to it than it needs if we crash - * and that won't get fixed until the next time the file - * is re-opened and closed but that's ok as that shouldn't - * be too many blocks. - * - * However, we can't just make all wsync xactions run async - * because there's one call out of the create path that needs - * to run sync where it's truncating an existing file to size - * 0 whose size is > 0. - * - * It's probably possible to come up with a test in this - * routine that would correctly distinguish all the above - * cases from the values of the function parameters and the - * inode state but for sanity's sake, I've decided to let the - * layers above just tell us. 
It's simpler to correctly figure - * out in the layer above exactly under what conditions we - * can run async and I think it's easier for others read and - * follow the logic in case something has to be changed. - * cscope is your friend -- rcc. - * - * The attribute fork is much simpler. - * - * For the attribute fork we allow the caller to tell us whether - * the unlink of the inode that led to this call is yet permanent - * in the on disk log. If it is not and we will be freeing extents - * in this inode then we make the first transaction synchronous - * to make sure that the unlink is permanent by the time we free - * the blocks. - */ - if (fork == XFS_DATA_FORK) { - if (ip->i_d.di_nextents > 0) { - /* - * If we are not changing the file size then do - * not update the on-disk file size - we may be - * called from xfs_inactive_free_eofblocks(). If we - * update the on-disk file size and then the system - * crashes before the contents of the file are - * flushed to disk then the files may be full of - * holes (ie NULL files bug). - */ - if (ip->i_size != new_size) { - ip->i_d.di_size = new_size; - ip->i_size = new_size; - xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); - } - } - } else if (sync) { - ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC)); - if (ip->i_d.di_anextents > 0) - xfs_trans_set_sync(ntp); - } - ASSERT(fork == XFS_DATA_FORK || - (fork == XFS_ATTR_FORK && - ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) || - (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC))))); + ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); /* * Since it is possible for space to become allocated beyond @@ -1390,128 +1275,142 @@ xfs_itruncate_finish( * beyond the maximum file size (ie it is the same as last_block), * then there is nothing to do. */ + first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp)); - ASSERT(first_unmap_block <= last_block); - done = 0; - if (last_block == first_unmap_block) { - done = 1; - } else { - unmap_len = last_block - first_unmap_block + 1; - } + if (first_unmap_block == last_block) + return 0; + + ASSERT(first_unmap_block < last_block); + unmap_len = last_block - first_unmap_block + 1; while (!done) { - /* - * Free up up to XFS_ITRUNC_MAX_EXTENTS. xfs_bunmapi() - * will tell us whether it freed the entire range or - * not. If this is a synchronous mount (wsync), - * then we can tell bunmapi to keep all the - * transactions asynchronous since the unlink - * transaction that made this inode inactive has - * already hit the disk. There's no danger of - * the freed blocks being reused, there being a - * crash, and the reused blocks suddenly reappearing - * in this file with garbage in them once recovery - * runs. - */ xfs_bmap_init(&free_list, &first_block); - error = xfs_bunmapi(ntp, ip, + error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, - xfs_bmapi_aflag(fork), + xfs_bmapi_aflag(whichfork), XFS_ITRUNC_MAX_EXTENTS, &first_block, &free_list, &done); - if (error) { - /* - * If the bunmapi call encounters an error, - * return to the caller where the transaction - * can be properly aborted. We just need to - * make sure we're not holding any resources - * that we were not when we came in. - */ - xfs_bmap_cancel(&free_list); - return error; - } + if (error) + goto out_bmap_cancel; /* * Duplicate the transaction that has the permanent * reservation and commit the old transaction. 
*/ - error = xfs_bmap_finish(tp, &free_list, &committed); - ntp = *tp; + error = xfs_bmap_finish(&tp, &free_list, &committed); if (committed) - xfs_trans_ijoin(ntp, ip); - - if (error) { - /* - * If the bmap finish call encounters an error, return - * to the caller where the transaction can be properly - * aborted. We just need to make sure we're not - * holding any resources that we were not when we came - * in. - * - * Aborting from this point might lose some blocks in - * the file system, but oh well. - */ - xfs_bmap_cancel(&free_list); - return error; - } + xfs_trans_ijoin(tp, ip); + if (error) + goto out_bmap_cancel; if (committed) { /* * Mark the inode dirty so it will be logged and * moved forward in the log as part of every commit. */ - xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } - ntp = xfs_trans_dup(ntp); - error = xfs_trans_commit(*tp, 0); - *tp = ntp; + ntp = xfs_trans_dup(tp); + error = xfs_trans_commit(tp, 0); + tp = ntp; - xfs_trans_ijoin(ntp, ip); + xfs_trans_ijoin(tp, ip); if (error) - return error; + goto out; + /* - * transaction commit worked ok so we can drop the extra ticket + * Transaction commit worked ok so we can drop the extra ticket * reference that we gained in xfs_trans_dup() */ - xfs_log_ticket_put(ntp->t_ticket); - error = xfs_trans_reserve(ntp, 0, + xfs_log_ticket_put(tp->t_ticket); + error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) - return error; + goto out; } + +out: + *tpp = tp; + return error; +out_bmap_cancel: /* - * Only update the size in the case of the data fork, but - * always re-log the inode so that our permanent transaction - * can keep on rolling it forward in the log. + * If the bunmapi call encounters an error, return to the caller where + * the transaction can be properly aborted. We just need to make sure + * we're not holding any resources that we were not when we came in. */ - if (fork == XFS_DATA_FORK) { - xfs_isize_check(mp, ip, new_size); + xfs_bmap_cancel(&free_list); + goto out; +} + +int +xfs_itruncate_data( + struct xfs_trans **tpp, + struct xfs_inode *ip, + xfs_fsize_t new_size) +{ + int error; + + trace_xfs_itruncate_data_start(ip, new_size); + + /* + * The first thing we do is set the size to new_size permanently on + * disk. This way we don't have to worry about anyone ever being able + * to look at the data being freed even in the face of a crash. + * What we're getting around here is the case where we free a block, it + * is allocated to another file, it is written to, and then we crash. + * If the new data gets written to the file but the log buffers + * containing the free and reallocation don't, then we'd end up with + * garbage in the blocks being freed. As long as we make the new_size + * permanent before actually freeing any blocks it doesn't matter if + * they get written to. + */ + if (ip->i_d.di_nextents > 0) { /* - * If we are not changing the file size then do - * not update the on-disk file size - we may be - * called from xfs_inactive_free_eofblocks(). If we - * update the on-disk file size and then the system - * crashes before the contents of the file are - * flushed to disk then the files may be full of - * holes (ie NULL files bug). + * If we are not changing the file size then do not update + * the on-disk file size - we may be called from + * xfs_inactive_free_eofblocks(). 
If we update the on-disk + * file size and then the system crashes before the contents + * of the file are flushed to disk then the files may be + * full of holes (ie NULL files bug). */ if (ip->i_size != new_size) { ip->i_d.di_size = new_size; ip->i_size = new_size; + xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE); } } - xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE); - ASSERT((new_size != 0) || - (fork == XFS_ATTR_FORK) || - (ip->i_delayed_blks == 0)); - ASSERT((new_size != 0) || - (fork == XFS_ATTR_FORK) || - (ip->i_d.di_nextents == 0)); - trace_xfs_itruncate_finish_end(ip, new_size); + + error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size); + if (error) + return error; + + /* + * If we are not changing the file size then do not update the on-disk + * file size - we may be called from xfs_inactive_free_eofblocks(). + * If we update the on-disk file size and then the system crashes + * before the contents of the file are flushed to disk then the files + * may be full of holes (ie NULL files bug). + */ + xfs_isize_check(ip, new_size); + if (ip->i_size != new_size) { + ip->i_d.di_size = new_size; + ip->i_size = new_size; + } + + ASSERT(new_size != 0 || ip->i_delayed_blks == 0); + ASSERT(new_size != 0 || ip->i_d.di_nextents == 0); + + /* + * Always re-log the inode so that our permanent transaction can keep + * on rolling it forward in the log. + */ + xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE); + + trace_xfs_itruncate_data_end(ip, new_size); return 0; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 6efd471c8724..6495578efe05 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -491,8 +491,10 @@ uint xfs_ip2xflags(struct xfs_inode *); uint xfs_dic2xflags(struct xfs_dinode *); int xfs_ifree(struct xfs_trans *, xfs_inode_t *, struct xfs_bmap_free *); -int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *, - xfs_fsize_t, int, int); +int xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *, + int, xfs_fsize_t); +int xfs_itruncate_data(struct xfs_trans **, struct xfs_inode *, + xfs_fsize_t); int xfs_iunlink(struct xfs_trans *, xfs_inode_t *); void xfs_iext_realloc(xfs_inode_t *, int, int); @@ -568,13 +570,6 @@ void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int); #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) -#ifdef DEBUG -void xfs_isize_check(struct xfs_mount *, struct xfs_inode *, - xfs_fsize_t); -#else /* DEBUG */ -#define xfs_isize_check(mp, ip, isize) -#endif /* DEBUG */ - #if defined(DEBUG) void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); #else diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 45b8ac662aee..11242c482771 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -220,15 +220,12 @@ xfs_free_eofblocks( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); - error = xfs_itruncate_finish(&tp, ip, - ip->i_size, - XFS_DATA_FORK, - 0); - /* - * If we get an error at this point we - * simply don't bother truncating the file. - */ + error = xfs_itruncate_data(&tp, ip, ip->i_size); if (error) { + /* + * If we get an error at this point we simply don't + * bother truncating the file. + */ xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); @@ -665,16 +662,7 @@ xfs_inactive( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip); - /* - * normally, we have to run xfs_itruncate_finish sync. 
- * But if filesystem is wsync and we're in the inactive - * path, then we know that nlink == 0, and that the - * xaction that made nlink == 0 is permanently committed - * since xfs_remove runs as a synchronous transaction. - */ - error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, - (!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0)); - + error = xfs_itruncate_data(&tp, ip, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); -- cgit v1.2.3 From 33b8f7c2479dfcbc5c27174e44b5f659d9f33c70 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:39 +0200 Subject: xfs: improve sync behaviour in the face of aggressive dirtying The following script from Wu Fengguang shows very bad behaviour in XFS when aggressively dirtying data during a sync, with sync times up to almost 10 times as long as ext4. A large part of the issue is that XFS writes data out itself two times in the ->sync_fs method, overriding the livelock protection in the core writeback code, and another issue is the lock-less xfs_ioend_wait call, which doesn't prevent new ioends from being queued up while waiting for the count to reach zero. This patch removes the XFS-internal sync calls and relies on the VFS to do its work just like all other filesystems do. Note that the i_iocount wait, which is rather suboptimal, is simply removed here. We already do it in ->write_inode, which keeps the current suboptimal behaviour. We'll eventually need to remove that as well, but that's material for a separate commit. ------------------------------ snip ------------------------------ #!/bin/sh umount /dev/sda7 mkfs.xfs -f /dev/sda7 # mkfs.ext4 /dev/sda7 # mkfs.btrfs /dev/sda7 mount /dev/sda7 /fs echo $((50<<20)) > /proc/sys/vm/dirty_bytes pid= for i in `seq 10` do dd if=/dev/zero of=/fs/zero-$i bs=1M count=1000 & pid="$pid $!" done sleep 1 tic=$(date +'%s') sync tac=$(date +'%s') echo echo sync time: $((tac-tic)) egrep '(Dirty|Writeback|NFS_Unstable)' /proc/meminfo pidof dd > /dev/null && { kill -9 $pid; echo sync NOT livelocked; } ------------------------------ snip ------------------------------ Signed-off-by: Christoph Hellwig Reported-by: Wu Fengguang Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_sync.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 8ecad5ff9f9b..f54e8ee1ed62 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -359,14 +359,12 @@ xfs_quiesce_data( { int error, error2 = 0; - /* push non-blocking */ - xfs_sync_data(mp, 0); xfs_qm_sync(mp, SYNC_TRYLOCK); - - /* push and block till complete */ - xfs_sync_data(mp, SYNC_WAIT); xfs_qm_sync(mp, SYNC_WAIT); + /* force out the newly dirtied log buffers */ + xfs_log_force(mp, XFS_LOG_SYNC); + /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp); -- cgit v1.2.3 From 7a249cf83da1813cfa71cfe1e265b40045eceb47 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:42 +0200 Subject: xfs: fix filesystem freeze race in xfs_trans_alloc As pointed out by Jan, xfs_trans_alloc can race with a concurrent filesystem freeze when it sleeps during the memory allocation. Fix this by moving the wait_for_freeze call after the memory allocation. This means moving the freeze wait into the low-level _xfs_trans_alloc helper, which thus grows a new argument. Also fix up some comments in that area while at it.
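For orientation before the diff: the calling convention after this change looks roughly as follows (a condensed sketch of the hunks below, not a drop-in snippet).

    static inline struct xfs_trans *
    xfs_trans_alloc(struct xfs_mount *mp, uint type)
    {
            /* normal callers: wait for a pending freeze inside the helper */
            return _xfs_trans_alloc(mp, type, KM_SLEEP, true);
    }

    /*
     * Paths that must make progress even while the filesystem is frozen
     * (xfs_log_sbcount, xfs_fs_log_dummy) opt out of the wait explicitly:
     */
    tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP, false);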
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_fsops.c | 2 +- fs/xfs/xfs_iomap.c | 3 +-- fs/xfs/xfs_mount.c | 14 +++++++------- fs/xfs/xfs_trans.c | 27 ++++++++++----------------- fs/xfs/xfs_trans.h | 10 ++++++++-- 5 files changed, 27 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 9153d2c77caf..b7c492d293b5 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -626,7 +626,7 @@ xfs_fs_log_dummy( xfs_trans_t *tp; int error; - tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); + tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP, false); error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 091d82b94c4d..982b71108f33 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -688,8 +688,7 @@ xfs_iomap_write_unwritten( * the same inode that we complete here and might deadlock * on the iolock. */ - xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); - tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); + tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS, true); tp->t_flags |= XFS_TRANS_RESERVE; error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), 0, diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index b49b82363d20..63659ee316bd 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1566,15 +1566,9 @@ xfs_fs_writable(xfs_mount_t *mp) } /* - * xfs_log_sbcount - * * Called either periodically to keep the on disk superblock values * roughly up to date or from unmount to make sure the values are * correct on a clean unmount. - * - * Note this code can be called during the process of freezing, so - * we may need to use the transaction allocator which does not not - * block when the transaction subsystem is in its frozen state. */ int xfs_log_sbcount( @@ -1596,7 +1590,13 @@ xfs_log_sbcount( if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) return 0; - tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); + /* + * We can be called during the process of freezing, so make sure + * we go ahead even if the frozen for new transactions. We will + * always use a sync transaction in the freeze path to make sure + * the transaction has completed by the time we return. + */ + tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP, false); error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index c83f63b33aae..2837220ea5cf 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -566,31 +566,24 @@ xfs_trans_init( /* * This routine is called to allocate a transaction structure. + * * The type parameter indicates the type of the transaction. These * are enumerated in xfs_trans.h. - * - * Dynamically allocate the transaction structure from the transaction - * zone, initialize it, and return it to the caller. 
*/ -xfs_trans_t * -xfs_trans_alloc( - xfs_mount_t *mp, - uint type) -{ - xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); - return _xfs_trans_alloc(mp, type, KM_SLEEP); -} - -xfs_trans_t * +struct xfs_trans * _xfs_trans_alloc( - xfs_mount_t *mp, - uint type, - uint memflags) + struct xfs_mount *mp, + uint type, + uint memflags, + bool wait_for_freeze) { - xfs_trans_t *tp; + struct xfs_trans *tp; atomic_inc(&mp->m_active_trans); + if (wait_for_freeze) + xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); + tp = kmem_zone_zalloc(xfs_trans_zone, memflags); tp->t_magic = XFS_TRANS_MAGIC; tp->t_type = type; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 06a9759b6352..4e78432ce143 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -447,8 +447,14 @@ typedef struct xfs_trans { /* * XFS transaction mechanism exported interfaces. */ -xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); -xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint); +xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint, bool); + +static inline struct xfs_trans * +xfs_trans_alloc(struct xfs_mount *mp, uint type) +{ + return _xfs_trans_alloc(mp, type, KM_SLEEP, true); +} + xfs_trans_t *xfs_trans_dup(xfs_trans_t *); int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, uint, uint); -- cgit v1.2.3 From f3ca87389dbff0a3dc1a7cb2fa7c62e25421c66c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:47 +0200 Subject: xfs: remove i_transp Remove the transaction pointer in the inode. It's only used to avoid passing down an argument in the bmap code, and for a few asserts in the transaction code right now. Also use the local variable ip in a few more places in xfs_inode_item_unlock, so that it isn't only used for debug builds after the above change. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/quota/xfs_trans_dquot.c | 2 +- fs/xfs/xfs_bmap.c | 35 +++++++++++++++++++---------------- fs/xfs/xfs_inode.c | 3 --- fs/xfs/xfs_inode.h | 1 - fs/xfs/xfs_inode_item.c | 13 ++++--------- fs/xfs/xfs_trans_inode.c | 9 --------- 6 files changed, 24 insertions(+), 39 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 2a3648731331..24084edb7803 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c @@ -59,7 +59,7 @@ xfs_trans_dqjoin( xfs_trans_add_item(tp, &dqp->q_logitem.qli_item); /* - * Initialize i_transp so we can later determine if this dquot is + * Initialize d_transp so we can later determine if this dquot is * associated with this transaction. 
*/ dqp->q_transp = tp; diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index e546a33214c9..7b46b78ba3f0 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -94,6 +94,7 @@ xfs_bmap_add_attrfork_local( */ STATIC int /* error */ xfs_bmap_add_extent_delay_real( + struct xfs_trans *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t *idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ @@ -439,6 +440,7 @@ xfs_bmap_add_attrfork_local( */ STATIC int /* error */ xfs_bmap_add_extent( + struct xfs_trans *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t *idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ @@ -524,7 +526,7 @@ xfs_bmap_add_extent( if (cur) ASSERT(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL); - error = xfs_bmap_add_extent_delay_real(ip, + error = xfs_bmap_add_extent_delay_real(tp, ip, idx, &cur, new, &da_new, first, flist, &logflags); } else { @@ -561,7 +563,7 @@ xfs_bmap_add_extent( int tmp_logflags; /* partial log flag return val */ ASSERT(cur == NULL); - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, da_old > 0, &tmp_logflags, whichfork); logflags |= tmp_logflags; if (error) @@ -604,6 +606,7 @@ done: */ STATIC int /* error */ xfs_bmap_add_extent_delay_real( + struct xfs_trans *tp, /* transaction pointer */ xfs_inode_t *ip, /* incore inode pointer */ xfs_extnum_t *idx, /* extent number to update/insert */ xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ @@ -901,7 +904,7 @@ xfs_bmap_add_extent_delay_real( } if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && ip->i_d.di_nextents > ip->i_df.if_ext_max) { - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; @@ -984,7 +987,7 @@ xfs_bmap_add_extent_delay_real( } if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && ip->i_d.di_nextents > ip->i_df.if_ext_max) { - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; @@ -1052,7 +1055,7 @@ xfs_bmap_add_extent_delay_real( } if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && ip->i_d.di_nextents > ip->i_df.if_ext_max) { - error = xfs_bmap_extents_to_btree(ip->i_transp, ip, + error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur, 1, &tmp_rval, XFS_DATA_FORK); rval |= tmp_rval; @@ -2871,8 +2874,8 @@ xfs_bmap_del_extent( len = del->br_blockcount; do_div(bno, mp->m_sb.sb_rextsize); do_div(len, mp->m_sb.sb_rextsize); - if ((error = xfs_rtfree_extent(ip->i_transp, bno, - (xfs_extlen_t)len))) + error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); + if (error) goto done; do_fx = 0; nblks = len * mp->m_sb.sb_rextsize; @@ -4662,7 +4665,7 @@ xfs_bmapi( if (!wasdelay && (flags & XFS_BMAPI_PREALLOC)) got.br_state = XFS_EXT_UNWRITTEN; } - error = xfs_bmap_add_extent(ip, &lastx, &cur, &got, + error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &got, firstblock, flist, &tmp_logflags, whichfork); logflags |= tmp_logflags; @@ -4763,7 +4766,7 @@ xfs_bmapi( mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) ? 
XFS_EXT_NORM : XFS_EXT_UNWRITTEN; - error = xfs_bmap_add_extent(ip, &lastx, &cur, mval, + error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, mval, firstblock, flist, &tmp_logflags, whichfork); logflags |= tmp_logflags; @@ -5117,7 +5120,7 @@ xfs_bunmapi( del.br_blockcount = mod; } del.br_state = XFS_EXT_UNWRITTEN; - error = xfs_bmap_add_extent(ip, &lastx, &cur, &del, + error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &del, firstblock, flist, &logflags, XFS_DATA_FORK); if (error) @@ -5175,18 +5178,18 @@ xfs_bunmapi( } prev.br_state = XFS_EXT_UNWRITTEN; lastx--; - error = xfs_bmap_add_extent(ip, &lastx, &cur, - &prev, firstblock, flist, &logflags, - XFS_DATA_FORK); + error = xfs_bmap_add_extent(tp, ip, &lastx, + &cur, &prev, firstblock, flist, + &logflags, XFS_DATA_FORK); if (error) goto error0; goto nodelete; } else { ASSERT(del.br_state == XFS_EXT_NORM); del.br_state = XFS_EXT_UNWRITTEN; - error = xfs_bmap_add_extent(ip, &lastx, &cur, - &del, firstblock, flist, &logflags, - XFS_DATA_FORK); + error = xfs_bmap_add_extent(tp, ip, &lastx, + &cur, &del, firstblock, flist, + &logflags, XFS_DATA_FORK); if (error) goto error0; goto nodelete; diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index aa143b870afb..db310f8fb76a 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1261,7 +1261,6 @@ xfs_itruncate_extents( ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); ASSERT(new_size <= ip->i_size); ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); - ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); ASSERT(ip->i_itemp->ili_lock_flags == 0); ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); @@ -1436,7 +1435,6 @@ xfs_iunlink( ASSERT(ip->i_d.di_nlink == 0); ASSERT(ip->i_d.di_mode != 0); - ASSERT(ip->i_transp == tp); mp = tp->t_mountp; @@ -1828,7 +1826,6 @@ xfs_ifree( xfs_buf_t *ibp; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(ip->i_transp == tp); ASSERT(ip->i_d.di_nlink == 0); ASSERT(ip->i_d.di_nextents == 0); ASSERT(ip->i_d.di_anextents == 0); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 6495578efe05..a97644ab945a 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -241,7 +241,6 @@ typedef struct xfs_inode { xfs_ifork_t i_df; /* data fork */ /* Transaction and locking information. */ - struct xfs_trans *i_transp; /* ptr to owning transaction*/ struct xfs_inode_log_item *i_itemp; /* logging information */ mrlock_t i_lock; /* inode lock */ mrlock_t i_iolock; /* inode IO lock */ diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index b1e88d56069c..a49811a3e5a4 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -632,13 +632,8 @@ xfs_inode_item_unlock( struct xfs_inode *ip = iip->ili_inode; unsigned short lock_flags; - ASSERT(iip->ili_inode->i_itemp != NULL); - ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); - - /* - * Clear the transaction pointer in the inode. 
- */ - ip->i_transp = NULL; + ASSERT(ip->i_itemp != NULL); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); /* * If the inode needed a separate buffer with which to log @@ -664,8 +659,8 @@ xfs_inode_item_unlock( lock_flags = iip->ili_lock_flags; iip->ili_lock_flags = 0; if (lock_flags) { - xfs_iunlock(iip->ili_inode, lock_flags); - IRELE(iip->ili_inode); + xfs_iunlock(ip, lock_flags); + IRELE(ip); } } diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index 048b0c689d3e..c8dea2fd7e68 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -55,7 +55,6 @@ xfs_trans_ijoin( { xfs_inode_log_item_t *iip; - ASSERT(ip->i_transp == NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (ip->i_itemp == NULL) xfs_inode_item_init(ip, ip->i_mount); @@ -68,12 +67,6 @@ xfs_trans_ijoin( xfs_trans_add_item(tp, &iip->ili_item); xfs_trans_inode_broot_debug(ip); - - /* - * Initialize i_transp so we can find it with xfs_inode_incore() - * in xfs_trans_iget() above. - */ - ip->i_transp = tp; } /* @@ -111,7 +104,6 @@ xfs_trans_ichgtime( ASSERT(tp); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); - ASSERT(ip->i_transp == tp); tv = current_fs_time(inode->i_sb); @@ -140,7 +132,6 @@ xfs_trans_log_inode( xfs_inode_t *ip, uint flags) { - ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); -- cgit v1.2.3 From 29d104af0a92ba1eac74b636da7fcf88242e1180 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:51 +0200 Subject: xfs: kill the unused struct xfs_sync_work Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_sync.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index e3a6ad27415f..e914fd621746 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -21,14 +21,6 @@ struct xfs_mount; struct xfs_perag; -typedef struct xfs_sync_work { - struct list_head w_list; - struct xfs_mount *w_mount; - void *w_data; /* syncer routine argument */ - void (*w_syncer)(struct xfs_mount *, void *); - struct completion *w_completion; -} xfs_sync_work_t; - #define SYNC_WAIT 0x0001 /* wait for i/o to complete */ #define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ -- cgit v1.2.3 From 4fb44c8272a071290d2ad76164c532fa2902b604 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:34:59 +0200 Subject: xfs: factor out xfs_dir2_leaf_find_entry Add a new xfs_dir2_leaf_find_entry helper to factor out some duplicate code from xfs_dir2_leaf_addname and xfs_dir2_leafn_add. Found by Eric Sandeen using an automated code duplication checker.
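At both call sites the duplicated stale-slot selection now collapses into a single call to the new helper; roughly (condensed from the hunks below, for orientation only):

    /*
     * Pick (or create) the slot for the new leaf entry; the helper also
     * reports the low/high indices that still need to be logged.
     */
    lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale,
                                   highstale, &lfloglow, &lfloghigh);
    lep->hashval = cpu_to_be32(args->hashval);
    /* lep->address is then filled in from the caller's block/offset */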
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_leaf.c | 217 +++++++++++++++++++++++++++---------------------- fs/xfs/xfs_dir2_leaf.h | 3 + fs/xfs/xfs_dir2_node.c | 84 +------------------ 3 files changed, 128 insertions(+), 176 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index ae891223be90..7f7769ea2be8 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -152,6 +152,123 @@ xfs_dir2_block_to_leaf( return 0; } +struct xfs_dir2_leaf_entry * +xfs_dir2_leaf_find_entry( + xfs_dir2_leaf_t *leaf, /* leaf structure */ + int index, /* leaf table position */ + int compact, /* need to compact leaves */ + int lowstale, /* index of prev stale leaf */ + int highstale, /* index of next stale leaf */ + int *lfloglow, /* low leaf logging index */ + int *lfloghigh) /* high leaf logging index */ +{ + if (!leaf->hdr.stale) { + xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */ + + /* + * Now we need to make room to insert the leaf entry. + * + * If there are no stale entries, just insert a hole at index. + */ + lep = &leaf->ents[index]; + if (index < be16_to_cpu(leaf->hdr.count)) + memmove(lep + 1, lep, + (be16_to_cpu(leaf->hdr.count) - index) * + sizeof(*lep)); + + /* + * Record low and high logging indices for the leaf. + */ + *lfloglow = index; + *lfloghigh = be16_to_cpu(leaf->hdr.count); + be16_add_cpu(&leaf->hdr.count, 1); + return lep; + } + + /* + * There are stale entries. + * + * We will use one of them for the new entry. It's probably not at + * the right location, so we'll have to shift some up or down first. + * + * If we didn't compact before, we need to find the nearest stale + * entries before and after our insertion point. + */ + if (compact == 0) { + /* + * Find the first stale entry before the insertion point, + * if any. + */ + for (lowstale = index - 1; + lowstale >= 0 && + be32_to_cpu(leaf->ents[lowstale].address) != + XFS_DIR2_NULL_DATAPTR; + lowstale--) + continue; + + /* + * Find the next stale entry at or after the insertion point, + * if any. Stop if we go so far that the lowstale entry + * would be better. + */ + for (highstale = index; + highstale < be16_to_cpu(leaf->hdr.count) && + be32_to_cpu(leaf->ents[highstale].address) != + XFS_DIR2_NULL_DATAPTR && + (lowstale < 0 || + index - lowstale - 1 >= highstale - index); + highstale++) + continue; + } + + /* + * If the low one is better, use it. + */ + if (lowstale >= 0 && + (highstale == be16_to_cpu(leaf->hdr.count) || + index - lowstale - 1 < highstale - index)) { + ASSERT(index - lowstale - 1 >= 0); + ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == + XFS_DIR2_NULL_DATAPTR); + + /* + * Copy entries up to cover the stale entry and make room + * for the new entry. + */ + if (index - lowstale - 1 > 0) { + memmove(&leaf->ents[lowstale], + &leaf->ents[lowstale + 1], + (index - lowstale - 1) * + sizeof(xfs_dir2_leaf_entry_t)); + } + *lfloglow = MIN(lowstale, *lfloglow); + *lfloghigh = MAX(index - 1, *lfloghigh); + be16_add_cpu(&leaf->hdr.stale, -1); + return &leaf->ents[index - 1]; + } + + /* + * The high one is better, so use that one. + */ + ASSERT(highstale - index >= 0); + ASSERT(be32_to_cpu(leaf->ents[highstale].address) == + XFS_DIR2_NULL_DATAPTR); + + /* + * Copy entries down to cover the stale entry and make room for the + * new entry. 
+ */ + if (highstale - index > 0) { + memmove(&leaf->ents[index + 1], + &leaf->ents[index], + (highstale - index) * sizeof(xfs_dir2_leaf_entry_t)); + } + *lfloglow = MIN(index, *lfloglow); + *lfloghigh = MAX(highstale, *lfloghigh); + be16_add_cpu(&leaf->hdr.stale, -1); + return &leaf->ents[index]; +} + /* * Add an entry to a leaf form directory. */ @@ -430,102 +547,10 @@ xfs_dir2_leaf_addname( if (!grown) xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); } - /* - * Now we need to make room to insert the leaf entry. - * If there are no stale entries, we just insert a hole at index. - */ - if (!leaf->hdr.stale) { - /* - * lep is still good as the index leaf entry. - */ - if (index < be16_to_cpu(leaf->hdr.count)) - memmove(lep + 1, lep, - (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); - /* - * Record low and high logging indices for the leaf. - */ - lfloglow = index; - lfloghigh = be16_to_cpu(leaf->hdr.count); - be16_add_cpu(&leaf->hdr.count, 1); - } - /* - * There are stale entries. - * We will use one of them for the new entry. - * It's probably not at the right location, so we'll have to - * shift some up or down first. - */ - else { - /* - * If we didn't compact before, we need to find the nearest - * stale entries before and after our insertion point. - */ - if (compact == 0) { - /* - * Find the first stale entry before the insertion - * point, if any. - */ - for (lowstale = index - 1; - lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != - XFS_DIR2_NULL_DATAPTR; - lowstale--) - continue; - /* - * Find the next stale entry at or after the insertion - * point, if any. Stop if we go so far that the - * lowstale entry would be better. - */ - for (highstale = index; - highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != - XFS_DIR2_NULL_DATAPTR && - (lowstale < 0 || - index - lowstale - 1 >= highstale - index); - highstale++) - continue; - } - /* - * If the low one is better, use it. - */ - if (lowstale >= 0 && - (highstale == be16_to_cpu(leaf->hdr.count) || - index - lowstale - 1 < highstale - index)) { - ASSERT(index - lowstale - 1 >= 0); - ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == - XFS_DIR2_NULL_DATAPTR); - /* - * Copy entries up to cover the stale entry - * and make room for the new entry. - */ - if (index - lowstale - 1 > 0) - memmove(&leaf->ents[lowstale], - &leaf->ents[lowstale + 1], - (index - lowstale - 1) * sizeof(*lep)); - lep = &leaf->ents[index - 1]; - lfloglow = MIN(lowstale, lfloglow); - lfloghigh = MAX(index - 1, lfloghigh); - } - /* - * The high one is better, so use that one. - */ - else { - ASSERT(highstale - index >= 0); - ASSERT(be32_to_cpu(leaf->ents[highstale].address) == - XFS_DIR2_NULL_DATAPTR); - /* - * Copy entries down to cover the stale entry - * and make room for the new entry. - */ - if (highstale - index > 0) - memmove(&leaf->ents[index + 1], - &leaf->ents[index], - (highstale - index) * sizeof(*lep)); - lep = &leaf->ents[index]; - lfloglow = MIN(index, lfloglow); - lfloghigh = MAX(highstale, lfloghigh); - } - be16_add_cpu(&leaf->hdr.stale, -1); - } + + lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale, + highstale, &lfloglow, &lfloghigh); + /* * Fill in the new leaf entry. 
*/ diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h index 6c9539f06987..bc620622c342 100644 --- a/fs/xfs/xfs_dir2_leaf.h +++ b/fs/xfs/xfs_dir2_leaf.h @@ -248,6 +248,9 @@ extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args, struct xfs_dabuf *lbp); extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args, struct xfs_dabuf *lbp, xfs_dir2_db_t db); +extern xfs_dir2_leaf_entry_t *xfs_dir2_leaf_find_entry(xfs_dir2_leaf_t *, int, + int, int, int, + int *, int *); extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state); #endif /* __XFS_DIR2_LEAF_H__ */ diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index a0aab7d3294f..02da7b7a005a 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -244,89 +244,13 @@ xfs_dir2_leafn_add( lfloglow = be16_to_cpu(leaf->hdr.count); lfloghigh = -1; } - /* - * No stale entries, just insert a space for the new entry. - */ - if (!leaf->hdr.stale) { - lep = &leaf->ents[index]; - if (index < be16_to_cpu(leaf->hdr.count)) - memmove(lep + 1, lep, - (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep)); - lfloglow = index; - lfloghigh = be16_to_cpu(leaf->hdr.count); - be16_add_cpu(&leaf->hdr.count, 1); - } - /* - * There are stale entries. We'll use one for the new entry. - */ - else { - /* - * If we didn't do a compact then we need to figure out - * which stale entry will be used. - */ - if (compact == 0) { - /* - * Find first stale entry before our insertion point. - */ - for (lowstale = index - 1; - lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != - XFS_DIR2_NULL_DATAPTR; - lowstale--) - continue; - /* - * Find next stale entry after insertion point. - * Stop looking if the answer would be worse than - * lowstale already found. - */ - for (highstale = index; - highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != - XFS_DIR2_NULL_DATAPTR && - (lowstale < 0 || - index - lowstale - 1 >= highstale - index); - highstale++) - continue; - } - /* - * Using the low stale entry. - * Shift entries up toward the stale slot. - */ - if (lowstale >= 0 && - (highstale == be16_to_cpu(leaf->hdr.count) || - index - lowstale - 1 < highstale - index)) { - ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == - XFS_DIR2_NULL_DATAPTR); - ASSERT(index - lowstale - 1 >= 0); - if (index - lowstale - 1 > 0) - memmove(&leaf->ents[lowstale], - &leaf->ents[lowstale + 1], - (index - lowstale - 1) * sizeof(*lep)); - lep = &leaf->ents[index - 1]; - lfloglow = MIN(lowstale, lfloglow); - lfloghigh = MAX(index - 1, lfloghigh); - } - /* - * Using the high stale entry. - * Shift entries down toward the stale slot. - */ - else { - ASSERT(be32_to_cpu(leaf->ents[highstale].address) == - XFS_DIR2_NULL_DATAPTR); - ASSERT(highstale - index >= 0); - if (highstale - index > 0) - memmove(&leaf->ents[index + 1], - &leaf->ents[index], - (highstale - index) * sizeof(*lep)); - lep = &leaf->ents[index]; - lfloglow = MIN(index, lfloglow); - lfloghigh = MAX(highstale, lfloghigh); - } - be16_add_cpu(&leaf->hdr.stale, -1); - } + /* * Insert the new entry, log everything. 
*/ + lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale, + highstale, &lfloglow, &lfloghigh); + lep->hashval = cpu_to_be32(args->hashval); lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp, args->blkno, args->index)); -- cgit v1.2.3 From 8bc387875870c87087f138741f456983cbc54660 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:03 +0200 Subject: xfs: cleanup shortform directory inode number handling Refactor the shortform directory helpers that deal with the 32-bit vs 64-bit wide inode numbers into more sensible helpers, and kill the xfs_intino_t typedef that is now superflous. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_block.c | 5 +- fs/xfs/xfs_dir2_sf.c | 138 ++++++++++++++++++++++++++++++++++-------------- fs/xfs/xfs_dir2_sf.h | 25 ++------- fs/xfs/xfs_inum.h | 11 ---- 4 files changed, 104 insertions(+), 75 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 580d99cef9e7..8ab99888ad44 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -1146,7 +1146,7 @@ xfs_dir2_sf_to_block( */ dep = (xfs_dir2_data_entry_t *) ((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET); - dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent)); + dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp)); dep->namelen = 2; dep->name[0] = dep->name[1] = '.'; tagp = xfs_dir2_data_entry_tag_p(dep); @@ -1195,8 +1195,7 @@ xfs_dir2_sf_to_block( * Copy a real entry. */ dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset); - dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, - xfs_dir2_sf_inumberp(sfep))); + dep->inumber = cpu_to_be64(xfs_dir2_sfe_get_ino(sfp, sfep)); dep->namelen = sfep->namelen; memcpy(dep->name, sfep->name, dep->namelen); tagp = xfs_dir2_data_entry_tag_p(dep); diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index b1bae6b1eed9..4c3ecf2c7397 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -59,6 +59,79 @@ static void xfs_dir2_sf_toino4(xfs_da_args_t *args); static void xfs_dir2_sf_toino8(xfs_da_args_t *args); #endif /* XFS_BIG_INUMS */ + +/* + * Inode numbers in short-form directories can come in two versions, + * either 4 bytes or 8 bytes wide. These helpers deal with the + * two forms transparently by looking at the headers i8count field. + */ +static xfs_ino_t +xfs_dir2_sf_get_ino( + struct xfs_dir2_sf *sfp, + xfs_dir2_inou_t *from) +{ + if (sfp->hdr.i8count) + return XFS_GET_DIR_INO8(from->i8); + else + return XFS_GET_DIR_INO4(from->i4); +} + +static void +xfs_dir2_sf_put_ino( + struct xfs_dir2_sf *sfp, + xfs_dir2_inou_t *to, + xfs_ino_t ino) +{ + if (sfp->hdr.i8count) + XFS_PUT_DIR_INO8(ino, to->i8); + else + XFS_PUT_DIR_INO4(ino, to->i4); +} + +xfs_ino_t +xfs_dir2_sf_get_parent_ino( + struct xfs_dir2_sf *sfp) +{ + return xfs_dir2_sf_get_ino(sfp, &sfp->hdr.parent); +} + +static void +xfs_dir2_sf_put_parent_ino( + struct xfs_dir2_sf *sfp, + xfs_ino_t ino) +{ + xfs_dir2_sf_put_ino(sfp, &sfp->hdr.parent, ino); +} + +/* + * In short-form directory entries the inode numbers are stored at variable + * offset behind the entry name. The inode numbers may only be accessed + * through the helpers below. 
+ */ +static xfs_dir2_inou_t * +xfs_dir2_sfe_inop( + struct xfs_dir2_sf_entry *sfep) +{ + return (xfs_dir2_inou_t *)&sfep->name[sfep->namelen]; +} + +xfs_ino_t +xfs_dir2_sfe_get_ino( + struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_entry *sfep) +{ + return xfs_dir2_sf_get_ino(sfp, xfs_dir2_sfe_inop(sfep)); +} + +static void +xfs_dir2_sfe_put_ino( + struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_entry *sfep, + xfs_ino_t ino) +{ + xfs_dir2_sf_put_ino(sfp, xfs_dir2_sfe_inop(sfep), ino); +} + /* * Given a block directory (dp/block), calculate its size as a shortform (sf) * directory and a header for the sf directory, if it will fit it the @@ -138,7 +211,7 @@ xfs_dir2_block_sfsize( */ sfhp->count = count; sfhp->i8count = i8count; - xfs_dir2_sf_put_inumber((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent); + xfs_dir2_sf_put_parent_ino((xfs_dir2_sf_t *)sfhp, parent); return size; } @@ -165,7 +238,6 @@ xfs_dir2_block_to_sf( char *ptr; /* current data pointer */ xfs_dir2_sf_entry_t *sfep; /* shortform entry */ xfs_dir2_sf_t *sfp; /* shortform structure */ - xfs_ino_t temp; trace_xfs_dir2_block_to_sf(args); @@ -233,7 +305,7 @@ xfs_dir2_block_to_sf( else if (dep->namelen == 2 && dep->name[0] == '.' && dep->name[1] == '.') ASSERT(be64_to_cpu(dep->inumber) == - xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent)); + xfs_dir2_sf_get_parent_ino(sfp)); /* * Normal entry, copy it into shortform. */ @@ -243,9 +315,9 @@ xfs_dir2_block_to_sf( (xfs_dir2_data_aoff_t) ((char *)dep - (char *)block)); memcpy(sfep->name, dep->name, dep->namelen); - temp = be64_to_cpu(dep->inumber); - xfs_dir2_sf_put_inumber(sfp, &temp, - xfs_dir2_sf_inumberp(sfep)); + xfs_dir2_sfe_put_ino(sfp, sfep, + be64_to_cpu(dep->inumber)); + sfep = xfs_dir2_sf_nextentry(sfp, sfep); } ptr += xfs_dir2_data_entsize(dep->namelen); @@ -406,8 +478,7 @@ xfs_dir2_sf_addname_easy( sfep->namelen = args->namelen; xfs_dir2_sf_put_offset(sfep, offset); memcpy(sfep->name, args->name, sfep->namelen); - xfs_dir2_sf_put_inumber(sfp, &args->inumber, - xfs_dir2_sf_inumberp(sfep)); + xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber); /* * Update the header and inode. */ @@ -498,8 +569,7 @@ xfs_dir2_sf_addname_hard( sfep->namelen = args->namelen; xfs_dir2_sf_put_offset(sfep, offset); memcpy(sfep->name, args->name, sfep->namelen); - xfs_dir2_sf_put_inumber(sfp, &args->inumber, - xfs_dir2_sf_inumberp(sfep)); + xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber); sfp->hdr.count++; #if XFS_BIG_INUMS if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) @@ -618,14 +688,14 @@ xfs_dir2_sf_check( sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; offset = XFS_DIR2_DATA_FIRST_OFFSET; - ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); + ino = xfs_dir2_sf_get_parent_ino(sfp); i8count = ino > XFS_DIR2_MAX_SHORT_INUM; for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count; i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset); - ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep)); + ino = xfs_dir2_sfe_get_ino(sfp, sfep); i8count += ino > XFS_DIR2_MAX_SHORT_INUM; offset = xfs_dir2_sf_get_offset(sfep) + @@ -686,7 +756,7 @@ xfs_dir2_sf_create( /* * Now can put in the inode number, since i8count is set. */ - xfs_dir2_sf_put_inumber(sfp, &pino, &sfp->hdr.parent); + xfs_dir2_sf_put_parent_ino(sfp, pino); sfp->hdr.count = 0; dp->i_d.di_size = size; xfs_dir2_sf_check(args); @@ -759,7 +829,7 @@ xfs_dir2_sf_getdents( * Put .. entry unless we're starting past it. 
*/ if (*offset <= dotdot_offset) { - ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); + ino = xfs_dir2_sf_get_parent_ino(sfp); if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) { *offset = dotdot_offset & 0x7fffffff; return 0; @@ -779,7 +849,7 @@ xfs_dir2_sf_getdents( continue; } - ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep)); + ino = xfs_dir2_sfe_get_ino(sfp, sfep); if (filldir(dirent, (char *)sfep->name, sfep->namelen, off & 0x7fffffff, ino, DT_UNKNOWN)) { *offset = off & 0x7fffffff; @@ -839,7 +909,7 @@ xfs_dir2_sf_lookup( */ if (args->namelen == 2 && args->name[0] == '.' && args->name[1] == '.') { - args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); + args->inumber = xfs_dir2_sf_get_parent_ino(sfp); args->cmpresult = XFS_CMP_EXACT; return XFS_ERROR(EEXIST); } @@ -858,8 +928,7 @@ xfs_dir2_sf_lookup( sfep->namelen); if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) { args->cmpresult = cmp; - args->inumber = xfs_dir2_sf_get_inumber(sfp, - xfs_dir2_sf_inumberp(sfep)); + args->inumber = xfs_dir2_sfe_get_ino(sfp, sfep); if (cmp == XFS_CMP_EXACT) return XFS_ERROR(EEXIST); ci_sfep = sfep; @@ -918,9 +987,8 @@ xfs_dir2_sf_removename( i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { if (xfs_da_compname(args, sfep->name, sfep->namelen) == XFS_CMP_EXACT) { - ASSERT(xfs_dir2_sf_get_inumber(sfp, - xfs_dir2_sf_inumberp(sfep)) == - args->inumber); + ASSERT(xfs_dir2_sfe_get_ino(sfp, sfep) == + args->inumber); break; } } @@ -1040,10 +1108,10 @@ xfs_dir2_sf_replace( if (args->namelen == 2 && args->name[0] == '.' && args->name[1] == '.') { #if XFS_BIG_INUMS || defined(DEBUG) - ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); + ino = xfs_dir2_sf_get_parent_ino(sfp); ASSERT(args->inumber != ino); #endif - xfs_dir2_sf_put_inumber(sfp, &args->inumber, &sfp->hdr.parent); + xfs_dir2_sf_put_parent_ino(sfp, args->inumber); } /* * Normal entry, look for the name. @@ -1055,12 +1123,10 @@ xfs_dir2_sf_replace( if (xfs_da_compname(args, sfep->name, sfep->namelen) == XFS_CMP_EXACT) { #if XFS_BIG_INUMS || defined(DEBUG) - ino = xfs_dir2_sf_get_inumber(sfp, - xfs_dir2_sf_inumberp(sfep)); + ino = xfs_dir2_sfe_get_ino(sfp, sfep); ASSERT(args->inumber != ino); #endif - xfs_dir2_sf_put_inumber(sfp, &args->inumber, - xfs_dir2_sf_inumberp(sfep)); + xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber); break; } } @@ -1121,7 +1187,6 @@ xfs_dir2_sf_toino4( char *buf; /* old dir's buffer */ xfs_inode_t *dp; /* incore directory inode */ int i; /* entry index */ - xfs_ino_t ino; /* entry inode number */ int newsize; /* new inode size */ xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ xfs_dir2_sf_t *oldsfp; /* old sf directory */ @@ -1162,8 +1227,7 @@ xfs_dir2_sf_toino4( */ sfp->hdr.count = oldsfp->hdr.count; sfp->hdr.i8count = 0; - ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent); - xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent); + xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp)); /* * Copy the entries field by field. */ @@ -1175,9 +1239,8 @@ xfs_dir2_sf_toino4( sfep->namelen = oldsfep->namelen; sfep->offset = oldsfep->offset; memcpy(sfep->name, oldsfep->name, sfep->namelen); - ino = xfs_dir2_sf_get_inumber(oldsfp, - xfs_dir2_sf_inumberp(oldsfep)); - xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep)); + xfs_dir2_sfe_put_ino(sfp, sfep, + xfs_dir2_sfe_get_ino(oldsfp, oldsfep)); } /* * Clean up the inode. 
@@ -1199,7 +1262,6 @@ xfs_dir2_sf_toino8( char *buf; /* old dir's buffer */ xfs_inode_t *dp; /* incore directory inode */ int i; /* entry index */ - xfs_ino_t ino; /* entry inode number */ int newsize; /* new inode size */ xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ xfs_dir2_sf_t *oldsfp; /* old sf directory */ @@ -1240,8 +1302,7 @@ xfs_dir2_sf_toino8( */ sfp->hdr.count = oldsfp->hdr.count; sfp->hdr.i8count = 1; - ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent); - xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent); + xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp)); /* * Copy the entries field by field. */ @@ -1253,9 +1314,8 @@ xfs_dir2_sf_toino8( sfep->namelen = oldsfep->namelen; sfep->offset = oldsfep->offset; memcpy(sfep->name, oldsfep->name, sfep->namelen); - ino = xfs_dir2_sf_get_inumber(oldsfp, - xfs_dir2_sf_inumberp(oldsfep)); - xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep)); + xfs_dir2_sfe_put_ino(sfp, sfep, + xfs_dir2_sfe_get_ino(oldsfp, oldsfep)); } /* * Clean up the inode. diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h index 6ac44b550d39..208d2c3c1122 100644 --- a/fs/xfs/xfs_dir2_sf.h +++ b/fs/xfs/xfs_dir2_sf.h @@ -90,28 +90,6 @@ static inline int xfs_dir2_sf_hdr_size(int i8count) ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); } -static inline xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep) -{ - return (xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen]; -} - -static inline xfs_intino_t -xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from) -{ - return ((sfp)->hdr.i8count == 0 ? \ - (xfs_intino_t)XFS_GET_DIR_INO4((from)->i4) : \ - (xfs_intino_t)XFS_GET_DIR_INO8((from)->i8)); -} - -static inline void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from, - xfs_dir2_inou_t *to) -{ - if ((sfp)->hdr.i8count == 0) - XFS_PUT_DIR_INO4(*(from), (to)->i4); - else - XFS_PUT_DIR_INO8(*(from), (to)->i8); -} - static inline xfs_dir2_data_aoff_t xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep) { @@ -155,6 +133,9 @@ xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep) /* * Functions. */ +extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf *sfp); +extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_entry *sfep); extern int xfs_dir2_block_sfsize(struct xfs_inode *dp, struct xfs_dir2_block *block, xfs_dir2_sf_hdr_t *sfhp); diff --git a/fs/xfs/xfs_inum.h b/fs/xfs/xfs_inum.h index b8e4ee4e89a4..b253c0ea5bec 100644 --- a/fs/xfs/xfs_inum.h +++ b/fs/xfs/xfs_inum.h @@ -28,17 +28,6 @@ typedef __uint32_t xfs_agino_t; /* within allocation grp inode number */ -/* - * Useful inode bits for this kernel. - * Used in some places where having 64-bits in the 32-bit kernels - * costs too much. - */ -#if XFS_BIG_INUMS -typedef xfs_ino_t xfs_intino_t; -#else -typedef __uint32_t xfs_intino_t; -#endif - #define NULLFSINO ((xfs_ino_t)-1) #define NULLAGINO ((xfs_agino_t)-1) -- cgit v1.2.3 From ac8ba50f6be4a3ab49a2ad2cb676422af4ebaf8a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:13 +0200 Subject: xfs: kill struct xfs_dir2_sf The list field of it is never actually used, so all uses can simply be replaced with the xfs_dir2_sf_hdr_t type that it has as its first member.
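Since the header is the first member of the old wrapper struct, a pointer to the in-inode directory data can be treated directly as a pointer to the header. A minimal sketch of the access pattern before and after, distilled from the xfs_dir_isempty() hunk below (it assumes the usual XFS types and is not a standalone program):

	/* before: go through the wrapper struct and its hdr member */
	xfs_dir2_sf_t *sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
	return !sfp->hdr.count;

	/* after: the header sits at offset 0, so cast to it directly */
	xfs_dir2_sf_hdr_t *sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
	return !sfp->count;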
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2.c | 6 +- fs/xfs/xfs_dir2_block.c | 42 ++++++----- fs/xfs/xfs_dir2_sf.c | 188 ++++++++++++++++++++++++------------------------ fs/xfs/xfs_dir2_sf.h | 47 ++++++------ 4 files changed, 141 insertions(+), 142 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index dba7a71cedf3..0dee57812f45 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -122,15 +122,15 @@ int xfs_dir_isempty( xfs_inode_t *dp) { - xfs_dir2_sf_t *sfp; + xfs_dir2_sf_hdr_t *sfp; ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR); if (dp->i_d.di_size == 0) /* might happen during shutdown. */ return 1; if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp)) return 0; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - return !sfp->hdr.count; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + return !sfp->count; } /* diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 8ab99888ad44..c66f194fc2a8 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -1028,8 +1028,6 @@ xfs_dir2_sf_to_block( xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dabuf_t *bp; /* block buffer */ xfs_dir2_block_tail_t *btp; /* block tail pointer */ - char *buf; /* sf buffer */ - int buf_len; xfs_dir2_data_entry_t *dep; /* data entry pointer */ xfs_inode_t *dp; /* incore directory inode */ int dummy; /* trash */ @@ -1043,7 +1041,8 @@ xfs_dir2_sf_to_block( int newoffset; /* offset from current entry */ int offset; /* target block offset */ xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *oldsfp; /* old shortform header */ + xfs_dir2_sf_hdr_t *sfp; /* shortform header */ __be16 *tagp; /* end of data entry */ xfs_trans_t *tp; /* transaction pointer */ struct xfs_name name; @@ -1061,32 +1060,30 @@ xfs_dir2_sf_to_block( ASSERT(XFS_FORCED_SHUTDOWN(mp)); return XFS_ERROR(EIO); } + + oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); ASSERT(dp->i_df.if_u1.if_data != NULL); - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count)); + /* - * Copy the directory into the stack buffer. + * Copy the directory into a temporary buffer. * Then pitch the incore inode data so we can make extents. */ + sfp = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP); + memcpy(sfp, oldsfp, dp->i_df.if_bytes); - buf_len = dp->i_df.if_bytes; - buf = kmem_alloc(buf_len, KM_SLEEP); - - memcpy(buf, sfp, buf_len); - xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK); + xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK); dp->i_d.di_size = 0; xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); - /* - * Reset pointer - old sfp is gone. - */ - sfp = (xfs_dir2_sf_t *)buf; + /* * Add block 0 to the inode. */ error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno); if (error) { - kmem_free(buf); + kmem_free(sfp); return error; } /* @@ -1094,7 +1091,7 @@ xfs_dir2_sf_to_block( */ error = xfs_dir2_data_init(args, blkno, &bp); if (error) { - kmem_free(buf); + kmem_free(sfp); return error; } block = bp->data; @@ -1103,7 +1100,7 @@ xfs_dir2_sf_to_block( * Compute size of block "tail" area. */ i = (uint)sizeof(*btp) + - (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t); /* * The whole thing is initialized to free by the init routine. 
* Say we're using the leaf and tail area. @@ -1117,7 +1114,7 @@ xfs_dir2_sf_to_block( * Fill in the tail. */ btp = xfs_dir2_block_tail_p(mp, block); - btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */ + btp->count = cpu_to_be32(sfp->count + 2); /* ., .. */ btp->stale = 0; blp = xfs_dir2_block_leaf_p(btp); endoffset = (uint)((char *)blp - (char *)block); @@ -1159,7 +1156,8 @@ xfs_dir2_sf_to_block( /* * Loop over existing entries, stuff them in. */ - if ((i = 0) == sfp->hdr.count) + i = 0; + if (!sfp->count) sfep = NULL; else sfep = xfs_dir2_sf_firstentry(sfp); @@ -1208,13 +1206,13 @@ xfs_dir2_sf_to_block( blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, (char *)dep - (char *)block)); offset = (int)((char *)(tagp + 1) - (char *)block); - if (++i == sfp->hdr.count) + if (++i == sfp->count) sfep = NULL; else sfep = xfs_dir2_sf_nextentry(sfp, sfep); } /* Done with the temporary buffer */ - kmem_free(buf); + kmem_free(sfp); /* * Sort the leaf entries by hash value. */ diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index 4c3ecf2c7397..806b348f496f 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -67,10 +67,10 @@ static void xfs_dir2_sf_toino8(xfs_da_args_t *args); */ static xfs_ino_t xfs_dir2_sf_get_ino( - struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_hdr *hdr, xfs_dir2_inou_t *from) { - if (sfp->hdr.i8count) + if (hdr->i8count) return XFS_GET_DIR_INO8(from->i8); else return XFS_GET_DIR_INO4(from->i4); @@ -78,11 +78,11 @@ xfs_dir2_sf_get_ino( static void xfs_dir2_sf_put_ino( - struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_hdr *hdr, xfs_dir2_inou_t *to, xfs_ino_t ino) { - if (sfp->hdr.i8count) + if (hdr->i8count) XFS_PUT_DIR_INO8(ino, to->i8); else XFS_PUT_DIR_INO4(ino, to->i4); @@ -90,17 +90,17 @@ xfs_dir2_sf_put_ino( xfs_ino_t xfs_dir2_sf_get_parent_ino( - struct xfs_dir2_sf *sfp) + struct xfs_dir2_sf_hdr *hdr) { - return xfs_dir2_sf_get_ino(sfp, &sfp->hdr.parent); + return xfs_dir2_sf_get_ino(hdr, &hdr->parent); } static void xfs_dir2_sf_put_parent_ino( - struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_hdr *hdr, xfs_ino_t ino) { - xfs_dir2_sf_put_ino(sfp, &sfp->hdr.parent, ino); + xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino); } /* @@ -117,19 +117,19 @@ xfs_dir2_sfe_inop( xfs_ino_t xfs_dir2_sfe_get_ino( - struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_hdr *hdr, struct xfs_dir2_sf_entry *sfep) { - return xfs_dir2_sf_get_ino(sfp, xfs_dir2_sfe_inop(sfep)); + return xfs_dir2_sf_get_ino(hdr, xfs_dir2_sfe_inop(sfep)); } static void xfs_dir2_sfe_put_ino( - struct xfs_dir2_sf *sfp, + struct xfs_dir2_sf_hdr *hdr, struct xfs_dir2_sf_entry *sfep, xfs_ino_t ino) { - xfs_dir2_sf_put_ino(sfp, xfs_dir2_sfe_inop(sfep), ino); + xfs_dir2_sf_put_ino(hdr, xfs_dir2_sfe_inop(sfep), ino); } /* @@ -211,7 +211,7 @@ xfs_dir2_block_sfsize( */ sfhp->count = count; sfhp->i8count = i8count; - xfs_dir2_sf_put_parent_ino((xfs_dir2_sf_t *)sfhp, parent); + xfs_dir2_sf_put_parent_ino(sfhp, parent); return size; } @@ -237,7 +237,7 @@ xfs_dir2_block_to_sf( xfs_mount_t *mp; /* filesystem mount point */ char *ptr; /* current data pointer */ xfs_dir2_sf_entry_t *sfep; /* shortform entry */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform directory header */ trace_xfs_dir2_block_to_sf(args); @@ -270,7 +270,7 @@ xfs_dir2_block_to_sf( /* * Copy the header into the newly allocate local space. 
*/ - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count)); dp->i_d.di_size = size; /* @@ -349,7 +349,7 @@ xfs_dir2_sf_addname( xfs_dir2_data_aoff_t offset = 0; /* offset for new entry */ int old_isize; /* di_size before adding name */ int pick; /* which algorithm to use */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */ trace_xfs_dir2_sf_addname(args); @@ -366,8 +366,8 @@ xfs_dir2_sf_addname( } ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); ASSERT(dp->i_df.if_u1.if_data != NULL); - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count)); /* * Compute entry (and change in) size. */ @@ -378,7 +378,7 @@ xfs_dir2_sf_addname( /* * Do we have to change to 8 byte inodes? */ - if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) { + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) { /* * Yes, adjust the entry size and the total size. */ @@ -386,7 +386,7 @@ xfs_dir2_sf_addname( (uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t); incr_isize += - (sfp->hdr.count + 2) * + (sfp->count + 2) * ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); objchange = 1; @@ -456,11 +456,11 @@ xfs_dir2_sf_addname_easy( { int byteoff; /* byte offset in sf dir */ xfs_inode_t *dp; /* incore directory inode */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ dp = args->dp; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; byteoff = (int)((char *)sfep - (char *)sfp); /* * Grow the in-inode space. @@ -470,7 +470,7 @@ xfs_dir2_sf_addname_easy( /* * Need to set up again due to realloc of the inode data. */ - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + byteoff); /* * Fill in the new entry. @@ -482,10 +482,10 @@ xfs_dir2_sf_addname_easy( /* * Update the header and inode. */ - sfp->hdr.count++; + sfp->count++; #if XFS_BIG_INUMS if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) - sfp->hdr.i8count++; + sfp->i8count++; #endif dp->i_d.di_size = new_isize; xfs_dir2_sf_check(args); @@ -515,19 +515,19 @@ xfs_dir2_sf_addname_hard( xfs_dir2_data_aoff_t offset; /* current offset value */ int old_isize; /* previous di_size */ xfs_dir2_sf_entry_t *oldsfep; /* entry in original dir */ - xfs_dir2_sf_t *oldsfp; /* original shortform dir */ + xfs_dir2_sf_hdr_t *oldsfp; /* original shortform dir */ xfs_dir2_sf_entry_t *sfep; /* entry in new dir */ - xfs_dir2_sf_t *sfp; /* new shortform dir */ + xfs_dir2_sf_hdr_t *sfp; /* new shortform dir */ /* * Copy the old directory to the stack buffer. */ dp = args->dp; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; old_isize = (int)dp->i_d.di_size; buf = kmem_alloc(old_isize, KM_SLEEP); - oldsfp = (xfs_dir2_sf_t *)buf; + oldsfp = (xfs_dir2_sf_hdr_t *)buf; memcpy(oldsfp, sfp, old_isize); /* * Loop over the old directory finding the place we're going @@ -556,7 +556,7 @@ xfs_dir2_sf_addname_hard( /* * Reset the pointer since the buffer was reallocated. 
*/ - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; /* * Copy the first part of the directory, including the header. */ @@ -570,10 +570,10 @@ xfs_dir2_sf_addname_hard( xfs_dir2_sf_put_offset(sfep, offset); memcpy(sfep->name, args->name, sfep->namelen); xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber); - sfp->hdr.count++; + sfp->count++; #if XFS_BIG_INUMS if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) - sfp->hdr.i8count++; + sfp->i8count++; #endif /* * If there's more left to copy, do that. @@ -607,14 +607,14 @@ xfs_dir2_sf_addname_pick( xfs_mount_t *mp; /* filesystem mount point */ xfs_dir2_data_aoff_t offset; /* data block offset */ xfs_dir2_sf_entry_t *sfep; /* shortform entry */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ int size; /* entry's data size */ int used; /* data bytes used */ dp = args->dp; mp = dp->i_mount; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; size = xfs_dir2_data_entsize(args->namelen); offset = XFS_DIR2_DATA_FIRST_OFFSET; sfep = xfs_dir2_sf_firstentry(sfp); @@ -624,7 +624,7 @@ xfs_dir2_sf_addname_pick( * Keep track of data offset and whether we've seen a place * to insert the new entry. */ - for (i = 0; i < sfp->hdr.count; i++) { + for (i = 0; i < sfp->count; i++) { if (!holefit) holefit = offset + size <= xfs_dir2_sf_get_offset(sfep); offset = xfs_dir2_sf_get_offset(sfep) + @@ -636,7 +636,7 @@ xfs_dir2_sf_addname_pick( * was a data block (block form directory). */ used = offset + - (sfp->hdr.count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (sfp->count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) + (uint)sizeof(xfs_dir2_block_tail_t); /* * If it won't fit in a block form then we can't insert it, @@ -682,17 +682,17 @@ xfs_dir2_sf_check( xfs_ino_t ino; /* entry inode number */ int offset; /* data offset */ xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ dp = args->dp; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; offset = XFS_DIR2_DATA_FIRST_OFFSET; ino = xfs_dir2_sf_get_parent_ino(sfp); i8count = ino > XFS_DIR2_MAX_SHORT_INUM; for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); - i < sfp->hdr.count; + i < sfp->count; i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset); ino = xfs_dir2_sfe_get_ino(sfp, sfep); @@ -701,11 +701,11 @@ xfs_dir2_sf_check( xfs_dir2_sf_get_offset(sfep) + xfs_dir2_data_entsize(sfep->namelen); } - ASSERT(i8count == sfp->hdr.i8count); + ASSERT(i8count == sfp->i8count); ASSERT(XFS_BIG_INUMS || i8count == 0); ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size); ASSERT(offset + - (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + + (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + (uint)sizeof(xfs_dir2_block_tail_t) <= dp->i_mount->m_dirblksize); } @@ -721,7 +721,7 @@ xfs_dir2_sf_create( { xfs_inode_t *dp; /* incore directory inode */ int i8count; /* parent inode is an 8-byte number */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ int size; /* directory size */ trace_xfs_dir2_sf_create(args); @@ -751,13 +751,13 @@ xfs_dir2_sf_create( /* * Fill in the header, */ - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - sfp->hdr.i8count = i8count; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + sfp->i8count 
= i8count; /* * Now can put in the inode number, since i8count is set. */ xfs_dir2_sf_put_parent_ino(sfp, pino); - sfp->hdr.count = 0; + sfp->count = 0; dp->i_d.di_size = size; xfs_dir2_sf_check(args); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); @@ -775,7 +775,7 @@ xfs_dir2_sf_getdents( xfs_mount_t *mp; /* filesystem mount point */ xfs_dir2_dataptr_t off; /* current entry's offset */ xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ xfs_dir2_dataptr_t dot_offset; xfs_dir2_dataptr_t dotdot_offset; xfs_ino_t ino; @@ -794,9 +794,9 @@ xfs_dir2_sf_getdents( ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); ASSERT(dp->i_df.if_u1.if_data != NULL); - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; - ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count)); /* * If the block number in the offset is out of range, we're done. @@ -840,7 +840,7 @@ xfs_dir2_sf_getdents( * Loop while there are more entries and put'ing works. */ sfep = xfs_dir2_sf_firstentry(sfp); - for (i = 0; i < sfp->hdr.count; i++) { + for (i = 0; i < sfp->count; i++) { off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, xfs_dir2_sf_get_offset(sfep)); @@ -875,7 +875,7 @@ xfs_dir2_sf_lookup( int i; /* entry index */ int error; xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ enum xfs_dacmp cmp; /* comparison result */ xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */ @@ -894,8 +894,8 @@ xfs_dir2_sf_lookup( } ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); ASSERT(dp->i_df.if_u1.if_data != NULL); - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count)); /* * Special case for . */ @@ -917,7 +917,7 @@ xfs_dir2_sf_lookup( * Loop over all the entries trying to match ours. */ ci_sfep = NULL; - for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count; + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count; i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { /* * Compare name and if it's an exact match, return the inode @@ -960,7 +960,7 @@ xfs_dir2_sf_removename( int newsize; /* new inode size */ int oldsize; /* old inode size */ xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ trace_xfs_dir2_sf_removename(args); @@ -977,13 +977,13 @@ xfs_dir2_sf_removename( } ASSERT(dp->i_df.if_bytes == oldsize); ASSERT(dp->i_df.if_u1.if_data != NULL); - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->i8count)); /* * Loop over the old directory entries. * Find the one we're deleting. */ - for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count; + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count; i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { if (xfs_da_compname(args, sfep->name, sfep->namelen) == XFS_CMP_EXACT) { @@ -995,7 +995,7 @@ xfs_dir2_sf_removename( /* * Didn't find it. 
*/ - if (i == sfp->hdr.count) + if (i == sfp->count) return XFS_ERROR(ENOENT); /* * Calculate sizes. @@ -1012,22 +1012,22 @@ xfs_dir2_sf_removename( /* * Fix up the header and file size. */ - sfp->hdr.count--; + sfp->count--; dp->i_d.di_size = newsize; /* * Reallocate, making it smaller. */ xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK); - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; #if XFS_BIG_INUMS /* * Are we changing inode number size? */ if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) { - if (sfp->hdr.i8count == 1) + if (sfp->i8count == 1) xfs_dir2_sf_toino4(args); else - sfp->hdr.i8count--; + sfp->i8count--; } #endif xfs_dir2_sf_check(args); @@ -1051,7 +1051,7 @@ xfs_dir2_sf_replace( int i8elevated; /* sf_toino8 set i8count=1 */ #endif xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ - xfs_dir2_sf_t *sfp; /* shortform structure */ + xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ trace_xfs_dir2_sf_replace(args); @@ -1067,19 +1067,19 @@ xfs_dir2_sf_replace( } ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); ASSERT(dp->i_df.if_u1.if_data != NULL); - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count)); #if XFS_BIG_INUMS /* * New inode number is large, and need to convert to 8-byte inodes. */ - if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) { + if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) { int error; /* error return value */ int newsize; /* new inode size */ newsize = dp->i_df.if_bytes + - (sfp->hdr.count + 1) * + (sfp->count + 1) * ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); /* @@ -1097,7 +1097,7 @@ xfs_dir2_sf_replace( */ xfs_dir2_sf_toino8(args); i8elevated = 1; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; } else i8elevated = 0; #endif @@ -1118,7 +1118,7 @@ xfs_dir2_sf_replace( */ else { for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); - i < sfp->hdr.count; + i < sfp->count; i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { if (xfs_da_compname(args, sfep->name, sfep->namelen) == XFS_CMP_EXACT) { @@ -1133,7 +1133,7 @@ xfs_dir2_sf_replace( /* * Didn't find it. */ - if (i == sfp->hdr.count) { + if (i == sfp->count) { ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); #if XFS_BIG_INUMS if (i8elevated) @@ -1151,10 +1151,10 @@ xfs_dir2_sf_replace( /* * And the old count was one, so need to convert to small. */ - if (sfp->hdr.i8count == 1) + if (sfp->i8count == 1) xfs_dir2_sf_toino4(args); else - sfp->hdr.i8count--; + sfp->i8count--; } /* * See if the old number was small, the new number is large. 
@@ -1165,9 +1165,9 @@ xfs_dir2_sf_replace( * add to the i8count unless we just converted to 8-byte * inodes (which does an implied i8count = 1) */ - ASSERT(sfp->hdr.i8count != 0); + ASSERT(sfp->i8count != 0); if (!i8elevated) - sfp->hdr.i8count++; + sfp->i8count++; } #endif xfs_dir2_sf_check(args); @@ -1189,10 +1189,10 @@ xfs_dir2_sf_toino4( int i; /* entry index */ int newsize; /* new inode size */ xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ - xfs_dir2_sf_t *oldsfp; /* old sf directory */ + xfs_dir2_sf_hdr_t *oldsfp; /* old sf directory */ int oldsize; /* old inode size */ xfs_dir2_sf_entry_t *sfep; /* new sf entry */ - xfs_dir2_sf_t *sfp; /* new sf directory */ + xfs_dir2_sf_hdr_t *sfp; /* new sf directory */ trace_xfs_dir2_sf_toino4(args); @@ -1205,35 +1205,35 @@ xfs_dir2_sf_toino4( */ oldsize = dp->i_df.if_bytes; buf = kmem_alloc(oldsize, KM_SLEEP); - oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - ASSERT(oldsfp->hdr.i8count == 1); + oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsfp->i8count == 1); memcpy(buf, oldsfp, oldsize); /* * Compute the new inode size. */ newsize = oldsize - - (oldsfp->hdr.count + 1) * + (oldsfp->count + 1) * ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK); xfs_idata_realloc(dp, newsize, XFS_DATA_FORK); /* * Reset our pointers, the data has moved. */ - oldsfp = (xfs_dir2_sf_t *)buf; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + oldsfp = (xfs_dir2_sf_hdr_t *)buf; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; /* * Fill in the new header. */ - sfp->hdr.count = oldsfp->hdr.count; - sfp->hdr.i8count = 0; + sfp->count = oldsfp->count; + sfp->i8count = 0; xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp)); /* * Copy the entries field by field. */ for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp), oldsfep = xfs_dir2_sf_firstentry(oldsfp); - i < sfp->hdr.count; + i < sfp->count; i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep), oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) { sfep->namelen = oldsfep->namelen; @@ -1264,10 +1264,10 @@ xfs_dir2_sf_toino8( int i; /* entry index */ int newsize; /* new inode size */ xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */ - xfs_dir2_sf_t *oldsfp; /* old sf directory */ + xfs_dir2_sf_hdr_t *oldsfp; /* old sf directory */ int oldsize; /* old inode size */ xfs_dir2_sf_entry_t *sfep; /* new sf entry */ - xfs_dir2_sf_t *sfp; /* new sf directory */ + xfs_dir2_sf_hdr_t *sfp; /* new sf directory */ trace_xfs_dir2_sf_toino8(args); @@ -1280,35 +1280,35 @@ xfs_dir2_sf_toino8( */ oldsize = dp->i_df.if_bytes; buf = kmem_alloc(oldsize, KM_SLEEP); - oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; - ASSERT(oldsfp->hdr.i8count == 0); + oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; + ASSERT(oldsfp->i8count == 0); memcpy(buf, oldsfp, oldsize); /* * Compute the new inode size. */ newsize = oldsize + - (oldsfp->hdr.count + 1) * + (oldsfp->count + 1) * ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)); xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK); xfs_idata_realloc(dp, newsize, XFS_DATA_FORK); /* * Reset our pointers, the data has moved. */ - oldsfp = (xfs_dir2_sf_t *)buf; - sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; + oldsfp = (xfs_dir2_sf_hdr_t *)buf; + sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; /* * Fill in the new header. 
*/ - sfp->hdr.count = oldsfp->hdr.count; - sfp->hdr.i8count = 1; + sfp->count = oldsfp->count; + sfp->i8count = 1; xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp)); /* * Copy the entries field by field. */ for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp), oldsfep = xfs_dir2_sf_firstentry(oldsfp); - i < sfp->hdr.count; + i < sfp->count; i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep), oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) { sfep->namelen = oldsfep->namelen; diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h index 208d2c3c1122..c1ab2e7c66ae 100644 --- a/fs/xfs/xfs_dir2_sf.h +++ b/fs/xfs/xfs_dir2_sf.h @@ -21,8 +21,12 @@ /* * Directory layout when stored internal to an inode. * - * Small directories are packed as tightly as possible so as to - * fit into the literal area of the inode. + * Small directories are packed as tightly as possible so as to fit into the + * literal area of the inode. They consist of a single xfs_dir2_sf_hdr header + * followed by zero or more xfs_dir2_sf_entry structures. Due the different + * inode number storage size and the variable length name field in + * the xfs_dir2_sf_entry all these structure are variable length, and the + * accessors in this file should be used to iterate over them. */ struct uio; @@ -61,9 +65,9 @@ typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t; * The parent directory has a dedicated field, and the self-pointer must * be calculated on the fly. * - * Entries are packed toward the top as tightly as possible. The header - * and the elements must be memcpy'd out into a work area to get correct - * alignment for the inode number fields. + * Entries are packed toward the top as tightly as possible, and thus may + * be misaligned. Care needs to be taken to access them through special + * helpers or copy them into aligned variables first. */ typedef struct xfs_dir2_sf_hdr { __uint8_t count; /* count of entries */ @@ -78,11 +82,6 @@ typedef struct xfs_dir2_sf_entry { xfs_dir2_inou_t inumber; /* inode number, var. 
offset */ } __arch_pack xfs_dir2_sf_entry_t; -typedef struct xfs_dir2_sf { - xfs_dir2_sf_hdr_t hdr; /* shortform header */ - xfs_dir2_sf_entry_t list[1]; /* shortform entries */ -} xfs_dir2_sf_t; - static inline int xfs_dir2_sf_hdr_size(int i8count) { return ((uint)sizeof(xfs_dir2_sf_hdr_t) - \ @@ -102,39 +101,41 @@ xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off) INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off); } -static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len) +static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_hdr_t *sfp, int len) { return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \ - ((sfp)->hdr.i8count == 0) * \ + ((sfp)->i8count == 0) * \ ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); } static inline int -xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep) +xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_hdr_t *sfp, xfs_dir2_sf_entry_t *sfep) { return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (sfep)->namelen - \ - ((sfp)->hdr.i8count == 0) * \ + ((sfp)->i8count == 0) * \ ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); } -static inline xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp) +static inline struct xfs_dir2_sf_entry * +xfs_dir2_sf_firstentry(struct xfs_dir2_sf_hdr *hdr) { - return ((xfs_dir2_sf_entry_t *) \ - ((char *)(sfp) + xfs_dir2_sf_hdr_size(sfp->hdr.i8count))); + return (struct xfs_dir2_sf_entry *) + ((char *)hdr + xfs_dir2_sf_hdr_size(hdr->i8count)); } -static inline xfs_dir2_sf_entry_t * -xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep) +static inline struct xfs_dir2_sf_entry * +xfs_dir2_sf_nextentry(struct xfs_dir2_sf_hdr *hdr, + struct xfs_dir2_sf_entry *sfep) { - return ((xfs_dir2_sf_entry_t *) \ - ((char *)(sfep) + xfs_dir2_sf_entsize_byentry(sfp,sfep))); + return (struct xfs_dir2_sf_entry *) + ((char *)sfep + xfs_dir2_sf_entsize_byentry(hdr, sfep)); } /* * Functions. */ -extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf *sfp); -extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf *sfp, +extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp); +extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp, struct xfs_dir2_sf_entry *sfep); extern int xfs_dir2_block_sfsize(struct xfs_inode *dp, struct xfs_dir2_block *block, -- cgit v1.2.3 From 78f70cd7b7e8fd44dc89af4f72fb110d865338a1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:19 +0200 Subject: xfs: cleanup the definition of struct xfs_dir2_sf_entry Remove the inumber member which is at a variable offset after the actual name, and make name a real variable sized C99 array instead of the incorrect one-sized array which confuses (not only) gcc. Based on this clean up the helpers to calculate the entry size. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_sf.c | 6 +++--- fs/xfs/xfs_dir2_sf.h | 28 +++++++++++++--------------- 2 files changed, 16 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index 806b348f496f..3a1e3f4e3fa3 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -371,7 +371,7 @@ xfs_dir2_sf_addname( /* * Compute entry (and change in) size. 
*/ - add_entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen); + add_entsize = xfs_dir2_sf_entsize(sfp, args->namelen); incr_isize = add_entsize; objchange = 0; #if XFS_BIG_INUMS @@ -465,7 +465,7 @@ xfs_dir2_sf_addname_easy( /* * Grow the in-inode space. */ - xfs_idata_realloc(dp, xfs_dir2_sf_entsize_byname(sfp, args->namelen), + xfs_idata_realloc(dp, xfs_dir2_sf_entsize(sfp, args->namelen), XFS_DATA_FORK); /* * Need to set up again due to realloc of the inode data. @@ -1001,7 +1001,7 @@ xfs_dir2_sf_removename( * Calculate sizes. */ byteoff = (int)((char *)sfep - (char *)sfp); - entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen); + entsize = xfs_dir2_sf_entsize(sfp, args->namelen); newsize = oldsize - entsize; /* * Copy the part if any after the removed entry, sliding it down. diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h index c1ab2e7c66ae..55560bc80ec0 100644 --- a/fs/xfs/xfs_dir2_sf.h +++ b/fs/xfs/xfs_dir2_sf.h @@ -76,10 +76,13 @@ typedef struct xfs_dir2_sf_hdr { } __arch_pack xfs_dir2_sf_hdr_t; typedef struct xfs_dir2_sf_entry { - __uint8_t namelen; /* actual name length */ + __u8 namelen; /* actual name length */ xfs_dir2_sf_off_t offset; /* saved offset */ - __uint8_t name[1]; /* name, variable size */ - xfs_dir2_inou_t inumber; /* inode number, var. offset */ + __u8 name[]; /* name, variable size */ + /* + * A xfs_dir2_ino8_t or xfs_dir2_ino4_t follows here, at a + * variable offset after the name. + */ } __arch_pack xfs_dir2_sf_entry_t; static inline int xfs_dir2_sf_hdr_size(int i8count) @@ -101,19 +104,14 @@ xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off) INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off); } -static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_hdr_t *sfp, int len) -{ - return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \ - ((sfp)->i8count == 0) * \ - ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); -} - static inline int -xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_hdr_t *sfp, xfs_dir2_sf_entry_t *sfep) +xfs_dir2_sf_entsize(struct xfs_dir2_sf_hdr *hdr, int len) { - return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (sfep)->namelen - \ - ((sfp)->i8count == 0) * \ - ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); + return sizeof(struct xfs_dir2_sf_entry) + /* namelen + offset */ + len + /* name */ + (hdr->i8count ? /* ino */ + sizeof(xfs_dir2_ino8_t) : + sizeof(xfs_dir2_ino4_t)); } static inline struct xfs_dir2_sf_entry * @@ -128,7 +126,7 @@ xfs_dir2_sf_nextentry(struct xfs_dir2_sf_hdr *hdr, struct xfs_dir2_sf_entry *sfep) { return (struct xfs_dir2_sf_entry *) - ((char *)sfep + xfs_dir2_sf_entsize_byentry(hdr, sfep)); + ((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen)); } /* -- cgit v1.2.3 From 4f6ae1a49ed5c81501d6f7385416bb4e07289e99 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:27 +0200 Subject: xfs: avoid usage of struct xfs_dir2_block In most places we can simply pass around and use the struct xfs_dir2_data_hdr, which is the first and most important member of struct xfs_dir2_block instead of the full structure. 
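Passing the header instead of the whole block works because struct xfs_dir2_data_hdr is the first member of struct xfs_dir2_block, so the buffer's data pointer and every byte offset computed from it stay the same. A minimal sketch of the pattern, distilled from the hunks below (it assumes the usual XFS types and is not a standalone program):

	/* before: view the buffer as a whole directory block */
	xfs_dir2_block_t *block = bp->data;
	btp = xfs_dir2_block_tail_p(mp, block);

	/* after: view the same bytes through the data header only */
	xfs_dir2_data_hdr_t *hdr = bp->data;
	btp = xfs_dir2_block_tail_p(mp, hdr);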
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_block.c | 175 ++++++++++++++++++++++++------------------------ fs/xfs/xfs_dir2_block.h | 5 +- fs/xfs/xfs_dir2_data.c | 6 +- fs/xfs/xfs_dir2_leaf.c | 16 ++--- fs/xfs/xfs_dir2_sf.c | 9 +-- fs/xfs/xfs_dir2_sf.h | 4 +- 6 files changed, 106 insertions(+), 109 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index c66f194fc2a8..a0037cc4dd8c 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -67,7 +67,7 @@ xfs_dir2_block_addname( xfs_da_args_t *args) /* directory op arguments */ { xfs_dir2_data_free_t *bf; /* bestfree table in block */ - xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dabuf_t *bp; /* buffer for block */ xfs_dir2_block_tail_t *btp; /* block tail */ @@ -105,13 +105,13 @@ xfs_dir2_block_addname( return error; } ASSERT(bp != NULL); - block = bp->data; + hdr = bp->data; /* * Check the magic number, corrupted if wrong. */ - if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) { + if (unlikely(be32_to_cpu(hdr->magic) != XFS_DIR2_BLOCK_MAGIC)) { XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", - XFS_ERRLEVEL_LOW, mp, block); + XFS_ERRLEVEL_LOW, mp, hdr); xfs_da_brelse(tp, bp); return XFS_ERROR(EFSCORRUPTED); } @@ -119,8 +119,8 @@ xfs_dir2_block_addname( /* * Set up pointers to parts of the block. */ - bf = block->hdr.bestfree; - btp = xfs_dir2_block_tail_p(mp, block); + bf = hdr->bestfree; + btp = xfs_dir2_block_tail_p(mp, hdr); blp = xfs_dir2_block_leaf_p(btp); /* * No stale entries? Need space for entry and new leaf. @@ -133,7 +133,7 @@ xfs_dir2_block_addname( /* * Data object just before the first leaf entry. */ - enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); + enddup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp)); /* * If it's not free then can't do this add without cleaning up: * the space before the first leaf entry needs to be free so it @@ -146,7 +146,7 @@ xfs_dir2_block_addname( */ else { dup = (xfs_dir2_data_unused_t *) - ((char *)block + be16_to_cpu(bf[0].offset)); + ((char *)hdr + be16_to_cpu(bf[0].offset)); if (dup == enddup) { /* * It is the biggest freespace, is it too small @@ -159,7 +159,7 @@ xfs_dir2_block_addname( */ if (be16_to_cpu(bf[1].length) >= len) dup = (xfs_dir2_data_unused_t *) - ((char *)block + + ((char *)hdr + be16_to_cpu(bf[1].offset)); else dup = NULL; @@ -182,7 +182,7 @@ xfs_dir2_block_addname( */ else if (be16_to_cpu(bf[0].length) >= len) { dup = (xfs_dir2_data_unused_t *) - ((char *)block + be16_to_cpu(bf[0].offset)); + ((char *)hdr + be16_to_cpu(bf[0].offset)); compact = 0; } /* @@ -196,7 +196,7 @@ xfs_dir2_block_addname( /* * Data object just before the first leaf entry. */ - dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); + dup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp)); /* * If it's not free then the data will go where the * leaf data starts now, if it works at all. 
@@ -272,7 +272,7 @@ xfs_dir2_block_addname( lfloghigh -= be32_to_cpu(btp->stale) - 1; be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1)); xfs_dir2_data_make_free(tp, bp, - (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), + (xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr), (xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)), &needlog, &needscan); blp += be32_to_cpu(btp->stale) - 1; @@ -282,7 +282,7 @@ xfs_dir2_block_addname( * This needs to happen before the next call to use_free. */ if (needscan) { - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog); + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); needscan = 0; } } @@ -318,7 +318,7 @@ xfs_dir2_block_addname( */ xfs_dir2_data_use_free(tp, bp, enddup, (xfs_dir2_data_aoff_t) - ((char *)enddup - (char *)block + be16_to_cpu(enddup->length) - + ((char *)enddup - (char *)hdr + be16_to_cpu(enddup->length) - sizeof(*blp)), (xfs_dir2_data_aoff_t)sizeof(*blp), &needlog, &needscan); @@ -331,7 +331,7 @@ xfs_dir2_block_addname( * This needs to happen before the next call to use_free. */ if (needscan) { - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); needscan = 0; } @@ -397,13 +397,13 @@ xfs_dir2_block_addname( */ blp[mid].hashval = cpu_to_be32(args->hashval); blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, - (char *)dep - (char *)block)); + (char *)dep - (char *)hdr)); xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); /* * Mark space for the data entry used. */ xfs_dir2_data_use_free(tp, bp, dup, - (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), + (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), (xfs_dir2_data_aoff_t)len, &needlog, &needscan); /* * Create the new data entry. @@ -412,12 +412,12 @@ xfs_dir2_block_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, args->namelen); tagp = xfs_dir2_data_entry_tag_p(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + *tagp = cpu_to_be16((char *)dep - (char *)hdr); /* * Clean up the bestfree array and log the header, tail, and entry. */ if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog); + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, bp); xfs_dir2_block_log_tail(tp, bp); @@ -438,6 +438,7 @@ xfs_dir2_block_getdents( filldir_t filldir) { xfs_dir2_block_t *block; /* directory block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dabuf_t *bp; /* buffer for block */ xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_data_entry_t *dep; /* block data entry */ @@ -471,11 +472,12 @@ xfs_dir2_block_getdents( */ wantoff = xfs_dir2_dataptr_to_off(mp, *offset); block = bp->data; + hdr = &block->hdr; xfs_dir2_data_check(dp, bp); /* * Set up values for the loop. */ - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, hdr); ptr = (char *)block->u; endptr = (char *)xfs_dir2_block_leaf_p(btp); @@ -502,11 +504,11 @@ xfs_dir2_block_getdents( /* * The entry is before the desired starting point, skip it. */ - if ((char *)dep - (char *)block < wantoff) + if ((char *)dep - (char *)hdr < wantoff) continue; cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, - (char *)dep - (char *)block); + (char *)dep - (char *)hdr); /* * If it didn't fit, set the final offset to here & return. 
@@ -540,17 +542,14 @@ xfs_dir2_block_log_leaf( int first, /* index of first logged leaf */ int last) /* index of last logged leaf */ { - xfs_dir2_block_t *block; /* directory block structure */ - xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ - xfs_dir2_block_tail_t *btp; /* block tail */ - xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_data_hdr_t *hdr = bp->data; + xfs_dir2_leaf_entry_t *blp; + xfs_dir2_block_tail_t *btp; - mp = tp->t_mountp; - block = bp->data; - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr); blp = xfs_dir2_block_leaf_p(btp); - xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block), - (uint)((char *)&blp[last + 1] - (char *)block - 1)); + xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr), + (uint)((char *)&blp[last + 1] - (char *)hdr - 1)); } /* @@ -561,15 +560,12 @@ xfs_dir2_block_log_tail( xfs_trans_t *tp, /* transaction structure */ xfs_dabuf_t *bp) /* block buffer */ { - xfs_dir2_block_t *block; /* directory block structure */ - xfs_dir2_block_tail_t *btp; /* block tail */ - xfs_mount_t *mp; /* filesystem mount point */ + xfs_dir2_data_hdr_t *hdr = bp->data; + xfs_dir2_block_tail_t *btp; - mp = tp->t_mountp; - block = bp->data; - btp = xfs_dir2_block_tail_p(mp, block); - xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block), - (uint)((char *)(btp + 1) - (char *)block - 1)); + btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr); + xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr), + (uint)((char *)(btp + 1) - (char *)hdr - 1)); } /* @@ -580,7 +576,7 @@ int /* error */ xfs_dir2_block_lookup( xfs_da_args_t *args) /* dir lookup arguments */ { - xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dabuf_t *bp; /* block buffer */ xfs_dir2_block_tail_t *btp; /* block tail */ @@ -600,14 +596,14 @@ xfs_dir2_block_lookup( return error; dp = args->dp; mp = dp->i_mount; - block = bp->data; + hdr = bp->data; xfs_dir2_data_check(dp, bp); - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, hdr); blp = xfs_dir2_block_leaf_p(btp); /* * Get the offset from the leaf entry, to point to the data. */ - dep = (xfs_dir2_data_entry_t *)((char *)block + + dep = (xfs_dir2_data_entry_t *)((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); /* * Fill in inode number, CI name if appropriate, release the block. @@ -628,7 +624,7 @@ xfs_dir2_block_lookup_int( int *entno) /* returned entry number */ { xfs_dir2_dataptr_t addr; /* data entry address */ - xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dabuf_t *bp; /* block buffer */ xfs_dir2_block_tail_t *btp; /* block tail */ @@ -654,9 +650,9 @@ xfs_dir2_block_lookup_int( return error; } ASSERT(bp != NULL); - block = bp->data; + hdr = bp->data; xfs_dir2_data_check(dp, bp); - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, hdr); blp = xfs_dir2_block_leaf_p(btp); /* * Loop doing a binary search for our hash value. @@ -694,7 +690,7 @@ xfs_dir2_block_lookup_int( * Get pointer to the entry from the leaf. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + xfs_dir2_dataptr_to_off(mp, addr)); + ((char *)hdr + xfs_dir2_dataptr_to_off(mp, addr)); /* * Compare name and if it's an exact match, return the index * and buffer. 
If it's the first case-insensitive match, store @@ -733,7 +729,7 @@ int /* error */ xfs_dir2_block_removename( xfs_da_args_t *args) /* directory operation args */ { - xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */ xfs_dabuf_t *bp; /* block buffer */ xfs_dir2_block_tail_t *btp; /* block tail */ @@ -760,20 +756,20 @@ xfs_dir2_block_removename( dp = args->dp; tp = args->trans; mp = dp->i_mount; - block = bp->data; - btp = xfs_dir2_block_tail_p(mp, block); + hdr = bp->data; + btp = xfs_dir2_block_tail_p(mp, hdr); blp = xfs_dir2_block_leaf_p(btp); /* * Point to the data entry using the leaf entry. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); + ((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); /* * Mark the data entry's space free. */ needlog = needscan = 0; xfs_dir2_data_make_free(tp, bp, - (xfs_dir2_data_aoff_t)((char *)dep - (char *)block), + (xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr), xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan); /* * Fix up the block tail. @@ -789,15 +785,15 @@ xfs_dir2_block_removename( * Fix up bestfree, log the header if necessary. */ if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog); + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, bp); xfs_dir2_data_check(dp, bp); /* * See if the size as a shortform is good enough. */ - if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) > - XFS_IFORK_DSIZE(dp)) { + size = xfs_dir2_block_sfsize(dp, hdr, &sfh); + if (size > XFS_IFORK_DSIZE(dp)) { xfs_da_buf_done(bp); return 0; } @@ -815,7 +811,7 @@ int /* error */ xfs_dir2_block_replace( xfs_da_args_t *args) /* directory operation args */ { - xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dabuf_t *bp; /* block buffer */ xfs_dir2_block_tail_t *btp; /* block tail */ @@ -836,14 +832,14 @@ xfs_dir2_block_replace( } dp = args->dp; mp = dp->i_mount; - block = bp->data; - btp = xfs_dir2_block_tail_p(mp, block); + hdr = bp->data; + btp = xfs_dir2_block_tail_p(mp, hdr); blp = xfs_dir2_block_leaf_p(btp); /* * Point to the data entry we need to change. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); + ((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); ASSERT(be64_to_cpu(dep->inumber) != args->inumber); /* * Change the inode number to the new value. 
@@ -882,7 +878,7 @@ xfs_dir2_leaf_to_block( xfs_dabuf_t *dbp) /* data buffer */ { __be16 *bestsp; /* leaf bests table */ - xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_block_tail_t *btp; /* block tail */ xfs_inode_t *dp; /* incore directory inode */ xfs_dir2_data_unused_t *dup; /* unused data entry */ @@ -917,7 +913,7 @@ xfs_dir2_leaf_to_block( while (dp->i_d.di_size > mp->m_dirblksize) { bestsp = xfs_dir2_leaf_bests_p(ltp); if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) == - mp->m_dirblksize - (uint)sizeof(block->hdr)) { + mp->m_dirblksize - (uint)sizeof(*hdr)) { if ((error = xfs_dir2_leaf_trim_data(args, lbp, (xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1)))) @@ -935,18 +931,18 @@ xfs_dir2_leaf_to_block( XFS_DATA_FORK))) { goto out; } - block = dbp->data; - ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC); + hdr = dbp->data; + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); /* * Size of the "leaf" area in the block. */ - size = (uint)sizeof(block->tail) + + size = (uint)sizeof(xfs_dir2_block_tail_t) + (uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); /* * Look at the last data entry. */ - tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1; - dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp)); + tagp = (__be16 *)((char *)hdr + mp->m_dirblksize) - 1; + dup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp)); /* * If it's not free or is too short we can't do it. */ @@ -958,7 +954,7 @@ xfs_dir2_leaf_to_block( /* * Start converting it to block form. */ - block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); + hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); needlog = 1; needscan = 0; /* @@ -969,7 +965,7 @@ xfs_dir2_leaf_to_block( /* * Initialize the block tail. */ - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, hdr); btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); btp->stale = 0; xfs_dir2_block_log_tail(tp, dbp); @@ -988,7 +984,7 @@ xfs_dir2_leaf_to_block( * Scan the bestfree if we need it and log the data block header. */ if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog); + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, dbp); /* @@ -1002,8 +998,8 @@ xfs_dir2_leaf_to_block( /* * Now see if the resulting block can be shrunken to shortform. */ - if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) > - XFS_IFORK_DSIZE(dp)) { + size = xfs_dir2_block_sfsize(dp, hdr, &sfh); + if (size > XFS_IFORK_DSIZE(dp)) { error = 0; goto out; } @@ -1025,6 +1021,7 @@ xfs_dir2_sf_to_block( { xfs_dir2_db_t blkno; /* dir-relative block # (0) */ xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dabuf_t *bp; /* block buffer */ xfs_dir2_block_tail_t *btp; /* block tail pointer */ @@ -1095,7 +1092,8 @@ xfs_dir2_sf_to_block( return error; } block = bp->data; - block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); + hdr = &block->hdr; + hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); /* * Compute size of block "tail" area. */ @@ -1113,45 +1111,45 @@ xfs_dir2_sf_to_block( /* * Fill in the tail. */ - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, hdr); btp->count = cpu_to_be32(sfp->count + 2); /* ., .. 
*/ btp->stale = 0; blp = xfs_dir2_block_leaf_p(btp); - endoffset = (uint)((char *)blp - (char *)block); + endoffset = (uint)((char *)blp - (char *)hdr); /* * Remove the freespace, we'll manage it. */ xfs_dir2_data_use_free(tp, bp, dup, - (xfs_dir2_data_aoff_t)((char *)dup - (char *)block), + (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), be16_to_cpu(dup->length), &needlog, &needscan); /* * Create entry for . */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATA_DOT_OFFSET); + ((char *)hdr + XFS_DIR2_DATA_DOT_OFFSET); dep->inumber = cpu_to_be64(dp->i_ino); dep->namelen = 1; dep->name[0] = '.'; tagp = xfs_dir2_data_entry_tag_p(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + *tagp = cpu_to_be16((char *)dep - (char *)hdr); xfs_dir2_data_log_entry(tp, bp, dep); blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot); blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, - (char *)dep - (char *)block)); + (char *)dep - (char *)hdr)); /* * Create entry for .. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET); + ((char *)hdr + XFS_DIR2_DATA_DOTDOT_OFFSET); dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp)); dep->namelen = 2; dep->name[0] = dep->name[1] = '.'; tagp = xfs_dir2_data_entry_tag_p(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + *tagp = cpu_to_be16((char *)dep - (char *)hdr); xfs_dir2_data_log_entry(tp, bp, dep); blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, - (char *)dep - (char *)block)); + (char *)dep - (char *)hdr)); offset = XFS_DIR2_DATA_FIRST_OFFSET; /* * Loop over existing entries, stuff them in. @@ -1177,14 +1175,13 @@ xfs_dir2_sf_to_block( * There should be a hole here, make one. */ if (offset < newoffset) { - dup = (xfs_dir2_data_unused_t *) - ((char *)block + offset); + dup = (xfs_dir2_data_unused_t *)((char *)hdr + offset); dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); dup->length = cpu_to_be16(newoffset - offset); *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16( - ((char *)dup - (char *)block)); + ((char *)dup - (char *)hdr)); xfs_dir2_data_log_unused(tp, bp, dup); - (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, + (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)hdr, dup, &dummy); offset += be16_to_cpu(dup->length); continue; @@ -1192,20 +1189,20 @@ xfs_dir2_sf_to_block( /* * Copy a real entry. 
*/ - dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset); + dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset); dep->inumber = cpu_to_be64(xfs_dir2_sfe_get_ino(sfp, sfep)); dep->namelen = sfep->namelen; memcpy(dep->name, sfep->name, dep->namelen); tagp = xfs_dir2_data_entry_tag_p(dep); - *tagp = cpu_to_be16((char *)dep - (char *)block); + *tagp = cpu_to_be16((char *)dep - (char *)hdr); xfs_dir2_data_log_entry(tp, bp, dep); name.name = sfep->name; name.len = sfep->namelen; blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops-> hashname(&name)); blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, - (char *)dep - (char *)block)); - offset = (int)((char *)(tagp + 1) - (char *)block); + (char *)dep - (char *)hdr)); + offset = (int)((char *)(tagp + 1) - (char *)hdr); if (++i == sfp->count) sfep = NULL; else diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h index 10e689676382..fe91f88f880c 100644 --- a/fs/xfs/xfs_dir2_block.h +++ b/fs/xfs/xfs_dir2_block.h @@ -61,10 +61,9 @@ typedef struct xfs_dir2_block { * Pointer to the leaf header embedded in a data block (1-block format) */ static inline xfs_dir2_block_tail_t * -xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block) +xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_data_hdr_t *hdr) { - return (((xfs_dir2_block_tail_t *) - ((char *)(block) + (mp)->m_dirblksize)) - 1); + return ((xfs_dir2_block_tail_t *)((char *)hdr + mp->m_dirblksize)) - 1; } /* diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 921595b84f5b..46ccb83bdab4 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -72,7 +72,7 @@ xfs_dir2_data_check( bf = d->hdr.bestfree; p = (char *)d->u; if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { - btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d); + btp = xfs_dir2_block_tail_p(mp, &d->hdr); lep = xfs_dir2_block_leaf_p(btp); endp = (char *)lep; } else @@ -348,7 +348,7 @@ xfs_dir2_data_freescan( */ p = (char *)d->u; if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { - btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d); + btp = xfs_dir2_block_tail_p(mp, &d->hdr); endp = (char *)xfs_dir2_block_leaf_p(btp); } else endp = (char *)d + mp->m_dirblksize; @@ -537,7 +537,7 @@ xfs_dir2_data_make_free( xfs_dir2_block_tail_t *btp; /* block tail */ ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); - btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d); + btp = xfs_dir2_block_tail_p(mp, &d->hdr); endptr = (char *)xfs_dir2_block_leaf_p(btp); } /* diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 7f7769ea2be8..1b048dad360f 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -64,7 +64,7 @@ xfs_dir2_block_to_leaf( { __be16 *bestsp; /* leaf's bestsp entries */ xfs_dablk_t blkno; /* leaf block's bno */ - xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */ xfs_dir2_block_tail_t *btp; /* block's tail */ xfs_inode_t *dp; /* incore directory inode */ @@ -101,9 +101,9 @@ xfs_dir2_block_to_leaf( } ASSERT(lbp != NULL); leaf = lbp->data; - block = dbp->data; + hdr = dbp->data; xfs_dir2_data_check(dp, dbp); - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, hdr); blp = xfs_dir2_block_leaf_p(btp); /* * Set the counts in the leaf header. @@ -123,23 +123,23 @@ xfs_dir2_block_to_leaf( * tail be free. 
*/ xfs_dir2_data_make_free(tp, dbp, - (xfs_dir2_data_aoff_t)((char *)blp - (char *)block), - (xfs_dir2_data_aoff_t)((char *)block + mp->m_dirblksize - + (xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr), + (xfs_dir2_data_aoff_t)((char *)hdr + mp->m_dirblksize - (char *)blp), &needlog, &needscan); /* * Fix up the block header, make it a data block. */ - block->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); + hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog); + xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); /* * Set up leaf tail and bests table. */ ltp = xfs_dir2_leaf_tail_p(mp, leaf); ltp->bestcount = cpu_to_be32(1); bestsp = xfs_dir2_leaf_bests_p(ltp); - bestsp[0] = block->hdr.bestfree[0].length; + bestsp[0] = hdr->bestfree[0].length; /* * Log the data header and leaf bests table. */ diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index 3a1e3f4e3fa3..515efa5d29ca 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -141,7 +141,7 @@ xfs_dir2_sfe_put_ino( int /* size for sf form */ xfs_dir2_block_sfsize( xfs_inode_t *dp, /* incore inode pointer */ - xfs_dir2_block_t *block, /* block directory data */ + xfs_dir2_data_hdr_t *hdr, /* block directory data */ xfs_dir2_sf_hdr_t *sfhp) /* output: header for sf form */ { xfs_dir2_dataptr_t addr; /* data entry address */ @@ -161,7 +161,7 @@ xfs_dir2_block_sfsize( mp = dp->i_mount; count = i8count = namelen = 0; - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, hdr); blp = xfs_dir2_block_leaf_p(btp); /* @@ -174,7 +174,7 @@ xfs_dir2_block_sfsize( * Calculate the pointer to the entry at hand. */ dep = (xfs_dir2_data_entry_t *) - ((char *)block + xfs_dir2_dataptr_to_off(mp, addr)); + ((char *)hdr + xfs_dir2_dataptr_to_off(mp, addr)); /* * Detect . and .., so we can special-case them. * . is not included in sf directories. @@ -255,6 +255,7 @@ xfs_dir2_block_to_sf( ASSERT(error != ENOSPC); goto out; } + /* * The buffer is now unconditionally gone, whether * xfs_dir2_shrink_inode worked or not. @@ -276,7 +277,7 @@ xfs_dir2_block_to_sf( /* * Set up to loop over the block's entries. */ - btp = xfs_dir2_block_tail_p(mp, block); + btp = xfs_dir2_block_tail_p(mp, &block->hdr); ptr = (char *)block->u; endptr = (char *)xfs_dir2_block_leaf_p(btp); sfep = xfs_dir2_sf_firstentry(sfp); diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h index 55560bc80ec0..65c5763905c9 100644 --- a/fs/xfs/xfs_dir2_sf.h +++ b/fs/xfs/xfs_dir2_sf.h @@ -32,7 +32,7 @@ struct uio; struct xfs_dabuf; struct xfs_da_args; -struct xfs_dir2_block; +struct xfs_dir2_data_hdr; struct xfs_inode; struct xfs_mount; struct xfs_trans; @@ -136,7 +136,7 @@ extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp); extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp, struct xfs_dir2_sf_entry *sfep); extern int xfs_dir2_block_sfsize(struct xfs_inode *dp, - struct xfs_dir2_block *block, + struct xfs_dir2_data_hdr *block, xfs_dir2_sf_hdr_t *sfhp); extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp, int size, xfs_dir2_sf_hdr_t *sfhp); -- cgit v1.2.3 From a64b04179735de6bfd9f00c130a68ed7f20d18ef Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:32 +0200 Subject: xfs: kill struct xfs_dir2_block Remove the confusing xfs_dir2_block structure. 
It is supposed to describe an XFS dir2 block format btree block, but due to the variable-sized nature of almost all elements in it, it can't actually do anything close to that job. In addition to accessing the fixed offset header structure it was only used to get a pointer to the first dir or unused entry after it, which can be trivially replaced by pointer arithmetic on the header pointer. For most users that is actually more natural anyway, as they don't use a typed pointer but rather a character pointer for further arithmetic. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_block.c | 12 ++++-------- fs/xfs/xfs_dir2_block.h | 44 +++++++++++++++++++++++--------------------- fs/xfs/xfs_dir2_sf.c | 14 +++++++------- 3 files changed, 34 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index a0037cc4dd8c..9adaf803957a 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -437,7 +437,6 @@ xfs_dir2_block_getdents( xfs_off_t *offset, filldir_t filldir) { - xfs_dir2_block_t *block; /* directory block structure */ xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dabuf_t *bp; /* buffer for block */ xfs_dir2_block_tail_t *btp; /* block tail */ @@ -471,14 +470,13 @@ xfs_dir2_block_getdents( * We'll skip entries before this. */ wantoff = xfs_dir2_dataptr_to_off(mp, *offset); - block = bp->data; - hdr = &block->hdr; + hdr = bp->data; xfs_dir2_data_check(dp, bp); /* * Set up values for the loop. */ btp = xfs_dir2_block_tail_p(mp, hdr); - ptr = (char *)block->u; + ptr = (char *)(hdr + 1); endptr = (char *)xfs_dir2_block_leaf_p(btp); /* @@ -1020,7 +1018,6 @@ xfs_dir2_sf_to_block( xfs_da_args_t *args) /* operation arguments */ { xfs_dir2_db_t blkno; /* dir-relative block # (0) */ - xfs_dir2_block_t *block; /* block structure */ xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_leaf_entry_t *blp; /* block leaf entries */ xfs_dabuf_t *bp; /* block buffer */ @@ -1091,8 +1088,7 @@ xfs_dir2_sf_to_block( kmem_free(sfp); return error; } - block = bp->data; - hdr = &block->hdr; + hdr = bp->data; hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC); /* * Compute size of block "tail" area. */ @@ -1103,7 +1099,7 @@ xfs_dir2_sf_to_block( * The whole thing is initialized to free by the init routine. * Say we're using the leaf and tail area. */ - dup = (xfs_dir2_data_unused_t *)block->u; + dup = (xfs_dir2_data_unused_t *)(hdr + 1); needlog = needscan = 0; xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog, &needscan); diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h index fe91f88f880c..de59677e38ba 100644 --- a/fs/xfs/xfs_dir2_block.h +++ b/fs/xfs/xfs_dir2_block.h @@ -19,10 +19,30 @@ #define __XFS_DIR2_BLOCK_H__ /* - * xfs_dir2_block.h - * Directory version 2, single block format structures + * Directory version 2, single block format structures. + * + * The single block format looks like the following drawing on disk: + * + * +-------------------------------------------------+ + * | xfs_dir2_data_hdr_t | + * +-------------------------------------------------+ + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | ... | + * +-------------------------------------------------+ + * | unused space | + * +-------------------------------------------------+ + * | ... 
| + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * +-------------------------------------------------+ + * | xfs_dir2_block_tail_t | + * +-------------------------------------------------+ + * + * As all the entries are variable size structures the accessors in this + * file and xfs_dir2_data.h should be used to iterate over them. */ - struct uio; struct xfs_dabuf; struct xfs_da_args; @@ -32,14 +52,6 @@ struct xfs_inode; struct xfs_mount; struct xfs_trans; -/* - * The single block format is as follows: - * xfs_dir2_data_hdr_t structure - * xfs_dir2_data_entry_t and xfs_dir2_data_unused_t structures - * xfs_dir2_leaf_entry_t structures - * xfs_dir2_block_tail_t structure - */ - #define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ typedef struct xfs_dir2_block_tail { @@ -47,16 +59,6 @@ typedef struct xfs_dir2_block_tail { __be32 stale; /* count of stale lf entries */ } xfs_dir2_block_tail_t; -/* - * Generic single-block structure, for xfs_db. - */ -typedef struct xfs_dir2_block { - xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_BLOCK_MAGIC */ - xfs_dir2_data_union_t u[1]; - xfs_dir2_leaf_entry_t leaf[1]; - xfs_dir2_block_tail_t tail; -} xfs_dir2_block_t; - /* * Pointer to the leaf header embedded in a data block (1-block format) */ diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index 515efa5d29ca..b47a99c8f244 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -226,7 +226,7 @@ xfs_dir2_block_to_sf( int size, /* shortform directory size */ xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */ { - xfs_dir2_block_t *block; /* block structure */ + xfs_dir2_data_hdr_t *hdr; /* block header */ xfs_dir2_block_tail_t *btp; /* block tail pointer */ xfs_dir2_data_entry_t *dep; /* data entry pointer */ xfs_inode_t *dp; /* incore directory inode */ @@ -248,8 +248,8 @@ xfs_dir2_block_to_sf( * Make a copy of the block data, so we can shrink the inode * and add local data. */ - block = kmem_alloc(mp->m_dirblksize, KM_SLEEP); - memcpy(block, bp->data, mp->m_dirblksize); + hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP); + memcpy(hdr, bp->data, mp->m_dirblksize); logflags = XFS_ILOG_CORE; if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) { ASSERT(error != ENOSPC); @@ -277,8 +277,8 @@ xfs_dir2_block_to_sf( /* * Set up to loop over the block's entries. */ - btp = xfs_dir2_block_tail_p(mp, &block->hdr); - ptr = (char *)block->u; + btp = xfs_dir2_block_tail_p(mp, hdr); + ptr = (char *)(hdr + 1); endptr = (char *)xfs_dir2_block_leaf_p(btp); sfep = xfs_dir2_sf_firstentry(sfp); /* @@ -314,7 +314,7 @@ xfs_dir2_block_to_sf( sfep->namelen = dep->namelen; xfs_dir2_sf_put_offset(sfep, (xfs_dir2_data_aoff_t) - ((char *)dep - (char *)block)); + ((char *)dep - (char *)hdr)); memcpy(sfep->name, dep->name, dep->namelen); xfs_dir2_sfe_put_ino(sfp, sfep, be64_to_cpu(dep->inumber)); @@ -327,7 +327,7 @@ xfs_dir2_block_to_sf( xfs_dir2_sf_check(args); out: xfs_trans_log_inode(args->trans, dp, logflags); - kmem_free(block); + kmem_free(hdr); return error; } -- cgit v1.2.3 From c2066e2662070e794f57a96a129c42575e77cfcb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:38 +0200 Subject: xfs: avoid usage of struct xfs_dir2_data In most places we can simply pass around and use the struct xfs_dir2_data_hdr, which is the first and most important member of struct xfs_dir2_data instead of the full structure. 
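As a rough sketch of the access pattern this series moves to (a minimal, self-contained userspace example, not code from the patch: the struct layout, the entry_area() helper and the magic value below are simplified stand-ins for the real on-disk structures), once a caller holds a pointer to the fixed-size header, the variable-sized entry area is reached with plain pointer arithmetic instead of a wrapper struct:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for xfs_dir2_data_hdr_t; not the real on-disk layout. */
struct data_hdr {
	uint32_t magic;
	struct {
		uint16_t offset;
		uint16_t length;
	} bestfree[3];
};

/* The entry area starts immediately after the fixed-size header. */
static char *entry_area(struct data_hdr *hdr)
{
	return (char *)(hdr + 1);
}

int main(void)
{
	/* One directory block, illustrated here as a 4k buffer. */
	union {
		struct data_hdr hdr;
		char bytes[4096];
	} block = { { 0 } };
	struct data_hdr *hdr = &block.hdr;

	hdr->magic = 0x58443244;	/* illustrative magic value */

	/* Walk from the header by plain pointer arithmetic, no wrapper struct. */
	char *p = entry_area(hdr);
	char *end = block.bytes + sizeof(block.bytes);

	printf("entry area spans %zu bytes\n", (size_t)(end - p));
	return 0;
}

This mirrors the (char *)(hdr + 1) and (char *)hdr + offset arithmetic used throughout the converted functions in the diffs below.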
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_da_btree.c | 11 +- fs/xfs/xfs_dir2_block.c | 14 +-- fs/xfs/xfs_dir2_data.c | 308 +++++++++++++++++++++++++----------------------- fs/xfs/xfs_dir2_data.h | 8 +- fs/xfs/xfs_dir2_leaf.c | 76 ++++++------ fs/xfs/xfs_dir2_node.c | 50 ++++---- 6 files changed, 236 insertions(+), 231 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 6102ac6d1dff..efb906f8a9c9 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -2079,16 +2079,13 @@ xfs_da_do_buf( * For read_buf, check the magic number. */ if (caller == 1) { - xfs_dir2_data_t *data; - xfs_dir2_free_t *free; - xfs_da_blkinfo_t *info; + xfs_dir2_data_hdr_t *hdr = rbp->data; + xfs_dir2_free_t *free = rbp->data; + xfs_da_blkinfo_t *info = rbp->data; uint magic, magic1; - info = rbp->data; - data = rbp->data; - free = rbp->data; magic = be16_to_cpu(info->magic); - magic1 = be32_to_cpu(data->hdr.magic); + magic1 = be32_to_cpu(hdr->magic); if (unlikely( XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) && (magic != XFS_ATTR_LEAF_MAGIC) && diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 9adaf803957a..c9fdabe5e1ad 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -282,7 +282,7 @@ xfs_dir2_block_addname( * This needs to happen before the next call to use_free. */ if (needscan) { - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); needscan = 0; } } @@ -331,8 +331,7 @@ xfs_dir2_block_addname( * This needs to happen before the next call to use_free. */ if (needscan) { - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, - &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); needscan = 0; } /* @@ -417,7 +416,7 @@ xfs_dir2_block_addname( * Clean up the bestfree array and log the header, tail, and entry. */ if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, bp); xfs_dir2_block_log_tail(tp, bp); @@ -783,7 +782,7 @@ xfs_dir2_block_removename( * Fix up bestfree, log the header if necessary. */ if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, bp); xfs_dir2_data_check(dp, bp); @@ -982,7 +981,7 @@ xfs_dir2_leaf_to_block( * Scan the bestfree if we need it and log the data block header. */ if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, dbp); /* @@ -1177,8 +1176,7 @@ xfs_dir2_sf_to_block( *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16( ((char *)dup - (char *)hdr)); xfs_dir2_data_log_unused(tp, bp, dup); - (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)hdr, - dup, &dummy); + xfs_dir2_data_freeinsert(hdr, dup, &dummy); offset += be16_to_cpu(dup->length); continue; } diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 46ccb83bdab4..2bbc21696944 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -35,6 +35,9 @@ #include "xfs_dir2_block.h" #include "xfs_error.h" +STATIC xfs_dir2_data_free_t * +xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup); + #ifdef DEBUG /* * Check the consistency of the data block. 
@@ -51,6 +54,7 @@ xfs_dir2_data_check( xfs_dir2_block_tail_t *btp=NULL; /* block tail */ int count; /* count of entries found */ xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_free_t *dfp; /* bestfree entry */ xfs_dir2_data_unused_t *dup; /* unused entry */ @@ -67,16 +71,19 @@ xfs_dir2_data_check( mp = dp->i_mount; d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); - bf = d->hdr.bestfree; + hdr = &d->hdr; + bf = hdr->bestfree; p = (char *)d->u; - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { - btp = xfs_dir2_block_tail_p(mp, &d->hdr); + + if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + btp = xfs_dir2_block_tail_p(mp, hdr); lep = xfs_dir2_block_leaf_p(btp); endp = (char *)lep; - } else - endp = (char *)d + mp->m_dirblksize; + } else { + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); + endp = (char *)hdr + mp->m_dirblksize; + } + count = lastfree = freeseen = 0; /* * Account for zero bestfree entries. @@ -108,8 +115,8 @@ xfs_dir2_data_check( if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { ASSERT(lastfree == 0); ASSERT(be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) == - (char *)dup - (char *)d); - dfp = xfs_dir2_data_freefind(d, dup); + (char *)dup - (char *)hdr); + dfp = xfs_dir2_data_freefind(hdr, dup); if (dfp) { i = (int)(dfp - bf); ASSERT((freeseen & (1 << i)) == 0); @@ -132,13 +139,13 @@ xfs_dir2_data_check( ASSERT(dep->namelen != 0); ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0); ASSERT(be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)) == - (char *)dep - (char *)d); + (char *)dep - (char *)hdr); count++; lastfree = 0; - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { + if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, (xfs_dir2_data_aoff_t) - ((char *)dep - (char *)d)); + ((char *)dep - (char *)hdr)); name.name = dep->name; name.len = dep->namelen; hash = mp->m_dirnameops->hashname(&name); @@ -155,7 +162,7 @@ xfs_dir2_data_check( * Need to have seen all the entries and all the bestfree slots. */ ASSERT(freeseen == 7); - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { + if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR) stale++; @@ -172,9 +179,9 @@ xfs_dir2_data_check( * Given a data block and an unused entry from that block, * return the bestfree entry if any that corresponds to it. */ -xfs_dir2_data_free_t * +STATIC xfs_dir2_data_free_t * xfs_dir2_data_freefind( - xfs_dir2_data_t *d, /* data block */ + xfs_dir2_data_hdr_t *hdr, /* data block */ xfs_dir2_data_unused_t *dup) /* data unused entry */ { xfs_dir2_data_free_t *dfp; /* bestfree entry */ @@ -184,17 +191,17 @@ xfs_dir2_data_freefind( int seenzero; /* saw a 0 bestfree entry */ #endif - off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)d); + off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr); #if defined(DEBUG) && defined(__KERNEL__) /* * Validate some consistency in the bestfree table. * Check order, non-overlapping entries, and if we find the * one we're looking for it has to be exact. 
*/ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); - for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0; - dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + for (dfp = &hdr->bestfree[0], seenzero = matched = 0; + dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT]; dfp++) { if (!dfp->offset) { ASSERT(!dfp->length); @@ -210,7 +217,7 @@ xfs_dir2_data_freefind( else ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off); ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length)); - if (dfp > &d->hdr.bestfree[0]) + if (dfp > &hdr->bestfree[0]) ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length)); } #endif @@ -219,13 +226,13 @@ xfs_dir2_data_freefind( * it can't be there since they're sorted. */ if (be16_to_cpu(dup->length) < - be16_to_cpu(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length)) + be16_to_cpu(hdr->bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length)) return NULL; /* * Look at the three bestfree entries for our guy. */ - for (dfp = &d->hdr.bestfree[0]; - dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT]; + for (dfp = &hdr->bestfree[0]; + dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT]; dfp++) { if (!dfp->offset) return NULL; @@ -243,7 +250,7 @@ xfs_dir2_data_freefind( */ xfs_dir2_data_free_t * /* entry inserted */ xfs_dir2_data_freeinsert( - xfs_dir2_data_t *d, /* data block pointer */ + xfs_dir2_data_hdr_t *hdr, /* data block pointer */ xfs_dir2_data_unused_t *dup, /* unused space */ int *loghead) /* log the data header (out) */ { @@ -251,12 +258,13 @@ xfs_dir2_data_freeinsert( xfs_dir2_data_free_t new; /* new bestfree entry */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); #endif - dfp = d->hdr.bestfree; + dfp = hdr->bestfree; new.length = dup->length; - new.offset = cpu_to_be16((char *)dup - (char *)d); + new.offset = cpu_to_be16((char *)dup - (char *)hdr); + /* * Insert at position 0, 1, or 2; or not at all. */ @@ -286,36 +294,36 @@ xfs_dir2_data_freeinsert( */ STATIC void xfs_dir2_data_freeremove( - xfs_dir2_data_t *d, /* data block pointer */ + xfs_dir2_data_hdr_t *hdr, /* data block header */ xfs_dir2_data_free_t *dfp, /* bestfree entry pointer */ int *loghead) /* out: log data header */ { #ifdef __KERNEL__ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); #endif /* * It's the first entry, slide the next 2 up. */ - if (dfp == &d->hdr.bestfree[0]) { - d->hdr.bestfree[0] = d->hdr.bestfree[1]; - d->hdr.bestfree[1] = d->hdr.bestfree[2]; + if (dfp == &hdr->bestfree[0]) { + hdr->bestfree[0] = hdr->bestfree[1]; + hdr->bestfree[1] = hdr->bestfree[2]; } /* * It's the second entry, slide the 3rd entry up. */ - else if (dfp == &d->hdr.bestfree[1]) - d->hdr.bestfree[1] = d->hdr.bestfree[2]; + else if (dfp == &hdr->bestfree[1]) + hdr->bestfree[1] = hdr->bestfree[2]; /* * Must be the last entry. */ else - ASSERT(dfp == &d->hdr.bestfree[2]); + ASSERT(dfp == &hdr->bestfree[2]); /* * Clear the 3rd entry, must be zero now. 
*/ - d->hdr.bestfree[2].length = 0; - d->hdr.bestfree[2].offset = 0; + hdr->bestfree[2].length = 0; + hdr->bestfree[2].offset = 0; *loghead = 1; } @@ -325,9 +333,10 @@ xfs_dir2_data_freeremove( void xfs_dir2_data_freescan( xfs_mount_t *mp, /* filesystem mount point */ - xfs_dir2_data_t *d, /* data block pointer */ + xfs_dir2_data_hdr_t *hdr, /* data block header */ int *loghead) /* out: log data header */ { + xfs_dir2_data_t *d = (xfs_dir2_data_t *)hdr; xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_data_entry_t *dep; /* active data entry */ xfs_dir2_data_unused_t *dup; /* unused data entry */ @@ -335,23 +344,23 @@ xfs_dir2_data_freescan( char *p; /* current entry pointer */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); #endif /* * Start by clearing the table. */ - memset(d->hdr.bestfree, 0, sizeof(d->hdr.bestfree)); + memset(hdr->bestfree, 0, sizeof(hdr->bestfree)); *loghead = 1; /* * Set up pointers. */ p = (char *)d->u; - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { - btp = xfs_dir2_block_tail_p(mp, &d->hdr); + if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + btp = xfs_dir2_block_tail_p(mp, hdr); endp = (char *)xfs_dir2_block_leaf_p(btp); } else - endp = (char *)d + mp->m_dirblksize; + endp = (char *)hdr + mp->m_dirblksize; /* * Loop over the block's entries. */ @@ -361,9 +370,9 @@ xfs_dir2_data_freescan( * If it's a free entry, insert it. */ if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { - ASSERT((char *)dup - (char *)d == + ASSERT((char *)dup - (char *)hdr == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup))); - xfs_dir2_data_freeinsert(d, dup, loghead); + xfs_dir2_data_freeinsert(hdr, dup, loghead); p += be16_to_cpu(dup->length); } /* @@ -371,7 +380,7 @@ xfs_dir2_data_freescan( */ else { dep = (xfs_dir2_data_entry_t *)p; - ASSERT((char *)dep - (char *)d == + ASSERT((char *)dep - (char *)hdr == be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep))); p += xfs_dir2_data_entsize(dep->namelen); } @@ -390,6 +399,7 @@ xfs_dir2_data_init( { xfs_dabuf_t *bp; /* block buffer */ xfs_dir2_data_t *d; /* pointer to block */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_inode_t *dp; /* incore directory inode */ xfs_dir2_data_unused_t *dup; /* unused entry pointer */ int error; /* error return value */ @@ -410,26 +420,29 @@ xfs_dir2_data_init( return error; } ASSERT(bp != NULL); + /* * Initialize the header. */ d = bp->data; - d->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); - d->hdr.bestfree[0].offset = cpu_to_be16(sizeof(d->hdr)); + hdr = &d->hdr; + hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); + hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr)); for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { - d->hdr.bestfree[i].length = 0; - d->hdr.bestfree[i].offset = 0; + hdr->bestfree[i].length = 0; + hdr->bestfree[i].offset = 0; } + /* * Set up an unused entry for the block's body. */ dup = &d->u[0].unused; dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); - t=mp->m_dirblksize - (uint)sizeof(d->hdr); - d->hdr.bestfree[0].length = cpu_to_be16(t); + t = mp->m_dirblksize - (uint)sizeof(*hdr); + hdr->bestfree[0].length = cpu_to_be16(t); dup->length = cpu_to_be16(t); - *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)d); + *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr); /* * Log it and return it. 
*/ @@ -448,14 +461,14 @@ xfs_dir2_data_log_entry( xfs_dabuf_t *bp, /* block buffer */ xfs_dir2_data_entry_t *dep) /* data entry pointer */ { - xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_hdr_t *hdr = bp->data; - d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); - xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + + xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr), (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) - - (char *)d - 1)); + (char *)hdr - 1)); } /* @@ -466,13 +479,12 @@ xfs_dir2_data_log_header( xfs_trans_t *tp, /* transaction pointer */ xfs_dabuf_t *bp) /* block buffer */ { - xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_hdr_t *hdr = bp->data; - d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); - xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d), - (uint)(sizeof(d->hdr) - 1)); + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + + xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1); } /* @@ -484,23 +496,23 @@ xfs_dir2_data_log_unused( xfs_dabuf_t *bp, /* block buffer */ xfs_dir2_data_unused_t *dup) /* data unused pointer */ { - xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_hdr_t *hdr = bp->data; + + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); - d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); /* * Log the first part of the unused entry. */ - xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)d), + xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr), (uint)((char *)&dup->length + sizeof(dup->length) - - 1 - (char *)d)); + 1 - (char *)hdr)); /* * Log the end (tag) of the unused entry. */ xfs_da_log_buf(tp, bp, - (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d), - (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d + + (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr), + (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr + sizeof(xfs_dir2_data_off_t) - 1)); } @@ -517,7 +529,7 @@ xfs_dir2_data_make_free( int *needlogp, /* out: log header */ int *needscanp) /* out: regen bestfree */ { - xfs_dir2_data_t *d; /* data block pointer */ + xfs_dir2_data_hdr_t *hdr; /* data block pointer */ xfs_dir2_data_free_t *dfp; /* bestfree pointer */ char *endptr; /* end of data area */ xfs_mount_t *mp; /* filesystem mount point */ @@ -527,28 +539,29 @@ xfs_dir2_data_make_free( xfs_dir2_data_unused_t *prevdup; /* unused entry before us */ mp = tp->t_mountp; - d = bp->data; + hdr = bp->data; + /* * Figure out where the end of the data area is. 
*/ - if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC) - endptr = (char *)d + mp->m_dirblksize; + if (be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC) + endptr = (char *)hdr + mp->m_dirblksize; else { xfs_dir2_block_tail_t *btp; /* block tail */ - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); - btp = xfs_dir2_block_tail_p(mp, &d->hdr); + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + btp = xfs_dir2_block_tail_p(mp, hdr); endptr = (char *)xfs_dir2_block_leaf_p(btp); } /* * If this isn't the start of the block, then back up to * the previous entry and see if it's free. */ - if (offset > sizeof(d->hdr)) { + if (offset > sizeof(*hdr)) { __be16 *tagp; /* tag just before us */ - tagp = (__be16 *)((char *)d + offset) - 1; - prevdup = (xfs_dir2_data_unused_t *)((char *)d + be16_to_cpu(*tagp)); + tagp = (__be16 *)((char *)hdr + offset) - 1; + prevdup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp)); if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG) prevdup = NULL; } else @@ -557,9 +570,9 @@ xfs_dir2_data_make_free( * If this isn't the end of the block, see if the entry after * us is free. */ - if ((char *)d + offset + len < endptr) { + if ((char *)hdr + offset + len < endptr) { postdup = - (xfs_dir2_data_unused_t *)((char *)d + offset + len); + (xfs_dir2_data_unused_t *)((char *)hdr + offset + len); if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG) postdup = NULL; } else @@ -576,21 +589,21 @@ xfs_dir2_data_make_free( /* * See if prevdup and/or postdup are in bestfree table. */ - dfp = xfs_dir2_data_freefind(d, prevdup); - dfp2 = xfs_dir2_data_freefind(d, postdup); + dfp = xfs_dir2_data_freefind(hdr, prevdup); + dfp2 = xfs_dir2_data_freefind(hdr, postdup); /* * We need a rescan unless there are exactly 2 free entries * namely our two. Then we know what's happening, otherwise * since the third bestfree is there, there might be more * entries. */ - needscan = (d->hdr.bestfree[2].length != 0); + needscan = (hdr->bestfree[2].length != 0); /* * Fix up the new big freespace. */ be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length)); *xfs_dir2_data_unused_tag_p(prevdup) = - cpu_to_be16((char *)prevdup - (char *)d); + cpu_to_be16((char *)prevdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, prevdup); if (!needscan) { /* @@ -600,18 +613,18 @@ xfs_dir2_data_make_free( * Remove entry 1 first then entry 0. */ ASSERT(dfp && dfp2); - if (dfp == &d->hdr.bestfree[1]) { - dfp = &d->hdr.bestfree[0]; + if (dfp == &hdr->bestfree[1]) { + dfp = &hdr->bestfree[0]; ASSERT(dfp2 == dfp); - dfp2 = &d->hdr.bestfree[1]; + dfp2 = &hdr->bestfree[1]; } - xfs_dir2_data_freeremove(d, dfp2, needlogp); - xfs_dir2_data_freeremove(d, dfp, needlogp); + xfs_dir2_data_freeremove(hdr, dfp2, needlogp); + xfs_dir2_data_freeremove(hdr, dfp, needlogp); /* * Now insert the new entry. */ - dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp); - ASSERT(dfp == &d->hdr.bestfree[0]); + dfp = xfs_dir2_data_freeinsert(hdr, prevdup, needlogp); + ASSERT(dfp == &hdr->bestfree[0]); ASSERT(dfp->length == prevdup->length); ASSERT(!dfp[1].length); ASSERT(!dfp[2].length); @@ -621,10 +634,10 @@ xfs_dir2_data_make_free( * The entry before us is free, merge with it. 
*/ else if (prevdup) { - dfp = xfs_dir2_data_freefind(d, prevdup); + dfp = xfs_dir2_data_freefind(hdr, prevdup); be16_add_cpu(&prevdup->length, len); *xfs_dir2_data_unused_tag_p(prevdup) = - cpu_to_be16((char *)prevdup - (char *)d); + cpu_to_be16((char *)prevdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, prevdup); /* * If the previous entry was in the table, the new entry @@ -632,27 +645,27 @@ xfs_dir2_data_make_free( * the old one and add the new one. */ if (dfp) { - xfs_dir2_data_freeremove(d, dfp, needlogp); - (void)xfs_dir2_data_freeinsert(d, prevdup, needlogp); + xfs_dir2_data_freeremove(hdr, dfp, needlogp); + xfs_dir2_data_freeinsert(hdr, prevdup, needlogp); } /* * Otherwise we need a scan if the new entry is big enough. */ else { needscan = be16_to_cpu(prevdup->length) > - be16_to_cpu(d->hdr.bestfree[2].length); + be16_to_cpu(hdr->bestfree[2].length); } } /* * The following entry is free, merge with it. */ else if (postdup) { - dfp = xfs_dir2_data_freefind(d, postdup); - newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); + dfp = xfs_dir2_data_freefind(hdr, postdup); + newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset); newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length)); *xfs_dir2_data_unused_tag_p(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If the following entry was in the table, the new entry @@ -660,28 +673,28 @@ xfs_dir2_data_make_free( * the old one and add the new one. */ if (dfp) { - xfs_dir2_data_freeremove(d, dfp, needlogp); - (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); + xfs_dir2_data_freeremove(hdr, dfp, needlogp); + xfs_dir2_data_freeinsert(hdr, newdup, needlogp); } /* * Otherwise we need a scan if the new entry is big enough. */ else { needscan = be16_to_cpu(newdup->length) > - be16_to_cpu(d->hdr.bestfree[2].length); + be16_to_cpu(hdr->bestfree[2].length); } } /* * Neither neighbor is free. Make a new entry. 
*/ else { - newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); + newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset); newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup->length = cpu_to_be16(len); *xfs_dir2_data_unused_tag_p(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); - (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); + xfs_dir2_data_freeinsert(hdr, newdup, needlogp); } *needscanp = needscan; } @@ -699,7 +712,7 @@ xfs_dir2_data_use_free( int *needlogp, /* out: need to log header */ int *needscanp) /* out: need regen bestfree */ { - xfs_dir2_data_t *d; /* data block */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_free_t *dfp; /* bestfree pointer */ int matchback; /* matches end of freespace */ int matchfront; /* matches start of freespace */ @@ -708,24 +721,24 @@ xfs_dir2_data_use_free( xfs_dir2_data_unused_t *newdup2; /* another new unused entry */ int oldlen; /* old unused entry's length */ - d = bp->data; - ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); + hdr = bp->data; + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || + be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); - ASSERT(offset >= (char *)dup - (char *)d); - ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d); - ASSERT((char *)dup - (char *)d == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup))); + ASSERT(offset >= (char *)dup - (char *)hdr); + ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)hdr); + ASSERT((char *)dup - (char *)hdr == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup))); /* * Look up the entry in the bestfree table. */ - dfp = xfs_dir2_data_freefind(d, dup); + dfp = xfs_dir2_data_freefind(hdr, dup); oldlen = be16_to_cpu(dup->length); - ASSERT(dfp || oldlen <= be16_to_cpu(d->hdr.bestfree[2].length)); + ASSERT(dfp || oldlen <= be16_to_cpu(hdr->bestfree[2].length)); /* * Check for alignment with front and back of the entry. */ - matchfront = (char *)dup - (char *)d == offset; - matchback = (char *)dup + oldlen - (char *)d == offset + len; + matchfront = (char *)dup - (char *)hdr == offset; + matchback = (char *)dup + oldlen - (char *)hdr == offset + len; ASSERT(*needscanp == 0); needscan = 0; /* @@ -734,9 +747,9 @@ xfs_dir2_data_use_free( */ if (matchfront && matchback) { if (dfp) { - needscan = (d->hdr.bestfree[2].offset != 0); + needscan = (hdr->bestfree[2].offset != 0); if (!needscan) - xfs_dir2_data_freeremove(d, dfp, needlogp); + xfs_dir2_data_freeremove(hdr, dfp, needlogp); } } /* @@ -744,27 +757,27 @@ xfs_dir2_data_use_free( * Make a new entry with the remaining freespace. */ else if (matchfront) { - newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); + newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len); newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup->length = cpu_to_be16(oldlen - len); *xfs_dir2_data_unused_tag_p(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. 
*/ if (dfp) { - xfs_dir2_data_freeremove(d, dfp, needlogp); - dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); + xfs_dir2_data_freeremove(hdr, dfp, needlogp); + dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp); ASSERT(dfp != NULL); ASSERT(dfp->length == newdup->length); - ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); + ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr); /* * If we got inserted at the last slot, * that means we don't know if there was a better * choice for the last slot, or not. Rescan. */ - needscan = dfp == &d->hdr.bestfree[2]; + needscan = dfp == &hdr->bestfree[2]; } } /* @@ -773,25 +786,25 @@ xfs_dir2_data_use_free( */ else if (matchback) { newdup = dup; - newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); + newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup); *xfs_dir2_data_unused_tag_p(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); /* * If it was in the table, remove it and add the new one. */ if (dfp) { - xfs_dir2_data_freeremove(d, dfp, needlogp); - dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp); + xfs_dir2_data_freeremove(hdr, dfp, needlogp); + dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp); ASSERT(dfp != NULL); ASSERT(dfp->length == newdup->length); - ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d); + ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr); /* * If we got inserted at the last slot, * that means we don't know if there was a better * choice for the last slot, or not. Rescan. */ - needscan = dfp == &d->hdr.bestfree[2]; + needscan = dfp == &hdr->bestfree[2]; } } /* @@ -800,15 +813,15 @@ xfs_dir2_data_use_free( */ else { newdup = dup; - newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); + newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup); *xfs_dir2_data_unused_tag_p(newdup) = - cpu_to_be16((char *)newdup - (char *)d); + cpu_to_be16((char *)newdup - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup); - newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); + newdup2 = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len); newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length)); *xfs_dir2_data_unused_tag_p(newdup2) = - cpu_to_be16((char *)newdup2 - (char *)d); + cpu_to_be16((char *)newdup2 - (char *)hdr); xfs_dir2_data_log_unused(tp, bp, newdup2); /* * If the old entry was in the table, we need to scan @@ -819,13 +832,12 @@ xfs_dir2_data_use_free( * the 2 new will work. 
*/ if (dfp) { - needscan = (d->hdr.bestfree[2].length != 0); + needscan = (hdr->bestfree[2].length != 0); if (!needscan) { - xfs_dir2_data_freeremove(d, dfp, needlogp); - (void)xfs_dir2_data_freeinsert(d, newdup, - needlogp); - (void)xfs_dir2_data_freeinsert(d, newdup2, - needlogp); + xfs_dir2_data_freeremove(hdr, dfp, needlogp); + xfs_dir2_data_freeinsert(hdr, newdup, needlogp); + xfs_dir2_data_freeinsert(hdr, newdup2, + needlogp); } } } diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h index efbc290c7fec..aa6bf91c4228 100644 --- a/fs/xfs/xfs_dir2_data.h +++ b/fs/xfs/xfs_dir2_data.h @@ -157,12 +157,10 @@ extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp); #else #define xfs_dir2_data_check(dp,bp) #endif -extern xfs_dir2_data_free_t *xfs_dir2_data_freefind(xfs_dir2_data_t *d, - xfs_dir2_data_unused_t *dup); -extern xfs_dir2_data_free_t *xfs_dir2_data_freeinsert(xfs_dir2_data_t *d, +extern xfs_dir2_data_free_t *xfs_dir2_data_freeinsert(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup, int *loghead); -extern void xfs_dir2_data_freescan(struct xfs_mount *mp, xfs_dir2_data_t *d, - int *loghead); +extern void xfs_dir2_data_freescan(struct xfs_mount *mp, + xfs_dir2_data_hdr_t *hdr, int *loghead); extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno, struct xfs_dabuf **bpp); extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp, diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 1b048dad360f..d7b4d4494f2d 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -132,7 +132,7 @@ xfs_dir2_block_to_leaf( */ hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); if (needscan) - xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)hdr, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); /* * Set up leaf tail and bests table. */ @@ -278,7 +278,7 @@ xfs_dir2_leaf_addname( { __be16 *bestsp; /* freespace table in leaf */ int compact; /* need to compact leaves */ - xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dabuf_t *dbp; /* data block buffer */ xfs_dir2_data_entry_t *dep; /* data block entry */ xfs_inode_t *dp; /* incore directory inode */ @@ -486,8 +486,8 @@ xfs_dir2_leaf_addname( */ else xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); - data = dbp->data; - bestsp[use_block] = data->hdr.bestfree[0].length; + hdr = dbp->data; + bestsp[use_block] = hdr->bestfree[0].length; grown = 1; } /* @@ -501,7 +501,7 @@ xfs_dir2_leaf_addname( xfs_da_brelse(tp, lbp); return error; } - data = dbp->data; + hdr = dbp->data; grown = 0; } xfs_dir2_data_check(dp, dbp); @@ -509,14 +509,14 @@ xfs_dir2_leaf_addname( * Point to the biggest freespace in our data block. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); + ((char *)hdr + be16_to_cpu(hdr->bestfree[0].offset)); ASSERT(be16_to_cpu(dup->length) >= length); needscan = needlog = 0; /* * Mark the initial part of our freespace in use for the new entry. */ xfs_dir2_data_use_free(tp, dbp, dup, - (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length, &needlog, &needscan); /* * Initialize our new entry (at last). 
@@ -526,12 +526,12 @@ xfs_dir2_leaf_addname( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = xfs_dir2_data_entry_tag_p(dep); - *tagp = cpu_to_be16((char *)dep - (char *)data); + *tagp = cpu_to_be16((char *)dep - (char *)hdr); /* * Need to scan fix up the bestfree table. */ if (needscan) - xfs_dir2_data_freescan(mp, data, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); /* * Need to log the data block's header. */ @@ -542,8 +542,8 @@ xfs_dir2_leaf_addname( * If the bests table needs to be changed, do it. * Log the change unless we've already done that. */ - if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(data->hdr.bestfree[0].length)) { - bestsp[use_block] = data->hdr.bestfree[0].length; + if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(hdr->bestfree[0].length)) { + bestsp[use_block] = hdr->bestfree[0].length; if (!grown) xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block); } @@ -786,6 +786,7 @@ xfs_dir2_leaf_getdents( xfs_dir2_db_t curdb; /* db for current block */ xfs_dir2_off_t curoff; /* current overall offset */ xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_unused_t *dup; /* unused entry */ int error = 0; /* error return value */ @@ -1044,6 +1045,7 @@ xfs_dir2_leaf_getdents( ASSERT(xfs_dir2_byte_to_db(mp, curoff) == curdb); data = bp->data; + hdr = &data->hdr; xfs_dir2_data_check(dp, bp); /* * Find our position in the block. @@ -1054,12 +1056,12 @@ xfs_dir2_leaf_getdents( * Skip past the header. */ if (byteoff == 0) - curoff += (uint)sizeof(data->hdr); + curoff += (uint)sizeof(*hdr); /* * Skip past entries until we reach our offset. */ else { - while ((char *)ptr - (char *)data < byteoff) { + while ((char *)ptr - (char *)hdr < byteoff) { dup = (xfs_dir2_data_unused_t *)ptr; if (be16_to_cpu(dup->freetag) @@ -1080,8 +1082,8 @@ xfs_dir2_leaf_getdents( curoff = xfs_dir2_db_off_to_byte(mp, xfs_dir2_byte_to_db(mp, curoff), - (char *)ptr - (char *)data); - if (ptr >= (char *)data + mp->m_dirblksize) { + (char *)ptr - (char *)hdr); + if (ptr >= (char *)hdr + mp->m_dirblksize) { continue; } } @@ -1462,7 +1464,7 @@ xfs_dir2_leaf_removename( xfs_da_args_t *args) /* operation arguments */ { __be16 *bestsp; /* leaf block best freespace */ - xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_db_t db; /* data block number */ xfs_dabuf_t *dbp; /* data block buffer */ xfs_dir2_data_entry_t *dep; /* data entry structure */ @@ -1492,7 +1494,7 @@ xfs_dir2_leaf_removename( tp = args->trans; mp = dp->i_mount; leaf = lbp->data; - data = dbp->data; + hdr = dbp->data; xfs_dir2_data_check(dp, dbp); /* * Point to the leaf entry, use that to point to the data entry. @@ -1500,9 +1502,9 @@ xfs_dir2_leaf_removename( lep = &leaf->ents[index]; db = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); dep = (xfs_dir2_data_entry_t *) - ((char *)data + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); + ((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); needscan = needlog = 0; - oldbest = be16_to_cpu(data->hdr.bestfree[0].length); + oldbest = be16_to_cpu(hdr->bestfree[0].length); ltp = xfs_dir2_leaf_tail_p(mp, leaf); bestsp = xfs_dir2_leaf_bests_p(ltp); ASSERT(be16_to_cpu(bestsp[db]) == oldbest); @@ -1510,7 +1512,7 @@ xfs_dir2_leaf_removename( * Mark the former data entry unused. 
*/ xfs_dir2_data_make_free(tp, dbp, - (xfs_dir2_data_aoff_t)((char *)dep - (char *)data), + (xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr), xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan); /* * We just mark the leaf entry stale by putting a null in it. @@ -1524,23 +1526,23 @@ xfs_dir2_leaf_removename( * log the data block header if necessary. */ if (needscan) - xfs_dir2_data_freescan(mp, data, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, dbp); /* * If the longest freespace in the data block has changed, * put the new value in the bests table and log that. */ - if (be16_to_cpu(data->hdr.bestfree[0].length) != oldbest) { - bestsp[db] = data->hdr.bestfree[0].length; + if (be16_to_cpu(hdr->bestfree[0].length) != oldbest) { + bestsp[db] = hdr->bestfree[0].length; xfs_dir2_leaf_log_bests(tp, lbp, db, db); } xfs_dir2_data_check(dp, dbp); /* * If the data block is now empty then get rid of the data block. */ - if (be16_to_cpu(data->hdr.bestfree[0].length) == - mp->m_dirblksize - (uint)sizeof(data->hdr)) { + if (be16_to_cpu(hdr->bestfree[0].length) == + mp->m_dirblksize - (uint)sizeof(*hdr)) { ASSERT(db != mp->m_dirdatablk); if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { /* @@ -1711,9 +1713,6 @@ xfs_dir2_leaf_trim_data( xfs_dir2_db_t db) /* data block number */ { __be16 *bestsp; /* leaf bests table */ -#ifdef DEBUG - xfs_dir2_data_t *data; /* data block structure */ -#endif xfs_dabuf_t *dbp; /* data block buffer */ xfs_inode_t *dp; /* incore directory inode */ int error; /* error return value */ @@ -1732,20 +1731,21 @@ xfs_dir2_leaf_trim_data( XFS_DATA_FORK))) { return error; } -#ifdef DEBUG - data = dbp->data; - ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); -#endif - /* this seems to be an error - * data is only valid if DEBUG is defined? - * RMC 09/08/1999 - */ leaf = lbp->data; ltp = xfs_dir2_leaf_tail_p(mp, leaf); - ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) == - mp->m_dirblksize - (uint)sizeof(data->hdr)); + +#ifdef DEBUG +{ + struct xfs_dir2_data_hdr *hdr = dbp->data; + + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(be16_to_cpu(hdr->bestfree[0].length) == + mp->m_dirblksize - (uint)sizeof(*hdr)); ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); +} +#endif + /* * Get rid of the data block. */ diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 02da7b7a005a..8d4b54c1bedb 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -842,7 +842,7 @@ xfs_dir2_leafn_remove( xfs_da_state_blk_t *dblk, /* data block */ int *rval) /* resulting block needs join */ { - xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_db_t db; /* data block number */ xfs_dabuf_t *dbp; /* data block buffer */ xfs_dir2_data_entry_t *dep; /* data block entry */ @@ -887,9 +887,9 @@ xfs_dir2_leafn_remove( * in the data block in case it changes. */ dbp = dblk->bp; - data = dbp->data; - dep = (xfs_dir2_data_entry_t *)((char *)data + off); - longest = be16_to_cpu(data->hdr.bestfree[0].length); + hdr = dbp->data; + dep = (xfs_dir2_data_entry_t *)((char *)hdr + off); + longest = be16_to_cpu(hdr->bestfree[0].length); needlog = needscan = 0; xfs_dir2_data_make_free(tp, dbp, off, xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan); @@ -898,7 +898,7 @@ xfs_dir2_leafn_remove( * Log the data block header if needed. 
*/ if (needscan) - xfs_dir2_data_freescan(mp, data, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); if (needlog) xfs_dir2_data_log_header(tp, dbp); xfs_dir2_data_check(dp, dbp); @@ -906,7 +906,7 @@ xfs_dir2_leafn_remove( * If the longest data block freespace changes, need to update * the corresponding freeblock entry. */ - if (longest < be16_to_cpu(data->hdr.bestfree[0].length)) { + if (longest < be16_to_cpu(hdr->bestfree[0].length)) { int error; /* error return value */ xfs_dabuf_t *fbp; /* freeblock buffer */ xfs_dir2_db_t fdb; /* freeblock block number */ @@ -932,19 +932,19 @@ xfs_dir2_leafn_remove( * Calculate which entry we need to fix. */ findex = xfs_dir2_db_to_fdindex(mp, db); - longest = be16_to_cpu(data->hdr.bestfree[0].length); + longest = be16_to_cpu(hdr->bestfree[0].length); /* * If the data block is now empty we can get rid of it * (usually). */ - if (longest == mp->m_dirblksize - (uint)sizeof(data->hdr)) { + if (longest == mp->m_dirblksize - (uint)sizeof(*hdr)) { /* * Try to punch out the data block. */ error = xfs_dir2_shrink_inode(args, db, dbp); if (error == 0) { dblk->bp = NULL; - data = NULL; + hdr = NULL; } /* * We can get ENOSPC if there's no space reservation. @@ -960,7 +960,7 @@ xfs_dir2_leafn_remove( * If we got rid of the data block, we can eliminate that entry * in the free block. */ - if (data == NULL) { + if (hdr == NULL) { /* * One less used entry in the free table. */ @@ -1356,7 +1356,7 @@ xfs_dir2_node_addname_int( xfs_da_args_t *args, /* operation arguments */ xfs_da_state_blk_t *fblk) /* optional freespace block */ { - xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_db_t dbno; /* data block number */ xfs_dabuf_t *dbp; /* data block buffer */ xfs_dir2_data_entry_t *dep; /* data entry pointer */ @@ -1641,8 +1641,8 @@ xfs_dir2_node_addname_int( * We haven't allocated the data entry yet so this will * change again. */ - data = dbp->data; - free->bests[findex] = data->hdr.bestfree[0].length; + hdr = dbp->data; + free->bests[findex] = hdr->bestfree[0].length; logfree = 1; } /* @@ -1667,21 +1667,21 @@ xfs_dir2_node_addname_int( xfs_da_buf_done(fbp); return error; } - data = dbp->data; + hdr = dbp->data; logfree = 0; } - ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) >= length); + ASSERT(be16_to_cpu(hdr->bestfree[0].length) >= length); /* * Point to the existing unused space. */ dup = (xfs_dir2_data_unused_t *) - ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset)); + ((char *)hdr + be16_to_cpu(hdr->bestfree[0].offset)); needscan = needlog = 0; /* * Mark the first part of the unused space, inuse for us. */ xfs_dir2_data_use_free(tp, dbp, dup, - (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length, + (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length, &needlog, &needscan); /* * Fill in the new entry and log it. @@ -1691,13 +1691,13 @@ xfs_dir2_node_addname_int( dep->namelen = args->namelen; memcpy(dep->name, args->name, dep->namelen); tagp = xfs_dir2_data_entry_tag_p(dep); - *tagp = cpu_to_be16((char *)dep - (char *)data); + *tagp = cpu_to_be16((char *)dep - (char *)hdr); xfs_dir2_data_log_entry(tp, dbp, dep); /* * Rescan the block for bestfree if needed. */ if (needscan) - xfs_dir2_data_freescan(mp, data, &needlog); + xfs_dir2_data_freescan(mp, hdr, &needlog); /* * Log the data block header if needed. */ @@ -1706,8 +1706,8 @@ xfs_dir2_node_addname_int( /* * If the freespace entry is now wrong, update it. 
*/ - if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(data->hdr.bestfree[0].length)) { - free->bests[findex] = data->hdr.bestfree[0].length; + if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(hdr->bestfree[0].length)) { + free->bests[findex] = hdr->bestfree[0].length; logfree = 1; } /* @@ -1857,7 +1857,7 @@ xfs_dir2_node_replace( xfs_da_args_t *args) /* operation arguments */ { xfs_da_state_blk_t *blk; /* leaf block */ - xfs_dir2_data_t *data; /* data block structure */ + xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_entry_t *dep; /* data entry changed */ int error; /* error return value */ int i; /* btree level */ @@ -1901,10 +1901,10 @@ xfs_dir2_node_replace( /* * Point to the data entry. */ - data = state->extrablk.bp->data; - ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); + hdr = state->extrablk.bp->data; + ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); dep = (xfs_dir2_data_entry_t *) - ((char *)data + + ((char *)hdr + xfs_dir2_dataptr_to_off(state->mp, be32_to_cpu(lep->address))); ASSERT(inum != be64_to_cpu(dep->inumber)); /* -- cgit v1.2.3 From 0ba9cd84ef2af58645333a86f9c901684ab1fef6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:42 +0200 Subject: xfs: kill struct xfs_dir2_data Remove the confusing xfs_dir2_data structure. It is supposed to describe an XFS dir2 data btree block, but due to the variable sized nature of almost all elements in it it can't actuall do anything close to that job. In addition to accessing the fixed offset header structure it was only used to get a pointer to the first dir or unused entry after it, which can be trivially replaced by pointer arithmetics on the header pointer. For most users that is actually more natural anyway, as they don't use a typed pointer but rather a character pointer for further arithmetics. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_data.c | 15 +++++---------- fs/xfs/xfs_dir2_data.h | 33 ++++++++++++++++----------------- fs/xfs/xfs_dir2_leaf.c | 6 ++---- 3 files changed, 23 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 2bbc21696944..8e47ac3a3b9d 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -53,7 +53,6 @@ xfs_dir2_data_check( xfs_dir2_data_free_t *bf; /* bestfree table */ xfs_dir2_block_tail_t *btp=NULL; /* block tail */ int count; /* count of entries found */ - xfs_dir2_data_t *d; /* data block pointer */ xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_free_t *dfp; /* bestfree entry */ @@ -70,10 +69,9 @@ xfs_dir2_data_check( struct xfs_name name; mp = dp->i_mount; - d = bp->data; - hdr = &d->hdr; + hdr = bp->data; bf = hdr->bestfree; - p = (char *)d->u; + p = (char *)(hdr + 1); if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { btp = xfs_dir2_block_tail_p(mp, hdr); @@ -336,7 +334,6 @@ xfs_dir2_data_freescan( xfs_dir2_data_hdr_t *hdr, /* data block header */ int *loghead) /* out: log data header */ { - xfs_dir2_data_t *d = (xfs_dir2_data_t *)hdr; xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_data_entry_t *dep; /* active data entry */ xfs_dir2_data_unused_t *dup; /* unused data entry */ @@ -355,7 +352,7 @@ xfs_dir2_data_freescan( /* * Set up pointers. 
*/ - p = (char *)d->u; + p = (char *)(hdr + 1); if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { btp = xfs_dir2_block_tail_p(mp, hdr); endp = (char *)xfs_dir2_block_leaf_p(btp); @@ -398,7 +395,6 @@ xfs_dir2_data_init( xfs_dabuf_t **bpp) /* output block buffer */ { xfs_dabuf_t *bp; /* block buffer */ - xfs_dir2_data_t *d; /* pointer to block */ xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_inode_t *dp; /* incore directory inode */ xfs_dir2_data_unused_t *dup; /* unused entry pointer */ @@ -424,8 +420,7 @@ xfs_dir2_data_init( /* * Initialize the header. */ - d = bp->data; - hdr = &d->hdr; + hdr = bp->data; hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC); hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr)); for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) { @@ -436,7 +431,7 @@ xfs_dir2_data_init( /* * Set up an unused entry for the block's body. */ - dup = &d->u[0].unused; + dup = (xfs_dir2_data_unused_t *)(hdr + 1); dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); t = mp->m_dirblksize - (uint)sizeof(*hdr); diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h index aa6bf91c4228..ca02cc71e234 100644 --- a/fs/xfs/xfs_dir2_data.h +++ b/fs/xfs/xfs_dir2_data.h @@ -20,6 +20,22 @@ /* * Directory format 2, data block structures. + * + * A pure data block looks like the following drawing on disk: + * + * +-------------------------------------------------+ + * | xfs_dir2_data_hdr_t | + * +-------------------------------------------------+ + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | ... | + * +-------------------------------------------------+ + * | unused space | + * +-------------------------------------------------+ + * + * As all the entries are variable size structures the accessors in this + * file should be used to iterate over them. */ struct xfs_dabuf; @@ -103,23 +119,6 @@ typedef struct xfs_dir2_data_unused { __be16 tag; /* starting offset of us */ } xfs_dir2_data_unused_t; -typedef union { - xfs_dir2_data_entry_t entry; - xfs_dir2_data_unused_t unused; -} xfs_dir2_data_union_t; - -/* - * Generic data block structure, for xfs_db. - */ -typedef struct xfs_dir2_data { - xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_DATA_MAGIC */ - xfs_dir2_data_union_t u[1]; -} xfs_dir2_data_t; - -/* - * Macros. - */ - /* * Size of a data entry. */ diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index d7b4d4494f2d..067a6abdeabe 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -785,7 +785,6 @@ xfs_dir2_leaf_getdents( int byteoff; /* offset in current block */ xfs_dir2_db_t curdb; /* db for current block */ xfs_dir2_off_t curoff; /* current overall offset */ - xfs_dir2_data_t *data; /* data block structure */ xfs_dir2_data_hdr_t *hdr; /* data block header */ xfs_dir2_data_entry_t *dep; /* data entry */ xfs_dir2_data_unused_t *dup; /* unused entry */ @@ -1044,13 +1043,12 @@ xfs_dir2_leaf_getdents( else if (curoff > newoff) ASSERT(xfs_dir2_byte_to_db(mp, curoff) == curdb); - data = bp->data; - hdr = &data->hdr; + hdr = bp->data; xfs_dir2_data_check(dp, bp); /* * Find our position in the block. */ - ptr = (char *)&data->u; + ptr = (char *)(hdr + 1); byteoff = xfs_dir2_byte_to_off(mp, curoff); /* * Skip past the header. 
-- cgit v1.2.3 From 3ed8638f8867b4d0df1ec606231a087ff06c4a59 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:50 +0200 Subject: xfs: cleanup the definition of struct xfs_dir2_data_entry Remove the tag member which is at a variable offset after the actual name, and make name a real variable sized C99 array instead of the incorrect one-sized array which confuses (not only) gcc. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_data.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h index ca02cc71e234..c50512584ebf 100644 --- a/fs/xfs/xfs_dir2_data.h +++ b/fs/xfs/xfs_dir2_data.h @@ -98,14 +98,15 @@ typedef struct xfs_dir2_data_hdr { /* * Active entry in a data block. Aligned to 8 bytes. - * Tag appears as the last 2 bytes. + * + * After the variable length name field there is a 2 byte tag field, which + * can be accessed using xfs_dir2_data_entry_tag_p. */ typedef struct xfs_dir2_data_entry { __be64 inumber; /* inode number */ __u8 namelen; /* name length */ - __u8 name[1]; /* name bytes, no null */ - /* variable offset */ - __be16 tag; /* starting offset of us */ + __u8 name[]; /* name bytes, no null */ + /* __be16 tag; */ /* starting offset of us */ } xfs_dir2_data_entry_t; /* -- cgit v1.2.3 From 2282396d8157033503318fe4dee77ba82dc9d144 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:53 +0200 Subject: xfs: cleanup struct xfs_dir2_leaf Simplify the confusing xfs_dir2_leaf structure. It is supposed to describe an XFS dir2 leaf format btree block, but due to the variable sized nature of almost all elements in it it can't actuall do anything close to that job. Remove the members that are after the first variable sized array, given that they could only be used for sizeof expressions that can as well just use the underlying types directly, and make the ents array a real C99 variable sized array. Also factor out the xfs_dir2_leaf_size, to make the sizing of a leaf entry which already was convoluted somewhat readable after using the longer type names in the sizeof expressions. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_leaf.c | 34 +++++++++++++++++++++++++--------- fs/xfs/xfs_dir2_leaf.h | 37 +++++++++++++++++++++++++++++++------ 2 files changed, 56 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 067a6abdeabe..5d2651f1c759 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -367,9 +367,12 @@ xfs_dir2_leaf_addname( /* * How many bytes do we need in the leaf block? */ - needbytes = - (leaf->hdr.stale ? 0 : (uint)sizeof(leaf->ents[0])) + - (use_block != -1 ? 0 : (uint)sizeof(leaf->bests[0])); + needbytes = 0; + if (!leaf->hdr.stale) + needbytes += sizeof(xfs_dir2_leaf_entry_t); + if (use_block == -1) + needbytes += sizeof(xfs_dir2_data_off_t); + /* * Now kill use_block if it refers to a missing block, so we * can use it as an indication of allocation needed. 
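[Editor's aside, not part of the patch] For the data entry cleanup above: the tag that used to be (misleadingly) declared as a struct member is located exactly the way the existing accessors already located it. The entry size is rounded up to 8 bytes and the tag occupies the last two bytes of that rounded size. A sketch mirroring xfs_dir2_data_entsize and xfs_dir2_data_entry_tag_p (the authoritative definitions live in xfs_dir2_data.h):

static inline int
data_entsize(int namelen)
{
        /* header fields + name + 2-byte tag, rounded to XFS_DIR2_DATA_ALIGN (8) */
        return roundup(offsetof(struct xfs_dir2_data_entry, name[0]) +
                       namelen + (uint)sizeof(xfs_dir2_data_off_t),
                       XFS_DIR2_DATA_ALIGN);
}

static inline __be16 *
data_entry_tag_p(struct xfs_dir2_data_entry *dep)
{
        /* the tag is the last __be16 of the padded entry */
        return (__be16 *)((char *)dep +
                data_entsize(dep->namelen) - sizeof(__be16));
}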
@@ -1763,6 +1766,20 @@ xfs_dir2_leaf_trim_data( return 0; } +static inline size_t +xfs_dir2_leaf_size( + struct xfs_dir2_leaf_hdr *hdr, + int counts) +{ + int entries; + + entries = be16_to_cpu(hdr->count) - be16_to_cpu(hdr->stale); + return sizeof(xfs_dir2_leaf_hdr_t) + + entries * sizeof(xfs_dir2_leaf_entry_t) + + counts * sizeof(xfs_dir2_data_off_t) + + sizeof(xfs_dir2_leaf_tail_t); +} + /* * Convert node form directory to leaf form directory. * The root of the node form dir needs to already be a LEAFN block. @@ -1844,18 +1861,17 @@ xfs_dir2_node_to_leaf( free = fbp->data; ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); ASSERT(!free->hdr.firstdb); + /* * Now see if the leafn and free data will fit in a leaf1. * If not, release the buffer and give up. */ - if ((uint)sizeof(leaf->hdr) + - (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)) * (uint)sizeof(leaf->ents[0]) + - be32_to_cpu(free->hdr.nvalid) * (uint)sizeof(leaf->bests[0]) + - (uint)sizeof(leaf->tail) > - mp->m_dirblksize) { + if (xfs_dir2_leaf_size(&leaf->hdr, be32_to_cpu(free->hdr.nvalid)) > + mp->m_dirblksize) { xfs_da_brelse(tp, fbp); return 0; } + /* * If the leaf has any stale entries in it, compress them out. * The compact routine will log the header. @@ -1874,7 +1890,7 @@ xfs_dir2_node_to_leaf( * Set up the leaf bests table. */ memcpy(xfs_dir2_leaf_bests_p(ltp), free->bests, - be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0])); + be32_to_cpu(ltp->bestcount) * sizeof(xfs_dir2_data_off_t)); xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); xfs_dir2_leaf_log_tail(tp, lbp); xfs_dir2_leaf_check(dp, lbp); diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h index bc620622c342..140281adc7e5 100644 --- a/fs/xfs/xfs_dir2_leaf.h +++ b/fs/xfs/xfs_dir2_leaf.h @@ -18,6 +18,36 @@ #ifndef __XFS_DIR2_LEAF_H__ #define __XFS_DIR2_LEAF_H__ +/* + * Directory format 2, leaf block structures. + * + * A pure data block looks like the following drawing on disk: + * + * +---------------------------+ + * | xfs_dir2_leaf_hdr_t | + * +---------------------------+ + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * | ... | + * +---------------------------+ + * | xfs_dir2_data_off_t | + * | xfs_dir2_data_off_t | + * | xfs_dir2_data_off_t | + * | ... | + * +---------------------------+ + * | xfs_dir2_leaf_tail_t | + * +---------------------------+ + * + * The bests (xfs_dir2_data_off_t members) and tail are at the end of the + * block for single-leaf only (magic = XFS_DIR2_LEAF1_MAGIC not + * XFS_DIR2_LEAFN_MAGIC). + * + * As all the entries are variable size structures the accessors in this + * file should be used to iterate over them. + */ + struct uio; struct xfs_dabuf; struct xfs_da_args; @@ -67,15 +97,10 @@ typedef struct xfs_dir2_leaf_tail { /* * Leaf block. - * bests and tail are at the end of the block for single-leaf only - * (magic = XFS_DIR2_LEAF1_MAGIC not XFS_DIR2_LEAFN_MAGIC). */ typedef struct xfs_dir2_leaf { xfs_dir2_leaf_hdr_t hdr; /* leaf header */ - xfs_dir2_leaf_entry_t ents[1]; /* entries */ - /* ... 
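[Editor's aside, not part of the patch] The new xfs_dir2_leaf_size() helper makes the node-to-leaf fit test readable. A rough worked example with illustrative numbers, assuming a 4096-byte directory block and the on-disk sizes of this format (16-byte leaf header, 8-byte leaf entries, 2-byte bests, 4-byte tail):

/*
 * Say the leafn block holds 500 entries of which 20 are stale, and the
 * freespace block has 8 valid bests entries (nvalid == 8).
 */
size_t need = sizeof(xfs_dir2_leaf_hdr_t)                /*   16 bytes */
            + (500 - 20) * sizeof(xfs_dir2_leaf_entry_t) /* 3840 bytes */
            + 8 * sizeof(xfs_dir2_data_off_t)            /*   16 bytes */
            + sizeof(xfs_dir2_leaf_tail_t);              /*    4 bytes */

/*
 * 3876 <= 4096, so on a 4k dirblksize filesystem the conversion proceeds;
 * had it not fit, xfs_dir2_node_to_leaf() releases the buffer and returns 0,
 * leaving the directory in node form.
 */
bool fits = need <= mp->m_dirblksize;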
*/ - xfs_dir2_data_off_t bests[1]; /* best free counts */ - xfs_dir2_leaf_tail_t tail; /* leaf tail */ + xfs_dir2_leaf_entry_t ents[]; /* entries */ } xfs_dir2_leaf_t; /* -- cgit v1.2.3 From 218106a1104c598011e5df9d9aac7e0416be03e6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:35:58 +0200 Subject: xfs: use generic get_unaligned_beXX helpers Switch the shortform directory code over to use the generic get_unaligned_beXX helpers instead of reinventing them. As a result kill off xfs_arch.h and move the setting of XFS_NATIVE_HOST into xfs_linux.h. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_linux.h | 7 ++- fs/xfs/xfs_arch.h | 136 ------------------------------------------- fs/xfs/xfs_dir2_sf.c | 13 +++-- fs/xfs/xfs_dir2_sf.h | 4 +- 4 files changed, 16 insertions(+), 144 deletions(-) delete mode 100644 fs/xfs/xfs_arch.h (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 8633521b3b2e..d42f814e4d35 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -33,7 +33,6 @@ #endif #include -#include #include #include @@ -88,6 +87,12 @@ #include #include +#ifdef __BIG_ENDIAN +#define XFS_NATIVE_HOST 1 +#else +#undef XFS_NATIVE_HOST +#endif + /* * Feature macros (disable/enable) */ diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h deleted file mode 100644 index 0902249354a0..000000000000 --- a/fs/xfs/xfs_arch.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_ARCH_H__ -#define __XFS_ARCH_H__ - -#ifndef XFS_BIG_INUMS -# error XFS_BIG_INUMS must be defined true or false -#endif - -#ifdef __KERNEL__ - -#include - -#ifdef __BIG_ENDIAN -#define XFS_NATIVE_HOST 1 -#else -#undef XFS_NATIVE_HOST -#endif - -#else /* __KERNEL__ */ - -#if __BYTE_ORDER == __BIG_ENDIAN -#define XFS_NATIVE_HOST 1 -#else -#undef XFS_NATIVE_HOST -#endif - -#ifdef XFS_NATIVE_HOST -#define cpu_to_be16(val) ((__force __be16)(__u16)(val)) -#define cpu_to_be32(val) ((__force __be32)(__u32)(val)) -#define cpu_to_be64(val) ((__force __be64)(__u64)(val)) -#define be16_to_cpu(val) ((__force __u16)(__be16)(val)) -#define be32_to_cpu(val) ((__force __u32)(__be32)(val)) -#define be64_to_cpu(val) ((__force __u64)(__be64)(val)) -#else -#define cpu_to_be16(val) ((__force __be16)__swab16((__u16)(val))) -#define cpu_to_be32(val) ((__force __be32)__swab32((__u32)(val))) -#define cpu_to_be64(val) ((__force __be64)__swab64((__u64)(val))) -#define be16_to_cpu(val) (__swab16((__force __u16)(__be16)(val))) -#define be32_to_cpu(val) (__swab32((__force __u32)(__be32)(val))) -#define be64_to_cpu(val) (__swab64((__force __u64)(__be64)(val))) -#endif - -static inline void be16_add_cpu(__be16 *a, __s16 b) -{ - *a = cpu_to_be16(be16_to_cpu(*a) + b); -} - -static inline void be32_add_cpu(__be32 *a, __s32 b) -{ - *a = cpu_to_be32(be32_to_cpu(*a) + b); -} - -static inline void be64_add_cpu(__be64 *a, __s64 b) -{ - *a = cpu_to_be64(be64_to_cpu(*a) + b); -} - -#endif /* __KERNEL__ */ - -/* - * get and set integers from potentially unaligned locations - */ - -#define INT_GET_UNALIGNED_16_BE(pointer) \ - ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1]))) -#define INT_SET_UNALIGNED_16_BE(pointer,value) \ - { \ - ((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \ - ((__u8*)(pointer))[1] = (((value) ) & 0xff); \ - } - -/* - * In directories inode numbers are stored as unaligned arrays of unsigned - * 8bit integers on disk. 
- * - * For v1 directories or v2 directories that contain inode numbers that - * do not fit into 32bit the array has eight members, but the first member - * is always zero: - * - * |unused|48-55|40-47|32-39|24-31|16-23| 8-15| 0- 7| - * - * For v2 directories that only contain entries with inode numbers that fit - * into 32bits a four-member array is used: - * - * |24-31|16-23| 8-15| 0- 7| - */ - -#define XFS_GET_DIR_INO4(di) \ - (((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3])) - -#define XFS_PUT_DIR_INO4(from, di) \ -do { \ - (di).i[0] = (((from) & 0xff000000ULL) >> 24); \ - (di).i[1] = (((from) & 0x00ff0000ULL) >> 16); \ - (di).i[2] = (((from) & 0x0000ff00ULL) >> 8); \ - (di).i[3] = ((from) & 0x000000ffULL); \ -} while (0) - -#define XFS_DI_HI(di) \ - (((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3])) -#define XFS_DI_LO(di) \ - (((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7])) - -#define XFS_GET_DIR_INO8(di) \ - (((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \ - ((xfs_ino_t)XFS_DI_HI(di) << 32)) - -#define XFS_PUT_DIR_INO8(from, di) \ -do { \ - (di).i[0] = 0; \ - (di).i[1] = (((from) & 0x00ff000000000000ULL) >> 48); \ - (di).i[2] = (((from) & 0x0000ff0000000000ULL) >> 40); \ - (di).i[3] = (((from) & 0x000000ff00000000ULL) >> 32); \ - (di).i[4] = (((from) & 0x00000000ff000000ULL) >> 24); \ - (di).i[5] = (((from) & 0x0000000000ff0000ULL) >> 16); \ - (di).i[6] = (((from) & 0x000000000000ff00ULL) >> 8); \ - (di).i[7] = ((from) & 0x00000000000000ffULL); \ -} while (0) - -#endif /* __XFS_ARCH_H__ */ diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index b47a99c8f244..9d3dff2ae4c5 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -59,11 +59,12 @@ static void xfs_dir2_sf_toino4(xfs_da_args_t *args); static void xfs_dir2_sf_toino8(xfs_da_args_t *args); #endif /* XFS_BIG_INUMS */ - /* * Inode numbers in short-form directories can come in two versions, * either 4 bytes or 8 bytes wide. These helpers deal with the * two forms transparently by looking at the headers i8count field. + * + * For 64-bit inode number the most significant byte must be zero. 
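[Editor's aside, not part of the patch] A note on why the generic unaligned accessors are the right tool here: shortform directory entries are packed directly after variable-length names, so an inode number field can start at any byte offset within the inode fork, and a plain 64-bit load could trap on strict-alignment CPUs. The sketch below condenses what the converted xfs_dir2_sf_get_ino() does; the 0x00ff... mask drops the always-zero leading byte of the 8-byte form:

if (hdr->i8count)       /* directory contains at least one large inumber */
        ino = get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
else                    /* all inumbers fit in 32 bits */
        ino = (xfs_ino_t)get_unaligned_be32(&from->i4.i);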
*/ static xfs_ino_t xfs_dir2_sf_get_ino( @@ -71,9 +72,9 @@ xfs_dir2_sf_get_ino( xfs_dir2_inou_t *from) { if (hdr->i8count) - return XFS_GET_DIR_INO8(from->i8); + return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL; else - return XFS_GET_DIR_INO4(from->i4); + return get_unaligned_be32(&from->i4.i); } static void @@ -82,10 +83,12 @@ xfs_dir2_sf_put_ino( xfs_dir2_inou_t *to, xfs_ino_t ino) { + ASSERT((ino & 0xff00000000000000ULL) == 0); + if (hdr->i8count) - XFS_PUT_DIR_INO8(ino, to->i8); + put_unaligned_be64(ino, &to->i8.i); else - XFS_PUT_DIR_INO4(ino, to->i4); + put_unaligned_be32(ino, &to->i4.i); } xfs_ino_t diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h index 65c5763905c9..26f062912395 100644 --- a/fs/xfs/xfs_dir2_sf.h +++ b/fs/xfs/xfs_dir2_sf.h @@ -95,13 +95,13 @@ static inline int xfs_dir2_sf_hdr_size(int i8count) static inline xfs_dir2_data_aoff_t xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep) { - return INT_GET_UNALIGNED_16_BE(&(sfep)->offset.i); + return get_unaligned_be16(&sfep->offset.i); } static inline void xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off) { - INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off); + put_unaligned_be16(off, &sfep->offset.i); } static inline int -- cgit v1.2.3 From 69ef921b55cc3788d1d2a27b33b27d04acd0090a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:36:05 +0200 Subject: xfs: byteswap constants instead of variables Micro-optimize various comparisms by always byteswapping the constant instead of the variable, which allows to do the swap at compile instead of runtime. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_alloc.c | 2 +- fs/xfs/xfs_attr.c | 19 ++++++++------- fs/xfs/xfs_attr_leaf.c | 60 ++++++++++++++++++++++++------------------------ fs/xfs/xfs_bmap.c | 2 +- fs/xfs/xfs_bmap_btree.c | 8 +++---- fs/xfs/xfs_btree.c | 28 +++++++++++----------- fs/xfs/xfs_da_btree.c | 58 +++++++++++++++++++++++----------------------- fs/xfs/xfs_dir2_block.c | 18 +++++++++------ fs/xfs/xfs_dir2_data.c | 47 ++++++++++++++++++------------------- fs/xfs/xfs_dir2_leaf.c | 59 +++++++++++++++++++++++++---------------------- fs/xfs/xfs_dir2_node.c | 53 ++++++++++++++++++++++-------------------- fs/xfs/xfs_ialloc.c | 14 +++++------ fs/xfs/xfs_inode.c | 13 +++++------ fs/xfs/xfs_log.c | 4 ++-- fs/xfs/xfs_log_recover.c | 20 ++++++++-------- 15 files changed, 208 insertions(+), 197 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 95862bbff56b..3ad3ab9f56cb 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -2127,7 +2127,7 @@ xfs_read_agf( * Validate the magic number of the agf block. 
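[Editor's aside, not part of the patch] Every hunk in the byteswap patch applies the same mechanical rewrite, shown side by side below for the xfs_read_agf() check that follows. The comparison result is identical, but swapping the constant lets the compiler fold cpu_to_be32(XFS_AGF_MAGIC) into an immediate on little-endian hosts, so the on-disk field is compared directly with no runtime byte swap (on big-endian hosts both forms were already no-ops):

/* before: byte-swap the on-disk variable on every call */
agf_ok = be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC;

/* after: byte-swap the constant; the swab happens at compile time */
agf_ok = agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC);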
*/ agf_ok = - be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC && + agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) && XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) && be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) && be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) && diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 795d5aac7042..cbae424fe1ba 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -1203,7 +1203,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context) return XFS_ERROR(error); ASSERT(bp != NULL); leaf = bp->data; - if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) { + if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW, context->dp->i_mount, leaf); xfs_da_brelse(NULL, bp); @@ -1610,9 +1610,8 @@ xfs_attr_node_removename(xfs_da_args_t *args) XFS_ATTR_FORK); if (error) goto out; - ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *) - bp->data)->hdr.info.magic) - == XFS_ATTR_LEAF_MAGIC); + ASSERT((((xfs_attr_leafblock_t *)bp->data)->hdr.info.magic) == + cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { xfs_bmap_init(args->flist, args->firstblock); @@ -1877,11 +1876,11 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) return(XFS_ERROR(EFSCORRUPTED)); } node = bp->data; - if (be16_to_cpu(node->hdr.info.magic) - == XFS_ATTR_LEAF_MAGIC) + if (node->hdr.info.magic == + cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) break; - if (unlikely(be16_to_cpu(node->hdr.info.magic) - != XFS_DA_NODE_MAGIC)) { + if (unlikely(node->hdr.info.magic != + cpu_to_be16(XFS_DA_NODE_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)", XFS_ERRLEVEL_LOW, context->dp->i_mount, @@ -1916,8 +1915,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context) */ for (;;) { leaf = bp->data; - if (unlikely(be16_to_cpu(leaf->hdr.info.magic) - != XFS_ATTR_LEAF_MAGIC)) { + if (unlikely(leaf->hdr.info.magic != + cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)", XFS_ERRLEVEL_LOW, context->dp->i_mount, leaf); diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 71e90dc2aeb1..8fad9602542b 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -731,7 +731,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) int bytes, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); entry = &leaf->entries[0]; bytes = sizeof(struct xfs_attr_sf_hdr); @@ -777,7 +777,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) ASSERT(bp != NULL); memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount)); leaf = (xfs_attr_leafblock_t *)tmpbuffer; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); memset(bp->data, 0, XFS_LBSIZE(dp->i_mount)); /* @@ -872,7 +872,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args) goto out; node = bp1->data; leaf = bp2->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); /* both on-disk, don't endian-flip twice */ node->btree[0].hashval = leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval; @@ -997,7 +997,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args) int tablesize, entsize, sum, tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + 
ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(leaf->hdr.count))); hdr = &leaf->hdr; @@ -1070,7 +1070,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex) int tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); hdr = &leaf->hdr; ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE)); ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count))); @@ -1256,8 +1256,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC); leaf1 = blk1->bp->data; leaf2 = blk2->bp->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); args = state->args; /* @@ -1533,7 +1533,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); leaf = (xfs_attr_leafblock_t *)info; count = be16_to_cpu(leaf->hdr.count); bytes = sizeof(xfs_attr_leaf_hdr_t) + @@ -1596,7 +1596,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action) bytes = state->blocksize - (state->blocksize>>2); bytes -= be16_to_cpu(leaf->hdr.usedbytes); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); count += be16_to_cpu(leaf->hdr.count); bytes -= be16_to_cpu(leaf->hdr.usedbytes); bytes -= count * sizeof(xfs_attr_leaf_entry_t); @@ -1650,7 +1650,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_mount_t *mp; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); hdr = &leaf->hdr; mp = args->trans->t_mountp; ASSERT((be16_to_cpu(hdr->count) > 0) @@ -1813,8 +1813,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); drop_hdr = &drop_leaf->hdr; save_hdr = &save_leaf->hdr; @@ -1915,7 +1915,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_dahash_t hashval; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); @@ -2019,7 +2019,7 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args) xfs_attr_leaf_name_remote_t *name_rmt; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(be16_to_cpu(leaf->hdr.count) < (XFS_LBSIZE(args->dp->i_mount)/8)); ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); @@ -2087,8 +2087,8 @@ 
xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s, /* * Set up environment. */ - ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); - ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf_s->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); + ASSERT(leaf_d->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); hdr_s = &leaf_s->hdr; hdr_d = &leaf_d->hdr; ASSERT((be16_to_cpu(hdr_s->count) > 0) && @@ -2222,8 +2222,8 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp) leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) && - (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC)); + ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) && + (leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC))); if ((be16_to_cpu(leaf1->hdr.count) > 0) && (be16_to_cpu(leaf2->hdr.count) > 0) && ((be32_to_cpu(leaf2->entries[0].hashval) < @@ -2246,7 +2246,7 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count) xfs_attr_leafblock_t *leaf; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); if (count) *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) @@ -2265,7 +2265,7 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index) xfs_attr_leaf_name_remote_t *name_rmt; int size; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); if (leaf->entries[index].flags & XFS_ATTR_LOCAL) { name_loc = xfs_attr_leaf_name_local(leaf, index); size = xfs_attr_leaf_entsize_local(name_loc->namelen, @@ -2451,7 +2451,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; @@ -2515,7 +2515,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args) ASSERT(bp != NULL); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index < be16_to_cpu(leaf->hdr.count)); ASSERT(args->index >= 0); entry = &leaf->entries[ args->index ]; @@ -2585,13 +2585,13 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args) } leaf1 = bp1->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index < be16_to_cpu(leaf1->hdr.count)); ASSERT(args->index >= 0); entry1 = &leaf1->entries[ args->index ]; leaf2 = bp2->data; - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count)); ASSERT(args->index2 >= 0); entry2 = &leaf2->entries[ args->index2 ]; @@ -2689,9 +2689,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp) * This is a depth-first traversal! 
*/ info = bp->data; - if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { + if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { error = xfs_attr_node_inactive(trans, dp, bp, 1); - } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { + } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) { error = xfs_attr_leaf_inactive(trans, dp, bp); } else { error = XFS_ERROR(EIO); @@ -2739,7 +2739,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, } node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); parent_blkno = xfs_da_blkno(bp); /* save for re-read later */ count = be16_to_cpu(node->hdr.count); if (!count) { @@ -2773,10 +2773,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp, * Invalidate the subtree, however we have to. */ info = child_bp->data; - if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) { + if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { error = xfs_attr_node_inactive(trans, dp, child_bp, level+1); - } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) { + } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) { error = xfs_attr_leaf_inactive(trans, dp, child_bp); } else { @@ -2836,7 +2836,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp) int error, count, size, tmp, i; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); /* * Count the number of "remote" value extents. diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 7b46b78ba3f0..2ce6aa644e7e 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4083,7 +4083,7 @@ xfs_bmap_sanity_check( { struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); - if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC || + if (block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC) || be16_to_cpu(block->bb_level) != level || be16_to_cpu(block->bb_numrecs) == 0 || be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0]) diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 87d3c10b6954..50a28442a544 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -425,10 +425,10 @@ xfs_bmbt_to_bmdr( xfs_bmbt_key_t *tkp; __be64 *tpp; - ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC); - ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO); - ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO); - ASSERT(be16_to_cpu(rblock->bb_level) > 0); + ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC)); + ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)); + ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)); + ASSERT(rblock->bb_level != 0); dblock->bb_level = rblock->bb_level; dblock->bb_numrecs = rblock->bb_numrecs; dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0); diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 2f9e97c128a0..43125af42629 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -66,11 +66,11 @@ xfs_btree_check_lblock( be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && block->bb_u.l.bb_leftsib && - (be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO || + (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) || XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_leftsib))) && block->bb_u.l.bb_rightsib && - (be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO || + (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) || 
XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib))); if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp, @@ -105,10 +105,10 @@ xfs_btree_check_sblock( be16_to_cpu(block->bb_level) == level && be16_to_cpu(block->bb_numrecs) <= cur->bc_ops->get_maxrecs(cur, level) && - (be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK || + (block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) || be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) && block->bb_u.s.bb_leftsib && - (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK || + (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) || be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) && block->bb_u.s.bb_rightsib; if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp, @@ -511,9 +511,9 @@ xfs_btree_islastblock( block = xfs_btree_get_block(cur, level, &bp); xfs_btree_check_block(cur, block, level, bp); if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO; + return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO); else - return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK; + return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK); } /* @@ -777,14 +777,14 @@ xfs_btree_setbuf( b = XFS_BUF_TO_BLOCK(bp); if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO) + if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)) cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; - if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO) + if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)) cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; } else { - if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK) + if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK)) cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; - if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK) + if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK)) cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; } } @@ -795,9 +795,9 @@ xfs_btree_ptr_is_null( union xfs_btree_ptr *ptr) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) - return be64_to_cpu(ptr->l) == NULLDFSBNO; + return ptr->l == cpu_to_be64(NULLDFSBNO); else - return be32_to_cpu(ptr->s) == NULLAGBLOCK; + return ptr->s == cpu_to_be32(NULLAGBLOCK); } STATIC void @@ -923,12 +923,12 @@ xfs_btree_ptr_to_daddr( union xfs_btree_ptr *ptr) { if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO); + ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO)); return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); } else { ASSERT(cur->bc_private.a.agno != NULLAGNUMBER); - ASSERT(be32_to_cpu(ptr->s) != NULLAGBLOCK); + ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK)); return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno, be32_to_cpu(ptr->s)); diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index efb906f8a9c9..18425717cbe6 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -321,11 +321,11 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, ASSERT(bp != NULL); node = bp->data; oldroot = blk1->bp->data; - if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) { + if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) { size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] - (char *)oldroot); } else { - ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); leaf = (xfs_dir2_leaf_t *)oldroot; size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] - (char *)leaf); @@ -352,7 +352,7 @@ xfs_da_root_split(xfs_da_state_t *state, 
xfs_da_state_blk_t *blk1, node->hdr.count = cpu_to_be16(2); #ifdef DEBUG - if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) { + if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) { ASSERT(blk1->blkno >= mp->m_dirleafblk && blk1->blkno < mp->m_dirfreeblk); ASSERT(blk2->blkno >= mp->m_dirleafblk && @@ -384,7 +384,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, int useextra; node = oldblk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); /* * With V2 dirs the extra block is data or freespace. @@ -483,8 +483,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1, node1 = node2; node2 = tmpnode; } - ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC); - ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); + ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2; if (count == 0) return; @@ -578,7 +578,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk, int tmp; node = oldblk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count))); ASSERT(newblk->blkno != 0); if (state->args->whichfork == XFS_DATA_FORK) @@ -714,7 +714,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) ASSERT(args != NULL); ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC); oldroot = root_blk->bp->data; - ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); ASSERT(!oldroot->hdr.info.forw); ASSERT(!oldroot->hdr.info.back); @@ -737,10 +737,10 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk) ASSERT(bp != NULL); blkinfo = bp->data; if (be16_to_cpu(oldroot->hdr.level) == 1) { - ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIR2_LEAFN_MAGIC || - be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(blkinfo->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || + blkinfo->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); } else { - ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC); + ASSERT(blkinfo->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); } ASSERT(!blkinfo->forw); ASSERT(!blkinfo->back); @@ -776,7 +776,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) */ blk = &state->path.blk[ state->path.active-1 ]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); node = (xfs_da_intnode_t *)info; count = be16_to_cpu(node->hdr.count); if (count > (state->node_ents >> 1)) { @@ -836,7 +836,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action) count -= state->node_ents >> 2; count -= be16_to_cpu(node->hdr.count); node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); count -= be16_to_cpu(node->hdr.count); xfs_da_brelse(state->args->trans, bp); if (count >= 0) @@ -911,7 +911,7 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path) } for (blk--, level--; level >= 0; blk--, level--) { node = blk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + 
ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); btree = &node->btree[ blk->index ]; if (be32_to_cpu(btree->hashval) == lasthash) break; @@ -979,8 +979,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, drop_node = drop_blk->bp->data; save_node = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); - ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); + ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); tp = state->args->trans; /* @@ -1278,8 +1278,8 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp) node1 = node1_bp->data; node2 = node2_bp->data; - ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) && - (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC)); + ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) && + node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) && ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) || @@ -1299,7 +1299,7 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count) xfs_da_intnode_t *node; node = bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); if (count) *count = be16_to_cpu(node->hdr.count); if (!node->hdr.count) @@ -1412,7 +1412,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, for (blk = &path->blk[level]; level >= 0; blk--, level--) { ASSERT(blk->bp != NULL); node = blk->bp->data; - ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC); + ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) { blk->index++; blkno = be32_to_cpu(node->btree[blk->index].before); @@ -1451,9 +1451,9 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path, return(error); ASSERT(blk->bp != NULL); info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC || - be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC || - be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || + info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) || + info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)); blk->magic = be16_to_cpu(info->magic); if (blk->magic == XFS_DA_NODE_MAGIC) { node = (xfs_da_intnode_t *)info; @@ -1704,12 +1704,12 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, /* * Get values from the moved block. 
*/ - if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) { + if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) { dead_leaf2 = (xfs_dir2_leaf_t *)dead_info; dead_level = 0; dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval); } else { - ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC); + ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)); dead_node = (xfs_da_intnode_t *)dead_info; dead_level = be16_to_cpu(dead_node->hdr.level); dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval); @@ -1768,8 +1768,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w))) goto done; par_node = par_buf->data; - if (unlikely( - be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC || + if (unlikely(par_node->hdr.info.magic != + cpu_to_be16(XFS_DA_NODE_MAGIC) || (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", XFS_ERRLEVEL_LOW, mp); @@ -1820,7 +1820,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop, par_node = par_buf->data; if (unlikely( be16_to_cpu(par_node->hdr.level) != level || - be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) { + par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) { XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", XFS_ERRLEVEL_LOW, mp); error = XFS_ERROR(EFSCORRUPTED); @@ -2093,7 +2093,7 @@ xfs_da_do_buf( (magic != XFS_DIR2_LEAFN_MAGIC) && (magic1 != XFS_DIR2_BLOCK_MAGIC) && (magic1 != XFS_DIR2_DATA_MAGIC) && - (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC), + (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)), mp, XFS_ERRTAG_DA_READ_BUF, XFS_RANDOM_DA_READ_BUF))) { trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_); diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index c9fdabe5e1ad..70fb0cb6bd69 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -109,7 +109,7 @@ xfs_dir2_block_addname( /* * Check the magic number, corrupted if wrong. 
*/ - if (unlikely(be32_to_cpu(hdr->magic) != XFS_DIR2_BLOCK_MAGIC)) { + if (unlikely(hdr->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))) { XFS_CORRUPTION_ERROR("xfs_dir2_block_addname", XFS_ERRLEVEL_LOW, mp, hdr); xfs_da_brelse(tp, bp); @@ -255,7 +255,8 @@ xfs_dir2_block_addname( highstale = lfloghigh = -1; fromidx >= 0; fromidx--) { - if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) { + if (blp[fromidx].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) { if (highstale == -1) highstale = toidx; else { @@ -352,12 +353,14 @@ xfs_dir2_block_addname( else { for (lowstale = mid; lowstale >= 0 && - be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR; + blp[lowstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR); lowstale--) continue; for (highstale = mid + 1; highstale < be32_to_cpu(btp->count) && - be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR && + blp[highstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && (lowstale < 0 || mid - lowstale > highstale - mid); highstale++) continue; @@ -899,7 +902,7 @@ xfs_dir2_leaf_to_block( tp = args->trans; mp = dp->i_mount; leaf = lbp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); ltp = xfs_dir2_leaf_tail_p(mp, leaf); /* * If there are data blocks other than the first one, take this @@ -929,7 +932,7 @@ xfs_dir2_leaf_to_block( goto out; } hdr = dbp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); /* * Size of the "leaf" area in the block. */ @@ -971,7 +974,8 @@ xfs_dir2_leaf_to_block( */ lep = xfs_dir2_block_leaf_p(btp); for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { - if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[from].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) continue; lep[to++] = leaf->ents[from]; } diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 8e47ac3a3b9d..32dca4c531a6 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -73,7 +73,7 @@ xfs_dir2_data_check( bf = hdr->bestfree; p = (char *)(hdr + 1); - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { btp = xfs_dir2_block_tail_p(mp, hdr); lep = xfs_dir2_block_leaf_p(btp); endp = (char *)lep; @@ -140,7 +140,7 @@ xfs_dir2_data_check( (char *)dep - (char *)hdr); count++; lastfree = 0; - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, (xfs_dir2_data_aoff_t) ((char *)dep - (char *)hdr)); @@ -160,9 +160,10 @@ xfs_dir2_data_check( * Need to have seen all the entries and all the bestfree slots. */ ASSERT(freeseen == 7); - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { - if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR) + if (lep[i].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; if (i > 0) ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval)); @@ -196,8 +197,8 @@ xfs_dir2_data_freefind( * Check order, non-overlapping entries, and if we find the * one we're looking for it has to be exact. 
*/ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); for (dfp = &hdr->bestfree[0], seenzero = matched = 0; dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT]; dfp++) { @@ -256,8 +257,8 @@ xfs_dir2_data_freeinsert( xfs_dir2_data_free_t new; /* new bestfree entry */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); #endif dfp = hdr->bestfree; new.length = dup->length; @@ -297,8 +298,8 @@ xfs_dir2_data_freeremove( int *loghead) /* out: log data header */ { #ifdef __KERNEL__ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); #endif /* * It's the first entry, slide the next 2 up. @@ -341,8 +342,8 @@ xfs_dir2_data_freescan( char *p; /* current entry pointer */ #ifdef __KERNEL__ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); #endif /* * Start by clearing the table. @@ -353,7 +354,7 @@ xfs_dir2_data_freescan( * Set up pointers. */ p = (char *)(hdr + 1); - if (be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC) { + if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) { btp = xfs_dir2_block_tail_p(mp, hdr); endp = (char *)xfs_dir2_block_leaf_p(btp); } else @@ -458,8 +459,8 @@ xfs_dir2_data_log_entry( { xfs_dir2_data_hdr_t *hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr), (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) - @@ -476,8 +477,8 @@ xfs_dir2_data_log_header( { xfs_dir2_data_hdr_t *hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1); } @@ -493,8 +494,8 @@ xfs_dir2_data_log_unused( { xfs_dir2_data_hdr_t *hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); /* * Log the first part of the unused entry. @@ -539,12 +540,12 @@ xfs_dir2_data_make_free( /* * Figure out where the end of the data area is. 
*/ - if (be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC) + if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)) endptr = (char *)hdr + mp->m_dirblksize; else { xfs_dir2_block_tail_t *btp; /* block tail */ - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); btp = xfs_dir2_block_tail_p(mp, hdr); endptr = (char *)xfs_dir2_block_leaf_p(btp); } @@ -717,8 +718,8 @@ xfs_dir2_data_use_free( int oldlen; /* old unused entry's length */ hdr = bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC || - be32_to_cpu(hdr->magic) == XFS_DIR2_BLOCK_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) || + hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)); ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); ASSERT(offset >= (char *)dup - (char *)hdr); ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)hdr); diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 5d2651f1c759..d629b888ca28 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -201,8 +201,8 @@ xfs_dir2_leaf_find_entry( */ for (lowstale = index - 1; lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != - XFS_DIR2_NULL_DATAPTR; + leaf->ents[lowstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR); lowstale--) continue; @@ -213,8 +213,8 @@ xfs_dir2_leaf_find_entry( */ for (highstale = index; highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != - XFS_DIR2_NULL_DATAPTR && + leaf->ents[highstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && (lowstale < 0 || index - lowstale - 1 >= highstale - index); highstale++) @@ -228,8 +228,8 @@ xfs_dir2_leaf_find_entry( (highstale == be16_to_cpu(leaf->hdr.count) || index - lowstale - 1 < highstale - index)) { ASSERT(index - lowstale - 1 >= 0); - ASSERT(be32_to_cpu(leaf->ents[lowstale].address) == - XFS_DIR2_NULL_DATAPTR); + ASSERT(leaf->ents[lowstale].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)); /* * Copy entries up to cover the stale entry and make room @@ -251,8 +251,8 @@ xfs_dir2_leaf_find_entry( * The high one is better, so use that one. */ ASSERT(highstale - index >= 0); - ASSERT(be32_to_cpu(leaf->ents[highstale].address) == - XFS_DIR2_NULL_DATAPTR); + ASSERT(leaf->ents[highstale].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)); /* * Copy entries down to cover the stale entry and make room for the @@ -342,7 +342,7 @@ xfs_dir2_leaf_addname( continue; i = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); ASSERT(i < be32_to_cpu(ltp->bestcount)); - ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF); + ASSERT(bestsp[i] != cpu_to_be16(NULLDATAOFF)); if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; break; @@ -356,7 +356,8 @@ xfs_dir2_leaf_addname( /* * Remember a block we see that's missing. */ - if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1) + if (bestsp[i] == cpu_to_be16(NULLDATAOFF) && + use_block == -1) use_block = i; else if (be16_to_cpu(bestsp[i]) >= length) { use_block = i; @@ -377,7 +378,7 @@ xfs_dir2_leaf_addname( * Now kill use_block if it refers to a missing block, so we * can use it as an indication of allocation needed. 
*/ - if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF) + if (use_block != -1 && bestsp[use_block] == cpu_to_be16(NULLDATAOFF)) use_block = -1; /* * If we don't have enough free bytes but we can make enough @@ -590,7 +591,7 @@ xfs_dir2_leaf_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); /* * This value is not restrictive enough. * Should factor in the size of the bests table as well. @@ -610,7 +611,7 @@ xfs_dir2_leaf_check( if (i + 1 < be16_to_cpu(leaf->hdr.count)) ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= be32_to_cpu(leaf->ents[i + 1].hashval)); - if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; } ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); @@ -639,7 +640,8 @@ xfs_dir2_leaf_compact( * Compress out the stale entries in place. */ for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) { - if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[from].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) continue; /* * Only actually copy the entries that are different. @@ -696,7 +698,8 @@ xfs_dir2_leaf_compact_x1( */ for (lowstale = index - 1; lowstale >= 0 && - be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR; + leaf->ents[lowstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR); lowstale--) continue; /* @@ -705,7 +708,8 @@ xfs_dir2_leaf_compact_x1( */ for (highstale = index; highstale < be16_to_cpu(leaf->hdr.count) && - be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR && + leaf->ents[highstale].address != + cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && (lowstale < 0 || index - lowstale > highstale - index); highstale++) continue; @@ -729,7 +733,8 @@ xfs_dir2_leaf_compact_x1( if (index == from) newindex = to; if (from != keepstale && - be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) { + leaf->ents[from].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) { if (from == to) *lowlogp = to; continue; @@ -1207,7 +1212,7 @@ xfs_dir2_leaf_log_bests( xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf); firstb = xfs_dir2_leaf_bests_p(ltp) + first; lastb = xfs_dir2_leaf_bests_p(ltp) + last; @@ -1230,8 +1235,8 @@ xfs_dir2_leaf_log_ents( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || - be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) || + leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); firstlep = &leaf->ents[first]; lastlep = &leaf->ents[last]; xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf), @@ -1249,8 +1254,8 @@ xfs_dir2_leaf_log_header( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC || - be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) || + leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf), (uint)(sizeof(leaf->hdr) - 1)); } @@ -1269,7 +1274,7 @@ 
xfs_dir2_leaf_log_tail( mp = tp->t_mountp; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC)); ltp = xfs_dir2_leaf_tail_p(mp, leaf); xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), (uint)(mp->m_dirblksize - 1)); @@ -1570,7 +1575,7 @@ xfs_dir2_leaf_removename( * Look for the last active entry (i). */ for (i = db - 1; i > 0; i--) { - if (be16_to_cpu(bestsp[i]) != NULLDATAOFF) + if (bestsp[i] != cpu_to_be16(NULLDATAOFF)) break; } /* @@ -1740,7 +1745,7 @@ xfs_dir2_leaf_trim_data( { struct xfs_dir2_data_hdr *hdr = dbp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); ASSERT(be16_to_cpu(hdr->bestfree[0].length) == mp->m_dirblksize - (uint)sizeof(*hdr)); ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); @@ -1850,7 +1855,7 @@ xfs_dir2_node_to_leaf( return 0; lbp = state->path.blk[0].bp; leaf = lbp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); /* * Read the freespace block. */ @@ -1859,7 +1864,7 @@ xfs_dir2_node_to_leaf( return error; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); ASSERT(!free->hdr.firstdb); /* diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 8d4b54c1bedb..14571cdeb33f 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -73,7 +73,7 @@ xfs_dir2_free_log_bests( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)&free->bests[first] - (char *)free), (uint)((char *)&free->bests[last] - (char *)free + @@ -91,7 +91,7 @@ xfs_dir2_free_log_header( xfs_dir2_free_t *free; /* freespace structure */ free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free), (uint)(sizeof(xfs_dir2_free_hdr_t) - 1)); } @@ -276,14 +276,14 @@ xfs_dir2_leafn_check( leaf = bp->data; mp = dp->i_mount; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp)); for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { if (i + 1 < be16_to_cpu(leaf->hdr.count)) { ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= be32_to_cpu(leaf->ents[i + 1].hashval)); } - if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; } ASSERT(be16_to_cpu(leaf->hdr.stale) == stale); @@ -302,7 +302,7 @@ xfs_dir2_leafn_lasthash( xfs_dir2_leaf_t *leaf; /* leaf structure */ leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); if (count) *count = be16_to_cpu(leaf->hdr.count); if (!leaf->hdr.count) @@ -341,7 +341,7 @@ xfs_dir2_leafn_lookup_for_addname( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); #ifdef __KERNEL__ ASSERT(be16_to_cpu(leaf->hdr.count) > 
0); #endif @@ -358,7 +358,7 @@ xfs_dir2_leafn_lookup_for_addname( curbp = state->extrablk.bp; curfdb = state->extrablk.blkno; free = curbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); } length = xfs_dir2_data_entsize(args->namelen); /* @@ -424,7 +424,8 @@ xfs_dir2_leafn_lookup_for_addname( /* * If it has room, return it. */ - if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) { + if (unlikely(free->bests[fi] == + cpu_to_be16(NULLDATAOFF))) { XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int", XFS_ERRLEVEL_LOW, mp); if (curfdb != newfdb) @@ -485,7 +486,7 @@ xfs_dir2_leafn_lookup_for_entry( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); #ifdef __KERNEL__ ASSERT(be16_to_cpu(leaf->hdr.count) > 0); #endif @@ -666,7 +667,8 @@ xfs_dir2_leafn_moveents( int i; /* temp leaf index */ for (i = start_s, stale = 0; i < start_s + count; i++) { - if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR) + if (leaf_s->ents[i].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) stale++; } } else @@ -713,8 +715,8 @@ xfs_dir2_leafn_order( leaf1 = leaf1_bp->data; leaf2 = leaf2_bp->data; - ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); + ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); if (be16_to_cpu(leaf1->hdr.count) > 0 && be16_to_cpu(leaf2->hdr.count) > 0 && (be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) || @@ -862,7 +864,7 @@ xfs_dir2_leafn_remove( tp = args->trans; mp = dp->i_mount; leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); /* * Point to the entry we're removing. 
*/ @@ -924,7 +926,7 @@ xfs_dir2_leafn_remove( return error; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); ASSERT(be32_to_cpu(free->hdr.firstdb) == XFS_DIR2_MAX_FREE_BESTS(mp) * (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); @@ -976,7 +978,8 @@ xfs_dir2_leafn_remove( int i; /* free entry index */ for (i = findex - 1; - i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF; + i >= 0 && + free->bests[i] == cpu_to_be16(NULLDATAOFF); i--) continue; free->hdr.nvalid = cpu_to_be32(i + 1); @@ -1133,7 +1136,7 @@ xfs_dir2_leafn_toosmall( */ blk = &state->path.blk[state->path.active - 1]; info = blk->bp->data; - ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); leaf = (xfs_dir2_leaf_t *)info; count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]); @@ -1192,7 +1195,7 @@ xfs_dir2_leafn_toosmall( count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes = state->blocksize - (state->blocksize >> 2); leaf = bp->data; - ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale); bytes -= count * (uint)sizeof(leaf->ents[0]); /* @@ -1251,8 +1254,8 @@ xfs_dir2_leafn_unbalance( ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC); drop_leaf = drop_blk->bp->data; save_leaf = save_blk->bp->data; - ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); - ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); + ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); + ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)); /* * If there are any stale leaf entries, take this opportunity * to purge them. @@ -1393,7 +1396,7 @@ xfs_dir2_node_addname_int( */ ifbno = fblk->blkno; free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); findex = fblk->index; /* * This means the free entry showed that the data block had @@ -1477,7 +1480,7 @@ xfs_dir2_node_addname_int( continue; } free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); findex = 0; } /* @@ -1609,7 +1612,7 @@ xfs_dir2_node_addname_int( free->hdr.nused = 0; } else { free = fbp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); } /* @@ -1632,7 +1635,7 @@ xfs_dir2_node_addname_int( * If this entry was for an empty data block * (this should always be true) then update the header. */ - if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) { + if (free->bests[findex] == cpu_to_be16(NULLDATAOFF)) { be32_add_cpu(&free->hdr.nused, 1); xfs_dir2_free_log_header(tp, fbp); } @@ -1902,7 +1905,7 @@ xfs_dir2_node_replace( * Point to the data entry. 
*/ hdr = state->extrablk.bp->data; - ASSERT(be32_to_cpu(hdr->magic) == XFS_DIR2_DATA_MAGIC); + ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC)); dep = (xfs_dir2_data_entry_t *) ((char *)hdr + xfs_dir2_dataptr_to_off(state->mp, be32_to_cpu(lep->address))); @@ -1968,7 +1971,7 @@ xfs_dir2_node_trim_free( return 0; } free = bp->data; - ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); + ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); /* * If there are used entries, there's nothing to do. */ diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index 84ebeec16642..dd5628bd8d0b 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -683,7 +683,7 @@ xfs_dialloc( return 0; } agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); } else { /* * Continue where we left off before. In this case, we @@ -691,7 +691,7 @@ xfs_dialloc( */ agbp = *IO_agbp; agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); ASSERT(be32_to_cpu(agi->agi_freecount) > 0); } mp = tp->t_mountp; @@ -775,7 +775,7 @@ nextag: if (error) goto nextag; agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); } /* * Here with an allocation group that has a free inode. @@ -944,7 +944,7 @@ nextag: * See if the most recently allocated block has any free. */ newino: - if (be32_to_cpu(agi->agi_newino) != NULLAGINO) { + if (agi->agi_newino != cpu_to_be32(NULLAGINO)) { error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino), XFS_LOOKUP_EQ, &i); if (error) @@ -1085,7 +1085,7 @@ xfs_difree( return error; } agi = XFS_BUF_TO_AGI(agbp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); ASSERT(agbno < be32_to_cpu(agi->agi_length)); /* * Initialize the cursor. @@ -1438,7 +1438,7 @@ xfs_ialloc_log_agi( xfs_agi_t *agi; /* allocation group header */ agi = XFS_BUF_TO_AGI(bp); - ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC); + ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC)); #endif /* * Compute byte offsets for the first and last fields. @@ -1492,7 +1492,7 @@ xfs_read_agi( /* * Validate the magic number of the agi block. */ - agi_ok = be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC && + agi_ok = agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC) && XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)) && be32_to_cpu(agi->agi_seqno) == agno; if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI, diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index db310f8fb76a..d04ea6a2dfaf 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -167,7 +167,7 @@ xfs_imap_to_bp( dip = (xfs_dinode_t *)xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog)); - di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC && + di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) && XFS_DINODE_GOOD_VERSION(dip->di_version); if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP, @@ -802,7 +802,7 @@ xfs_iread( * If we got something that isn't an inode it means someone * (nfs or dmi) has a stale handle. 
*/ - if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) { + if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) { #ifdef DEBUG xfs_alert(mp, "%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)", @@ -1457,7 +1457,7 @@ xfs_iunlink( ASSERT(agi->agi_unlinked[bucket_index]); ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); - if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { + if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) { /* * There is already another inode in the bucket we need * to add ourselves to. Add us at the front of the list. @@ -1468,8 +1468,7 @@ xfs_iunlink( if (error) return error; - ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); - /* both on-disk, don't endian flip twice */ + ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO)); dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; offset = ip->i_imap.im_boffset + offsetof(xfs_dinode_t, di_next_unlinked); @@ -1534,7 +1533,7 @@ xfs_iunlink_remove( agino = XFS_INO_TO_AGINO(mp, ip->i_ino); ASSERT(agino != 0); bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; - ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO); + ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)); ASSERT(agi->agi_unlinked[bucket_index]); if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) { @@ -2659,7 +2658,7 @@ xfs_iflush_int( */ xfs_synchronize_times(ip); - if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, + if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { xfs_alert_tag(mp, XFS_PTAG_IFLUSH, "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p", diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 41d5b8f2bf92..5b24a71811f8 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -3521,13 +3521,13 @@ xlog_verify_iclog(xlog_t *log, spin_unlock(&log->l_icloglock); /* check log magic numbers */ - if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM) + if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); ptr = (xfs_caddr_t) &iclog->ic_header; for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; ptr += BBSIZE) { - if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) + if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) xfs_emerg(log->l_mp, "%s: unexpected magic num", __func__); } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 04142caedb2b..fb528b354c0d 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -300,14 +300,14 @@ xlog_header_check_recover( xfs_mount_t *mp, xlog_rec_header_t *head) { - ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); + ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); /* * IRIX doesn't write the h_fmt field and leaves it zeroed * (XLOG_FMT_UNKNOWN). This stops us from trying to recover * a dirty log created in IRIX. 
*/ - if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { + if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) { xfs_warn(mp, "dirty log written in incompatible format - can't recover"); xlog_header_check_dump(mp, head); @@ -333,7 +333,7 @@ xlog_header_check_mount( xfs_mount_t *mp, xlog_rec_header_t *head) { - ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); + ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)); if (uuid_is_nil(&head->h_fs_uuid)) { /* @@ -534,7 +534,7 @@ xlog_find_verify_log_record( head = (xlog_rec_header_t *)offset; - if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno)) + if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) break; if (!smallmem) @@ -916,7 +916,7 @@ xlog_find_tail( if (error) goto done; - if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { + if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { found = 1; break; } @@ -933,8 +933,8 @@ xlog_find_tail( if (error) goto done; - if (XLOG_HEADER_MAGIC_NUM == - be32_to_cpu(*(__be32 *)offset)) { + if (*(__be32 *)offset == + cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) { found = 2; break; } @@ -1947,7 +1947,7 @@ xfs_qm_dqcheck( * This is all fine; things are still consistent, and we haven't lost * any quota information. Just don't complain about bad dquot blks. */ - if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) { + if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) { if (flags & XFS_QMOPT_DOWARN) xfs_alert(mp, "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", @@ -2238,7 +2238,7 @@ xlog_recover_inode_pass2( * Make sure the place we're flushing out to really looks * like an inode! */ - if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) { + if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) { xfs_buf_relse(bp); xfs_alert(mp, "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", @@ -3295,7 +3295,7 @@ xlog_valid_rec_header( { int hlen; - if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) { + if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) { XFS_ERROR_REPORT("xlog_valid_rec_header(1)", XFS_ERRLEVEL_LOW, log->l_mp); return XFS_ERROR(EFSCORRUPTED); -- cgit v1.2.3 From bbb4197c73be356a052dac25cce5ed0c157c6c90 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:36:10 +0200 Subject: xfs: remove the unused xfs_bufhash structure Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 50a7d5fb3b73..6847dc4dceeb 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -91,11 +91,6 @@ typedef enum { XBT_FORCE_FLUSH = 1, } xfs_buftarg_flags_t; -typedef struct xfs_bufhash { - struct list_head bh_list; - spinlock_t bh_lock; -} xfs_bufhash_t; - typedef struct xfs_buftarg { dev_t bt_dev; struct block_device *bt_bdev; -- cgit v1.2.3 From 0c842ad46a51891ac4420b7285613f4134a65ccd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:36:19 +0200 Subject: xfs: clean up buffer locking helpers Rename xfs_buf_cond_lock and reverse it's return value to fit most other trylock operations in the Kernel and XFS (with the exception of down_trylock, after which xfs_buf_cond_lock was modelled), and replace xfs_buf_lock_val with an xfs_buf_islocked for use in asserts, or and opencoded variant in tracing. remove the XFS_BUF_* wrappers for all the locking helpers. 
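For illustration, the naming now follows the usual kernel trylock convention (spin_trylock()/mutex_trylock() return non-zero when the lock was taken), whereas down_trylock(), which the old helper was modelled on, returns 0 on success. A simplified sketch of the two new helpers, omitting the tracing and the stale/pinned log-force handling the real xfs_buf_trylock() keeps:

    /* sketch only: the real helper also forces the log for pinned stale buffers */
    int xfs_buf_trylock(struct xfs_buf *bp)
    {
            /* down_trylock() returns 0 on success, so invert it */
            return down_trylock(&bp->b_sema) == 0;
    }

    /* for use in asserts; the tracing code open-codes bp->b_sema.count instead */
    #define xfs_buf_islocked(bp)    ((bp)->b_sema.count <= 0)

Callers that tested !XFS_BUF_CPSEMA(bp) before simply become !xfs_buf_trylock(bp).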
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.c | 31 +++++++++++-------------------- fs/xfs/linux-2.6/xfs_buf.h | 9 +++------ fs/xfs/linux-2.6/xfs_trace.h | 10 +++++----- fs/xfs/quota/xfs_dquot.c | 6 +++--- fs/xfs/xfs_buf_item.c | 6 +++--- fs/xfs/xfs_log.c | 6 +++--- fs/xfs/xfs_log_recover.c | 2 +- fs/xfs/xfs_mount.c | 19 ++++++++----------- fs/xfs/xfs_trans_buf.c | 4 ++-- 9 files changed, 39 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 5e68099db2a5..c77227615403 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -499,16 +499,14 @@ found: spin_unlock(&pag->pag_buf_lock); xfs_perag_put(pag); - if (xfs_buf_cond_lock(bp)) { - /* failed, so wait for the lock if requested. */ - if (!(flags & XBF_TRYLOCK)) { - xfs_buf_lock(bp); - XFS_STATS_INC(xb_get_locked_waited); - } else { + if (!xfs_buf_trylock(bp)) { + if (flags & XBF_TRYLOCK) { xfs_buf_rele(bp); XFS_STATS_INC(xb_busy_locked); return NULL; } + xfs_buf_lock(bp); + XFS_STATS_INC(xb_get_locked_waited); } /* @@ -896,8 +894,8 @@ xfs_buf_rele( * to push on stale inode buffers. */ int -xfs_buf_cond_lock( - xfs_buf_t *bp) +xfs_buf_trylock( + struct xfs_buf *bp) { int locked; @@ -907,15 +905,8 @@ xfs_buf_cond_lock( else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) xfs_log_force(bp->b_target->bt_mount, 0); - trace_xfs_buf_cond_lock(bp, _RET_IP_); - return locked ? 0 : -EBUSY; -} - -int -xfs_buf_lock_value( - xfs_buf_t *bp) -{ - return bp->b_sema.count; + trace_xfs_buf_trylock(bp, _RET_IP_); + return locked; } /* @@ -929,7 +920,7 @@ xfs_buf_lock_value( */ void xfs_buf_lock( - xfs_buf_t *bp) + struct xfs_buf *bp) { trace_xfs_buf_lock(bp, _RET_IP_); @@ -950,7 +941,7 @@ xfs_buf_lock( */ void xfs_buf_unlock( - xfs_buf_t *bp) + struct xfs_buf *bp) { if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) { atomic_inc(&bp->b_hold); @@ -1694,7 +1685,7 @@ xfs_buf_delwri_split( list_for_each_entry_safe(bp, n, dwq, b_list) { ASSERT(bp->b_flags & XBF_DELWRI); - if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) { + if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) { if (!force && time_before(jiffies, bp->b_queuetime + age)) { xfs_buf_unlock(bp); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 6847dc4dceeb..706c40069116 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -187,10 +187,11 @@ extern void xfs_buf_free(xfs_buf_t *); extern void xfs_buf_rele(xfs_buf_t *); /* Locking and Unlocking Buffers */ -extern int xfs_buf_cond_lock(xfs_buf_t *); -extern int xfs_buf_lock_value(xfs_buf_t *); +extern int xfs_buf_trylock(xfs_buf_t *); extern void xfs_buf_lock(xfs_buf_t *); extern void xfs_buf_unlock(xfs_buf_t *); +#define xfs_buf_islocked(bp) \ + ((bp)->b_sema.count <= 0) /* Buffer Read and Write Routines */ extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp); @@ -308,10 +309,6 @@ xfs_buf_set_ref( #define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count)) -#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp) -#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0) -#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp) -#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp) #define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait); #define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target)) diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 4fe53f9f0477..3bdfcb9f52b7 100644 --- 
a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -293,7 +293,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class, __entry->buffer_length = bp->b_buffer_length; __entry->hold = atomic_read(&bp->b_hold); __entry->pincount = atomic_read(&bp->b_pin_count); - __entry->lockval = xfs_buf_lock_value(bp); + __entry->lockval = bp->b_sema.count; __entry->flags = bp->b_flags; __entry->caller_ip = caller_ip; ), @@ -323,7 +323,7 @@ DEFINE_BUF_EVENT(xfs_buf_bawrite); DEFINE_BUF_EVENT(xfs_buf_bdwrite); DEFINE_BUF_EVENT(xfs_buf_lock); DEFINE_BUF_EVENT(xfs_buf_lock_done); -DEFINE_BUF_EVENT(xfs_buf_cond_lock); +DEFINE_BUF_EVENT(xfs_buf_trylock); DEFINE_BUF_EVENT(xfs_buf_unlock); DEFINE_BUF_EVENT(xfs_buf_iowait); DEFINE_BUF_EVENT(xfs_buf_iowait_done); @@ -366,7 +366,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class, __entry->flags = flags; __entry->hold = atomic_read(&bp->b_hold); __entry->pincount = atomic_read(&bp->b_pin_count); - __entry->lockval = xfs_buf_lock_value(bp); + __entry->lockval = bp->b_sema.count; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " @@ -409,7 +409,7 @@ TRACE_EVENT(xfs_buf_ioerror, __entry->buffer_length = bp->b_buffer_length; __entry->hold = atomic_read(&bp->b_hold); __entry->pincount = atomic_read(&bp->b_pin_count); - __entry->lockval = xfs_buf_lock_value(bp); + __entry->lockval = bp->b_sema.count; __entry->error = error; __entry->flags = bp->b_flags; __entry->caller_ip = caller_ip; @@ -454,7 +454,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class, __entry->buf_flags = bip->bli_buf->b_flags; __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count); - __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); + __entry->buf_lockval = bip->bli_buf->b_sema.count; __entry->li_desc = bip->bli_item.li_desc; __entry->li_flags = bip->bli_item.li_flags; ), diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 6fa214603819..dec11497a2ef 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -318,7 +318,7 @@ xfs_qm_init_dquot_blk( ASSERT(tp); ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); d = (xfs_dqblk_t *)XFS_BUF_PTR(bp); @@ -534,7 +534,7 @@ xfs_qm_dqtobp( } ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); /* * calculate the location of the dquot inside the buffer. @@ -622,7 +622,7 @@ xfs_qm_dqread( * brelse it because we have the changes incore. */ ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); xfs_trans_brelse(tp, bp); return (error); diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 7b7e005e3dcc..da399ac4411d 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -420,7 +420,7 @@ xfs_buf_item_unpin( if (freed && stale) { ASSERT(bip->bli_flags & XFS_BLI_STALE); - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); ASSERT(!(XFS_BUF_ISDELAYWRITE(bp))); ASSERT(XFS_BUF_ISSTALE(bp)); ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL); @@ -483,7 +483,7 @@ xfs_buf_item_trylock( if (XFS_BUF_ISPINNED(bp)) return XFS_ITEM_PINNED; - if (!XFS_BUF_CPSEMA(bp)) + if (!xfs_buf_trylock(bp)) return XFS_ITEM_LOCKED; /* take a reference to the buffer. 
*/ @@ -905,7 +905,7 @@ xfs_buf_attach_iodone( xfs_log_item_t *head_lip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); lip->li_cb = cb; if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 5b24a71811f8..68e053ea6e78 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1059,7 +1059,7 @@ xlog_alloc_log(xfs_mount_t *mp, XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); log->l_xbuf = bp; spin_lock_init(&log->l_icloglock); @@ -1090,7 +1090,7 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_iclog_size, 0); if (!bp) goto out_free_iclog; - if (!XFS_BUF_CPSEMA(bp)) + if (!xfs_buf_trylock(bp)) ASSERT(0); XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); @@ -1118,7 +1118,7 @@ xlog_alloc_log(xfs_mount_t *mp, iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); - ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); + ASSERT(xfs_buf_islocked(iclog->ic_bp)); init_waitqueue_head(&iclog->ic_force_wait); init_waitqueue_head(&iclog->ic_write_wait); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index fb528b354c0d..a74e26408343 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -264,7 +264,7 @@ xlog_bwrite( XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_HOLD(bp); - XFS_BUF_PSEMA(bp, PRIBIO); + xfs_buf_lock(bp); XFS_BUF_SET_COUNT(bp, BBTOB(nbblks)); XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 63659ee316bd..77ffe3ce71fe 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1941,22 +1941,19 @@ unwind: * the superblock buffer if it can be locked without sleeping. * If it can't then we'll return NULL. */ -xfs_buf_t * +struct xfs_buf * xfs_getsb( - xfs_mount_t *mp, - int flags) + struct xfs_mount *mp, + int flags) { - xfs_buf_t *bp; + struct xfs_buf *bp = mp->m_sb_bp; - ASSERT(mp->m_sb_bp != NULL); - bp = mp->m_sb_bp; - if (flags & XBF_TRYLOCK) { - if (!XFS_BUF_CPSEMA(bp)) { + if (!xfs_buf_trylock(bp)) { + if (flags & XBF_TRYLOCK) return NULL; - } - } else { - XFS_BUF_PSEMA(bp, PRIBIO); + xfs_buf_lock(bp); } + XFS_BUF_HOLD(bp); ASSERT(XFS_BUF_ISDONE(bp)); return bp; diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 03b3b7f85a3b..bd5445787855 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -160,7 +160,7 @@ xfs_trans_get_buf(xfs_trans_t *tp, */ bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len); if (bp != NULL) { - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) XFS_BUF_SUPER_STALE(bp); @@ -327,7 +327,7 @@ xfs_trans_read_buf( */ bp = xfs_trans_buf_item_match(tp, target, blkno, len); if (bp != NULL) { - ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); + ASSERT(xfs_buf_islocked(bp)); ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); ASSERT((XFS_BUF_ISERROR(bp)) == 0); -- cgit v1.2.3 From c8da0faf6b07623c473cab42967f943ad4ab7560 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:36:25 +0200 Subject: xfs: return the buffer locked from xfs_buf_get_uncached All other xfs_buf_get/read-like helpers return the buffer locked, make sure xfs_buf_get_uncached isn't different for no reason. 
Half of the callers already lock it directly after, and the others probably should also keep it locked if only for consistency and beeing able to use xfs_buf_rele, but I'll leave that for later. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.c | 3 --- fs/xfs/xfs_log.c | 3 +-- fs/xfs/xfs_log_recover.c | 8 ++++++-- fs/xfs/xfs_vnodeops.c | 2 ++ 4 files changed, 9 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index c77227615403..6ad5ae249bfb 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -679,7 +679,6 @@ xfs_buf_read_uncached( return NULL; /* set up the buffer for a read IO */ - xfs_buf_lock(bp); XFS_BUF_SET_ADDR(bp, daddr); XFS_BUF_READ(bp); XFS_BUF_BUSY(bp); @@ -814,8 +813,6 @@ xfs_buf_get_uncached( goto fail_free_mem; } - xfs_buf_unlock(bp); - trace_xfs_buf_get_uncached(bp, _RET_IP_); return bp; diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 68e053ea6e78..44c269b387c7 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1090,8 +1090,7 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_iclog_size, 0); if (!bp) goto out_free_iclog; - if (!xfs_buf_trylock(bp)) - ASSERT(0); + XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); iclog->ic_bp = bp; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index a74e26408343..40c9742aa478 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -91,6 +91,8 @@ xlog_get_bp( xlog_t *log, int nbblks) { + struct xfs_buf *bp; + if (!xlog_buf_bbcount_valid(log, nbblks)) { xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", nbblks); @@ -118,8 +120,10 @@ xlog_get_bp( nbblks += log->l_sectBBsize; nbblks = round_up(nbblks, log->l_sectBBsize); - return xfs_buf_get_uncached(log->l_mp->m_logdev_targp, - BBTOB(nbblks), 0); + bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, BBTOB(nbblks), 0); + if (bp) + xfs_buf_unlock(bp); + return bp; } STATIC void diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 11242c482771..88d121486c52 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -1969,6 +1969,8 @@ xfs_zero_remaining_bytes( if (!bp) return XFS_ERROR(ENOMEM); + xfs_buf_unlock(bp); + for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { offset_fsb = XFS_B_TO_FSBT(mp, offset); nimap = 1; -- cgit v1.2.3 From 1d5ae5dfeee024d51fb2c0205035d7611a8f0c86 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:36:32 +0200 Subject: xfs: cleanup I/O-related buffer flags Remove the unused and misnamed _XBF_RUN_QUEUES flag, rename XBF_LOG_BUFFER to the more fitting XBF_SYNCIO, and split XBF_ORDERED into XBF_FUA and XBF_FLUSH to allow more fine grained control over the bio flags. Also cleanup processing of the flags in _xfs_buf_ioapply to make more sense, and renumber the sparse flag number space to group flags by purpose. 
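With the split flags, the translation to bio flags in _xfs_buf_ioapply() reduces to roughly the following (condensed from the hunk below; WRITE_SYNC, REQ_FUA, REQ_FLUSH and READA are the block layer's request flags):

    if (bp->b_flags & XBF_WRITE) {
            rw = (bp->b_flags & XBF_SYNCIO) ? WRITE_SYNC : WRITE;
            if (bp->b_flags & XBF_FUA)
                    rw |= REQ_FUA;          /* write through the disk cache */
            if (bp->b_flags & XBF_FLUSH)
                    rw |= REQ_FLUSH;        /* preflush the disk cache */
    } else if (bp->b_flags & XBF_READ_AHEAD) {
            rw = READA;
    } else {
            rw = READ;
    }

so a log write that previously set XBF_ORDERED now sets XBF_FUA and XBF_FLUSH explicitly.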
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.c | 35 +++++++++++++++-------------------- fs/xfs/linux-2.6/xfs_buf.h | 40 ++++++++++++++++++++-------------------- fs/xfs/xfs_log.c | 9 ++++----- 3 files changed, 39 insertions(+), 45 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 6ad5ae249bfb..a0d97c5a3f15 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -592,10 +592,8 @@ _xfs_buf_read( ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE))); ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL); - bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \ - XBF_READ_AHEAD | _XBF_RUN_QUEUES); - bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \ - XBF_READ_AHEAD | _XBF_RUN_QUEUES); + bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD); + bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); status = xfs_buf_iorequest(bp); if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC)) @@ -1211,23 +1209,21 @@ _xfs_buf_ioapply( total_nr_pages = bp->b_page_count; map_i = 0; - if (bp->b_flags & XBF_ORDERED) { - ASSERT(!(bp->b_flags & XBF_READ)); - rw = WRITE_FLUSH_FUA; - } else if (bp->b_flags & XBF_LOG_BUFFER) { - ASSERT(!(bp->b_flags & XBF_READ_AHEAD)); - bp->b_flags &= ~_XBF_RUN_QUEUES; - rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC; - } else if (bp->b_flags & _XBF_RUN_QUEUES) { - ASSERT(!(bp->b_flags & XBF_READ_AHEAD)); - bp->b_flags &= ~_XBF_RUN_QUEUES; - rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META; + if (bp->b_flags & XBF_WRITE) { + if (bp->b_flags & XBF_SYNCIO) + rw = WRITE_SYNC; + else + rw = WRITE; + if (bp->b_flags & XBF_FUA) + rw |= REQ_FUA; + if (bp->b_flags & XBF_FLUSH) + rw |= REQ_FLUSH; + } else if (bp->b_flags & XBF_READ_AHEAD) { + rw = READA; } else { - rw = (bp->b_flags & XBF_WRITE) ? WRITE : - (bp->b_flags & XBF_READ_AHEAD) ? 
READA : READ; + rw = READ; } - next_chunk: atomic_inc(&bp->b_io_remaining); nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); @@ -1689,8 +1685,7 @@ xfs_buf_delwri_split( break; } - bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q| - _XBF_RUN_QUEUES); + bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q); bp->b_flags |= XBF_WRITE; list_move_tail(&bp->b_list, list); trace_xfs_buf_delwri_split(bp, _RET_IP_); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 706c40069116..21604a17e2a3 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -46,43 +46,46 @@ typedef enum { #define XBF_READ (1 << 0) /* buffer intended for reading from device */ #define XBF_WRITE (1 << 1) /* buffer intended for writing to device */ -#define XBF_MAPPED (1 << 2) /* buffer mapped (b_addr valid) */ +#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */ +#define XBF_MAPPED (1 << 3) /* buffer mapped (b_addr valid) */ #define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ #define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ #define XBF_DELWRI (1 << 6) /* buffer has dirty pages */ #define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */ -#define XBF_ORDERED (1 << 11)/* use ordered writes */ -#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */ -#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */ + +/* I/O hints for the BIO layer */ +#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ +#define XBF_FUA (1 << 11)/* force cache write through mode */ +#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */ /* flags used only as arguments to access routines */ -#define XBF_LOCK (1 << 14)/* lock requested */ -#define XBF_TRYLOCK (1 << 15)/* lock requested, but do not wait */ -#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */ +#define XBF_LOCK (1 << 15)/* lock requested */ +#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */ +#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */ /* flags used only internally */ -#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */ -#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */ -#define _XBF_KMEM (1 << 20)/* backed by heap memory */ -#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */ +#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ +#define _XBF_KMEM (1 << 21)/* backed by heap memory */ +#define _XBF_DELWRI_Q (1 << 22)/* buffer on delwri queue */ typedef unsigned int xfs_buf_flags_t; #define XFS_BUF_FLAGS \ { XBF_READ, "READ" }, \ { XBF_WRITE, "WRITE" }, \ + { XBF_READ_AHEAD, "READ_AHEAD" }, \ { XBF_MAPPED, "MAPPED" }, \ { XBF_ASYNC, "ASYNC" }, \ { XBF_DONE, "DONE" }, \ { XBF_DELWRI, "DELWRI" }, \ { XBF_STALE, "STALE" }, \ - { XBF_ORDERED, "ORDERED" }, \ - { XBF_READ_AHEAD, "READ_AHEAD" }, \ + { XBF_SYNCIO, "SYNCIO" }, \ + { XBF_FUA, "FUA" }, \ + { XBF_FLUSH, "FLUSH" }, \ { XBF_LOCK, "LOCK" }, /* should never be set */\ { XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\ { XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\ { _XBF_PAGES, "PAGES" }, \ - { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \ { _XBF_KMEM, "KMEM" }, \ { _XBF_DELWRI_Q, "DELWRI_Q" } @@ -230,8 +233,9 @@ extern void xfs_buf_terminate(void); #define XFS_BUF_BFLAGS(bp) ((bp)->b_flags) -#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ - ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) +#define XFS_BUF_ZEROFLAGS(bp) \ + ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \ + 
XBF_SYNCIO|XBF_FUA|XBF_FLUSH)) void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_STALE(bp) xfs_buf_stale(bp); @@ -263,10 +267,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC) #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC) -#define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED) -#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED) -#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED) - #define XFS_BUF_HOLD(bp) xfs_buf_hold(bp) #define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ) #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ) diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 44c269b387c7..4c2d30e3b78d 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1268,7 +1268,6 @@ xlog_bdstrat( return 0; } - bp->b_flags |= _XBF_RUN_QUEUES; xfs_buf_iorequest(bp); return 0; } @@ -1369,7 +1368,7 @@ xlog_sync(xlog_t *log, XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); - bp->b_flags |= XBF_LOG_BUFFER; + bp->b_flags |= XBF_SYNCIO; if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { /* @@ -1380,7 +1379,7 @@ xlog_sync(xlog_t *log, */ if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); - XFS_BUF_ORDERED(bp); + bp->b_flags |= XBF_FUA | XBF_FLUSH; } ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); @@ -1413,9 +1412,9 @@ xlog_sync(xlog_t *log, XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); - bp->b_flags |= XBF_LOG_BUFFER; + bp->b_flags |= XBF_SYNCIO; if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) - XFS_BUF_ORDERED(bp); + bp->b_flags |= XBF_FUA | XBF_FLUSH; dptr = XFS_BUF_PTR(bp); /* * Bump the cycle numbers at the start of each block -- cgit v1.2.3 From e163cbde981c5988c3e80ca29589132336f18a72 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 8 Jul 2011 14:36:36 +0200 Subject: xfs: avoid a few disk cache flushes There is no need for a pre-flush when doing writing the second part of a split log buffer, and if we are using an external log there is no need to do a full cache flush of the log device at all given that all writes to it use the FUA flag. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_log.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 4c2d30e3b78d..0ef5eb677383 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1371,15 +1371,21 @@ xlog_sync(xlog_t *log, bp->b_flags |= XBF_SYNCIO; if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { + bp->b_flags |= XBF_FUA; + /* - * If we have an external log device, flush the data device - * before flushing the log to make sure all meta data - * written back from the AIL actually made it to disk - * before writing out the new log tail LSN in the log buffer. + * Flush the data device before flushing the log to make + * sure all meta data written back from the AIL actually made + * it to disk before stamping the new log tail LSN into the + * log buffer. For an external log we need to issue the + * flush explicitly, and unfortunately synchronously here; + * for an internal log we can simply use the block layer + * state machine for preflushes. 
*/ if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); - bp->b_flags |= XBF_FUA | XBF_FLUSH; + else + bp->b_flags |= XBF_FLUSH; } ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); @@ -1414,7 +1420,7 @@ xlog_sync(xlog_t *log, XFS_BUF_ASYNC(bp); bp->b_flags |= XBF_SYNCIO; if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) - bp->b_flags |= XBF_FUA | XBF_FLUSH; + bp->b_flags |= XBF_FUA; dptr = XFS_BUF_PTR(bp); /* * Bump the cycle numbers at the start of each block -- cgit v1.2.3 From c0e090ced259b85b854636c99dda95b7070a2785 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Fri, 20 May 2011 21:52:17 +0000 Subject: xfs: consolidate & clarify mount sanity checks Pavol pointed out that there is one silent error case in the mount path, and that others are rather uninformative. I've taken Pavol's suggested patch and extended it a bit to also: * fix a message which says "turned off" but actually errors out * consolidate the vaguely differentiated "SB sanity check [12]" messages, and hexdump the superblock for analysis Original-patch-by: Pavol Gono Signed-off-by: Eric Sandeen Signed-off-by: Alex Elder --- fs/xfs/xfs_fs.h | 5 +++++ fs/xfs/xfs_mount.c | 33 +++++++++++++-------------------- 2 files changed, 18 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h index 8f6fc1a96386..c13fed8c394a 100644 --- a/fs/xfs/xfs_fs.h +++ b/fs/xfs/xfs_fs.h @@ -249,6 +249,11 @@ typedef struct xfs_fsop_resblks { #define XFS_MAX_LOG_BYTES \ ((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES) +/* Used for sanity checks on superblock */ +#define XFS_MAX_DBLOCKS(s) ((xfs_drfsbno_t)(s)->sb_agcount * (s)->sb_agblocks) +#define XFS_MIN_DBLOCKS(s) ((xfs_drfsbno_t)((s)->sb_agcount - 1) * \ + (s)->sb_agblocks + XFS_MIN_AG_BLOCKS) + /* * Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT */ diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 77ffe3ce71fe..2be5e5cf897a 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -348,7 +348,7 @@ xfs_mount_validate_sb( } /* - * More sanity checking. These were stolen directly from + * More sanity checking. Most of these were stolen directly from * xfs_repair. 
*/ if (unlikely( @@ -371,23 +371,13 @@ xfs_mount_validate_sb( (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) || (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || - (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) { + (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */) || + sbp->sb_dblocks == 0 || + sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) || + sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) { if (loud) - xfs_warn(mp, "SB sanity check 1 failed"); - return XFS_ERROR(EFSCORRUPTED); - } - - /* - * Sanity check AG count, size fields against data size field - */ - if (unlikely( - sbp->sb_dblocks == 0 || - sbp->sb_dblocks > - (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks || - sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) * - sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) { - if (loud) - xfs_warn(mp, "SB sanity check 2 failed"); + XFS_CORRUPTION_ERROR("SB sanity check failed", + XFS_ERRLEVEL_LOW, mp, sbp); return XFS_ERROR(EFSCORRUPTED); } @@ -864,7 +854,8 @@ xfs_update_alignment(xfs_mount_t *mp) if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || (BBTOB(mp->m_swidth) & mp->m_blockmask)) { if (mp->m_flags & XFS_MOUNT_RETERR) { - xfs_warn(mp, "alignment check 1 failed"); + xfs_warn(mp, "alignment check failed: " + "(sunit/swidth vs. blocksize)"); return XFS_ERROR(EINVAL); } mp->m_dalign = mp->m_swidth = 0; @@ -875,6 +866,8 @@ xfs_update_alignment(xfs_mount_t *mp) mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { if (mp->m_flags & XFS_MOUNT_RETERR) { + xfs_warn(mp, "alignment check failed: " + "(sunit/swidth vs. ag size)"); return XFS_ERROR(EINVAL); } xfs_warn(mp, @@ -889,8 +882,8 @@ xfs_update_alignment(xfs_mount_t *mp) mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); } else { if (mp->m_flags & XFS_MOUNT_RETERR) { - xfs_warn(mp, - "stripe alignment turned off: sunit(%d) less than bsize(%d)", + xfs_warn(mp, "alignment check failed: " + "sunit(%d) less than bsize(%d)", mp->m_dalign, mp->m_blockmask +1); return XFS_ERROR(EINVAL); -- cgit v1.2.3 From 81463b1ca8dbd2f4f180feac3f49c7640e2b5f79 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Thu, 9 Jun 2011 16:47:49 +0000 Subject: xfs: remove variables that serve no purpose in xfs_alloc_ag_vextent_exact() Remove two variables that serve no purpose in xfs_alloc_ag_vextent_exact(). Signed-off-by: Chandra Seetharaman Signed-off-by: Alex Elder --- fs/xfs/xfs_alloc.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 3ad3ab9f56cb..1e00b3ef6274 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -570,9 +570,7 @@ xfs_alloc_ag_vextent_exact( xfs_agblock_t tbno; /* start block of trimmed extent */ xfs_extlen_t tlen; /* length of trimmed extent */ xfs_agblock_t tend; /* end block of trimmed extent */ - xfs_agblock_t end; /* end of allocated extent */ int i; /* success/failure of operation */ - xfs_extlen_t rlen; /* length of returned extent */ ASSERT(args->alignment == 1); @@ -625,18 +623,16 @@ xfs_alloc_ag_vextent_exact( * * Fix the length according to mod and prod if given. 
*/ - end = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen); - args->len = end - args->agbno; + args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen) + - args->agbno; xfs_alloc_fix_len(args); if (!xfs_alloc_fix_minleft(args)) goto not_found; - rlen = args->len; - ASSERT(args->agbno + rlen <= tend); - end = args->agbno + rlen; + ASSERT(args->agbno + args->len <= tend); /* - * We are allocating agbno for rlen [agbno .. end] + * We are allocating agbno for args->len * Allocate/initialize a cursor for the by-size btree. */ cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, -- cgit v1.2.3 From 20547490c12b0ee3d32152b85e9f9bd183aa7224 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sat, 9 Jul 2011 12:21:07 -0400 Subject: cifs: move bdi_setup_and_register outside of CONFIG_CIFS_DFS_UPCALL This needs to be done regardless of whether that KConfig option is set or not. Reported-by: Sven-Haegar Koch Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 46cc0ad03487..511176903e57 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3002,6 +3002,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) struct tcon_link *tlink; #ifdef CONFIG_CIFS_DFS_UPCALL int referral_walks_count = 0; +#endif rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); if (rc) @@ -3009,6 +3010,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; +#ifdef CONFIG_CIFS_DFS_UPCALL try_mount_again: /* cleanup activities if we're chasing a referral */ if (referral_walks_count) { -- cgit v1.2.3 From b9bce2e9f9936cfd12fbc62ead11edcdd46dec7e Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 6 Jul 2011 08:10:39 -0400 Subject: cifs: fix expand_dfs_referral Regression introduced in commit 724d9f1cfba. Prior to that, expand_dfs_referral would regenerate the mount data string and then call cifs_parse_mount_options to re-parse it (klunky, but it worked). The above commit moved cifs_parse_mount_options out of cifs_mount, so the re-parsing of the new mount options no longer occurred. Fix it by making expand_dfs_referral re-parse the mount options. 
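In short, once the new mount string has been composed from the referral, expand_dfs_referral() now throws away the stale parse results and runs the parser again instead of only swapping the string. A condensed sketch of the changed path, using the names from the hunk below:

    if (!IS_ERR(mdata)) {
            /* drop the old parse results and re-parse the new string */
            cleanup_volume_info_contents(volume_info);
            memset(volume_info, '\0', sizeof(*volume_info));
            rc = cifs_setup_volume_info(volume_info, mdata, fake_devname);
    }
    kfree(fake_devname);
    kfree(cifs_sb->mountdata);
    cifs_sb->mountdata = mdata;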
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 511176903e57..6ec366ff28b3 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -65,6 +65,8 @@ static int ip_connect(struct TCP_Server_Info *server); static int generic_ip_connect(struct TCP_Server_Info *server); static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink); static void cifs_prune_tlinks(struct work_struct *work); +static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data, + const char *devname); /* * cifs tcp session reconnection @@ -2830,12 +2832,9 @@ is_path_accessible(int xid, struct cifs_tcon *tcon, return rc; } -void -cifs_cleanup_volume_info(struct smb_vol *volume_info) +static void +cleanup_volume_info_contents(struct smb_vol *volume_info) { - if (!volume_info) - return; - kfree(volume_info->username); kzfree(volume_info->password); kfree(volume_info->UNC); @@ -2843,10 +2842,18 @@ cifs_cleanup_volume_info(struct smb_vol *volume_info) kfree(volume_info->domainname); kfree(volume_info->iocharset); kfree(volume_info->prepath); +} + +void +cifs_cleanup_volume_info(struct smb_vol *volume_info) +{ + if (!volume_info) + return; + cleanup_volume_info_contents(volume_info); kfree(volume_info); - return; } + #ifdef CONFIG_CIFS_DFS_UPCALL /* build_path_to_root returns full path to root when * we do not have an exiting connection (tcon) */ @@ -2915,15 +2922,18 @@ expand_dfs_referral(int xid, struct cifs_ses *pSesInfo, &fake_devname); free_dfs_info_array(referrals, num_referrals); - kfree(fake_devname); - - if (cifs_sb->mountdata != NULL) - kfree(cifs_sb->mountdata); if (IS_ERR(mdata)) { rc = PTR_ERR(mdata); mdata = NULL; + } else { + cleanup_volume_info_contents(volume_info); + memset(volume_info, '\0', sizeof(*volume_info)); + rc = cifs_setup_volume_info(volume_info, mdata, + fake_devname); } + kfree(fake_devname); + kfree(cifs_sb->mountdata); cifs_sb->mountdata = mdata; } kfree(full_path); -- cgit v1.2.3 From 5d70828a7703b1f68e2fd4ce4b42b28d9623f52b Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 10 Jul 2011 22:54:31 +0200 Subject: dlm: don't do pointless NULL check, use kzalloc and fix order of arguments In fs/dlm/lock.c in the dlm_scan_waiters() function there are 3 small issues: 1) There's no need to test the return value of the allocation and do a memset if is succeedes. Just use kzalloc() to obtain zeroed memory. 2) Since kfree() handles NULL pointers gracefully, the test of 'warned' against NULL before the kfree() after the loop is completely pointless. Remove it. 3) The arguments to kmalloc() (now kzalloc()) were swapped. Thanks to Dr. David Alan Gilbert for pointing this out. 
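The size/flags mix-up and the kzalloc conversion amount to:

    /* before: gfp flags passed as the size, then a redundant memset */
    warned = kmalloc(GFP_KERNEL, num_nodes * sizeof(int));
    if (warned)
            memset(warned, 0, num_nodes * sizeof(int));

    /* after: size first, gfp flags second, memory already zeroed */
    warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);

    kfree(warned);  /* kfree(NULL) is a no-op, so no NULL check is needed */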
Signed-off-by: Jesper Juhl Signed-off-by: David Teigland --- fs/dlm/lock.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index f71d0b5abd95..84c52e6b8810 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -849,9 +849,7 @@ void dlm_scan_waiters(struct dlm_ls *ls) if (!num_nodes) { num_nodes = ls->ls_num_nodes; - warned = kmalloc(GFP_KERNEL, num_nodes * sizeof(int)); - if (warned) - memset(warned, 0, num_nodes * sizeof(int)); + warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL); } if (!warned) continue; @@ -863,9 +861,7 @@ void dlm_scan_waiters(struct dlm_ls *ls) dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid); } mutex_unlock(&ls->ls_waiters_mutex); - - if (warned) - kfree(warned); + kfree(warned); if (debug_expired) log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us", -- cgit v1.2.3 From a22ca4806822154c163c6f220f4c2a05adf96fc7 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Mon, 11 Jul 2011 08:40:53 -0500 Subject: dlm: fix kmalloc args The gfp and size args were switched. Signed-off-by: David Teigland --- fs/dlm/lock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 84c52e6b8810..3c723489079a 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -4129,7 +4129,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) struct dlm_message *ms_stub; int wait_type, stub_unlock_result, stub_cancel_result; - ms_stub = kmalloc(GFP_KERNEL, sizeof(struct dlm_message)); + ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL); if (!ms_stub) { log_error(ls, "dlm_recover_waiters_pre no mem"); return; -- cgit v1.2.3 From 3d6aa675fff9eee5a6339d67b355b63a6d69565f Mon Sep 17 00:00:00 2001 From: David Teigland Date: Wed, 6 Jul 2011 17:00:54 -0500 Subject: dlm: keep lkbs in idr This is simpler and quicker than the hash table, and avoids needing to search the hash list for every new lkid to check if it's used. 
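For reference, the idr allocation pattern used in the new create_lkb() preloads outside the spinlock, allocates the id under it, and retries when the preallocated nodes were consumed by a concurrent allocation (condensed from the patch):

    retry:
            if (!idr_pre_get(&ls->ls_lkbidr, GFP_NOFS))
                    return -ENOMEM;

            spin_lock(&ls->ls_lkbidr_spin);
            rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
            if (!rv)
                    lkb->lkb_id = id;       /* ids start at 1, so 0 stays invalid */
            spin_unlock(&ls->ls_lkbidr_spin);

            if (rv == -EAGAIN)
                    goto retry;

Lookup then becomes a plain idr_find() under ls_lkbidr_spin instead of walking a hash chain.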
Signed-off-by: David Teigland --- fs/dlm/config.c | 7 --- fs/dlm/config.h | 1 - fs/dlm/dlm_internal.h | 14 ++---- fs/dlm/lock.c | 69 +++++++++++------------------- fs/dlm/lockspace.c | 116 +++++++++++++++++++++++--------------------------- 5 files changed, 82 insertions(+), 125 deletions(-) (limited to 'fs') diff --git a/fs/dlm/config.c b/fs/dlm/config.c index ad3b5a8535d0..4e20f9317156 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -94,7 +94,6 @@ struct dlm_cluster { unsigned int cl_tcp_port; unsigned int cl_buffer_size; unsigned int cl_rsbtbl_size; - unsigned int cl_lkbtbl_size; unsigned int cl_dirtbl_size; unsigned int cl_recover_timer; unsigned int cl_toss_secs; @@ -109,7 +108,6 @@ enum { CLUSTER_ATTR_TCP_PORT = 0, CLUSTER_ATTR_BUFFER_SIZE, CLUSTER_ATTR_RSBTBL_SIZE, - CLUSTER_ATTR_LKBTBL_SIZE, CLUSTER_ATTR_DIRTBL_SIZE, CLUSTER_ATTR_RECOVER_TIMER, CLUSTER_ATTR_TOSS_SECS, @@ -162,7 +160,6 @@ __CONFIGFS_ATTR(name, 0644, name##_read, name##_write) CLUSTER_ATTR(tcp_port, 1); CLUSTER_ATTR(buffer_size, 1); CLUSTER_ATTR(rsbtbl_size, 1); -CLUSTER_ATTR(lkbtbl_size, 1); CLUSTER_ATTR(dirtbl_size, 1); CLUSTER_ATTR(recover_timer, 1); CLUSTER_ATTR(toss_secs, 1); @@ -176,7 +173,6 @@ static struct configfs_attribute *cluster_attrs[] = { [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr, [CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr, [CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr, - [CLUSTER_ATTR_LKBTBL_SIZE] = &cluster_attr_lkbtbl_size.attr, [CLUSTER_ATTR_DIRTBL_SIZE] = &cluster_attr_dirtbl_size.attr, [CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr, [CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr, @@ -446,7 +442,6 @@ static struct config_group *make_cluster(struct config_group *g, cl->cl_tcp_port = dlm_config.ci_tcp_port; cl->cl_buffer_size = dlm_config.ci_buffer_size; cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size; - cl->cl_lkbtbl_size = dlm_config.ci_lkbtbl_size; cl->cl_dirtbl_size = dlm_config.ci_dirtbl_size; cl->cl_recover_timer = dlm_config.ci_recover_timer; cl->cl_toss_secs = dlm_config.ci_toss_secs; @@ -1038,7 +1033,6 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num) #define DEFAULT_TCP_PORT 21064 #define DEFAULT_BUFFER_SIZE 4096 #define DEFAULT_RSBTBL_SIZE 1024 -#define DEFAULT_LKBTBL_SIZE 1024 #define DEFAULT_DIRTBL_SIZE 1024 #define DEFAULT_RECOVER_TIMER 5 #define DEFAULT_TOSS_SECS 10 @@ -1052,7 +1046,6 @@ struct dlm_config_info dlm_config = { .ci_tcp_port = DEFAULT_TCP_PORT, .ci_buffer_size = DEFAULT_BUFFER_SIZE, .ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE, - .ci_lkbtbl_size = DEFAULT_LKBTBL_SIZE, .ci_dirtbl_size = DEFAULT_DIRTBL_SIZE, .ci_recover_timer = DEFAULT_RECOVER_TIMER, .ci_toss_secs = DEFAULT_TOSS_SECS, diff --git a/fs/dlm/config.h b/fs/dlm/config.h index dd0ce24d5a80..260574463d29 100644 --- a/fs/dlm/config.h +++ b/fs/dlm/config.h @@ -20,7 +20,6 @@ struct dlm_config_info { int ci_tcp_port; int ci_buffer_size; int ci_rsbtbl_size; - int ci_lkbtbl_size; int ci_dirtbl_size; int ci_recover_timer; int ci_toss_secs; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 0262451eb9c6..23a234bddc60 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -52,7 +53,6 @@ struct dlm_ls; struct dlm_lkb; struct dlm_rsb; struct dlm_member; -struct dlm_lkbtable; struct dlm_rsbtable; struct dlm_dirtable; struct dlm_direntry; @@ -108,11 +108,6 @@ struct dlm_rsbtable { spinlock_t lock; }; -struct dlm_lkbtable { - struct list_head list; - 
rwlock_t lock; - uint16_t counter; -}; /* * Lockspace member (per node in a ls) @@ -248,7 +243,6 @@ struct dlm_lkb { int8_t lkb_wait_count; int lkb_wait_nodeid; /* for debugging */ - struct list_head lkb_idtbl_list; /* lockspace lkbtbl */ struct list_head lkb_statequeue; /* rsb g/c/w list */ struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */ struct list_head lkb_wait_reply; /* waiting for remote reply */ @@ -465,12 +459,12 @@ struct dlm_ls { unsigned long ls_scan_time; struct kobject ls_kobj; + struct idr ls_lkbidr; + spinlock_t ls_lkbidr_spin; + struct dlm_rsbtable *ls_rsbtbl; uint32_t ls_rsbtbl_size; - struct dlm_lkbtable *ls_lkbtbl; - uint32_t ls_lkbtbl_size; - struct dlm_dirtable *ls_dirtbl; uint32_t ls_dirtbl_size; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 3c723489079a..784cde417ced 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -580,9 +580,8 @@ static void detach_lkb(struct dlm_lkb *lkb) static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) { - struct dlm_lkb *lkb, *tmp; - uint32_t lkid = 0; - uint16_t bucket; + struct dlm_lkb *lkb; + int rv, id; lkb = dlm_allocate_lkb(ls); if (!lkb) @@ -596,58 +595,38 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) INIT_LIST_HEAD(&lkb->lkb_time_list); INIT_LIST_HEAD(&lkb->lkb_astqueue); - get_random_bytes(&bucket, sizeof(bucket)); - bucket &= (ls->ls_lkbtbl_size - 1); - - write_lock(&ls->ls_lkbtbl[bucket].lock); + retry: + rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS); + if (!rv) + return -ENOMEM; - /* counter can roll over so we must verify lkid is not in use */ + spin_lock(&ls->ls_lkbidr_spin); + rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id); + if (!rv) + lkb->lkb_id = id; + spin_unlock(&ls->ls_lkbidr_spin); - while (lkid == 0) { - lkid = (bucket << 16) | ls->ls_lkbtbl[bucket].counter++; + if (rv == -EAGAIN) + goto retry; - list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list, - lkb_idtbl_list) { - if (tmp->lkb_id != lkid) - continue; - lkid = 0; - break; - } + if (rv < 0) { + log_error(ls, "create_lkb idr error %d", rv); + return rv; } - lkb->lkb_id = lkid; - list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list); - write_unlock(&ls->ls_lkbtbl[bucket].lock); - *lkb_ret = lkb; return 0; } -static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid) -{ - struct dlm_lkb *lkb; - uint16_t bucket = (lkid >> 16); - - list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) { - if (lkb->lkb_id == lkid) - return lkb; - } - return NULL; -} - static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) { struct dlm_lkb *lkb; - uint16_t bucket = (lkid >> 16); - - if (bucket >= ls->ls_lkbtbl_size) - return -EBADSLT; - read_lock(&ls->ls_lkbtbl[bucket].lock); - lkb = __find_lkb(ls, lkid); + spin_lock(&ls->ls_lkbidr_spin); + lkb = idr_find(&ls->ls_lkbidr, lkid); if (lkb) kref_get(&lkb->lkb_ref); - read_unlock(&ls->ls_lkbtbl[bucket].lock); + spin_unlock(&ls->ls_lkbidr_spin); *lkb_ret = lkb; return lkb ? 
0 : -ENOENT; @@ -668,12 +647,12 @@ static void kill_lkb(struct kref *kref) static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) { - uint16_t bucket = (lkb->lkb_id >> 16); + uint32_t lkid = lkb->lkb_id; - write_lock(&ls->ls_lkbtbl[bucket].lock); + spin_lock(&ls->ls_lkbidr_spin); if (kref_put(&lkb->lkb_ref, kill_lkb)) { - list_del(&lkb->lkb_idtbl_list); - write_unlock(&ls->ls_lkbtbl[bucket].lock); + idr_remove(&ls->ls_lkbidr, lkid); + spin_unlock(&ls->ls_lkbidr_spin); detach_lkb(lkb); @@ -683,7 +662,7 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) dlm_free_lkb(lkb); return 1; } else { - write_unlock(&ls->ls_lkbtbl[bucket].lock); + spin_unlock(&ls->ls_lkbidr_spin); return 0; } } diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 493d1e7161a4..871fe6deb5fa 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -472,17 +472,8 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, spin_lock_init(&ls->ls_rsbtbl[i].lock); } - size = dlm_config.ci_lkbtbl_size; - ls->ls_lkbtbl_size = size; - - ls->ls_lkbtbl = vmalloc(sizeof(struct dlm_lkbtable) * size); - if (!ls->ls_lkbtbl) - goto out_rsbfree; - for (i = 0; i < size; i++) { - INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list); - rwlock_init(&ls->ls_lkbtbl[i].lock); - ls->ls_lkbtbl[i].counter = 1; - } + idr_init(&ls->ls_lkbidr); + spin_lock_init(&ls->ls_lkbidr_spin); size = dlm_config.ci_dirtbl_size; ls->ls_dirtbl_size = size; @@ -605,8 +596,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, out_dirfree: vfree(ls->ls_dirtbl); out_lkbfree: - vfree(ls->ls_lkbtbl); - out_rsbfree: + idr_destroy(&ls->ls_lkbidr); vfree(ls->ls_rsbtbl); out_lsfree: if (do_unreg) @@ -641,50 +631,66 @@ int dlm_new_lockspace(const char *name, int namelen, void **lockspace, return error; } -/* Return 1 if the lockspace still has active remote locks, - * 2 if the lockspace still has active local locks. - */ -static int lockspace_busy(struct dlm_ls *ls) -{ - int i, lkb_found = 0; - struct dlm_lkb *lkb; - - /* NOTE: We check the lockidtbl here rather than the resource table. - This is because there may be LKBs queued as ASTs that have been - unlinked from their RSBs and are pending deletion once the AST has - been delivered */ - - for (i = 0; i < ls->ls_lkbtbl_size; i++) { - read_lock(&ls->ls_lkbtbl[i].lock); - if (!list_empty(&ls->ls_lkbtbl[i].list)) { - lkb_found = 1; - list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list, - lkb_idtbl_list) { - if (!lkb->lkb_nodeid) { - read_unlock(&ls->ls_lkbtbl[i].lock); - return 2; - } - } - } - read_unlock(&ls->ls_lkbtbl[i].lock); +static int lkb_idr_is_local(int id, void *p, void *data) +{ + struct dlm_lkb *lkb = p; + + if (!lkb->lkb_nodeid) + return 1; + return 0; +} + +static int lkb_idr_is_any(int id, void *p, void *data) +{ + return 1; +} + +static int lkb_idr_free(int id, void *p, void *data) +{ + struct dlm_lkb *lkb = p; + + dlm_del_ast(lkb); + + if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY) + dlm_free_lvb(lkb->lkb_lvbptr); + + dlm_free_lkb(lkb); + return 0; +} + +/* NOTE: We check the lkbidr here rather than the resource table. 
+ This is because there may be LKBs queued as ASTs that have been unlinked + from their RSBs and are pending deletion once the AST has been delivered */ + +static int lockspace_busy(struct dlm_ls *ls, int force) +{ + int rv; + + spin_lock(&ls->ls_lkbidr_spin); + if (force == 0) { + rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); + } else if (force == 1) { + rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls); + } else { + rv = 0; } - return lkb_found; + spin_unlock(&ls->ls_lkbidr_spin); + return rv; } static int release_lockspace(struct dlm_ls *ls, int force) { - struct dlm_lkb *lkb; struct dlm_rsb *rsb; struct list_head *head; int i, busy, rv; - busy = lockspace_busy(ls); + busy = lockspace_busy(ls, force); spin_lock(&lslist_lock); if (ls->ls_create_count == 1) { - if (busy > force) + if (busy) { rv = -EBUSY; - else { + } else { /* remove_lockspace takes ls off lslist */ ls->ls_create_count = 0; rv = 0; @@ -724,29 +730,15 @@ static int release_lockspace(struct dlm_ls *ls, int force) vfree(ls->ls_dirtbl); /* - * Free all lkb's on lkbtbl[] lists. + * Free all lkb's in idr */ - for (i = 0; i < ls->ls_lkbtbl_size; i++) { - head = &ls->ls_lkbtbl[i].list; - while (!list_empty(head)) { - lkb = list_entry(head->next, struct dlm_lkb, - lkb_idtbl_list); - - list_del(&lkb->lkb_idtbl_list); - - dlm_del_ast(lkb); + idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls); + idr_remove_all(&ls->ls_lkbidr); + idr_destroy(&ls->ls_lkbidr); - if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY) - dlm_free_lvb(lkb->lkb_lvbptr); - - dlm_free_lkb(lkb); - } - } dlm_astd_resume(); - vfree(ls->ls_lkbtbl); - /* * Free all rsb's on rsbtbl[] lists */ -- cgit v1.2.3 From b2ce39740066604288876c752d8170b3b17a21aa Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Jul 2011 09:51:44 -0500 Subject: Revert "xfs: fix filesystsem freeze race in xfs_trans_alloc" This reverts commit 7a249cf83da1813cfa71cfe1e265b40045eceb47. That commit created a situation that could lead to a filesystem hang. As Dave Chinner pointed out, xfs_trans_alloc() could hold a reference to m_active_trans (i.e., keep it non-zero) and then wait for SB_FREEZE_TRANS to complete. Meanwhile a filesystem freeze request could set SB_FREEZE_TRANS and then wait for m_active_trans to drop to zero. Nobody benefits from this sequence of events... Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_fsops.c | 2 +- fs/xfs/xfs_iomap.c | 3 ++- fs/xfs/xfs_mount.c | 14 +++++++------- fs/xfs/xfs_trans.c | 27 +++++++++++++++++---------- fs/xfs/xfs_trans.h | 10 ++-------- 5 files changed, 29 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index b7c492d293b5..9153d2c77caf 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -626,7 +626,7 @@ xfs_fs_log_dummy( xfs_trans_t *tp; int error; - tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP, false); + tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP); error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 982b71108f33..091d82b94c4d 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -688,7 +688,8 @@ xfs_iomap_write_unwritten( * the same inode that we complete here and might deadlock * on the iolock. 
*/ - tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS, true); + xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); + tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS); tp->t_flags |= XFS_TRANS_RESERVE; error = xfs_trans_reserve(tp, resblks, XFS_WRITE_LOG_RES(mp), 0, diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 2be5e5cf897a..a19e92381f90 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1559,9 +1559,15 @@ xfs_fs_writable(xfs_mount_t *mp) } /* + * xfs_log_sbcount + * * Called either periodically to keep the on disk superblock values * roughly up to date or from unmount to make sure the values are * correct on a clean unmount. + * + * Note this code can be called during the process of freezing, so + * we may need to use the transaction allocator which does not not + * block when the transaction subsystem is in its frozen state. */ int xfs_log_sbcount( @@ -1583,13 +1589,7 @@ xfs_log_sbcount( if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) return 0; - /* - * We can be called during the process of freezing, so make sure - * we go ahead even if the frozen for new transactions. We will - * always use a sync transaction in the freeze path to make sure - * the transaction has completed by the time we return. - */ - tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP, false); + tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP); error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, XFS_DEFAULT_LOG_COUNT); if (error) { diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 2837220ea5cf..c83f63b33aae 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -566,24 +566,31 @@ xfs_trans_init( /* * This routine is called to allocate a transaction structure. - * * The type parameter indicates the type of the transaction. These * are enumerated in xfs_trans.h. + * + * Dynamically allocate the transaction structure from the transaction + * zone, initialize it, and return it to the caller. */ -struct xfs_trans * +xfs_trans_t * +xfs_trans_alloc( + xfs_mount_t *mp, + uint type) +{ + xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); + return _xfs_trans_alloc(mp, type, KM_SLEEP); +} + +xfs_trans_t * _xfs_trans_alloc( - struct xfs_mount *mp, - uint type, - uint memflags, - bool wait_for_freeze) + xfs_mount_t *mp, + uint type, + uint memflags) { - struct xfs_trans *tp; + xfs_trans_t *tp; atomic_inc(&mp->m_active_trans); - if (wait_for_freeze) - xfs_wait_for_freeze(mp, SB_FREEZE_TRANS); - tp = kmem_zone_zalloc(xfs_trans_zone, memflags); tp->t_magic = XFS_TRANS_MAGIC; tp->t_type = type; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 4e78432ce143..06a9759b6352 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -447,14 +447,8 @@ typedef struct xfs_trans { /* * XFS transaction mechanism exported interfaces. */ -xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint, bool); - -static inline struct xfs_trans * -xfs_trans_alloc(struct xfs_mount *mp, uint type) -{ - return _xfs_trans_alloc(mp, type, KM_SLEEP, true); -} - +xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); +xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint); xfs_trans_t *xfs_trans_dup(xfs_trans_t *); int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, uint, uint); -- cgit v1.2.3 From f484b5d001a972a42129570e98086a2a6d216ce0 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 11 Jul 2011 10:16:34 -0400 Subject: cifs: drop spinlock before calling cifs_put_tlink ...as that function can sleep. 
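For context, cifs_put_tlink() can end up taking a mutex and freeing resources on the final reference drop, so it must not run while cifs_tcp_ses_lock (a spinlock) is held. A minimal sketch of the ordering rule the one-line fix below applies, with hypothetical names (only the spinlock/mutex primitives are real kernel API):

```c
/*
 * Illustrative only: a put function that may sleep must be called after
 * the spinlock is dropped.  All identifiers here are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_obj {
	int id;
};

static DEFINE_SPINLOCK(obj_list_lock);
static DEFINE_MUTEX(teardown_mutex);

/* May sleep: takes a mutex and frees memory on the last put. */
static void put_obj(struct my_obj *obj)
{
	mutex_lock(&teardown_mutex);
	/* ... tear down whatever the object references ... */
	mutex_unlock(&teardown_mutex);
	kfree(obj);
}

static void drop_obj(struct my_obj *obj)
{
	/*
	 * Wrong: calling put_obj() inside the critical section would sleep
	 * under a spinlock -- the bug this patch fixes for cifs_put_tlink().
	 *
	 *	spin_lock(&obj_list_lock);
	 *	put_obj(obj);
	 *	spin_unlock(&obj_list_lock);
	 */
	spin_lock(&obj_list_lock);
	/* ... do only the work that actually needs the spinlock ... */
	spin_unlock(&obj_list_lock);
	put_obj(obj);		/* safe: no spinlock held here */
}
```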
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6ec366ff28b3..dbd669cc5bc7 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2242,8 +2242,8 @@ cifs_match_super(struct super_block *sb, void *data) rc = compare_mount_options(sb, mnt_data); out: - cifs_put_tlink(tlink); spin_unlock(&cifs_tcp_ses_lock); + cifs_put_tlink(tlink); return rc; } -- cgit v1.2.3 From e5012d1f3861d18c7f3814e757c1c3ab3741dbcd Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Mon, 11 Jul 2011 17:17:42 -0400 Subject: NFSv4.1: update nfs4_fattr_bitmap_maxsz Attribute IDs assigned in RFC 5661 now require three bitmaps. Fixes hitting a BUG_ON in xdr_shrink_bufhead when getting ACLs. Signed-off-by: Andy Adamson Cc:stable@kernel.org [2.6.39] Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 6870bc61ceec..e6e8f3b9a1de 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -91,7 +91,7 @@ static int nfs4_stat_to_errno(int); #define encode_getfh_maxsz (op_encode_hdr_maxsz) #define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \ ((3+NFS4_FHSIZE) >> 2)) -#define nfs4_fattr_bitmap_maxsz 3 +#define nfs4_fattr_bitmap_maxsz 4 #define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz) #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2)) #define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) -- cgit v1.2.3 From 1ce533686c7d40bf900dc346a7279c17a9ee8e0e Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Mon, 13 Jun 2011 14:27:40 -0500 Subject: GFS2: force a log flush when invalidating the rindex glock Right now, there is nothing that forces the log to get flushed when a node drops its rindex glock so that another node can grow the filesystem. If the log doesn't get flushed, GFS2 can corrupt the sd_log_le_rg list in the following way. A node puts an rgd on the list in rg_lo_add(), and then the rindex glock is dropped so the other node can grow the filesystem. When the node reacquires the rindex glock, that rgd gets deleted in clear_rgrpdi() before ever being removed from the list by gfs2_log_flush(). This code simply forces a log flush when the rindex glock is invalidated, solving the problem. Signed-off-by: Benjamin Marzinski Signed-off-by: Steven Whitehouse --- fs/gfs2/glops.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 8ef70f464731..712b722030a1 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -221,8 +221,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) } } - if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) + if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) { + gfs2_log_flush(gl->gl_sbd, NULL); gl->gl_sbd->sd_rindex_uptodate = 0; + } if (ip && S_ISREG(ip->i_inode.i_mode)) truncate_inode_pages(ip->i_inode.i_mapping, 0); } -- cgit v1.2.3 From 3942ae5319640ced5844b75f44884e4bcb8a2f16 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Mon, 11 Jul 2011 08:53:30 +0100 Subject: GFS2: Fix race during filesystem mount There is a potential race during filesystem mounting which has recently been reported. It occurs when the userland gfs_controld is able to process requests fast enough that it tries to use the sysfs interface before the lock module is properly initialised. 
This is a pretty unusual case as normally the lock module initialisation is very quick compared with gfs_controld. This patch adds an interruptible completion which is used to ensure that userland will wait for the initialisation of the lock module to complete. There are other potential solutions to this problem, but this is the quickest at this stage and has been tested both with and without mount.gfs2 present in the system. Signed-off-by: Steven Whitehouse Reported-by: David Booher --- fs/gfs2/incore.h | 2 ++ fs/gfs2/ops_fstype.c | 3 +++ fs/gfs2/sys.c | 7 ++++++- 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 0a064e91ac70..81206e70cbf6 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -17,6 +17,7 @@ #include #include #include +#include #define DIO_WAIT 0x00000010 #define DIO_METADATA 0x00000020 @@ -546,6 +547,7 @@ struct gfs2_sbd { struct gfs2_glock *sd_trans_gl; wait_queue_head_t sd_glock_wait; atomic_t sd_glock_disposal; + struct completion sd_locking_init; /* Inode Stuff */ diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 8ac9ae189b53..2a77071fb7b6 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -72,6 +72,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) init_waitqueue_head(&sdp->sd_glock_wait); atomic_set(&sdp->sd_glock_disposal, 0); + init_completion(&sdp->sd_locking_init); spin_lock_init(&sdp->sd_statfs_spin); spin_lock_init(&sdp->sd_rindex_spin); @@ -1017,11 +1018,13 @@ hostdata_error: fsname++; if (lm->lm_mount == NULL) { fs_info(sdp, "Now mounting FS...\n"); + complete(&sdp->sd_locking_init); return 0; } ret = lm->lm_mount(sdp, fsname); if (ret == 0) fs_info(sdp, "Joined cluster. Now mounting FS...\n"); + complete(&sdp->sd_locking_init); return ret; } diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index e20eab37bc80..443cabcfcd23 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -338,6 +338,9 @@ static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len) rv = sscanf(buf, "%u", &first); if (rv != 1 || first > 1) return -EINVAL; + rv = wait_for_completion_killable(&sdp->sd_locking_init); + if (rv) + return rv; spin_lock(&sdp->sd_jindex_spin); rv = -EBUSY; if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0) @@ -414,7 +417,9 @@ static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len) rv = sscanf(buf, "%d", &jid); if (rv != 1) return -EINVAL; - + rv = wait_for_completion_killable(&sdp->sd_locking_init); + if (rv) + return rv; spin_lock(&sdp->sd_jindex_spin); rv = -EINVAL; if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) -- cgit v1.2.3 From 62411ab2fe5f002dff27417630ddf02cc40ca404 Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Sun, 10 Jul 2011 06:55:32 -0500 Subject: cifs: Fix signing failure when server mandates signing for NTLMSSP When using NTLMSSP authentication mechanism, if server mandates signing, keep the flags in type 3 messages of the NTLMSSP exchange same as in type 1 messages (i.e. keep the indicated capabilities same). Some of the servers such as Samba, expect the flags such as Negotiate_Key_Exchange in type 3 message of NTLMSSP exchange as well. Some servers like Windows do not. 
https://bugzilla.samba.org/show_bug.cgi?id=8212 Signed-off-by: Shirish Pargaonkar Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/sess.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 3892ab817a36..d3e619692ee0 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -428,8 +428,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { flags |= NTLMSSP_NEGOTIATE_SIGN; if (!ses->server->session_estab) - flags |= NTLMSSP_NEGOTIATE_KEY_XCH | - NTLMSSP_NEGOTIATE_EXTENDED_SEC; + flags |= NTLMSSP_NEGOTIATE_KEY_XCH; } sec_blob->NegotiateFlags = cpu_to_le32(flags); @@ -465,10 +464,11 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; if (ses->server->sec_mode & - (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { flags |= NTLMSSP_NEGOTIATE_SIGN; - if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED) - flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; + if (!ses->server->session_estab) + flags |= NTLMSSP_NEGOTIATE_KEY_XCH; + } tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); sec_blob->NegotiateFlags = cpu_to_le32(flags); -- cgit v1.2.3 From ea1be1a3c3c4b2bbc32aeb7995e18336c8060e0e Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 12 Jul 2011 18:24:43 +0000 Subject: [CIFS] update limit for snprintf in cifs_construct_tcon In 34c87901e113 "Shrink stack space usage in cifs_construct_tcon" we change the size of the username name buffer from MAX_USERNAME_SIZE (256) to 28. This call to snprintf() needs to be updated as well. Reported by Dan Carpenter. Reviewed-by: Dan Carpenter Signed-off-by: Steve French --- fs/cifs/connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index dbd669cc5bc7..ccc1afa0bf3b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3485,7 +3485,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) goto out; } - snprintf(username, MAX_USERNAME_SIZE, "krb50x%x", fsuid); + snprintf(username, sizeof(username), "krb50x%x", fsuid); vol_info->username = username; vol_info->local_nls = cifs_sb->local_nls; vol_info->linux_uid = fsuid; -- cgit v1.2.3 From c2ec9471b5b1307429aef1cfaa2b3ae453a61d6f Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 12 Jul 2011 19:15:02 +0000 Subject: [CIFS] update cifs to version 1.74 Signed-off-by: Steve French --- fs/cifs/cifsfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 0900e1658c96..036ca83e5f46 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "1.73" +#define CIFS_VERSION "1.74" #endif /* _CIFSFS_H */ -- cgit v1.2.3 From 3881ac04ebf94268ba3d6e486aa524fd41f893a9 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Thu, 7 Jul 2011 14:05:03 -0500 Subject: dlm: improve rsb searches By pre-allocating rsb structs before searching the hash table, they can be inserted immediately. This avoids always having to repeat the search when adding the struct to hash list. 
This also adds space to the rsb struct for a max resource name, so an rsb allocation can be used by any request. The constant size also allows us to finally use a slab for the rsb structs. Signed-off-by: David Teigland --- fs/dlm/config.c | 9 +++- fs/dlm/config.h | 1 + fs/dlm/dlm_internal.h | 6 ++- fs/dlm/lock.c | 119 ++++++++++++++++++++++++++++++++++---------------- fs/dlm/lockspace.c | 10 +++++ fs/dlm/memory.c | 22 ++++++---- fs/dlm/memory.h | 2 +- 7 files changed, 121 insertions(+), 48 deletions(-) (limited to 'fs') diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 4e20f9317156..6cf72fcc0d0c 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -102,6 +102,7 @@ struct dlm_cluster { unsigned int cl_protocol; unsigned int cl_timewarn_cs; unsigned int cl_waitwarn_us; + unsigned int cl_new_rsb_count; }; enum { @@ -116,6 +117,7 @@ enum { CLUSTER_ATTR_PROTOCOL, CLUSTER_ATTR_TIMEWARN_CS, CLUSTER_ATTR_WAITWARN_US, + CLUSTER_ATTR_NEW_RSB_COUNT, }; struct cluster_attribute { @@ -168,6 +170,7 @@ CLUSTER_ATTR(log_debug, 0); CLUSTER_ATTR(protocol, 0); CLUSTER_ATTR(timewarn_cs, 1); CLUSTER_ATTR(waitwarn_us, 0); +CLUSTER_ATTR(new_rsb_count, 0); static struct configfs_attribute *cluster_attrs[] = { [CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr, @@ -181,6 +184,7 @@ static struct configfs_attribute *cluster_attrs[] = { [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr, [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr, [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr, + [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr, NULL, }; @@ -450,6 +454,7 @@ static struct config_group *make_cluster(struct config_group *g, cl->cl_protocol = dlm_config.ci_protocol; cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs; cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us; + cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count; space_list = &sps->ss_group; comm_list = &cms->cs_group; @@ -1041,6 +1046,7 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num) #define DEFAULT_PROTOCOL 0 #define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */ #define DEFAULT_WAITWARN_US 0 +#define DEFAULT_NEW_RSB_COUNT 128 struct dlm_config_info dlm_config = { .ci_tcp_port = DEFAULT_TCP_PORT, @@ -1053,6 +1059,7 @@ struct dlm_config_info dlm_config = { .ci_log_debug = DEFAULT_LOG_DEBUG, .ci_protocol = DEFAULT_PROTOCOL, .ci_timewarn_cs = DEFAULT_TIMEWARN_CS, - .ci_waitwarn_us = DEFAULT_WAITWARN_US + .ci_waitwarn_us = DEFAULT_WAITWARN_US, + .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT }; diff --git a/fs/dlm/config.h b/fs/dlm/config.h index 260574463d29..3099d0dd26c0 100644 --- a/fs/dlm/config.h +++ b/fs/dlm/config.h @@ -28,6 +28,7 @@ struct dlm_config_info { int ci_protocol; int ci_timewarn_cs; int ci_waitwarn_us; + int ci_new_rsb_count; }; extern struct dlm_config_info dlm_config; diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 23a234bddc60..6614f335e25d 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -293,7 +293,7 @@ struct dlm_rsb { int res_recover_locks_count; char *res_lvbptr; - char res_name[1]; + char res_name[DLM_RESNAME_MAXLEN+1]; }; /* find_rsb() flags */ @@ -477,6 +477,10 @@ struct dlm_ls { struct mutex ls_timeout_mutex; struct list_head ls_timeout; + spinlock_t ls_new_rsb_spin; + int ls_new_rsb_count; + struct list_head ls_new_rsb; /* new rsb structs */ + struct list_head ls_nodes; /* current nodes in ls */ struct list_head ls_nodes_gone; /* dead node list, recovery */ int ls_num_nodes; /* number of nodes in ls */ diff --git a/fs/dlm/lock.c 
b/fs/dlm/lock.c index 784cde417ced..9ebeaa6e6903 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -327,19 +327,68 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) * Basic operations on rsb's and lkb's */ -static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len) +static int pre_rsb_struct(struct dlm_ls *ls) +{ + struct dlm_rsb *r1, *r2; + int count = 0; + + spin_lock(&ls->ls_new_rsb_spin); + if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { + spin_unlock(&ls->ls_new_rsb_spin); + return 0; + } + spin_unlock(&ls->ls_new_rsb_spin); + + r1 = dlm_allocate_rsb(ls); + r2 = dlm_allocate_rsb(ls); + + spin_lock(&ls->ls_new_rsb_spin); + if (r1) { + list_add(&r1->res_hashchain, &ls->ls_new_rsb); + ls->ls_new_rsb_count++; + } + if (r2) { + list_add(&r2->res_hashchain, &ls->ls_new_rsb); + ls->ls_new_rsb_count++; + } + count = ls->ls_new_rsb_count; + spin_unlock(&ls->ls_new_rsb_spin); + + if (!count) + return -ENOMEM; + return 0; +} + +/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can + unlock any spinlocks, go back and call pre_rsb_struct again. + Otherwise, take an rsb off the list and return it. */ + +static int get_rsb_struct(struct dlm_ls *ls, char *name, int len, + struct dlm_rsb **r_ret) { struct dlm_rsb *r; + int count; - r = dlm_allocate_rsb(ls, len); - if (!r) - return NULL; + spin_lock(&ls->ls_new_rsb_spin); + if (list_empty(&ls->ls_new_rsb)) { + count = ls->ls_new_rsb_count; + spin_unlock(&ls->ls_new_rsb_spin); + log_debug(ls, "find_rsb retry %d %d %s", + count, dlm_config.ci_new_rsb_count, name); + return -EAGAIN; + } + + r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); + list_del(&r->res_hashchain); + ls->ls_new_rsb_count--; + spin_unlock(&ls->ls_new_rsb_spin); r->res_ls = ls; r->res_length = len; memcpy(r->res_name, name, len); mutex_init(&r->res_mutex); + INIT_LIST_HEAD(&r->res_hashchain); INIT_LIST_HEAD(&r->res_lookup); INIT_LIST_HEAD(&r->res_grantqueue); INIT_LIST_HEAD(&r->res_convertqueue); @@ -347,7 +396,8 @@ static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len) INIT_LIST_HEAD(&r->res_root_list); INIT_LIST_HEAD(&r->res_recover_list); - return r; + *r_ret = r; + return 0; } static int search_rsb_list(struct list_head *head, char *name, int len, @@ -405,16 +455,6 @@ static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b, return error; } -static int search_rsb(struct dlm_ls *ls, char *name, int len, int b, - unsigned int flags, struct dlm_rsb **r_ret) -{ - int error; - spin_lock(&ls->ls_rsbtbl[b].lock); - error = _search_rsb(ls, name, len, b, flags, r_ret); - spin_unlock(&ls->ls_rsbtbl[b].lock); - return error; -} - /* * Find rsb in rsbtbl and potentially create/add one * @@ -432,35 +472,48 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b, static int find_rsb(struct dlm_ls *ls, char *name, int namelen, unsigned int flags, struct dlm_rsb **r_ret) { - struct dlm_rsb *r = NULL, *tmp; + struct dlm_rsb *r = NULL; uint32_t hash, bucket; - int error = -EINVAL; + int error; - if (namelen > DLM_RESNAME_MAXLEN) + if (namelen > DLM_RESNAME_MAXLEN) { + error = -EINVAL; goto out; + } if (dlm_no_directory(ls)) flags |= R_CREATE; - error = 0; hash = jhash(name, namelen, 0); bucket = hash & (ls->ls_rsbtbl_size - 1); - error = search_rsb(ls, name, namelen, bucket, flags, &r); + retry: + if (flags & R_CREATE) { + error = pre_rsb_struct(ls); + if (error < 0) + goto out; + } + + spin_lock(&ls->ls_rsbtbl[bucket].lock); + + error = _search_rsb(ls, name, namelen, 
bucket, flags, &r); if (!error) - goto out; + goto out_unlock; if (error == -EBADR && !(flags & R_CREATE)) - goto out; + goto out_unlock; /* the rsb was found but wasn't a master copy */ if (error == -ENOTBLK) - goto out; + goto out_unlock; - error = -ENOMEM; - r = create_rsb(ls, name, namelen); - if (!r) - goto out; + error = get_rsb_struct(ls, name, namelen, &r); + if (error == -EAGAIN) { + spin_unlock(&ls->ls_rsbtbl[bucket].lock); + goto retry; + } + if (error) + goto out_unlock; r->res_hash = hash; r->res_bucket = bucket; @@ -474,18 +527,10 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen, nodeid = 0; r->res_nodeid = nodeid; } - - spin_lock(&ls->ls_rsbtbl[bucket].lock); - error = _search_rsb(ls, name, namelen, bucket, 0, &tmp); - if (!error) { - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - dlm_free_rsb(r); - r = tmp; - goto out; - } list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list); - spin_unlock(&ls->ls_rsbtbl[bucket].lock); error = 0; + out_unlock: + spin_unlock(&ls->ls_rsbtbl[bucket].lock); out: *r_ret = r; return error; diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 871fe6deb5fa..98a97762c893 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -493,6 +493,9 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, INIT_LIST_HEAD(&ls->ls_timeout); mutex_init(&ls->ls_timeout_mutex); + INIT_LIST_HEAD(&ls->ls_new_rsb); + spin_lock_init(&ls->ls_new_rsb_spin); + INIT_LIST_HEAD(&ls->ls_nodes); INIT_LIST_HEAD(&ls->ls_nodes_gone); ls->ls_num_nodes = 0; @@ -764,6 +767,13 @@ static int release_lockspace(struct dlm_ls *ls, int force) vfree(ls->ls_rsbtbl); + while (!list_empty(&ls->ls_new_rsb)) { + rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, + res_hashchain); + list_del(&rsb->res_hashchain); + dlm_free_rsb(rsb); + } + /* * Free structures on any other lists */ diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index 8e0d00db004f..da64df7576e1 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c @@ -16,6 +16,7 @@ #include "memory.h" static struct kmem_cache *lkb_cache; +static struct kmem_cache *rsb_cache; int __init dlm_memory_init(void) @@ -26,6 +27,14 @@ int __init dlm_memory_init(void) __alignof__(struct dlm_lkb), 0, NULL); if (!lkb_cache) ret = -ENOMEM; + + rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb), + __alignof__(struct dlm_rsb), 0, NULL); + if (!rsb_cache) { + kmem_cache_destroy(lkb_cache); + ret = -ENOMEM; + } + return ret; } @@ -33,6 +42,8 @@ void dlm_memory_exit(void) { if (lkb_cache) kmem_cache_destroy(lkb_cache); + if (rsb_cache) + kmem_cache_destroy(rsb_cache); } char *dlm_allocate_lvb(struct dlm_ls *ls) @@ -48,16 +59,11 @@ void dlm_free_lvb(char *p) kfree(p); } -/* FIXME: have some minimal space built-in to rsb for the name and - kmalloc a separate name if needed, like dentries are done */ - -struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen) +struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls) { struct dlm_rsb *r; - DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,); - - r = kzalloc(sizeof(*r) + namelen, GFP_NOFS); + r = kmem_cache_zalloc(rsb_cache, GFP_NOFS); return r; } @@ -65,7 +71,7 @@ void dlm_free_rsb(struct dlm_rsb *r) { if (r->res_lvbptr) dlm_free_lvb(r->res_lvbptr); - kfree(r); + kmem_cache_free(rsb_cache, r); } struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls) diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h index 485fb29143bd..177c11cbb0a6 100644 --- a/fs/dlm/memory.h +++ b/fs/dlm/memory.h @@ -16,7 +16,7 @@ int dlm_memory_init(void); void dlm_memory_exit(void); -struct dlm_rsb 
*dlm_allocate_rsb(struct dlm_ls *ls, int namelen); +struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls); void dlm_free_rsb(struct dlm_rsb *r); struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls); void dlm_free_lkb(struct dlm_lkb *l); -- cgit v1.2.3 From 94c0d4ecbe7f9fe56e052b26b2ab484e246c07b4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 12 Jul 2011 21:40:23 -0400 Subject: Fix ->d_lock locking order in unlazy_walk() Make sure that child is still a child of parent before nested locking of child->d_lock in unlazy_walk(); otherwise we are risking a violation of locking order and deadlocks. Signed-off-by: Al Viro --- fs/namei.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 0223c41fb114..5c867dd1c0b3 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -433,6 +433,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) goto err_parent; BUG_ON(nd->inode != parent->d_inode); } else { + if (dentry->d_parent != parent) + goto err_parent; spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); if (!__d_rcu_to_refcount(dentry, nd->seq)) goto err_child; -- cgit v1.2.3 From 2bcf6e970f5a88fa05dced5eeb0326e13d93c4a1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:48 +0200 Subject: xfs: start periodic workers later Start the periodic sync workers only after we have finished xfs_mountfs and thus fully set up the filesystem structures. Without this we can call into xfs_qm_sync before the quotainfo strucute is set up if the mount takes unusually long, and probably hit other incomplete states as well. Also clean up the xfs_fs_fill_super error path by using consistent label names, and removing an impossible to reach case. Signed-off-by: Christoph Hellwig Reported-by: Arkadiusz Miskiewicz Reviewed-by: Alex Elder --- fs/xfs/linux-2.6/xfs_super.c | 35 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index a1a881e68a9a..3ebb4588e1be 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1412,37 +1412,35 @@ xfs_fs_fill_super( sb->s_time_gran = 1; set_posix_acl_flag(sb); - error = xfs_syncd_init(mp); - if (error) - goto out_filestream_unmount; - xfs_inode_shrinker_register(mp); error = xfs_mountfs(mp); if (error) - goto out_syncd_stop; + goto out_filestream_unmount; + + error = xfs_syncd_init(mp); + if (error) + goto out_unmount; root = igrab(VFS_I(mp->m_rootip)); if (!root) { error = ENOENT; - goto fail_unmount; + goto out_syncd_stop; } if (is_bad_inode(root)) { error = EINVAL; - goto fail_vnrele; + goto out_syncd_stop; } sb->s_root = d_alloc_root(root); if (!sb->s_root) { error = ENOMEM; - goto fail_vnrele; + goto out_iput; } return 0; - out_syncd_stop: - xfs_inode_shrinker_unregister(mp); - xfs_syncd_stop(mp); out_filestream_unmount: + xfs_inode_shrinker_unregister(mp); xfs_filestream_unmount(mp); out_free_sb: xfs_freesb(mp); @@ -1456,17 +1454,12 @@ xfs_fs_fill_super( out: return -error; - fail_vnrele: - if (sb->s_root) { - dput(sb->s_root); - sb->s_root = NULL; - } else { - iput(root); - } - - fail_unmount: - xfs_inode_shrinker_unregister(mp); + out_iput: + iput(root); + out_syncd_stop: xfs_syncd_stop(mp); + out_unmount: + xfs_inode_shrinker_unregister(mp); /* * Blow away any referenced inode in the filestreams cache. 
-- cgit v1.2.3 From 5792664070c62479b088e4909000582de3686396 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:48 +0200 Subject: xfs: reshuffle dir2 headers Replace the current mess of dir2 headers with just three that have a clear purpose: - xfs_dir2_format.h for all format definitions, including the inline helpers to access our variable size structures - xfs_dir2_priv.h for all prototypes that are internal to the dir2 code and not needed by anything outside of the directory code. For this purpose xfs_da_btree.c, and phase6.c in xfs_repair are considered part of the directory code. - xfs_dir2.h for the public interface to the directory code In addition to the reshuffle I have also update the comments to not only match the new file structure, but also to describe the directory format better. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_bmap.c | 4 - fs/xfs/xfs_da_btree.c | 9 +- fs/xfs/xfs_dir2.c | 9 +- fs/xfs/xfs_dir2.h | 54 +---- fs/xfs/xfs_dir2_block.c | 7 +- fs/xfs/xfs_dir2_block.h | 93 -------- fs/xfs/xfs_dir2_data.c | 7 +- fs/xfs/xfs_dir2_data.h | 182 --------------- fs/xfs/xfs_dir2_format.h | 595 +++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_dir2_leaf.c | 8 +- fs/xfs/xfs_dir2_leaf.h | 281 ---------------------- fs/xfs/xfs_dir2_node.c | 8 +- fs/xfs/xfs_dir2_node.h | 100 -------- fs/xfs/xfs_dir2_priv.h | 135 +++++++++++ fs/xfs/xfs_dir2_sf.c | 8 +- fs/xfs/xfs_dir2_sf.h | 151 ------------ 16 files changed, 751 insertions(+), 900 deletions(-) delete mode 100644 fs/xfs/xfs_dir2_block.h delete mode 100644 fs/xfs/xfs_dir2_data.h create mode 100644 fs/xfs/xfs_dir2_format.h delete mode 100644 fs/xfs/xfs_dir2_leaf.h delete mode 100644 fs/xfs/xfs_dir2_node.h create mode 100644 fs/xfs/xfs_dir2_priv.h delete mode 100644 fs/xfs/xfs_dir2_sf.h (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 2ce6aa644e7e..c51a3f903633 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -29,15 +29,11 @@ #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" -#include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_mount.h" #include "xfs_itable.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" #include "xfs_inode_item.h" #include "xfs_extfree_item.h" #include "xfs_alloc.h" diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 18425717cbe6..a58a6410c58d 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -24,11 +24,12 @@ #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" -#include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" -#include "xfs_dir2_sf.h" +#include "xfs_dir2.h" +#include "xfs_dir2_format.h" +#include "xfs_dir2_priv.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" @@ -36,10 +37,6 @@ #include "xfs_bmap.h" #include "xfs_attr.h" #include "xfs_attr_leaf.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" -#include "xfs_dir2_node.h" #include "xfs_error.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index 0dee57812f45..6effbeb24e5a 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -24,20 +24,17 @@ #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" -#include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" 
-#include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" -#include "xfs_dir2_node.h" +#include "xfs_dir2.h" +#include "xfs_dir2_format.h" +#include "xfs_dir2_priv.h" #include "xfs_error.h" #include "xfs_vnodeops.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h index 74a3b1057685..e937d9991c18 100644 --- a/fs/xfs/xfs_dir2.h +++ b/fs/xfs/xfs_dir2.h @@ -16,49 +16,14 @@ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __XFS_DIR2_H__ -#define __XFS_DIR2_H__ +#define __XFS_DIR2_H__ -struct uio; -struct xfs_dabuf; -struct xfs_da_args; -struct xfs_dir2_put_args; struct xfs_bmap_free; +struct xfs_da_args; struct xfs_inode; struct xfs_mount; struct xfs_trans; -/* - * Directory version 2. - * There are 4 possible formats: - * shortform - * single block - data with embedded leaf at the end - * multiple data blocks, single leaf+freeindex block - * data blocks, node&leaf blocks (btree), freeindex blocks - * - * The shortform format is in xfs_dir2_sf.h. - * The single block format is in xfs_dir2_block.h. - * The data block format is in xfs_dir2_data.h. - * The leaf and freeindex block formats are in xfs_dir2_leaf.h. - * Node blocks are the same as the other version, in xfs_da_btree.h. - */ - -/* - * Byte offset in data block and shortform entry. - */ -typedef __uint16_t xfs_dir2_data_off_t; -#define NULLDATAOFF 0xffffU -typedef uint xfs_dir2_data_aoff_t; /* argument form */ - -/* - * Directory block number (logical dirblk in file) - */ -typedef __uint32_t xfs_dir2_db_t; - -/* - * Byte offset in a directory. - */ -typedef xfs_off_t xfs_dir2_off_t; - extern struct xfs_name xfs_name_dotdot; /* @@ -86,21 +51,10 @@ extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_bmap_free *flist, xfs_extlen_t tot); extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_name *name, uint resblks); -extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino); /* - * Utility routines for v2 directories. + * Direct call from the bmap code, bypassing the generic directory layer. 
*/ -extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space, - xfs_dir2_db_t *dbp); -extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, - int *vp); -extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, - int *vp); -extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, - struct xfs_dabuf *bp); - -extern int xfs_dir_cilookup_result(struct xfs_da_args *args, - const unsigned char *name, int len); +extern int xfs_dir2_sf_to_block(struct xfs_da_args *args); #endif /* __XFS_DIR2_H__ */ diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c index 70fb0cb6bd69..9245e029b8ea 100644 --- a/fs/xfs/xfs_dir2_block.c +++ b/fs/xfs/xfs_dir2_block.c @@ -23,17 +23,14 @@ #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" -#include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" -#include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" +#include "xfs_dir2_format.h" +#include "xfs_dir2_priv.h" #include "xfs_error.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h deleted file mode 100644 index de59677e38ba..000000000000 --- a/fs/xfs/xfs_dir2_block.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DIR2_BLOCK_H__ -#define __XFS_DIR2_BLOCK_H__ - -/* - * Directory version 2, single block format structures. - * - * The single block format looks like the following drawing on disk: - * - * +-------------------------------------------------+ - * | xfs_dir2_data_hdr_t | - * +-------------------------------------------------+ - * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | - * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | - * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | - * | ... | - * +-------------------------------------------------+ - * | unused space | - * +-------------------------------------------------+ - * | ... | - * | xfs_dir2_leaf_entry_t | - * | xfs_dir2_leaf_entry_t | - * +-------------------------------------------------+ - * | xfs_dir2_block_tail_t | - * +-------------------------------------------------+ - * - * As all the entries are variable size structures the accessors in this - * file and xfs_dir2_data.h should be used to iterate over them. 
- */ -struct uio; -struct xfs_dabuf; -struct xfs_da_args; -struct xfs_dir2_data_hdr; -struct xfs_dir2_leaf_entry; -struct xfs_inode; -struct xfs_mount; -struct xfs_trans; - -#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */ - -typedef struct xfs_dir2_block_tail { - __be32 count; /* count of leaf entries */ - __be32 stale; /* count of stale lf entries */ -} xfs_dir2_block_tail_t; - -/* - * Pointer to the leaf header embedded in a data block (1-block format) - */ -static inline xfs_dir2_block_tail_t * -xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_data_hdr_t *hdr) -{ - return ((xfs_dir2_block_tail_t *)((char *)hdr + mp->m_dirblksize)) - 1; -} - -/* - * Pointer to the leaf entries embedded in a data block (1-block format) - */ -static inline struct xfs_dir2_leaf_entry * -xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp) -{ - return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count); -} - -/* - * Function declarations. - */ -extern int xfs_dir2_block_addname(struct xfs_da_args *args); -extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent, - xfs_off_t *offset, filldir_t filldir); -extern int xfs_dir2_block_lookup(struct xfs_da_args *args); -extern int xfs_dir2_block_removename(struct xfs_da_args *args); -extern int xfs_dir2_block_replace(struct xfs_da_args *args); -extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args, - struct xfs_dabuf *lbp, struct xfs_dabuf *dbp); -extern int xfs_dir2_sf_to_block(struct xfs_da_args *args); - -#endif /* __XFS_DIR2_BLOCK_H__ */ diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c index 32dca4c531a6..5bbe2a8a023f 100644 --- a/fs/xfs/xfs_dir2_data.c +++ b/fs/xfs/xfs_dir2_data.c @@ -23,16 +23,13 @@ #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" -#include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" -#include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" +#include "xfs_dir2_format.h" +#include "xfs_dir2_priv.h" #include "xfs_error.h" STATIC xfs_dir2_data_free_t * diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h deleted file mode 100644 index c50512584ebf..000000000000 --- a/fs/xfs/xfs_dir2_data.h +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright (c) 2000,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DIR2_DATA_H__ -#define __XFS_DIR2_DATA_H__ - -/* - * Directory format 2, data block structures. 
- * - * A pure data block looks like the following drawing on disk: - * - * +-------------------------------------------------+ - * | xfs_dir2_data_hdr_t | - * +-------------------------------------------------+ - * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | - * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | - * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | - * | ... | - * +-------------------------------------------------+ - * | unused space | - * +-------------------------------------------------+ - * - * As all the entries are variable size structures the accessors in this - * file should be used to iterate over them. - */ - -struct xfs_dabuf; -struct xfs_da_args; -struct xfs_inode; -struct xfs_trans; - -/* - * Constants. - */ -#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: for multiblock dirs */ -#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */ -#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG) -#define XFS_DIR2_DATA_FREE_TAG 0xffff -#define XFS_DIR2_DATA_FD_COUNT 3 - -/* - * Directory address space divided into sections, - * spaces separated by 32GB. - */ -#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG)) -#define XFS_DIR2_DATA_SPACE 0 -#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE) -#define XFS_DIR2_DATA_FIRSTDB(mp) \ - xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET) - -/* - * Offsets of . and .. in data space (always block 0) - */ -#define XFS_DIR2_DATA_DOT_OFFSET \ - ((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t)) -#define XFS_DIR2_DATA_DOTDOT_OFFSET \ - (XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1)) -#define XFS_DIR2_DATA_FIRST_OFFSET \ - (XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2)) - -/* - * Structures. - */ - -/* - * Describe a free area in the data block. - * The freespace will be formatted as a xfs_dir2_data_unused_t. - */ -typedef struct xfs_dir2_data_free { - __be16 offset; /* start of freespace */ - __be16 length; /* length of freespace */ -} xfs_dir2_data_free_t; - -/* - * Header for the data blocks. - * Always at the beginning of a directory-sized block. - * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. - */ -typedef struct xfs_dir2_data_hdr { - __be32 magic; /* XFS_DIR2_DATA_MAGIC */ - /* or XFS_DIR2_BLOCK_MAGIC */ - xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; -} xfs_dir2_data_hdr_t; - -/* - * Active entry in a data block. Aligned to 8 bytes. - * - * After the variable length name field there is a 2 byte tag field, which - * can be accessed using xfs_dir2_data_entry_tag_p. - */ -typedef struct xfs_dir2_data_entry { - __be64 inumber; /* inode number */ - __u8 namelen; /* name length */ - __u8 name[]; /* name bytes, no null */ - /* __be16 tag; */ /* starting offset of us */ -} xfs_dir2_data_entry_t; - -/* - * Unused entry in a data block. Aligned to 8 bytes. - * Tag appears as the last 2 bytes. - */ -typedef struct xfs_dir2_data_unused { - __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */ - __be16 length; /* total free length */ - /* variable offset */ - __be16 tag; /* starting offset of us */ -} xfs_dir2_data_unused_t; - -/* - * Size of a data entry. - */ -static inline int xfs_dir2_data_entsize(int n) -{ - return (int)roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \ - (uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN); -} - -/* - * Pointer to an entry's tag word. 
- */ -static inline __be16 * -xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) -{ - return (__be16 *)((char *)dep + - xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16)); -} - -/* - * Pointer to a freespace's tag word. - */ -static inline __be16 * -xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup) -{ - return (__be16 *)((char *)dup + - be16_to_cpu(dup->length) - sizeof(__be16)); -} - -/* - * Function declarations. - */ -#ifdef DEBUG -extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp); -#else -#define xfs_dir2_data_check(dp,bp) -#endif -extern xfs_dir2_data_free_t *xfs_dir2_data_freeinsert(xfs_dir2_data_hdr_t *hdr, - xfs_dir2_data_unused_t *dup, int *loghead); -extern void xfs_dir2_data_freescan(struct xfs_mount *mp, - xfs_dir2_data_hdr_t *hdr, int *loghead); -extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno, - struct xfs_dabuf **bpp); -extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp, - xfs_dir2_data_entry_t *dep); -extern void xfs_dir2_data_log_header(struct xfs_trans *tp, - struct xfs_dabuf *bp); -extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp, - xfs_dir2_data_unused_t *dup); -extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp, - xfs_dir2_data_aoff_t offset, - xfs_dir2_data_aoff_t len, int *needlogp, - int *needscanp); -extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp, - xfs_dir2_data_unused_t *dup, - xfs_dir2_data_aoff_t offset, - xfs_dir2_data_aoff_t len, int *needlogp, - int *needscanp); - -#endif /* __XFS_DIR2_DATA_H__ */ diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h new file mode 100644 index 000000000000..c3c4839eebd6 --- /dev/null +++ b/fs/xfs/xfs_dir2_format.h @@ -0,0 +1,595 @@ +/* + * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_DIR2_FORMAT_H__ +#define __XFS_DIR2_FORMAT_H__ + +/* + * Directory version 2. + * + * There are 4 possible formats: + * - shortform - embedded into the inode + * - single block - data with embedded leaf at the end + * - multiple data blocks, single leaf+freeindex block + * - data blocks, node and leaf blocks (btree), freeindex blocks + * + * Note: many node blocks structures and constants are shared with the attr + * code and defined in xfs_da_btree.h. + */ + +#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: single block dirs */ +#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: multiblock dirs */ +#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F: free index blocks */ + +/* + * Byte offset in data block and shortform entry. + */ +typedef __uint16_t xfs_dir2_data_off_t; +#define NULLDATAOFF 0xffffU +typedef uint xfs_dir2_data_aoff_t; /* argument form */ + +/* + * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t. 
+ * Only need 16 bits, this is the byte offset into the single block form. + */ +typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t; + +/* + * Offset in data space of a data entry. + */ +typedef __uint32_t xfs_dir2_dataptr_t; +#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0xffffffff) +#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0) + +/* + * Byte offset in a directory. + */ +typedef xfs_off_t xfs_dir2_off_t; + +/* + * Directory block number (logical dirblk in file) + */ +typedef __uint32_t xfs_dir2_db_t; + +/* + * Inode number stored as 8 8-bit values. + */ +typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t; + +/* + * Inode number stored as 4 8-bit values. + * Works a lot of the time, when all the inode numbers in a directory + * fit in 32 bits. + */ +typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t; + +typedef union { + xfs_dir2_ino8_t i8; + xfs_dir2_ino4_t i4; +} xfs_dir2_inou_t; +#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL) + +/* + * Directory layout when stored internal to an inode. + * + * Small directories are packed as tightly as possible so as to fit into the + * literal area of the inode. These "shortform" directories consist of a + * single xfs_dir2_sf_hdr header followed by zero or more xfs_dir2_sf_entry + * structures. Due the different inode number storage size and the variable + * length name field in the xfs_dir2_sf_entry all these structure are + * variable length, and the accessors in this file should be used to iterate + * over them. + */ +typedef struct xfs_dir2_sf_hdr { + __uint8_t count; /* count of entries */ + __uint8_t i8count; /* count of 8-byte inode #s */ + xfs_dir2_inou_t parent; /* parent dir inode number */ +} __arch_pack xfs_dir2_sf_hdr_t; + +typedef struct xfs_dir2_sf_entry { + __u8 namelen; /* actual name length */ + xfs_dir2_sf_off_t offset; /* saved offset */ + __u8 name[]; /* name, variable size */ + /* + * A xfs_dir2_ino8_t or xfs_dir2_ino4_t follows here, at a + * variable offset after the name. + */ +} __arch_pack xfs_dir2_sf_entry_t; + +static inline int xfs_dir2_sf_hdr_size(int i8count) +{ + return sizeof(struct xfs_dir2_sf_hdr) - + (i8count == 0) * + (sizeof(xfs_dir2_ino8_t) - sizeof(xfs_dir2_ino4_t)); +} + +static inline xfs_dir2_data_aoff_t +xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep) +{ + return get_unaligned_be16(&sfep->offset.i); +} + +static inline void +xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off) +{ + put_unaligned_be16(off, &sfep->offset.i); +} + +static inline int +xfs_dir2_sf_entsize(struct xfs_dir2_sf_hdr *hdr, int len) +{ + return sizeof(struct xfs_dir2_sf_entry) + /* namelen + offset */ + len + /* name */ + (hdr->i8count ? /* ino */ + sizeof(xfs_dir2_ino8_t) : + sizeof(xfs_dir2_ino4_t)); +} + +static inline struct xfs_dir2_sf_entry * +xfs_dir2_sf_firstentry(struct xfs_dir2_sf_hdr *hdr) +{ + return (struct xfs_dir2_sf_entry *) + ((char *)hdr + xfs_dir2_sf_hdr_size(hdr->i8count)); +} + +static inline struct xfs_dir2_sf_entry * +xfs_dir2_sf_nextentry(struct xfs_dir2_sf_hdr *hdr, + struct xfs_dir2_sf_entry *sfep) +{ + return (struct xfs_dir2_sf_entry *) + ((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen)); +} + + +/* + * Data block structures. 
+ * + * A pure data block looks like the following drawing on disk: + * + * +-------------------------------------------------+ + * | xfs_dir2_data_hdr_t | + * +-------------------------------------------------+ + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | ... | + * +-------------------------------------------------+ + * | unused space | + * +-------------------------------------------------+ + * + * As all the entries are variable size structures the accessors below should + * be used to iterate over them. + * + * In addition to the pure data blocks for the data and node formats, + * most structures are also used for the combined data/freespace "block" + * format below. + */ + +#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */ +#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG) +#define XFS_DIR2_DATA_FREE_TAG 0xffff +#define XFS_DIR2_DATA_FD_COUNT 3 + +/* + * Directory address space divided into sections, + * spaces separated by 32GB. + */ +#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG)) +#define XFS_DIR2_DATA_SPACE 0 +#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_DATA_FIRSTDB(mp) \ + xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET) + +/* + * Offsets of . and .. in data space (always block 0) + */ +#define XFS_DIR2_DATA_DOT_OFFSET \ + ((xfs_dir2_data_aoff_t)sizeof(struct xfs_dir2_data_hdr)) +#define XFS_DIR2_DATA_DOTDOT_OFFSET \ + (XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1)) +#define XFS_DIR2_DATA_FIRST_OFFSET \ + (XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2)) + +/* + * Describe a free area in the data block. + * + * The freespace will be formatted as a xfs_dir2_data_unused_t. + */ +typedef struct xfs_dir2_data_free { + __be16 offset; /* start of freespace */ + __be16 length; /* length of freespace */ +} xfs_dir2_data_free_t; + +/* + * Header for the data blocks. + * + * The code knows that XFS_DIR2_DATA_FD_COUNT is 3. + */ +typedef struct xfs_dir2_data_hdr { + __be32 magic; /* XFS_DIR2_DATA_MAGIC or */ + /* XFS_DIR2_BLOCK_MAGIC */ + xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT]; +} xfs_dir2_data_hdr_t; + +/* + * Active entry in a data block. + * + * Aligned to 8 bytes. After the variable length name field there is a + * 2 byte tag field, which can be accessed using xfs_dir2_data_entry_tag_p. + */ +typedef struct xfs_dir2_data_entry { + __be64 inumber; /* inode number */ + __u8 namelen; /* name length */ + __u8 name[]; /* name bytes, no null */ + /* __be16 tag; */ /* starting offset of us */ +} xfs_dir2_data_entry_t; + +/* + * Unused entry in a data block. + * + * Aligned to 8 bytes. Tag appears as the last 2 bytes and must be accessed + * using xfs_dir2_data_unused_tag_p. + */ +typedef struct xfs_dir2_data_unused { + __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */ + __be16 length; /* total free length */ + /* variable offset */ + __be16 tag; /* starting offset of us */ +} xfs_dir2_data_unused_t; + +/* + * Size of a data entry. + */ +static inline int xfs_dir2_data_entsize(int n) +{ + return (int)roundup(offsetof(struct xfs_dir2_data_entry, name[0]) + n + + (uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN); +} + +/* + * Pointer to an entry's tag word. 
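Because every data entry is rounded up to 8 bytes and carries a 2-byte tag at its end, the fixed offsets of "." and ".." fall straight out of this arithmetic. A standalone sketch of that calculation, offered only as a model (the 9-byte fixed part is the 8-byte inode number plus the 1-byte name length from the structure above):

/*
 * Standalone sketch of xfs_dir2_data_entsize()-style rounding.
 * raw = inumber (8) + namelen (1) + name + tag (2), rounded up to 8.
 */
#include <stdio.h>

#define DATA_ALIGN 8

static int data_entsize(int namelen)
{
	int raw = 8 + 1 + namelen + 2;

	return (raw + DATA_ALIGN - 1) & ~(DATA_ALIGN - 1);
}

int main(void)
{
	int hdr = 4 + 3 * 4;	/* magic + three bestfree entries */

	printf(".  entry at %d, size %d\n", hdr, data_entsize(1));
	printf(".. entry at %d, size %d\n", hdr + data_entsize(1),
	       data_entsize(2));
	printf("first user entry at %d\n",
	       hdr + data_entsize(1) + data_entsize(2));
	return 0;
}

The printed offsets 16, 32 and 48 reproduce the XFS_DIR2_DATA_DOT_OFFSET, DOTDOT_OFFSET and FIRST_OFFSET values defined above.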
+ */ +static inline __be16 * +xfs_dir2_data_entry_tag_p(struct xfs_dir2_data_entry *dep) +{ + return (__be16 *)((char *)dep + + xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16)); +} + +/* + * Pointer to a freespace's tag word. + */ +static inline __be16 * +xfs_dir2_data_unused_tag_p(struct xfs_dir2_data_unused *dup) +{ + return (__be16 *)((char *)dup + + be16_to_cpu(dup->length) - sizeof(__be16)); +} + +/* + * Leaf block structures. + * + * A pure leaf block looks like the following drawing on disk: + * + * +---------------------------+ + * | xfs_dir2_leaf_hdr_t | + * +---------------------------+ + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * | ... | + * +---------------------------+ + * | xfs_dir2_data_off_t | + * | xfs_dir2_data_off_t | + * | xfs_dir2_data_off_t | + * | ... | + * +---------------------------+ + * | xfs_dir2_leaf_tail_t | + * +---------------------------+ + * + * The xfs_dir2_data_off_t members (bests) and tail are at the end of the block + * for single-leaf (magic = XFS_DIR2_LEAF1_MAGIC) blocks only, but not present + * for directories with separate leaf nodes and free space blocks + * (magic = XFS_DIR2_LEAFN_MAGIC). + * + * As all the entries are variable size structures the accessors below should + * be used to iterate over them. + */ + +/* + * Offset of the leaf/node space. First block in this space + * is the btree root. + */ +#define XFS_DIR2_LEAF_SPACE 1 +#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_LEAF_FIRSTDB(mp) \ + xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET) + +/* + * Leaf block header. + */ +typedef struct xfs_dir2_leaf_hdr { + xfs_da_blkinfo_t info; /* header for da routines */ + __be16 count; /* count of entries */ + __be16 stale; /* count of stale entries */ +} xfs_dir2_leaf_hdr_t; + +/* + * Leaf block entry. + */ +typedef struct xfs_dir2_leaf_entry { + __be32 hashval; /* hash value of name */ + __be32 address; /* address of data entry */ +} xfs_dir2_leaf_entry_t; + +/* + * Leaf block tail. + */ +typedef struct xfs_dir2_leaf_tail { + __be32 bestcount; +} xfs_dir2_leaf_tail_t; + +/* + * Leaf block. + */ +typedef struct xfs_dir2_leaf { + xfs_dir2_leaf_hdr_t hdr; /* leaf header */ + xfs_dir2_leaf_entry_t ents[]; /* entries */ +} xfs_dir2_leaf_t; + +/* + * DB blocks here are logical directory block numbers, not filesystem blocks. + */ + +static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp) +{ + return (mp->m_dirblksize - (uint)sizeof(struct xfs_dir2_leaf_hdr)) / + (uint)sizeof(struct xfs_dir2_leaf_entry); +} + +/* + * Get address of the bestcount field in the single-leaf block. + */ +static inline struct xfs_dir2_leaf_tail * +xfs_dir2_leaf_tail_p(struct xfs_mount *mp, struct xfs_dir2_leaf *lp) +{ + return (struct xfs_dir2_leaf_tail *) + ((char *)lp + mp->m_dirblksize - + sizeof(struct xfs_dir2_leaf_tail)); +} + +/* + * Get address of the bests array in the single-leaf block. + */ +static inline __be16 * +xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp) +{ + return (__be16 *)ltp - be32_to_cpu(ltp->bestcount); +} + +/* + * Convert dataptr to byte in file space + */ +static inline xfs_dir2_off_t +xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) +{ + return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG; +} + +/* + * Convert byte in file space to dataptr. It had better be aligned. 
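The leaf tail and bests accessors above are pure pointer arithmetic from the end of the directory block: the tail occupies the last 4 bytes and the 16-bit bests array grows backwards from it. A standalone model, assuming a 4096-byte directory block and ignoring the on-disk big-endian byte order:

/*
 * Standalone sketch of the single-leaf tail/bests layout; the block size
 * is an example, and the kernel stores these fields big-endian.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blockbuf[1024];	/* 4096-byte directory block */
	unsigned char *block = (unsigned char *)blockbuf;
	uint32_t bestcount = 3;		/* pretend three data blocks */

	/* tail is the last 4 bytes of the block */
	uint32_t *tail = (uint32_t *)(block + sizeof(blockbuf)) - 1;
	*tail = bestcount;

	/* bests[] is a 16-bit array growing backwards from the tail */
	uint16_t *bests = (uint16_t *)tail - bestcount;

	printf("tail  at offset %td\n", (unsigned char *)tail - block);
	printf("bests at offset %td\n", (unsigned char *)bests - block);
	return 0;
}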
+ */ +static inline xfs_dir2_dataptr_t +xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by) +{ + return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG); +} + +/* + * Convert byte in space to (DB) block + */ +static inline xfs_dir2_db_t +xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by) +{ + return (xfs_dir2_db_t) + (by >> (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)); +} + +/* + * Convert dataptr to a block number + */ +static inline xfs_dir2_db_t +xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) +{ + return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp)); +} + +/* + * Convert byte in space to offset in a block + */ +static inline xfs_dir2_data_aoff_t +xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by) +{ + return (xfs_dir2_data_aoff_t)(by & + ((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) - 1)); +} + +/* + * Convert dataptr to a byte offset in a block + */ +static inline xfs_dir2_data_aoff_t +xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) +{ + return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp)); +} + +/* + * Convert block and offset to byte in space + */ +static inline xfs_dir2_off_t +xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db, + xfs_dir2_data_aoff_t o) +{ + return ((xfs_dir2_off_t)db << + (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) + o; +} + +/* + * Convert block (DB) to block (dablk) + */ +static inline xfs_dablk_t +xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db) +{ + return (xfs_dablk_t)(db << mp->m_sb.sb_dirblklog); +} + +/* + * Convert byte in space to (DA) block + */ +static inline xfs_dablk_t +xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by) +{ + return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by)); +} + +/* + * Convert block and offset to dataptr + */ +static inline xfs_dir2_dataptr_t +xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db, + xfs_dir2_data_aoff_t o) +{ + return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o)); +} + +/* + * Convert block (dablk) to block (DB) + */ +static inline xfs_dir2_db_t +xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da) +{ + return (xfs_dir2_db_t)(da >> mp->m_sb.sb_dirblklog); +} + +/* + * Convert block (dablk) to byte offset in space + */ +static inline xfs_dir2_off_t +xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da) +{ + return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0); +} + +/* + * Free space block defintions for the node format. + */ + +/* + * Offset of the freespace index. + */ +#define XFS_DIR2_FREE_SPACE 2 +#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE) +#define XFS_DIR2_FREE_FIRSTDB(mp) \ + xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET) + +typedef struct xfs_dir2_free_hdr { + __be32 magic; /* XFS_DIR2_FREE_MAGIC */ + __be32 firstdb; /* db of first entry */ + __be32 nvalid; /* count of valid entries */ + __be32 nused; /* count of used entries */ +} xfs_dir2_free_hdr_t; + +typedef struct xfs_dir2_free { + xfs_dir2_free_hdr_t hdr; /* block header */ + __be16 bests[1]; /* best free counts */ + /* unused entries are -1 */ +} xfs_dir2_free_t; + +#define XFS_DIR2_MAX_FREE_BESTS(mp) \ + (((mp)->m_dirblksize - (uint)sizeof(struct xfs_dir2_free_hdr)) / \ + (uint)sizeof(xfs_dir2_data_off_t)) + +/* + * Convert data space db to the corresponding free db. 
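All of these conversions are shifts and masks built from the 8-byte data alignment and the block size logs. A standalone sketch with an example geometry of 4096-byte filesystem blocks and single-fsb directory blocks; the real helpers read both logs from the mount structure:

/*
 * Standalone sketch of the dataptr/db/offset conversions with an example
 * geometry (blocklog 12, dirblklog 0).  Values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define DATA_ALIGN_LOG	3
#define BLOCKLOG	12
#define DIRBLKLOG	0

int main(void)
{
	uint64_t byte = ((uint64_t)5 << (BLOCKLOG + DIRBLKLOG)) | 0x128;

	uint32_t dataptr = (uint32_t)(byte >> DATA_ALIGN_LOG);
	uint32_t db      = (uint32_t)(byte >> (BLOCKLOG + DIRBLKLOG));
	uint32_t off     = (uint32_t)(byte &
				((1 << (BLOCKLOG + DIRBLKLOG)) - 1));

	printf("byte %#llx -> dataptr %#x, db %u, offset %#x\n",
	       (unsigned long long)byte, (unsigned)dataptr,
	       (unsigned)db, (unsigned)off);

	/* db and offset reassemble the byte offset, as db_off_to_byte does */
	uint64_t back = ((uint64_t)db << (BLOCKLOG + DIRBLKLOG)) + off;
	printf("round trip %s\n", back == byte ? "ok" : "BROKEN");
	return 0;
}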
+ */ +static inline xfs_dir2_db_t +xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db) +{ + return XFS_DIR2_FREE_FIRSTDB(mp) + db / XFS_DIR2_MAX_FREE_BESTS(mp); +} + +/* + * Convert data space db to the corresponding index in a free db. + */ +static inline int +xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db) +{ + return db % XFS_DIR2_MAX_FREE_BESTS(mp); +} + +/* + * Single block format. + * + * The single block format looks like the following drawing on disk: + * + * +-------------------------------------------------+ + * | xfs_dir2_data_hdr_t | + * +-------------------------------------------------+ + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t | + * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t : + * | ... | + * +-------------------------------------------------+ + * | unused space | + * +-------------------------------------------------+ + * | ... | + * | xfs_dir2_leaf_entry_t | + * | xfs_dir2_leaf_entry_t | + * +-------------------------------------------------+ + * | xfs_dir2_block_tail_t | + * +-------------------------------------------------+ + * + * As all the entries are variable size structures the accessors below should + * be used to iterate over them. + */ + +typedef struct xfs_dir2_block_tail { + __be32 count; /* count of leaf entries */ + __be32 stale; /* count of stale lf entries */ +} xfs_dir2_block_tail_t; + +/* + * Pointer to the leaf header embedded in a data block (1-block format) + */ +static inline struct xfs_dir2_block_tail * +xfs_dir2_block_tail_p(struct xfs_mount *mp, struct xfs_dir2_data_hdr *hdr) +{ + return ((struct xfs_dir2_block_tail *) + ((char *)hdr + mp->m_dirblksize)) - 1; +} + +/* + * Pointer to the leaf entries embedded in a data block (1-block format) + */ +static inline struct xfs_dir2_leaf_entry * +xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp) +{ + return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count); +} + +#endif /* __XFS_DIR2_FORMAT_H__ */ diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index d629b888ca28..487036d3751b 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -24,18 +24,14 @@ #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" -#include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" -#include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_bmap.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" -#include "xfs_dir2_node.h" +#include "xfs_dir2_format.h" +#include "xfs_dir2_priv.h" #include "xfs_error.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h deleted file mode 100644 index 140281adc7e5..000000000000 --- a/fs/xfs/xfs_dir2_leaf.h +++ /dev/null @@ -1,281 +0,0 @@ -/* - * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DIR2_LEAF_H__ -#define __XFS_DIR2_LEAF_H__ - -/* - * Directory format 2, leaf block structures. - * - * A pure data block looks like the following drawing on disk: - * - * +---------------------------+ - * | xfs_dir2_leaf_hdr_t | - * +---------------------------+ - * | xfs_dir2_leaf_entry_t | - * | xfs_dir2_leaf_entry_t | - * | xfs_dir2_leaf_entry_t | - * | xfs_dir2_leaf_entry_t | - * | ... | - * +---------------------------+ - * | xfs_dir2_data_off_t | - * | xfs_dir2_data_off_t | - * | xfs_dir2_data_off_t | - * | ... | - * +---------------------------+ - * | xfs_dir2_leaf_tail_t | - * +---------------------------+ - * - * The bests (xfs_dir2_data_off_t members) and tail are at the end of the - * block for single-leaf only (magic = XFS_DIR2_LEAF1_MAGIC not - * XFS_DIR2_LEAFN_MAGIC). - * - * As all the entries are variable size structures the accessors in this - * file should be used to iterate over them. - */ - -struct uio; -struct xfs_dabuf; -struct xfs_da_args; -struct xfs_inode; -struct xfs_mount; -struct xfs_trans; - -/* - * Offset of the leaf/node space. First block in this space - * is the btree root. - */ -#define XFS_DIR2_LEAF_SPACE 1 -#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE) -#define XFS_DIR2_LEAF_FIRSTDB(mp) \ - xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET) - -/* - * Offset in data space of a data entry. - */ -typedef __uint32_t xfs_dir2_dataptr_t; -#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0xffffffff) -#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0) - -/* - * Leaf block header. - */ -typedef struct xfs_dir2_leaf_hdr { - xfs_da_blkinfo_t info; /* header for da routines */ - __be16 count; /* count of entries */ - __be16 stale; /* count of stale entries */ -} xfs_dir2_leaf_hdr_t; - -/* - * Leaf block entry. - */ -typedef struct xfs_dir2_leaf_entry { - __be32 hashval; /* hash value of name */ - __be32 address; /* address of data entry */ -} xfs_dir2_leaf_entry_t; - -/* - * Leaf block tail. - */ -typedef struct xfs_dir2_leaf_tail { - __be32 bestcount; -} xfs_dir2_leaf_tail_t; - -/* - * Leaf block. - */ -typedef struct xfs_dir2_leaf { - xfs_dir2_leaf_hdr_t hdr; /* leaf header */ - xfs_dir2_leaf_entry_t ents[]; /* entries */ -} xfs_dir2_leaf_t; - -/* - * DB blocks here are logical directory block numbers, not filesystem blocks. - */ - -static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp) -{ - return (int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) / - (uint)sizeof(xfs_dir2_leaf_entry_t)); -} - -/* - * Get address of the bestcount field in the single-leaf block. - */ -static inline xfs_dir2_leaf_tail_t * -xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp) -{ - return (xfs_dir2_leaf_tail_t *) - ((char *)(lp) + (mp)->m_dirblksize - - (uint)sizeof(xfs_dir2_leaf_tail_t)); -} - -/* - * Get address of the bests array in the single-leaf block. - */ -static inline __be16 * -xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp) -{ - return (__be16 *)ltp - be32_to_cpu(ltp->bestcount); -} - -/* - * Convert dataptr to byte in file space - */ -static inline xfs_dir2_off_t -xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) -{ - return (xfs_dir2_off_t)(dp) << XFS_DIR2_DATA_ALIGN_LOG; -} - -/* - * Convert byte in file space to dataptr. It had better be aligned. 
- */ -static inline xfs_dir2_dataptr_t -xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by) -{ - return (xfs_dir2_dataptr_t)((by) >> XFS_DIR2_DATA_ALIGN_LOG); -} - -/* - * Convert byte in space to (DB) block - */ -static inline xfs_dir2_db_t -xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by) -{ - return (xfs_dir2_db_t)((by) >> \ - ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)); -} - -/* - * Convert dataptr to a block number - */ -static inline xfs_dir2_db_t -xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) -{ - return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp)); -} - -/* - * Convert byte in space to offset in a block - */ -static inline xfs_dir2_data_aoff_t -xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by) -{ - return (xfs_dir2_data_aoff_t)((by) & \ - ((1 << ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) - 1)); -} - -/* - * Convert dataptr to a byte offset in a block - */ -static inline xfs_dir2_data_aoff_t -xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) -{ - return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp)); -} - -/* - * Convert block and offset to byte in space - */ -static inline xfs_dir2_off_t -xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db, - xfs_dir2_data_aoff_t o) -{ - return ((xfs_dir2_off_t)(db) << \ - ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) + (o); -} - -/* - * Convert block (DB) to block (dablk) - */ -static inline xfs_dablk_t -xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db) -{ - return (xfs_dablk_t)((db) << (mp)->m_sb.sb_dirblklog); -} - -/* - * Convert byte in space to (DA) block - */ -static inline xfs_dablk_t -xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by) -{ - return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by)); -} - -/* - * Convert block and offset to dataptr - */ -static inline xfs_dir2_dataptr_t -xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db, - xfs_dir2_data_aoff_t o) -{ - return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o)); -} - -/* - * Convert block (dablk) to block (DB) - */ -static inline xfs_dir2_db_t -xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da) -{ - return (xfs_dir2_db_t)((da) >> (mp)->m_sb.sb_dirblklog); -} - -/* - * Convert block (dablk) to byte offset in space - */ -static inline xfs_dir2_off_t -xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da) -{ - return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0); -} - -/* - * Function declarations. 
- */ -extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args, - struct xfs_dabuf *dbp); -extern int xfs_dir2_leaf_addname(struct xfs_da_args *args); -extern void xfs_dir2_leaf_compact(struct xfs_da_args *args, - struct xfs_dabuf *bp); -extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp, - int *lowstalep, int *highstalep, - int *lowlogp, int *highlogp); -extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent, - size_t bufsize, xfs_off_t *offset, - filldir_t filldir); -extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno, - struct xfs_dabuf **bpp, int magic); -extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp, - int first, int last); -extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp, - struct xfs_dabuf *bp); -extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args); -extern int xfs_dir2_leaf_removename(struct xfs_da_args *args); -extern int xfs_dir2_leaf_replace(struct xfs_da_args *args); -extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args, - struct xfs_dabuf *lbp); -extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args, - struct xfs_dabuf *lbp, xfs_dir2_db_t db); -extern xfs_dir2_leaf_entry_t *xfs_dir2_leaf_find_entry(xfs_dir2_leaf_t *, int, - int, int, int, - int *, int *); -extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state); - -#endif /* __XFS_DIR2_LEAF_H__ */ diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index 14571cdeb33f..ec0a23e96705 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -23,18 +23,14 @@ #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" -#include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" -#include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_bmap.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" -#include "xfs_dir2_node.h" +#include "xfs_dir2_format.h" +#include "xfs_dir2_priv.h" #include "xfs_error.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h deleted file mode 100644 index 82dfe7147195..000000000000 --- a/fs/xfs/xfs_dir2_node.h +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2000,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DIR2_NODE_H__ -#define __XFS_DIR2_NODE_H__ - -/* - * Directory version 2, btree node format structures - */ - -struct uio; -struct xfs_dabuf; -struct xfs_da_args; -struct xfs_da_state; -struct xfs_da_state_blk; -struct xfs_inode; -struct xfs_trans; - -/* - * Offset of the freespace index. 
- */ -#define XFS_DIR2_FREE_SPACE 2 -#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE) -#define XFS_DIR2_FREE_FIRSTDB(mp) \ - xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET) - -#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ - -typedef struct xfs_dir2_free_hdr { - __be32 magic; /* XFS_DIR2_FREE_MAGIC */ - __be32 firstdb; /* db of first entry */ - __be32 nvalid; /* count of valid entries */ - __be32 nused; /* count of used entries */ -} xfs_dir2_free_hdr_t; - -typedef struct xfs_dir2_free { - xfs_dir2_free_hdr_t hdr; /* block header */ - __be16 bests[1]; /* best free counts */ - /* unused entries are -1 */ -} xfs_dir2_free_t; - -#define XFS_DIR2_MAX_FREE_BESTS(mp) \ - (((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_free_hdr_t)) / \ - (uint)sizeof(xfs_dir2_data_off_t)) - -/* - * Convert data space db to the corresponding free db. - */ -static inline xfs_dir2_db_t -xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db) -{ - return (XFS_DIR2_FREE_FIRSTDB(mp) + (db) / XFS_DIR2_MAX_FREE_BESTS(mp)); -} - -/* - * Convert data space db to the corresponding index in a free db. - */ -static inline int -xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db) -{ - return ((db) % XFS_DIR2_MAX_FREE_BESTS(mp)); -} - -extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args, - struct xfs_dabuf *lbp); -extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count); -extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp, - struct xfs_da_args *args, int *indexp, - struct xfs_da_state *state); -extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp, - struct xfs_dabuf *leaf2_bp); -extern int xfs_dir2_leafn_split(struct xfs_da_state *state, - struct xfs_da_state_blk *oldblk, - struct xfs_da_state_blk *newblk); -extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action); -extern void xfs_dir2_leafn_unbalance(struct xfs_da_state *state, - struct xfs_da_state_blk *drop_blk, - struct xfs_da_state_blk *save_blk); -extern int xfs_dir2_node_addname(struct xfs_da_args *args); -extern int xfs_dir2_node_lookup(struct xfs_da_args *args); -extern int xfs_dir2_node_removename(struct xfs_da_args *args); -extern int xfs_dir2_node_replace(struct xfs_da_args *args); -extern int xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo, - int *rvalp); - -#endif /* __XFS_DIR2_NODE_H__ */ diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/xfs_dir2_priv.h new file mode 100644 index 000000000000..067f403ecf8a --- /dev/null +++ b/fs/xfs/xfs_dir2_priv.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef __XFS_DIR2_PRIV_H__ +#define __XFS_DIR2_PRIV_H__ + +/* xfs_dir2.c */ +extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino); +extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r); +extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r); +extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space, + xfs_dir2_db_t *dbp); +extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db, + struct xfs_dabuf *bp); +extern int xfs_dir_cilookup_result(struct xfs_da_args *args, + const unsigned char *name, int len); + +/* xfs_dir2_block.c */ +extern int xfs_dir2_block_addname(struct xfs_da_args *args); +extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent, + xfs_off_t *offset, filldir_t filldir); +extern int xfs_dir2_block_lookup(struct xfs_da_args *args); +extern int xfs_dir2_block_removename(struct xfs_da_args *args); +extern int xfs_dir2_block_replace(struct xfs_da_args *args); +extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args, + struct xfs_dabuf *lbp, struct xfs_dabuf *dbp); + +/* xfs_dir2_data.c */ +#ifdef DEBUG +extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp); +#else +#define xfs_dir2_data_check(dp,bp) +#endif +extern struct xfs_dir2_data_free * +xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr, + struct xfs_dir2_data_unused *dup, int *loghead); +extern void xfs_dir2_data_freescan(struct xfs_mount *mp, + struct xfs_dir2_data_hdr *hdr, int *loghead); +extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno, + struct xfs_dabuf **bpp); +extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp, + struct xfs_dir2_data_entry *dep); +extern void xfs_dir2_data_log_header(struct xfs_trans *tp, + struct xfs_dabuf *bp); +extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp, + struct xfs_dir2_data_unused *dup); +extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp, + xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len, + int *needlogp, int *needscanp); +extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp, + struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset, + xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp); + +/* xfs_dir2_leaf.c */ +extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args, + struct xfs_dabuf *dbp); +extern int xfs_dir2_leaf_addname(struct xfs_da_args *args); +extern void xfs_dir2_leaf_compact(struct xfs_da_args *args, + struct xfs_dabuf *bp); +extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp, + int *lowstalep, int *highstalep, int *lowlogp, int *highlogp); +extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent, + size_t bufsize, xfs_off_t *offset, filldir_t filldir); +extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno, + struct xfs_dabuf **bpp, int magic); +extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp, + int first, int last); +extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp, + struct xfs_dabuf *bp); +extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args); +extern int xfs_dir2_leaf_removename(struct xfs_da_args *args); +extern int xfs_dir2_leaf_replace(struct xfs_da_args 
*args); +extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args, + struct xfs_dabuf *lbp); +extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args, + struct xfs_dabuf *lbp, xfs_dir2_db_t db); +extern struct xfs_dir2_leaf_entry * +xfs_dir2_leaf_find_entry(struct xfs_dir2_leaf *leaf, int index, int compact, + int lowstale, int highstale, + int *lfloglow, int *lfloghigh); +extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state); + +/* xfs_dir2_node.c */ +extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args, + struct xfs_dabuf *lbp); +extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count); +extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp, + struct xfs_da_args *args, int *indexp, + struct xfs_da_state *state); +extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp, + struct xfs_dabuf *leaf2_bp); +extern int xfs_dir2_leafn_split(struct xfs_da_state *state, + struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk); +extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action); +extern void xfs_dir2_leafn_unbalance(struct xfs_da_state *state, + struct xfs_da_state_blk *drop_blk, + struct xfs_da_state_blk *save_blk); +extern int xfs_dir2_node_addname(struct xfs_da_args *args); +extern int xfs_dir2_node_lookup(struct xfs_da_args *args); +extern int xfs_dir2_node_removename(struct xfs_da_args *args); +extern int xfs_dir2_node_replace(struct xfs_da_args *args); +extern int xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo, + int *rvalp); + +/* xfs_dir2_sf.c */ +extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp); +extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp, + struct xfs_dir2_sf_entry *sfep); +extern int xfs_dir2_block_sfsize(struct xfs_inode *dp, + struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp); +extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp, + int size, xfs_dir2_sf_hdr_t *sfhp); +extern int xfs_dir2_sf_addname(struct xfs_da_args *args); +extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); +extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent, + xfs_off_t *offset, filldir_t filldir); +extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); +extern int xfs_dir2_sf_removename(struct xfs_da_args *args); +extern int xfs_dir2_sf_replace(struct xfs_da_args *args); + +#endif /* __XFS_DIR2_PRIV_H__ */ diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c index 9d3dff2ae4c5..79d05e84e296 100644 --- a/fs/xfs/xfs_dir2_sf.c +++ b/fs/xfs/xfs_dir2_sf.c @@ -23,18 +23,16 @@ #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" -#include "xfs_dir2.h" #include "xfs_mount.h" #include "xfs_da_btree.h" #include "xfs_bmap_btree.h" -#include "xfs_dir2_sf.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_error.h" -#include "xfs_dir2_data.h" -#include "xfs_dir2_leaf.h" -#include "xfs_dir2_block.h" +#include "xfs_dir2.h" +#include "xfs_dir2_format.h" +#include "xfs_dir2_priv.h" #include "xfs_trace.h" /* diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h deleted file mode 100644 index 26f062912395..000000000000 --- a/fs/xfs/xfs_dir2_sf.h +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_DIR2_SF_H__ -#define __XFS_DIR2_SF_H__ - -/* - * Directory layout when stored internal to an inode. - * - * Small directories are packed as tightly as possible so as to fit into the - * literal area of the inode. They consist of a single xfs_dir2_sf_hdr header - * followed by zero or more xfs_dir2_sf_entry structures. Due the different - * inode number storage size and the variable length name field in - * the xfs_dir2_sf_entry all these structure are variable length, and the - * accessors in this file should be used to iterate over them. - */ - -struct uio; -struct xfs_dabuf; -struct xfs_da_args; -struct xfs_dir2_data_hdr; -struct xfs_inode; -struct xfs_mount; -struct xfs_trans; - -/* - * Inode number stored as 8 8-bit values. - */ -typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t; - -/* - * Inode number stored as 4 8-bit values. - * Works a lot of the time, when all the inode numbers in a directory - * fit in 32 bits. - */ -typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t; - -typedef union { - xfs_dir2_ino8_t i8; - xfs_dir2_ino4_t i4; -} xfs_dir2_inou_t; -#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL) - -/* - * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t. - * Only need 16 bits, this is the byte offset into the single block form. - */ -typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t; - -/* - * The parent directory has a dedicated field, and the self-pointer must - * be calculated on the fly. - * - * Entries are packed toward the top as tightly as possible, and thus may - * be misaligned. Care needs to be taken to access them through special - * helpers or copy them into aligned variables first. - */ -typedef struct xfs_dir2_sf_hdr { - __uint8_t count; /* count of entries */ - __uint8_t i8count; /* count of 8-byte inode #s */ - xfs_dir2_inou_t parent; /* parent dir inode number */ -} __arch_pack xfs_dir2_sf_hdr_t; - -typedef struct xfs_dir2_sf_entry { - __u8 namelen; /* actual name length */ - xfs_dir2_sf_off_t offset; /* saved offset */ - __u8 name[]; /* name, variable size */ - /* - * A xfs_dir2_ino8_t or xfs_dir2_ino4_t follows here, at a - * variable offset after the name. - */ -} __arch_pack xfs_dir2_sf_entry_t; - -static inline int xfs_dir2_sf_hdr_size(int i8count) -{ - return ((uint)sizeof(xfs_dir2_sf_hdr_t) - \ - ((i8count) == 0) * \ - ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); -} - -static inline xfs_dir2_data_aoff_t -xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep) -{ - return get_unaligned_be16(&sfep->offset.i); -} - -static inline void -xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off) -{ - put_unaligned_be16(off, &sfep->offset.i); -} - -static inline int -xfs_dir2_sf_entsize(struct xfs_dir2_sf_hdr *hdr, int len) -{ - return sizeof(struct xfs_dir2_sf_entry) + /* namelen + offset */ - len + /* name */ - (hdr->i8count ? 
/* ino */ - sizeof(xfs_dir2_ino8_t) : - sizeof(xfs_dir2_ino4_t)); -} - -static inline struct xfs_dir2_sf_entry * -xfs_dir2_sf_firstentry(struct xfs_dir2_sf_hdr *hdr) -{ - return (struct xfs_dir2_sf_entry *) - ((char *)hdr + xfs_dir2_sf_hdr_size(hdr->i8count)); -} - -static inline struct xfs_dir2_sf_entry * -xfs_dir2_sf_nextentry(struct xfs_dir2_sf_hdr *hdr, - struct xfs_dir2_sf_entry *sfep) -{ - return (struct xfs_dir2_sf_entry *) - ((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen)); -} - -/* - * Functions. - */ -extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp); -extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp, - struct xfs_dir2_sf_entry *sfep); -extern int xfs_dir2_block_sfsize(struct xfs_inode *dp, - struct xfs_dir2_data_hdr *block, - xfs_dir2_sf_hdr_t *sfhp); -extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp, - int size, xfs_dir2_sf_hdr_t *sfhp); -extern int xfs_dir2_sf_addname(struct xfs_da_args *args); -extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); -extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent, - xfs_off_t *offset, filldir_t filldir); -extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); -extern int xfs_dir2_sf_removename(struct xfs_da_args *args); -extern int xfs_dir2_sf_replace(struct xfs_da_args *args); - -#endif /* __XFS_DIR2_SF_H__ */ -- cgit v1.2.3 From a00b7745c6e68ee89a123cd81e1dbc52fb55868e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:48 +0200 Subject: xfs: cleanup struct xfs_dir2_free Change the bests array to be a proper variable sized entry. This is done easily as no one relies on the size of the structure. Also change XFS_DIR2_MAX_FREE_BESTS to an inline function while we're at it. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_format.h | 14 ++++++++------ fs/xfs/xfs_dir2_node.c | 8 ++++---- 2 files changed, 12 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h index c3c4839eebd6..07270981f48f 100644 --- a/fs/xfs/xfs_dir2_format.h +++ b/fs/xfs/xfs_dir2_format.h @@ -516,13 +516,15 @@ typedef struct xfs_dir2_free_hdr { typedef struct xfs_dir2_free { xfs_dir2_free_hdr_t hdr; /* block header */ - __be16 bests[1]; /* best free counts */ + __be16 bests[]; /* best free counts */ /* unused entries are -1 */ } xfs_dir2_free_t; -#define XFS_DIR2_MAX_FREE_BESTS(mp) \ - (((mp)->m_dirblksize - (uint)sizeof(struct xfs_dir2_free_hdr)) / \ - (uint)sizeof(xfs_dir2_data_off_t)) +static inline int xfs_dir2_free_max_bests(struct xfs_mount *mp) +{ + return (mp->m_dirblksize - sizeof(struct xfs_dir2_free_hdr)) / + sizeof(xfs_dir2_data_off_t); +} /* * Convert data space db to the corresponding free db. 
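This commit turns the bests array into a true flexible array member and the sizing macro into xfs_dir2_free_max_bests(). A standalone sketch of the resulting free-index math, assuming a 4096-byte directory block and a made-up FREE_FIRSTDB value (the structure names here are stand-ins, not the kernel definitions):

/*
 * Standalone sketch: with a flexible bests[] array, sizeof() stays equal
 * to the fixed header, and a data block's best-free value lives at
 * (FREE_FIRSTDB + db / max_bests, db % max_bests).
 */
#include <stdint.h>
#include <stdio.h>

struct free_hdr { uint32_t magic, firstdb, nvalid, nused; };
struct free_block { struct free_hdr hdr; uint16_t bests[]; };

int main(void)
{
	int dirblksize = 4096;		/* example directory block size */
	int max_bests = (dirblksize - sizeof(struct free_hdr)) /
			sizeof(uint16_t);
	int free_firstdb = 1024;	/* stand-in for FREE_FIRSTDB(mp) */
	int db = 5000;			/* some data block number */

	printf("sizeof(struct free_block) = %zu (no [1] placeholder)\n",
	       sizeof(struct free_block));
	printf("max_bests = %d\n", max_bests);
	printf("db %d -> free block %d, index %d\n",
	       db, free_firstdb + db / max_bests, db % max_bests);
	return 0;
}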
@@ -530,7 +532,7 @@ typedef struct xfs_dir2_free { static inline xfs_dir2_db_t xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db) { - return XFS_DIR2_FREE_FIRSTDB(mp) + db / XFS_DIR2_MAX_FREE_BESTS(mp); + return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir2_free_max_bests(mp); } /* @@ -539,7 +541,7 @@ xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db) static inline int xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db) { - return db % XFS_DIR2_MAX_FREE_BESTS(mp); + return db % xfs_dir2_free_max_bests(mp); } /* diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c index ec0a23e96705..084b3247d636 100644 --- a/fs/xfs/xfs_dir2_node.c +++ b/fs/xfs/xfs_dir2_node.c @@ -408,7 +408,7 @@ xfs_dir2_leafn_lookup_for_addname( ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); ASSERT((be32_to_cpu(free->hdr.firstdb) % - XFS_DIR2_MAX_FREE_BESTS(mp)) == 0); + xfs_dir2_free_max_bests(mp)) == 0); ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb); ASSERT(curdb < be32_to_cpu(free->hdr.firstdb) + be32_to_cpu(free->hdr.nvalid)); @@ -924,7 +924,7 @@ xfs_dir2_leafn_remove( free = fbp->data; ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC)); ASSERT(be32_to_cpu(free->hdr.firstdb) == - XFS_DIR2_MAX_FREE_BESTS(mp) * + xfs_dir2_free_max_bests(mp) * (fdb - XFS_DIR2_FREE_FIRSTDB(mp))); /* * Calculate which entry we need to fix. @@ -1603,7 +1603,7 @@ xfs_dir2_node_addname_int( free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC); free->hdr.firstdb = cpu_to_be32( (fbno - XFS_DIR2_FREE_FIRSTDB(mp)) * - XFS_DIR2_MAX_FREE_BESTS(mp)); + xfs_dir2_free_max_bests(mp)); free->hdr.nvalid = 0; free->hdr.nused = 0; } else { @@ -1620,7 +1620,7 @@ xfs_dir2_node_addname_int( * freespace block, extend that table. */ if (findex >= be32_to_cpu(free->hdr.nvalid)) { - ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp)); + ASSERT(findex < xfs_dir2_free_max_bests(mp)); free->hdr.nvalid = cpu_to_be32(findex + 1); /* * Tag new entry so nused will go up. -- cgit v1.2.3 From a230a1df40864ef68ff6fbd09302f16d2a216ea5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:48 +0200 Subject: xfs: factor out xfs_dir2_leaf_find_stale Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_dir2_leaf.c | 83 ++++++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 46 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c index 487036d3751b..ca2386d82cdf 100644 --- a/fs/xfs/xfs_dir2_leaf.c +++ b/fs/xfs/xfs_dir2_leaf.c @@ -148,6 +148,38 @@ xfs_dir2_block_to_leaf( return 0; } +STATIC void +xfs_dir2_leaf_find_stale( + struct xfs_dir2_leaf *leaf, + int index, + int *lowstale, + int *highstale) +{ + /* + * Find the first stale entry before our index, if any. + */ + for (*lowstale = index - 1; *lowstale >= 0; --*lowstale) { + if (leaf->ents[*lowstale].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) + break; + } + + /* + * Find the first stale entry at or after our index, if any. + * Stop if the result would require moving more entries than using + * lowstale. 
+ */ + for (*highstale = index; + *highstale < be16_to_cpu(leaf->hdr.count); + ++*highstale) { + if (leaf->ents[*highstale].address == + cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) + break; + if (*lowstale >= 0 && index - *lowstale <= *highstale - index) + break; + } +} + struct xfs_dir2_leaf_entry * xfs_dir2_leaf_find_entry( xfs_dir2_leaf_t *leaf, /* leaf structure */ @@ -190,32 +222,8 @@ xfs_dir2_leaf_find_entry( * If we didn't compact before, we need to find the nearest stale * entries before and after our insertion point. */ - if (compact == 0) { - /* - * Find the first stale entry before the insertion point, - * if any. - */ - for (lowstale = index - 1; - lowstale >= 0 && - leaf->ents[lowstale].address != - cpu_to_be32(XFS_DIR2_NULL_DATAPTR); - lowstale--) - continue; - - /* - * Find the next stale entry at or after the insertion point, - * if any. Stop if we go so far that the lowstale entry - * would be better. - */ - for (highstale = index; - highstale < be16_to_cpu(leaf->hdr.count) && - leaf->ents[highstale].address != - cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && - (lowstale < 0 || - index - lowstale - 1 >= highstale - index); - highstale++) - continue; - } + if (compact == 0) + xfs_dir2_leaf_find_stale(leaf, index, &lowstale, &highstale); /* * If the low one is better, use it. @@ -689,26 +697,9 @@ xfs_dir2_leaf_compact_x1( leaf = bp->data; ASSERT(be16_to_cpu(leaf->hdr.stale) > 1); index = *indexp; - /* - * Find the first stale entry before our index, if any. - */ - for (lowstale = index - 1; - lowstale >= 0 && - leaf->ents[lowstale].address != - cpu_to_be32(XFS_DIR2_NULL_DATAPTR); - lowstale--) - continue; - /* - * Find the first stale entry at or after our index, if any. - * Stop if the answer would be worse than lowstale. - */ - for (highstale = index; - highstale < be16_to_cpu(leaf->hdr.count) && - leaf->ents[highstale].address != - cpu_to_be32(XFS_DIR2_NULL_DATAPTR) && - (lowstale < 0 || index - lowstale > highstale - index); - highstale++) - continue; + + xfs_dir2_leaf_find_stale(leaf, index, &lowstale, &highstale); + /* * Pick the better of lowstale and highstale. */ -- cgit v1.2.3 From 77936d0280616d84d1eb0eb38a6f857d2827a434 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:49 +0200 Subject: xfs: factor out xfs_da_grow_inode_int xfs_da_grow_inode and xfs_dir2_grow_inode are mostly duplicate code. Factor the meat of those two functions into a new common helper. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_da_btree.c | 126 +++++++++++++++++++++++++++----------------------- fs/xfs/xfs_da_btree.h | 2 + fs/xfs/xfs_dir2.c | 125 ++++++------------------------------------------- 3 files changed, 86 insertions(+), 167 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index a58a6410c58d..73b048f7eb71 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -1543,79 +1543,62 @@ const struct xfs_nameops xfs_default_nameops = { .compname = xfs_da_compname }; -/* - * Add a block to the btree ahead of the file. - * Return the new block number to the caller. 
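xfs_dir2_leaf_find_stale replaces two open-coded scans with one helper: walk backwards from the index for the nearest stale entry, then forwards, stopping the forward walk once it could not beat the backward hit. A standalone sketch of the same search over a plain array, with 0 standing in for XFS_DIR2_NULL_DATAPTR:

/*
 * Standalone sketch of the stale-entry search factored out above.
 */
#include <stdio.h>

static void find_stale(const unsigned *ents, int count, int index,
		       int *lowstale, int *highstale)
{
	for (*lowstale = index - 1; *lowstale >= 0; --*lowstale)
		if (ents[*lowstale] == 0)
			break;

	for (*highstale = index; *highstale < count; ++*highstale) {
		if (ents[*highstale] == 0)
			break;
		if (*lowstale >= 0 && index - *lowstale <= *highstale - index)
			break;
	}
}

int main(void)
{
	unsigned ents[] = { 10, 0, 30, 40, 50, 0, 70 };
	int low, high;

	find_stale(ents, 7, 3, &low, &high);
	printf("lowstale %d, highstale %d\n", low, high);	/* 1 and 5 */
	return 0;
}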
- */ int -xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) +xfs_da_grow_inode_int( + struct xfs_da_args *args, + xfs_fileoff_t *bno, + int count) { - xfs_fileoff_t bno, b; - xfs_bmbt_irec_t map; - xfs_bmbt_irec_t *mapp; - xfs_inode_t *dp; - int nmap, error, w, count, c, got, i, mapi; - xfs_trans_t *tp; - xfs_mount_t *mp; - xfs_drfsbno_t nblks; + struct xfs_trans *tp = args->trans; + struct xfs_inode *dp = args->dp; + int w = args->whichfork; + xfs_drfsbno_t nblks = dp->i_d.di_nblocks; + struct xfs_bmbt_irec map, *mapp; + int nmap, error, got, i, mapi; - dp = args->dp; - mp = dp->i_mount; - w = args->whichfork; - tp = args->trans; - nblks = dp->i_d.di_nblocks; - - /* - * For new directories adjust the file offset and block count. - */ - if (w == XFS_DATA_FORK) { - bno = mp->m_dirleafblk; - count = mp->m_dirblkfsbs; - } else { - bno = 0; - count = 1; - } /* * Find a spot in the file space to put the new block. */ - if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w))) + error = xfs_bmap_first_unused(tp, dp, count, bno, w); + if (error) return error; - if (w == XFS_DATA_FORK) - ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk); + /* * Try mapping it in one filesystem block. */ nmap = 1; ASSERT(args->firstblock != NULL); - if ((error = xfs_bmapi(tp, dp, bno, count, + error = xfs_bmapi(tp, dp, *bno, count, xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA| XFS_BMAPI_CONTIG, args->firstblock, args->total, &map, &nmap, - args->flist))) { + args->flist); + if (error) return error; - } + ASSERT(nmap <= 1); if (nmap == 1) { mapp = ↦ mapi = 1; - } - /* - * If we didn't get it and the block might work if fragmented, - * try without the CONTIG flag. Loop until we get it all. - */ - else if (nmap == 0 && count > 1) { + } else if (nmap == 0 && count > 1) { + xfs_fileoff_t b; + int c; + + /* + * If we didn't get it and the block might work if fragmented, + * try without the CONTIG flag. Loop until we get it all. + */ mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP); - for (b = bno, mapi = 0; b < bno + count; ) { + for (b = *bno, mapi = 0; b < *bno + count; ) { nmap = MIN(XFS_BMAP_MAX_NMAP, count); - c = (int)(bno + count - b); - if ((error = xfs_bmapi(tp, dp, b, c, + c = (int)(*bno + count - b); + error = xfs_bmapi(tp, dp, b, c, xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE| XFS_BMAPI_METADATA, args->firstblock, args->total, - &mapp[mapi], &nmap, args->flist))) { - kmem_free(mapp); - return error; - } + &mapp[mapi], &nmap, args->flist); + if (error) + goto out_free_map; if (nmap < 1) break; mapi += nmap; @@ -1626,24 +1609,53 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno) mapi = 0; mapp = NULL; } + /* * Count the blocks we got, make sure it matches the total. */ for (i = 0, got = 0; i < mapi; i++) got += mapp[i].br_blockcount; - if (got != count || mapp[0].br_startoff != bno || + if (got != count || mapp[0].br_startoff != *bno || mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != - bno + count) { - if (mapp != &map) - kmem_free(mapp); - return XFS_ERROR(ENOSPC); + *bno + count) { + error = XFS_ERROR(ENOSPC); + goto out_free_map; } - if (mapp != &map) - kmem_free(mapp); + /* account for newly allocated blocks in reserved blocks total */ args->total -= dp->i_d.di_nblocks - nblks; - *new_blkno = (xfs_dablk_t)bno; - return 0; + +out_free_map: + if (mapp != &map) + kmem_free(mapp); + return error; +} + +/* + * Add a block to the btree ahead of the file. + * Return the new block number to the caller. 
+ */ +int +xfs_da_grow_inode( + struct xfs_da_args *args, + xfs_dablk_t *new_blkno) +{ + xfs_fileoff_t bno; + int count; + int error; + + if (args->whichfork == XFS_DATA_FORK) { + bno = args->dp->i_mount->m_dirleafblk; + count = args->dp->i_mount->m_dirblkfsbs; + } else { + bno = 0; + count = 1; + } + + error = xfs_da_grow_inode_int(args, &bno, count); + if (!error) + *new_blkno = (xfs_dablk_t)bno; + return error; } /* diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index fe9f5a8c1d2a..7b0a6ea59a04 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -248,6 +248,8 @@ int xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk, * Utility routines. */ int xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno); +int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno, + int count); int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp, xfs_dablk_t bno, xfs_daddr_t mappedbno, xfs_dabuf_t **bp, int whichfork); diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c index 6effbeb24e5a..4580ce00aeb4 100644 --- a/fs/xfs/xfs_dir2.c +++ b/fs/xfs/xfs_dir2.c @@ -497,129 +497,34 @@ xfs_dir_canenter( /* * Add a block to the directory. - * This routine is for data and free blocks, not leaf/node blocks - * which are handled by xfs_da_grow_inode. + * + * This routine is for data and free blocks, not leaf/node blocks which are + * handled by xfs_da_grow_inode. */ int xfs_dir2_grow_inode( - xfs_da_args_t *args, - int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */ - xfs_dir2_db_t *dbp) /* out: block number added */ + struct xfs_da_args *args, + int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */ + xfs_dir2_db_t *dbp) /* out: block number added */ { - xfs_fileoff_t bno; /* directory offset of new block */ - int count; /* count of filesystem blocks */ - xfs_inode_t *dp; /* incore directory inode */ - int error; - int got; /* blocks actually mapped */ - int i; - xfs_bmbt_irec_t map; /* single structure for bmap */ - int mapi; /* mapping index */ - xfs_bmbt_irec_t *mapp; /* bmap mapping structure(s) */ - xfs_mount_t *mp; - int nmap; /* number of bmap entries */ - xfs_trans_t *tp; - xfs_drfsbno_t nblks; + struct xfs_inode *dp = args->dp; + struct xfs_mount *mp = dp->i_mount; + xfs_fileoff_t bno; /* directory offset of new block */ + int count; /* count of filesystem blocks */ + int error; trace_xfs_dir2_grow_inode(args, space); - dp = args->dp; - tp = args->trans; - mp = dp->i_mount; - nblks = dp->i_d.di_nblocks; /* * Set lowest possible block in the space requested. */ bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE); count = mp->m_dirblkfsbs; - /* - * Find the first hole for our block. - */ - if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK))) - return error; - nmap = 1; - ASSERT(args->firstblock != NULL); - /* - * Try mapping the new block contiguously (one extent). - */ - if ((error = xfs_bmapi(tp, dp, bno, count, - XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, - args->firstblock, args->total, &map, &nmap, - args->flist))) - return error; - ASSERT(nmap <= 1); - if (nmap == 1) { - mapp = ↦ - mapi = 1; - } - /* - * Didn't work and this is a multiple-fsb directory block. - * Try again with contiguous flag turned on. - */ - else if (nmap == 0 && count > 1) { - xfs_fileoff_t b; /* current file offset */ - /* - * Space for maximum number of mappings. - */ - mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP); - /* - * Iterate until we get to the end of our block. 
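The heart of xfs_da_grow_inode_int is the fallback loop: when a single contiguous mapping fails, map piece by piece and then verify that the pieces cover exactly the requested range. A standalone sketch of that fallback with a stub allocator standing in for xfs_bmapi; MAX_PIECE and the block numbers are invented for illustration:

/*
 * Standalone sketch of the piecewise mapping loop and its ENOSPC check.
 */
#include <stdio.h>

#define MAX_PIECE 3

struct map { long startoff; long blockcount; };

/* stub for xfs_bmapi(): maps at most MAX_PIECE blocks per call */
static int fake_bmapi(long b, long c, struct map *m)
{
	m->startoff = b;
	m->blockcount = c > MAX_PIECE ? MAX_PIECE : c;
	return 1;	/* number of mappings returned */
}

int main(void)
{
	long bno = 64, count = 8, got = 0;
	struct map maps[16];
	int mapi = 0;

	for (long b = bno; b < bno + count; ) {
		int n = fake_bmapi(b, bno + count - b, &maps[mapi]);

		if (n < 1)
			break;
		mapi += n;
		b = maps[mapi - 1].startoff + maps[mapi - 1].blockcount;
	}

	for (int i = 0; i < mapi; i++)
		got += maps[i].blockcount;

	if (got != count || maps[0].startoff != bno)
		printf("ENOSPC\n");
	else
		printf("mapped %ld blocks in %d pieces\n", got, mapi);
	return 0;
}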
- */ - for (b = bno, mapi = 0; b < bno + count; ) { - int c; /* current fsb count */ - - /* - * Can't map more than MAX_NMAP at once. - */ - nmap = MIN(XFS_BMAP_MAX_NMAP, count); - c = (int)(bno + count - b); - if ((error = xfs_bmapi(tp, dp, b, c, - XFS_BMAPI_WRITE|XFS_BMAPI_METADATA, - args->firstblock, args->total, - &mapp[mapi], &nmap, args->flist))) { - kmem_free(mapp); - return error; - } - if (nmap < 1) - break; - /* - * Add this bunch into our table, go to the next offset. - */ - mapi += nmap; - b = mapp[mapi - 1].br_startoff + - mapp[mapi - 1].br_blockcount; - } - } - /* - * Didn't work. - */ - else { - mapi = 0; - mapp = NULL; - } - /* - * See how many fsb's we got. - */ - for (i = 0, got = 0; i < mapi; i++) - got += mapp[i].br_blockcount; - /* - * Didn't get enough fsb's, or the first/last block's are wrong. - */ - if (got != count || mapp[0].br_startoff != bno || - mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != - bno + count) { - if (mapp != &map) - kmem_free(mapp); - return XFS_ERROR(ENOSPC); - } - /* - * Done with the temporary mapping table. - */ - if (mapp != &map) - kmem_free(mapp); + error = xfs_da_grow_inode_int(args, &bno, count); + if (error) + return error; - /* account for newly allocated blocks in reserved blocks total */ - args->total -= dp->i_d.di_nblocks - nblks; *dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno); /* @@ -631,7 +536,7 @@ xfs_dir2_grow_inode( size = XFS_FSB_TO_B(mp, bno + count); if (size > dp->i_d.di_size) { dp->i_d.di_size = size; - xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); + xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE); } } return 0; -- cgit v1.2.3 From bf9d9013a2a559858efb590bf922377be9d6d969 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:49 +0200 Subject: xfs: add a proper transaction pointer to struct xfs_buf Replace the typeless b_fspriv2 and the ugly macros around it with a properly typed transaction pointer. As a fallout the log buffer state debug checks are also removed. We could have kept them using casts, but as they do not have a real purpose we can as well just remove them. 
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.h | 4 +--- fs/xfs/xfs_buf_item.c | 4 ++-- fs/xfs/xfs_log.c | 9 --------- fs/xfs/xfs_trans_buf.c | 32 ++++++++++++++++---------------- 4 files changed, 19 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 21604a17e2a3..2eec8f0f2a52 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -149,7 +149,7 @@ typedef struct xfs_buf { xfs_buf_iodone_t b_iodone; /* I/O completion function */ struct completion b_iowait; /* queue for I/O waiters */ void *b_fspriv; - void *b_fspriv2; + struct xfs_trans *b_transp; struct page **b_pages; /* array of page pointers */ struct page *b_page_array[XB_PAGES]; /* inline pages */ unsigned long b_queuetime; /* time buffer was queued */ @@ -282,8 +282,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv) #define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) -#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2) -#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val)) #define XFS_BUF_SET_START(bp) do { } while (0) #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index da399ac4411d..716ef095b13f 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -443,7 +443,7 @@ xfs_buf_item_unpin( * Since the transaction no longer refers to the buffer, * the buffer should no longer refer to the transaction. */ - XFS_BUF_SET_FSPRIVATE2(bp, NULL); + bp->b_transp = NULL; } /* @@ -525,7 +525,7 @@ xfs_buf_item_unlock( uint hold; /* Clear the buffer's association with this transaction. */ - XFS_BUF_SET_FSPRIVATE2(bp, NULL); + bp->b_transp = NULL; /* * If this is a transaction abort, don't return early. 
Instead, allow diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 0ef5eb677383..82c797dd6520 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -876,8 +876,6 @@ xlog_iodone(xfs_buf_t *bp) int aborted; iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); - ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2); - XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); aborted = 0; l = iclog->ic_log; @@ -1057,7 +1055,6 @@ xlog_alloc_log(xfs_mount_t *mp, if (!bp) goto out_free_log; XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); - XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); log->l_xbuf = bp; @@ -1092,7 +1089,6 @@ xlog_alloc_log(xfs_mount_t *mp, goto out_free_iclog; XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); - XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); iclog->ic_bp = bp; iclog->ic_data = bp->b_addr; #ifdef DEBUG @@ -1349,8 +1345,6 @@ xlog_sync(xlog_t *log, } bp = iclog->ic_bp; - ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); - XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); XFS_STATS_ADD(xs_log_blocks, BTOBB(count)); @@ -1408,9 +1402,6 @@ xlog_sync(xlog_t *log, } if (split) { bp = iclog->ic_log->l_xbuf; - ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == - (unsigned long)1); - XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ (__psint_t)count), split); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index bd5445787855..09a0468ef11a 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -81,7 +81,7 @@ _xfs_trans_bjoin( struct xfs_buf_log_item *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL); + ASSERT(bp->b_transp == NULL); /* * The xfs_buf_log_item pointer is stored in b_fsprivate. If @@ -110,7 +110,7 @@ _xfs_trans_bjoin( * Initialize b_fsprivate2 so we can find it with incore_match() * in xfs_trans_get_buf() and friends above. */ - XFS_BUF_SET_FSPRIVATE2(bp, tp); + bp->b_transp = tp; } @@ -172,7 +172,7 @@ xfs_trans_get_buf(xfs_trans_t *tp, else if (XFS_BUF_ISSTALE(bp)) ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -232,7 +232,7 @@ xfs_trans_getsb(xfs_trans_t *tp, * recursion count and return the buffer to the caller. */ bp = mp->m_sb_bp; - if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) { + if (bp->b_transp == tp) { bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -328,7 +328,7 @@ xfs_trans_read_buf( bp = xfs_trans_buf_item_match(tp, target, blkno, len); if (bp != NULL) { ASSERT(xfs_buf_islocked(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); ASSERT((XFS_BUF_ISERROR(bp)) == 0); if (!(XFS_BUF_ISDONE(bp))) { @@ -466,7 +466,7 @@ xfs_trans_brelse(xfs_trans_t *tp, * Default to a normal brelse() call if the tp is NULL. 
*/ if (tp == NULL) { - ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL); + ASSERT(bp->b_transp == NULL); /* * If there's a buf log item attached to the buffer, * then let the AIL know that the buffer is being @@ -484,7 +484,7 @@ xfs_trans_brelse(xfs_trans_t *tp, return; } - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); ASSERT(bip->bli_item.li_type == XFS_LI_BUF); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); @@ -556,7 +556,7 @@ xfs_trans_brelse(xfs_trans_t *tp, xfs_buf_item_relse(bp); bip = NULL; } - XFS_BUF_SET_FSPRIVATE2(bp, NULL); + bp->b_transp = NULL; /* * If we've still got a buf log item on the buffer, then @@ -584,7 +584,7 @@ xfs_trans_bhold(xfs_trans_t *tp, xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); @@ -606,7 +606,7 @@ xfs_trans_bhold_release(xfs_trans_t *tp, xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); @@ -637,7 +637,7 @@ xfs_trans_log_buf(xfs_trans_t *tp, xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) || @@ -709,7 +709,7 @@ xfs_trans_binval( xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); @@ -783,7 +783,7 @@ xfs_trans_inode_buf( xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); @@ -809,7 +809,7 @@ xfs_trans_stale_inode_buf( xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); @@ -836,7 +836,7 @@ xfs_trans_inode_alloc_buf( xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); @@ -866,7 +866,7 @@ xfs_trans_dquot_buf( xfs_buf_log_item_t *bip; ASSERT(XFS_BUF_ISBUSY(bp)); - ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp); + ASSERT(bp->b_transp == tp); ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); ASSERT(type == XFS_BLF_UDQUOT_BUF || type == XFS_BLF_PDQUOT_BUF || -- cgit v1.2.3 From adadbeefb34f755a3477da51035eeeec2c1fde38 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:49 +0200 Subject: xfs: remove wrappers around b_fspriv Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.h | 2 -- fs/xfs/xfs_buf_item.c | 51 +++++++++++++------------------ fs/xfs/xfs_inode.c | 4 +-- fs/xfs/xfs_inode_item.c | 4 +-- fs/xfs/xfs_log.c | 17 ++++------- fs/xfs/xfs_trans_buf.c | 76 
++++++++++++++++++---------------------------- 6 files changed, 61 insertions(+), 93 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 2eec8f0f2a52..033c152c411c 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -280,8 +280,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) #define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) -#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv) -#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val)) #define XFS_BUF_SET_START(bp) do { } while (0) #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 716ef095b13f..9eacf06932bb 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -90,13 +90,11 @@ xfs_buf_item_flush_log_debug( uint first, uint last) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; uint nbytes; - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); - if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) { + if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF)) return; - } ASSERT(bip->bli_logged != NULL); nbytes = last - first + 1; @@ -408,7 +406,7 @@ xfs_buf_item_unpin( int stale = bip->bli_flags & XFS_BLI_STALE; int freed; - ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip); + ASSERT(bp->b_fspriv == bip); ASSERT(atomic_read(&bip->bli_refcount) > 0); trace_xfs_buf_item_unpin(bip); @@ -454,13 +452,13 @@ xfs_buf_item_unpin( */ if (bip->bli_flags & XFS_BLI_STALE_INODE) { xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); + bp->b_fspriv = NULL; XFS_BUF_CLR_IODONE_FUNC(bp); } else { spin_lock(&ailp->xa_lock); xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); xfs_buf_item_relse(bp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); + ASSERT(bp->b_fspriv == NULL); } xfs_buf_relse(bp); } @@ -684,7 +682,7 @@ xfs_buf_item_init( xfs_buf_t *bp, xfs_mount_t *mp) { - xfs_log_item_t *lip; + xfs_log_item_t *lip = bp->b_fspriv; xfs_buf_log_item_t *bip; int chunks; int map_size; @@ -696,12 +694,8 @@ xfs_buf_item_init( * nothing to do here so return. */ ASSERT(bp->b_target->bt_mount == mp); - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - if (lip->li_type == XFS_LI_BUF) { - return; - } - } + if (lip != NULL && lip->li_type == XFS_LI_BUF) + return; /* * chunks is the number of XFS_BLF_CHUNK size pieces @@ -740,11 +734,9 @@ xfs_buf_item_init( * Put the buf item into the list of items attached to the * buffer at the front. 
*/ - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - bip->bli_item.li_bio_list = - XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - } - XFS_BUF_SET_FSPRIVATE(bp, bip); + if (bp->b_fspriv) + bip->bli_item.li_bio_list = bp->b_fspriv; + bp->b_fspriv = bip; } @@ -876,12 +868,11 @@ xfs_buf_item_relse( trace_xfs_buf_item_relse(bp, _RET_IP_); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); - XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list); - if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) && - (XFS_BUF_IODONE_FUNC(bp) != NULL)) { + bip = bp->b_fspriv; + bp->b_fspriv = bip->bli_item.li_bio_list; + if (bp->b_fspriv == NULL && XFS_BUF_IODONE_FUNC(bp) != NULL) XFS_BUF_CLR_IODONE_FUNC(bp); - } + xfs_buf_rele(bp); xfs_buf_item_free(bip); } @@ -908,12 +899,12 @@ xfs_buf_attach_iodone( ASSERT(xfs_buf_islocked(bp)); lip->li_cb = cb; - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + head_lip = bp->b_fspriv; + if (head_lip) { lip->li_bio_list = head_lip->li_bio_list; head_lip->li_bio_list = lip; } else { - XFS_BUF_SET_FSPRIVATE(bp, lip); + bp->b_fspriv = lip; } ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) || @@ -939,8 +930,8 @@ xfs_buf_do_callbacks( { struct xfs_log_item *lip; - while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) { - XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list); + while ((lip = bp->b_fspriv) != NULL) { + bp->b_fspriv = lip->li_bio_list; ASSERT(lip->li_cb != NULL); /* * Clear the next pointer so we don't have any @@ -1026,7 +1017,7 @@ xfs_buf_iodone_callbacks( do_callbacks: xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); + bp->b_fspriv = NULL; XFS_BUF_CLR_IODONE_FUNC(bp); xfs_buf_ioend(bp, 0); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index d04ea6a2dfaf..34c6872b5f7b 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1698,7 +1698,7 @@ xfs_ifree_cluster( * stale first, we will not attempt to lock them in the loop * below as the XFS_ISTALE flag will be set. */ - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + lip = bp->b_fspriv; while (lip) { if (lip->li_type == XFS_LI_INODE) { iip = (xfs_inode_log_item_t *)lip; @@ -2811,7 +2811,7 @@ xfs_iflush_int( */ xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bp->b_fspriv != NULL); ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); } else { /* diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index a49811a3e5a4..588406dc6a35 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -874,7 +874,7 @@ xfs_iflush_done( * Scan the buffer IO completions for other inodes being completed and * attach them to the current inode log item. */ - blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + blip = bp->b_fspriv; prev = NULL; while (blip != NULL) { if (lip->li_cb != xfs_iflush_done) { @@ -886,7 +886,7 @@ xfs_iflush_done( /* remove from list */ next = blip->li_bio_list; if (!prev) { - XFS_BUF_SET_FSPRIVATE(bp, next); + bp->b_fspriv = next; } else { prev->li_bio_list = next; } diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 82c797dd6520..a8718ed9776d 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -871,13 +871,9 @@ xlog_space_left( void xlog_iodone(xfs_buf_t *bp) { - xlog_in_core_t *iclog; - xlog_t *l; - int aborted; - - iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); - aborted = 0; - l = iclog->ic_log; + xlog_in_core_t *iclog = bp->b_fspriv; + xlog_t *l = iclog->ic_log; + int aborted = 0; /* * Race to shutdown the filesystem if we see an error. 
@@ -1249,9 +1245,8 @@ STATIC int xlog_bdstrat( struct xfs_buf *bp) { - struct xlog_in_core *iclog; + struct xlog_in_core *iclog = bp->b_fspriv; - iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *); if (iclog->ic_state & XLOG_STATE_IOERROR) { XFS_BUF_ERROR(bp, EIO); XFS_BUF_STALE(bp); @@ -1358,7 +1353,7 @@ xlog_sync(xlog_t *log, iclog->ic_bwritecnt = 1; } XFS_BUF_SET_COUNT(bp, count); - XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ + bp->b_fspriv = iclog; XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); @@ -1405,7 +1400,7 @@ xlog_sync(xlog_t *log, XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */ XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+ (__psint_t)count), split); - XFS_BUF_SET_FSPRIVATE(bp, iclog); + bp->b_fspriv = iclog; XFS_BUF_ZEROFLAGS(bp); XFS_BUF_BUSY(bp); XFS_BUF_ASYNC(bp); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 09a0468ef11a..f910e51444a3 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -89,7 +89,7 @@ _xfs_trans_bjoin( * The checks to see if one is there are in xfs_buf_item_init(). */ xfs_buf_item_init(bp, tp->t_mountp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + bip = bp->b_fspriv; ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED)); @@ -173,7 +173,7 @@ xfs_trans_get_buf(xfs_trans_t *tp, ASSERT(!XFS_BUF_ISDELAYWRITE(bp)); ASSERT(bp->b_transp == tp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + bip = bp->b_fspriv; ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_recur++; @@ -233,7 +233,7 @@ xfs_trans_getsb(xfs_trans_t *tp, */ bp = mp->m_sb_bp; if (bp->b_transp == tp) { - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + bip = bp->b_fspriv; ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_recur++; @@ -329,7 +329,7 @@ xfs_trans_read_buf( if (bp != NULL) { ASSERT(xfs_buf_islocked(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bp->b_fspriv != NULL); ASSERT((XFS_BUF_ISERROR(bp)) == 0); if (!(XFS_BUF_ISDONE(bp))) { trace_xfs_trans_read_buf_io(bp, _RET_IP_); @@ -363,7 +363,7 @@ xfs_trans_read_buf( } - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*); + bip = bp->b_fspriv; bip->bli_recur++; ASSERT(atomic_read(&bip->bli_refcount) > 0); @@ -460,32 +460,30 @@ xfs_trans_brelse(xfs_trans_t *tp, xfs_buf_t *bp) { xfs_buf_log_item_t *bip; - xfs_log_item_t *lip; /* * Default to a normal brelse() call if the tp is NULL. */ if (tp == NULL) { + struct xfs_log_item *lip = bp->b_fspriv; + ASSERT(bp->b_transp == NULL); + /* * If there's a buf log item attached to the buffer, * then let the AIL know that the buffer is being * unlocked. 
*/ - if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) { - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - if (lip->li_type == XFS_LI_BUF) { - bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*); - xfs_trans_unlocked_item(bip->bli_item.li_ailp, - lip); - } + if (lip != NULL && lip->li_type == XFS_LI_BUF) { + bip = bp->b_fspriv; + xfs_trans_unlocked_item(bip->bli_item.li_ailp, lip); } xfs_buf_relse(bp); return; } ASSERT(bp->b_transp == tp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + bip = bp->b_fspriv; ASSERT(bip->bli_item.li_type == XFS_LI_BUF); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); @@ -581,16 +579,15 @@ void xfs_trans_bhold(xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); ASSERT(atomic_read(&bip->bli_refcount) > 0); + bip->bli_flags |= XFS_BLI_HOLD; trace_xfs_trans_bhold(bip); } @@ -603,19 +600,17 @@ void xfs_trans_bhold_release(xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(!(bip->bli_flags & XFS_BLI_STALE)); ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL)); ASSERT(atomic_read(&bip->bli_refcount) > 0); ASSERT(bip->bli_flags & XFS_BLI_HOLD); - bip->bli_flags &= ~XFS_BLI_HOLD; + bip->bli_flags &= ~XFS_BLI_HOLD; trace_xfs_trans_bhold_release(bip); } @@ -634,11 +629,11 @@ xfs_trans_log_buf(xfs_trans_t *tp, uint first, uint last) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bip != NULL); ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) || (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks)); @@ -656,7 +651,6 @@ xfs_trans_log_buf(xfs_trans_t *tp, XFS_BUF_DELAYWRITE(bp); XFS_BUF_DONE(bp); - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); ASSERT(atomic_read(&bip->bli_refcount) > 0); XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); bip->bli_item.li_cb = xfs_buf_iodone; @@ -706,13 +700,11 @@ xfs_trans_binval( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); trace_xfs_trans_binval(bip); @@ -780,13 +772,11 @@ xfs_trans_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_BUF; @@ -806,13 +796,11 @@ xfs_trans_stale_inode_buf( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == 
tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_STALE_INODE; @@ -833,13 +821,11 @@ xfs_trans_inode_alloc_buf( xfs_trans_t *tp, xfs_buf_t *bp) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); + ASSERT(bip != NULL); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; @@ -863,16 +849,14 @@ xfs_trans_dquot_buf( xfs_buf_t *bp, uint type) { - xfs_buf_log_item_t *bip; + xfs_buf_log_item_t *bip = bp->b_fspriv; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(bp->b_transp == tp); - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); + ASSERT(bip != NULL); ASSERT(type == XFS_BLF_UDQUOT_BUF || type == XFS_BLF_PDQUOT_BUF || type == XFS_BLF_GDQUOT_BUF); - - bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *); ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_format.blf_flags |= type; -- cgit v1.2.3 From cb669ca5701153a808db6627521cc8aa52fc42d1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:49 +0200 Subject: xfs: remove wrappers around b_iodone Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.c | 2 +- fs/xfs/linux-2.6/xfs_buf.h | 4 ---- fs/xfs/xfs_buf_item.c | 16 ++++++++-------- fs/xfs/xfs_inode.c | 4 ++-- fs/xfs/xfs_log.c | 4 ++-- fs/xfs/xfs_log_recover.c | 8 ++++---- fs/xfs/xfs_trans_buf.c | 6 +++--- 7 files changed, 20 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index a0d97c5a3f15..888176ad5308 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1107,7 +1107,7 @@ xfs_bioerror_relse( XFS_BUF_UNDELAYWRITE(bp); XFS_BUF_DONE(bp); XFS_BUF_STALE(bp); - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; if (!(fl & XBF_ASYNC)) { /* * Mark b_error and B_ERROR _both_. 
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 033c152c411c..6a83b46b4bcf 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -276,10 +276,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE) #define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE) -#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone) -#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func)) -#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL) - #define XFS_BUF_SET_START(bp) do { } while (0) #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 9eacf06932bb..88492916c3dc 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -453,7 +453,7 @@ xfs_buf_item_unpin( if (bip->bli_flags & XFS_BLI_STALE_INODE) { xfs_buf_do_callbacks(bp); bp->b_fspriv = NULL; - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; } else { spin_lock(&ailp->xa_lock); xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip); @@ -870,8 +870,8 @@ xfs_buf_item_relse( bip = bp->b_fspriv; bp->b_fspriv = bip->bli_item.li_bio_list; - if (bp->b_fspriv == NULL && XFS_BUF_IODONE_FUNC(bp) != NULL) - XFS_BUF_CLR_IODONE_FUNC(bp); + if (bp->b_fspriv == NULL) + bp->b_iodone = NULL; xfs_buf_rele(bp); xfs_buf_item_free(bip); @@ -907,9 +907,9 @@ xfs_buf_attach_iodone( bp->b_fspriv = lip; } - ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) || - (XFS_BUF_IODONE_FUNC(bp) == NULL)); - XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); + ASSERT(bp->b_iodone == NULL || + bp->b_iodone == xfs_buf_iodone_callbacks); + bp->b_iodone = xfs_buf_iodone_callbacks; } /* @@ -998,7 +998,7 @@ xfs_buf_iodone_callbacks( XFS_BUF_DONE(bp); XFS_BUF_SET_START(bp); } - ASSERT(XFS_BUF_IODONE_FUNC(bp)); + ASSERT(bp->b_iodone != NULL); trace_xfs_buf_item_iodone_async(bp, _RET_IP_); xfs_buf_relse(bp); return; @@ -1018,7 +1018,7 @@ xfs_buf_iodone_callbacks( do_callbacks: xfs_buf_do_callbacks(bp); bp->b_fspriv = NULL; - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; xfs_buf_ioend(bp, 0); } diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 34c6872b5f7b..1314c83ded1c 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2471,7 +2471,7 @@ cluster_corrupt_out: * mark the buffer as an error and call them. Otherwise * mark it as stale and brelse. 
*/ - if (XFS_BUF_IODONE_FUNC(bp)) { + if (bp->b_iodone) { XFS_BUF_UNDONE(bp); XFS_BUF_STALE(bp); XFS_BUF_ERROR(bp,EIO); @@ -2812,7 +2812,7 @@ xfs_iflush_int( xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); ASSERT(bp->b_fspriv != NULL); - ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL); + ASSERT(bp->b_iodone != NULL); } else { /* * We're flushing an inode which is not in the AIL and has diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a8718ed9776d..06ff8437ed8e 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1050,7 +1050,7 @@ xlog_alloc_log(xfs_mount_t *mp, bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp); if (!bp) goto out_free_log; - XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + bp->b_iodone = xlog_iodone; ASSERT(XFS_BUF_ISBUSY(bp)); ASSERT(xfs_buf_islocked(bp)); log->l_xbuf = bp; @@ -1084,7 +1084,7 @@ xlog_alloc_log(xfs_mount_t *mp, if (!bp) goto out_free_iclog; - XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); + bp->b_iodone = xlog_iodone; iclog->ic_bp = bp; iclog->ic_data = bp->b_addr; #ifdef DEBUG diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 40c9742aa478..8fe4206de057 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -371,7 +371,7 @@ xlog_recover_iodone( xfs_force_shutdown(bp->b_target->bt_mount, SHUTDOWN_META_IO_ERROR); } - XFS_BUF_CLR_IODONE_FUNC(bp); + bp->b_iodone = NULL; xfs_buf_ioend(bp, 0); } @@ -2178,7 +2178,7 @@ xlog_recover_buffer_pass2( error = xfs_bwrite(mp, bp); } else { ASSERT(bp->b_target->bt_mount == mp); - XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + bp->b_iodone = xlog_recover_iodone; xfs_bdwrite(mp, bp); } @@ -2438,7 +2438,7 @@ xlog_recover_inode_pass2( write_inode_buffer: ASSERT(bp->b_target->bt_mount == mp); - XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + bp->b_iodone = xlog_recover_iodone; xfs_bdwrite(mp, bp); error: if (need_free) @@ -2560,7 +2560,7 @@ xlog_recover_dquot_pass2( ASSERT(dq_f->qlf_size == 2); ASSERT(bp->b_target->bt_mount == mp); - XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone); + bp->b_iodone = xlog_recover_iodone; xfs_bdwrite(mp, bp); return (0); diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index f910e51444a3..15584fc3ed7d 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -635,8 +635,8 @@ xfs_trans_log_buf(xfs_trans_t *tp, ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp))); - ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) || - (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks)); + ASSERT(bp->b_iodone == NULL || + bp->b_iodone == xfs_buf_iodone_callbacks); /* * Mark the buffer as needing to be written out eventually, @@ -652,7 +652,7 @@ xfs_trans_log_buf(xfs_trans_t *tp, XFS_BUF_DONE(bp); ASSERT(atomic_read(&bip->bli_refcount) > 0); - XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); + bp->b_iodone = xfs_buf_iodone_callbacks; bip->bli_item.li_cb = xfs_buf_iodone; trace_xfs_trans_log_buf(bip); -- cgit v1.2.3 From 54244fec67024032cb54eb604b2a36579b5db2d3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:49 +0200 Subject: xfs: remove the unused xfs_buf_delwri_sort function Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_buf.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 888176ad5308..b2b411985591 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -1721,14 +1721,6 @@ 
xfs_buf_cmp( return 0; } -void -xfs_buf_delwri_sort( - xfs_buftarg_t *target, - struct list_head *list) -{ - list_sort(NULL, list, xfs_buf_cmp); -} - STATIC int xfsbufd( void *data) -- cgit v1.2.3 From ea15ab3cdda1bc6822f572ce9ce5d86f097f9b08 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:50 +0200 Subject: xfs: remove the dead QUOTADEBUG code Remove the dead hash table test rid which has been rotting away under QUOTADEBUG, including some code that was compiled for normal debug builds, but not actually called without QUOTADEBUG, and enable a few cheap debug checks that were hidden under QUOTADEBUG for normal debug builds. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/quota/xfs_dquot.c | 42 +---- fs/xfs/quota/xfs_dquot.h | 6 - fs/xfs/quota/xfs_qm.c | 49 +----- fs/xfs/quota/xfs_qm.h | 6 - fs/xfs/quota/xfs_qm_syscalls.c | 353 ----------------------------------------- fs/xfs/quota/xfs_trans_dquot.c | 13 +- fs/xfs/xfs.h | 1 - fs/xfs/xfs_mount.c | 4 - 8 files changed, 8 insertions(+), 466 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index dec11497a2ef..837f31158d43 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -220,7 +220,7 @@ xfs_qm_adjust_dqtimers( { ASSERT(d->d_id); -#ifdef QUOTADEBUG +#ifdef DEBUG if (d->d_blk_hardlimit) ASSERT(be64_to_cpu(d->d_blk_softlimit) <= be64_to_cpu(d->d_blk_hardlimit)); @@ -231,6 +231,7 @@ xfs_qm_adjust_dqtimers( ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= be64_to_cpu(d->d_rtb_hardlimit)); #endif + if (!d->d_btimer) { if ((d->d_blk_softlimit && (be64_to_cpu(d->d_bcount) >= @@ -1423,45 +1424,6 @@ xfs_qm_dqpurge( } -#ifdef QUOTADEBUG -void -xfs_qm_dqprint(xfs_dquot_t *dqp) -{ - struct xfs_mount *mp = dqp->q_mount; - - xfs_debug(mp, "-----------KERNEL DQUOT----------------"); - xfs_debug(mp, "---- dquotID = %d", - (int)be32_to_cpu(dqp->q_core.d_id)); - xfs_debug(mp, "---- type = %s", DQFLAGTO_TYPESTR(dqp)); - xfs_debug(mp, "---- fs = 0x%p", dqp->q_mount); - xfs_debug(mp, "---- blkno = 0x%x", (int) dqp->q_blkno); - xfs_debug(mp, "---- boffset = 0x%x", (int) dqp->q_bufoffset); - xfs_debug(mp, "---- blkhlimit = %Lu (0x%x)", - be64_to_cpu(dqp->q_core.d_blk_hardlimit), - (int)be64_to_cpu(dqp->q_core.d_blk_hardlimit)); - xfs_debug(mp, "---- blkslimit = %Lu (0x%x)", - be64_to_cpu(dqp->q_core.d_blk_softlimit), - (int)be64_to_cpu(dqp->q_core.d_blk_softlimit)); - xfs_debug(mp, "---- inohlimit = %Lu (0x%x)", - be64_to_cpu(dqp->q_core.d_ino_hardlimit), - (int)be64_to_cpu(dqp->q_core.d_ino_hardlimit)); - xfs_debug(mp, "---- inoslimit = %Lu (0x%x)", - be64_to_cpu(dqp->q_core.d_ino_softlimit), - (int)be64_to_cpu(dqp->q_core.d_ino_softlimit)); - xfs_debug(mp, "---- bcount = %Lu (0x%x)", - be64_to_cpu(dqp->q_core.d_bcount), - (int)be64_to_cpu(dqp->q_core.d_bcount)); - xfs_debug(mp, "---- icount = %Lu (0x%x)", - be64_to_cpu(dqp->q_core.d_icount), - (int)be64_to_cpu(dqp->q_core.d_icount)); - xfs_debug(mp, "---- btimer = %d", - (int)be32_to_cpu(dqp->q_core.d_btimer)); - xfs_debug(mp, "---- itimer = %d", - (int)be32_to_cpu(dqp->q_core.d_itimer)); - xfs_debug(mp, "---------------------------"); -} -#endif - /* * Give the buffer a little push if it is incore and * wait on the flush lock. 
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h index 5da3a23b820d..34b7e945dbfa 100644 --- a/fs/xfs/quota/xfs_dquot.h +++ b/fs/xfs/quota/xfs_dquot.h @@ -116,12 +116,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp) (XFS_IS_UQUOTA_ON((d)->q_mount)) : \ (XFS_IS_OQUOTA_ON((d)->q_mount)))) -#ifdef QUOTADEBUG -extern void xfs_qm_dqprint(xfs_dquot_t *); -#else -#define xfs_qm_dqprint(a) -#endif - extern void xfs_qm_dqdestroy(xfs_dquot_t *); extern int xfs_qm_dqflush(xfs_dquot_t *, uint); extern int xfs_qm_dqpurge(xfs_dquot_t *); diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index b94dace4e785..46e54ad9a2dc 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -67,32 +67,6 @@ static struct shrinker xfs_qm_shaker = { .seeks = DEFAULT_SEEKS, }; -#ifdef DEBUG -extern struct mutex qcheck_lock; -#endif - -#ifdef QUOTADEBUG -static void -xfs_qm_dquot_list_print( - struct xfs_mount *mp) -{ - xfs_dquot_t *dqp; - int i = 0; - - list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist_lock, qi_mplist) { - xfs_debug(mp, " %d. \"%d (%s)\" " - "bcnt = %lld, icnt = %lld, refs = %d", - i++, be32_to_cpu(dqp->q_core.d_id), - DQFLAGTO_TYPESTR(dqp), - (long long)be64_to_cpu(dqp->q_core.d_bcount), - (long long)be64_to_cpu(dqp->q_core.d_icount), - dqp->q_nrefs); - } -} -#else -static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { } -#endif - /* * Initialize the XQM structure. * Note that there is not one quota manager per file system. @@ -165,9 +139,6 @@ xfs_Gqm_init(void) atomic_set(&xqm->qm_totaldquots, 0); xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO; xqm->qm_nrefs = 0; -#ifdef DEBUG - mutex_init(&qcheck_lock); -#endif return xqm; out_free_udqhash: @@ -204,9 +175,6 @@ xfs_qm_destroy( mutex_lock(&xqm->qm_dqfrlist_lock); list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) { xfs_dqlock(dqp); -#ifdef QUOTADEBUG - xfs_debug(dqp->q_mount, "FREELIST destroy 0x%p", dqp); -#endif list_del_init(&dqp->q_freelist); xfs_Gqm->qm_dqfrlist_cnt--; xfs_dqunlock(dqp); @@ -214,9 +182,6 @@ xfs_qm_destroy( } mutex_unlock(&xqm->qm_dqfrlist_lock); mutex_destroy(&xqm->qm_dqfrlist_lock); -#ifdef DEBUG - mutex_destroy(&qcheck_lock); -#endif kmem_free(xqm); } @@ -409,11 +374,6 @@ xfs_qm_mount_quotas( xfs_warn(mp, "Failed to initialize disk quotas."); return; } - -#ifdef QUOTADEBUG - if (XFS_IS_QUOTA_ON(mp)) - xfs_qm_internalqcheck(mp); -#endif } /* @@ -866,8 +826,8 @@ xfs_qm_dqattach_locked( } done: -#ifdef QUOTADEBUG - if (! 
error) { +#ifdef DEBUG + if (!error) { if (XFS_IS_UQUOTA_ON(mp)) ASSERT(ip->i_udquot); if (XFS_IS_OQUOTA_ON(mp)) @@ -1733,8 +1693,6 @@ xfs_qm_quotacheck( mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD); mp->m_qflags |= flags; - xfs_qm_dquot_list_print(mp); - error_return: if (error) { xfs_warn(mp, @@ -2096,9 +2054,6 @@ xfs_qm_write_sb_changes( xfs_trans_t *tp; int error; -#ifdef QUOTADEBUG - xfs_notice(mp, "Writing superblock quota changes"); -#endif tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); if ((error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h index 567b29b9f1b3..43b9abe1052c 100644 --- a/fs/xfs/quota/xfs_qm.h +++ b/fs/xfs/quota/xfs_qm.h @@ -163,10 +163,4 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *); extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint); extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint); -#ifdef DEBUG -extern int xfs_qm_internalqcheck(xfs_mount_t *); -#else -#define xfs_qm_internalqcheck(mp) (0) -#endif - #endif /* __XFS_QM_H__ */ diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index f2dfc74ccf34..609246f42e6c 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -622,7 +622,6 @@ xfs_qm_scall_setqlim( xfs_trans_log_dquot(tp, dqp); error = xfs_trans_commit(tp, 0); - xfs_qm_dqprint(dqp); xfs_qm_dqrele(dqp); out_unlock: @@ -657,7 +656,6 @@ xfs_qm_scall_getquota( xfs_qm_dqput(dqp); return XFS_ERROR(ENOENT); } - /* xfs_qm_dqprint(dqp); */ /* * Convert the disk dquot to the exportable format */ @@ -906,354 +904,3 @@ xfs_qm_dqrele_all_inodes( ASSERT(mp->m_quotainfo); xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags); } - -/*------------------------------------------------------------------------*/ -#ifdef DEBUG -/* - * This contains all the test functions for XFS disk quotas. - * Currently it does a quota accounting check. ie. it walks through - * all inodes in the file system, calculating the dquot accounting fields, - * and prints out any inconsistencies. - */ -xfs_dqhash_t *qmtest_udqtab; -xfs_dqhash_t *qmtest_gdqtab; -int qmtest_hashmask; -int qmtest_nfails; -struct mutex qcheck_lock; - -#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \ - (__psunsigned_t)(id)) & \ - (qmtest_hashmask - 1)) - -#define DQTEST_HASH(mp, id, type) ((type & XFS_DQ_USER) ? \ - (qmtest_udqtab + \ - DQTEST_HASHVAL(mp, id)) : \ - (qmtest_gdqtab + \ - DQTEST_HASHVAL(mp, id))) - -#define DQTEST_LIST_PRINT(l, NXT, title) \ -{ \ - xfs_dqtest_t *dqp; int i = 0;\ - xfs_debug(NULL, "%s (#%d)", title, (int) (l)->qh_nelems); \ - for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \ - dqp = (xfs_dqtest_t *)dqp->NXT) { \ - xfs_debug(dqp->q_mount, \ - " %d. 
\"%d (%s)\" bcnt = %d, icnt = %d", \ - ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \ - dqp->d_bcount, dqp->d_icount); } \ -} - -typedef struct dqtest { - uint dq_flags; /* various flags (XFS_DQ_*) */ - struct list_head q_hashlist; - xfs_dqhash_t *q_hash; /* the hashchain header */ - xfs_mount_t *q_mount; /* filesystem this relates to */ - xfs_dqid_t d_id; /* user id or group id */ - xfs_qcnt_t d_bcount; /* # disk blocks owned by the user */ - xfs_qcnt_t d_icount; /* # inodes owned by the user */ -} xfs_dqtest_t; - -STATIC void -xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp) -{ - list_add(&dqp->q_hashlist, &h->qh_list); - h->qh_version++; - h->qh_nelems++; -} -STATIC void -xfs_qm_dqtest_print( - struct xfs_mount *mp, - struct dqtest *d) -{ - xfs_debug(mp, "-----------DQTEST DQUOT----------------"); - xfs_debug(mp, "---- dquot ID = %d", d->d_id); - xfs_debug(mp, "---- fs = 0x%p", d->q_mount); - xfs_debug(mp, "---- bcount = %Lu (0x%x)", - d->d_bcount, (int)d->d_bcount); - xfs_debug(mp, "---- icount = %Lu (0x%x)", - d->d_icount, (int)d->d_icount); - xfs_debug(mp, "---------------------------"); -} - -STATIC void -xfs_qm_dqtest_failed( - xfs_dqtest_t *d, - xfs_dquot_t *dqp, - char *reason, - xfs_qcnt_t a, - xfs_qcnt_t b, - int error) -{ - qmtest_nfails++; - if (error) - xfs_debug(dqp->q_mount, - "quotacheck failed id=%d, err=%d\nreason: %s", - d->d_id, error, reason); - else - xfs_debug(dqp->q_mount, - "quotacheck failed id=%d (%s) [%d != %d]", - d->d_id, reason, (int)a, (int)b); - xfs_qm_dqtest_print(dqp->q_mount, d); - if (dqp) - xfs_qm_dqprint(dqp); -} - -STATIC int -xfs_dqtest_cmp2( - xfs_dqtest_t *d, - xfs_dquot_t *dqp) -{ - int err = 0; - if (be64_to_cpu(dqp->q_core.d_icount) != d->d_icount) { - xfs_qm_dqtest_failed(d, dqp, "icount mismatch", - be64_to_cpu(dqp->q_core.d_icount), - d->d_icount, 0); - err++; - } - if (be64_to_cpu(dqp->q_core.d_bcount) != d->d_bcount) { - xfs_qm_dqtest_failed(d, dqp, "bcount mismatch", - be64_to_cpu(dqp->q_core.d_bcount), - d->d_bcount, 0); - err++; - } - if (dqp->q_core.d_blk_softlimit && - be64_to_cpu(dqp->q_core.d_bcount) >= - be64_to_cpu(dqp->q_core.d_blk_softlimit)) { - if (!dqp->q_core.d_btimer && dqp->q_core.d_id) { - xfs_debug(dqp->q_mount, - "%d [%s] BLK TIMER NOT STARTED", - d->d_id, DQFLAGTO_TYPESTR(d)); - err++; - } - } - if (dqp->q_core.d_ino_softlimit && - be64_to_cpu(dqp->q_core.d_icount) >= - be64_to_cpu(dqp->q_core.d_ino_softlimit)) { - if (!dqp->q_core.d_itimer && dqp->q_core.d_id) { - xfs_debug(dqp->q_mount, - "%d [%s] INO TIMER NOT STARTED", - d->d_id, DQFLAGTO_TYPESTR(d)); - err++; - } - } -#ifdef QUOTADEBUG - if (!err) { - xfs_debug(dqp->q_mount, "%d [%s] qchecked", - d->d_id, DQFLAGTO_TYPESTR(d)); - } -#endif - return (err); -} - -STATIC void -xfs_dqtest_cmp( - xfs_dqtest_t *d) -{ - xfs_dquot_t *dqp; - int error; - - /* xfs_qm_dqtest_print(d); */ - if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0, - &dqp))) { - xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error); - return; - } - xfs_dqtest_cmp2(d, dqp); - xfs_qm_dqput(dqp); -} - -STATIC int -xfs_qm_internalqcheck_dqget( - xfs_mount_t *mp, - xfs_dqid_t id, - uint type, - xfs_dqtest_t **O_dq) -{ - xfs_dqtest_t *d; - xfs_dqhash_t *h; - - h = DQTEST_HASH(mp, id, type); - list_for_each_entry(d, &h->qh_list, q_hashlist) { - if (d->d_id == id && mp == d->q_mount) { - *O_dq = d; - return (0); - } - } - d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP); - d->dq_flags = type; - d->d_id = id; - d->q_mount = mp; - d->q_hash = h; - INIT_LIST_HEAD(&d->q_hashlist); - 
xfs_qm_hashinsert(h, d); - *O_dq = d; - return (0); -} - -STATIC void -xfs_qm_internalqcheck_get_dquots( - xfs_mount_t *mp, - xfs_dqid_t uid, - xfs_dqid_t projid, - xfs_dqid_t gid, - xfs_dqtest_t **ud, - xfs_dqtest_t **gd) -{ - if (XFS_IS_UQUOTA_ON(mp)) - xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud); - if (XFS_IS_GQUOTA_ON(mp)) - xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd); - else if (XFS_IS_PQUOTA_ON(mp)) - xfs_qm_internalqcheck_dqget(mp, projid, XFS_DQ_PROJ, gd); -} - - -STATIC void -xfs_qm_internalqcheck_dqadjust( - xfs_inode_t *ip, - xfs_dqtest_t *d) -{ - d->d_icount++; - d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks; -} - -STATIC int -xfs_qm_internalqcheck_adjust( - xfs_mount_t *mp, /* mount point for filesystem */ - xfs_ino_t ino, /* inode number to get data for */ - void __user *buffer, /* not used */ - int ubsize, /* not used */ - int *ubused, /* not used */ - int *res) /* bulkstat result code */ -{ - xfs_inode_t *ip; - xfs_dqtest_t *ud, *gd; - uint lock_flags; - boolean_t ipreleased; - int error; - - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); - - if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { - *res = BULKSTAT_RV_NOTHING; - xfs_debug(mp, "%s: ino=%llu, uqino=%llu, gqino=%llu\n", - __func__, (unsigned long long) ino, - (unsigned long long) mp->m_sb.sb_uquotino, - (unsigned long long) mp->m_sb.sb_gquotino); - return XFS_ERROR(EINVAL); - } - ipreleased = B_FALSE; - again: - lock_flags = XFS_ILOCK_SHARED; - if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) { - *res = BULKSTAT_RV_NOTHING; - return (error); - } - - /* - * This inode can have blocks after eof which can get released - * when we send it to inactive. Since we don't check the dquot - * until the after all our calculations are done, we must get rid - * of those now. - */ - if (! ipreleased) { - xfs_iunlock(ip, lock_flags); - IRELE(ip); - ipreleased = B_TRUE; - goto again; - } - xfs_qm_internalqcheck_get_dquots(mp, - (xfs_dqid_t) ip->i_d.di_uid, - (xfs_dqid_t) xfs_get_projid(ip), - (xfs_dqid_t) ip->i_d.di_gid, - &ud, &gd); - if (XFS_IS_UQUOTA_ON(mp)) { - ASSERT(ud); - xfs_qm_internalqcheck_dqadjust(ip, ud); - } - if (XFS_IS_OQUOTA_ON(mp)) { - ASSERT(gd); - xfs_qm_internalqcheck_dqadjust(ip, gd); - } - xfs_iunlock(ip, lock_flags); - IRELE(ip); - *res = BULKSTAT_RV_DIDONE; - return (0); -} - - -/* PRIVATE, debugging */ -int -xfs_qm_internalqcheck( - xfs_mount_t *mp) -{ - xfs_ino_t lastino; - int done, count; - int i; - int error; - - lastino = 0; - qmtest_hashmask = 32; - count = 5; - done = 0; - qmtest_nfails = 0; - - if (! XFS_IS_QUOTA_ON(mp)) - return XFS_ERROR(ESRCH); - - xfs_log_force(mp, XFS_LOG_SYNC); - XFS_bflush(mp->m_ddev_targp); - xfs_log_force(mp, XFS_LOG_SYNC); - XFS_bflush(mp->m_ddev_targp); - - mutex_lock(&qcheck_lock); - /* There should be absolutely no quota activity while this - is going on. 
*/ - qmtest_udqtab = kmem_zalloc(qmtest_hashmask * - sizeof(xfs_dqhash_t), KM_SLEEP); - qmtest_gdqtab = kmem_zalloc(qmtest_hashmask * - sizeof(xfs_dqhash_t), KM_SLEEP); - do { - /* - * Iterate thru all the inodes in the file system, - * adjusting the corresponding dquot counters - */ - error = xfs_bulkstat(mp, &lastino, &count, - xfs_qm_internalqcheck_adjust, - 0, NULL, &done); - if (error) { - xfs_debug(mp, "Bulkstat returned error 0x%x", error); - break; - } - } while (!done); - - xfs_debug(mp, "Checking results against system dquots"); - for (i = 0; i < qmtest_hashmask; i++) { - xfs_dqtest_t *d, *n; - xfs_dqhash_t *h; - - h = &qmtest_udqtab[i]; - list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) { - xfs_dqtest_cmp(d); - kmem_free(d); - } - h = &qmtest_gdqtab[i]; - list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) { - xfs_dqtest_cmp(d); - kmem_free(d); - } - } - - if (qmtest_nfails) { - xfs_debug(mp, "******** quotacheck failed ********"); - xfs_debug(mp, "failures = %d", qmtest_nfails); - } else { - xfs_debug(mp, "******** quotacheck successful! ********"); - } - kmem_free(qmtest_udqtab); - kmem_free(qmtest_gdqtab); - mutex_unlock(&qcheck_lock); - return (qmtest_nfails); -} - -#endif /* DEBUG */ diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index 24084edb7803..4d00ee67792d 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c @@ -387,18 +387,18 @@ xfs_trans_apply_dquot_deltas( qtrx->qt_delbcnt_delta; totalrtbdelta = qtrx->qt_rtbcount_delta + qtrx->qt_delrtb_delta; -#ifdef QUOTADEBUG +#ifdef DEBUG if (totalbdelta < 0) ASSERT(be64_to_cpu(d->d_bcount) >= - (xfs_qcnt_t) -totalbdelta); + -totalbdelta); if (totalrtbdelta < 0) ASSERT(be64_to_cpu(d->d_rtbcount) >= - (xfs_qcnt_t) -totalrtbdelta); + -totalrtbdelta); if (qtrx->qt_icount_delta < 0) ASSERT(be64_to_cpu(d->d_icount) >= - (xfs_qcnt_t) -qtrx->qt_icount_delta); + -qtrx->qt_icount_delta); #endif if (totalbdelta) be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta); @@ -642,11 +642,6 @@ xfs_trans_dqresv( ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) || (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) && (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) { -#ifdef QUOTADEBUG - xfs_debug(mp, - "BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?", - nblks, *resbcountp, hardlimit); -#endif if (nblks > 0) { /* * dquot is locked already. See if we'd go over the diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 5ad8ad3a1dcd..53ec3ea9a625 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h @@ -22,7 +22,6 @@ #define STATIC #define DEBUG 1 #define XFS_BUF_LOCK_TRACKING 1 -/* #define QUOTADEBUG 1 */ #endif #include diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index a19e92381f90..1fe643aa0efd 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1089,10 +1089,6 @@ xfs_mount_reset_sbqflags( if (mp->m_flags & XFS_MOUNT_RDONLY) return 0; -#ifdef QUOTADEBUG - xfs_notice(mp, "Writing superblock quota changes"); -#endif - tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, XFS_DEFAULT_LOG_COUNT); -- cgit v1.2.3 From c84470dda7a1165d90f55c2025c4c8ca403d485e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:50 +0200 Subject: xfs: remove leftovers of the old btree tracing code Remove various bits left over from the old kdb-only btree tracing code, but leave the actual trace point stubs in place to ease adding new event based btree tracing. 
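To illustrate why leaving the stubs behind is safe: the macros that remain in xfs_btree.h have empty bodies, so the preprocessor simply discards the arguments of every invocation and existing call sites keep compiling even though the old trace sub-type tokens and trace callbacks no longer exist anywhere. The caller below is a sketch only — xfs_btree_example_op and its body are made up for illustration and are not part of this patch:

	STATIC int				/* sketch only, not in the patch */
	xfs_btree_example_op(
		struct xfs_btree_cur	*cur)
	{
		/* Empty macro: expands to nothing, the XBT_ENTRY token is discarded. */
		XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

		/* ... the real btree work would happen here ... */

		/* Also expands to nothing; no tracing symbols need to be defined. */
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		return 0;
	}

Keeping the no-op macros at the call sites means a later conversion to the generic trace events only has to fill in the macro bodies instead of revisiting every btree function.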
Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/Makefile | 2 - fs/xfs/linux-2.6/xfs_super.c | 1 - fs/xfs/xfs_alloc_btree.c | 75 ------------- fs/xfs/xfs_bmap_btree.c | 98 ----------------- fs/xfs/xfs_btree.c | 1 - fs/xfs/xfs_btree.h | 38 +++---- fs/xfs/xfs_btree_trace.c | 249 ------------------------------------------- fs/xfs/xfs_btree_trace.h | 99 ----------------- fs/xfs/xfs_ialloc_btree.c | 75 ------------- fs/xfs/xfs_iget.c | 1 - fs/xfs/xfs_inode.c | 1 - 11 files changed, 19 insertions(+), 621 deletions(-) delete mode 100644 fs/xfs/xfs_btree_trace.c delete mode 100644 fs/xfs/xfs_btree_trace.h (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 284a7c89697e..75bb316529dd 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -88,8 +88,6 @@ xfs-y += xfs_alloc.o \ xfs_vnodeops.o \ xfs_rw.o -xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o - # Objects in linux/ xfs-y += $(addprefix $(XFS_LINUX)/, \ kmem.o \ diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 3ebb4588e1be..25fd2cd6c8b0 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -33,7 +33,6 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index e277777a7cf9..ffb3386e45c1 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c @@ -31,7 +31,6 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_alloc.h" #include "xfs_error.h" #include "xfs_trace.h" @@ -311,72 +310,6 @@ xfs_allocbt_recs_inorder( } #endif /* DEBUG */ -#ifdef XFS_BTREE_TRACE -ktrace_t *xfs_allocbt_trace_buf; - -STATIC void -xfs_allocbt_trace_enter( - struct xfs_btree_cur *cur, - const char *func, - char *s, - int type, - int line, - __psunsigned_t a0, - __psunsigned_t a1, - __psunsigned_t a2, - __psunsigned_t a3, - __psunsigned_t a4, - __psunsigned_t a5, - __psunsigned_t a6, - __psunsigned_t a7, - __psunsigned_t a8, - __psunsigned_t a9, - __psunsigned_t a10) -{ - ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type, - (void *)func, (void *)s, NULL, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); -} - -STATIC void -xfs_allocbt_trace_cursor( - struct xfs_btree_cur *cur, - __uint32_t *s0, - __uint64_t *l0, - __uint64_t *l1) -{ - *s0 = cur->bc_private.a.agno; - *l0 = cur->bc_rec.a.ar_startblock; - *l1 = cur->bc_rec.a.ar_blockcount; -} - -STATIC void -xfs_allocbt_trace_key( - struct xfs_btree_cur *cur, - union xfs_btree_key *key, - __uint64_t *l0, - __uint64_t *l1) -{ - *l0 = be32_to_cpu(key->alloc.ar_startblock); - *l1 = be32_to_cpu(key->alloc.ar_blockcount); -} - -STATIC void -xfs_allocbt_trace_record( - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - __uint64_t *l0, - __uint64_t *l1, - __uint64_t *l2) -{ - *l0 = be32_to_cpu(rec->alloc.ar_startblock); - *l1 = be32_to_cpu(rec->alloc.ar_blockcount); - *l2 = 0; -} -#endif /* XFS_BTREE_TRACE */ - static const struct xfs_btree_ops xfs_allocbt_ops = { .rec_len = sizeof(xfs_alloc_rec_t), .key_len = sizeof(xfs_alloc_key_t), @@ -393,18 +326,10 @@ static const struct xfs_btree_ops xfs_allocbt_ops = { .init_rec_from_cur = xfs_allocbt_init_rec_from_cur, .init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur, .key_diff = xfs_allocbt_key_diff, - 
#ifdef DEBUG .keys_inorder = xfs_allocbt_keys_inorder, .recs_inorder = xfs_allocbt_recs_inorder, #endif - -#ifdef XFS_BTREE_TRACE - .trace_enter = xfs_allocbt_trace_enter, - .trace_cursor = xfs_allocbt_trace_cursor, - .trace_key = xfs_allocbt_trace_key, - .trace_record = xfs_allocbt_trace_record, -#endif }; /* diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c index 50a28442a544..e2f5d59cbeaf 100644 --- a/fs/xfs/xfs_bmap_btree.c +++ b/fs/xfs/xfs_bmap_btree.c @@ -33,7 +33,6 @@ #include "xfs_inode_item.h" #include "xfs_alloc.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_error.h" @@ -732,95 +731,6 @@ xfs_bmbt_recs_inorder( } #endif /* DEBUG */ -#ifdef XFS_BTREE_TRACE -ktrace_t *xfs_bmbt_trace_buf; - -STATIC void -xfs_bmbt_trace_enter( - struct xfs_btree_cur *cur, - const char *func, - char *s, - int type, - int line, - __psunsigned_t a0, - __psunsigned_t a1, - __psunsigned_t a2, - __psunsigned_t a3, - __psunsigned_t a4, - __psunsigned_t a5, - __psunsigned_t a6, - __psunsigned_t a7, - __psunsigned_t a8, - __psunsigned_t a9, - __psunsigned_t a10) -{ - struct xfs_inode *ip = cur->bc_private.b.ip; - int whichfork = cur->bc_private.b.whichfork; - - ktrace_enter(xfs_bmbt_trace_buf, - (void *)((__psint_t)type | (whichfork << 8) | (line << 16)), - (void *)func, (void *)s, (void *)ip, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); -} - -STATIC void -xfs_bmbt_trace_cursor( - struct xfs_btree_cur *cur, - __uint32_t *s0, - __uint64_t *l0, - __uint64_t *l1) -{ - struct xfs_bmbt_rec_host r; - - xfs_bmbt_set_all(&r, &cur->bc_rec.b); - - *s0 = (cur->bc_nlevels << 24) | - (cur->bc_private.b.flags << 16) | - cur->bc_private.b.allocated; - *l0 = r.l0; - *l1 = r.l1; -} - -STATIC void -xfs_bmbt_trace_key( - struct xfs_btree_cur *cur, - union xfs_btree_key *key, - __uint64_t *l0, - __uint64_t *l1) -{ - *l0 = be64_to_cpu(key->bmbt.br_startoff); - *l1 = 0; -} - -/* Endian flipping versions of the bmbt extraction functions */ -STATIC void -xfs_bmbt_disk_get_all( - xfs_bmbt_rec_t *r, - xfs_bmbt_irec_t *s) -{ - __xfs_bmbt_get_all(get_unaligned_be64(&r->l0), - get_unaligned_be64(&r->l1), s); -} - -STATIC void -xfs_bmbt_trace_record( - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - __uint64_t *l0, - __uint64_t *l1, - __uint64_t *l2) -{ - struct xfs_bmbt_irec irec; - - xfs_bmbt_disk_get_all(&rec->bmbt, &irec); - *l0 = irec.br_startoff; - *l1 = irec.br_startblock; - *l2 = irec.br_blockcount; -} -#endif /* XFS_BTREE_TRACE */ - static const struct xfs_btree_ops xfs_bmbt_ops = { .rec_len = sizeof(xfs_bmbt_rec_t), .key_len = sizeof(xfs_bmbt_key_t), @@ -837,18 +747,10 @@ static const struct xfs_btree_ops xfs_bmbt_ops = { .init_rec_from_cur = xfs_bmbt_init_rec_from_cur, .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, .key_diff = xfs_bmbt_key_diff, - #ifdef DEBUG .keys_inorder = xfs_bmbt_keys_inorder, .recs_inorder = xfs_bmbt_recs_inorder, #endif - -#ifdef XFS_BTREE_TRACE - .trace_enter = xfs_bmbt_trace_enter, - .trace_cursor = xfs_bmbt_trace_cursor, - .trace_key = xfs_bmbt_trace_key, - .trace_record = xfs_bmbt_trace_record, -#endif }; /* diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 43125af42629..cabf4b5604aa 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -32,7 +32,6 @@ #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_error.h" #include 
"xfs_trace.h" diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h index 82fafc66bd1f..8d05a6a46ce3 100644 --- a/fs/xfs/xfs_btree.h +++ b/fs/xfs/xfs_btree.h @@ -199,25 +199,6 @@ struct xfs_btree_ops { union xfs_btree_rec *r1, union xfs_btree_rec *r2); #endif - - /* btree tracing */ -#ifdef XFS_BTREE_TRACE - void (*trace_enter)(struct xfs_btree_cur *, const char *, - char *, int, int, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t, - __psunsigned_t, __psunsigned_t); - void (*trace_cursor)(struct xfs_btree_cur *, __uint32_t *, - __uint64_t *, __uint64_t *); - void (*trace_key)(struct xfs_btree_cur *, - union xfs_btree_key *, __uint64_t *, - __uint64_t *); - void (*trace_record)(struct xfs_btree_cur *, - union xfs_btree_rec *, __uint64_t *, - __uint64_t *, __uint64_t *); -#endif }; /* @@ -452,4 +433,23 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block) (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \ XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks) +/* + * Trace hooks. Currently not implemented as they need to be ported + * over to the generic tracing functionality, which is some effort. + * + * i,j = integer (32 bit) + * b = btree block buffer (xfs_buf_t) + * p = btree ptr + * r = btree record + * k = btree key + */ +#define XFS_BTREE_TRACE_ARGBI(c, b, i) +#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) +#define XFS_BTREE_TRACE_ARGI(c, i) +#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) +#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) +#define XFS_BTREE_TRACE_ARGIK(c, i, k) +#define XFS_BTREE_TRACE_ARGR(c, r) +#define XFS_BTREE_TRACE_CURSOR(c, t) + #endif /* __XFS_BTREE_H__ */ diff --git a/fs/xfs/xfs_btree_trace.c b/fs/xfs/xfs_btree_trace.c deleted file mode 100644 index 44ff942a0fda..000000000000 --- a/fs/xfs/xfs_btree_trace.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Copyright (c) 2008 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "xfs.h" -#include "xfs_types.h" -#include "xfs_inum.h" -#include "xfs_bmap_btree.h" -#include "xfs_alloc_btree.h" -#include "xfs_ialloc_btree.h" -#include "xfs_inode.h" -#include "xfs_btree.h" -#include "xfs_btree_trace.h" - -STATIC void -xfs_btree_trace_ptr( - struct xfs_btree_cur *cur, - union xfs_btree_ptr ptr, - __psunsigned_t *high, - __psunsigned_t *low) -{ - if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { - __u64 val = be64_to_cpu(ptr.l); - *high = val >> 32; - *low = (int)val; - } else { - *high = 0; - *low = be32_to_cpu(ptr.s); - } -} - -/* - * Add a trace buffer entry for arguments, for a buffer & 1 integer arg. 
- */ -void -xfs_btree_trace_argbi( - const char *func, - struct xfs_btree_cur *cur, - struct xfs_buf *b, - int i, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBI, - line, (__psunsigned_t)b, i, 0, 0, 0, 0, 0, - 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for a buffer & 2 integer args. - */ -void -xfs_btree_trace_argbii( - const char *func, - struct xfs_btree_cur *cur, - struct xfs_buf *b, - int i0, - int i1, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBII, - line, (__psunsigned_t)b, i0, i1, 0, 0, 0, 0, - 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for 3 block-length args - * and an integer arg. - */ -void -xfs_btree_trace_argfffi( - const char *func, - struct xfs_btree_cur *cur, - xfs_dfiloff_t o, - xfs_dfsbno_t b, - xfs_dfilblks_t i, - int j, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGFFFI, - line, - o >> 32, (int)o, - b >> 32, (int)b, - i >> 32, (int)i, - (int)j, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for one integer arg. - */ -void -xfs_btree_trace_argi( - const char *func, - struct xfs_btree_cur *cur, - int i, - int line) -{ - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGI, - line, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, fsblock, key. - */ -void -xfs_btree_trace_argipk( - const char *func, - struct xfs_btree_cur *cur, - int i, - union xfs_btree_ptr ptr, - union xfs_btree_key *key, - int line) -{ - __psunsigned_t high, low; - __uint64_t l0, l1; - - xfs_btree_trace_ptr(cur, ptr, &high, &low); - cur->bc_ops->trace_key(cur, key, &l0, &l1); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPK, - line, i, high, low, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, fsblock, rec. - */ -void -xfs_btree_trace_argipr( - const char *func, - struct xfs_btree_cur *cur, - int i, - union xfs_btree_ptr ptr, - union xfs_btree_rec *rec, - int line) -{ - __psunsigned_t high, low; - __uint64_t l0, l1, l2; - - xfs_btree_trace_ptr(cur, ptr, &high, &low); - cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPR, - line, i, - high, low, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - l2 >> 32, (int)l2, - 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for int, key. - */ -void -xfs_btree_trace_argik( - const char *func, - struct xfs_btree_cur *cur, - int i, - union xfs_btree_key *key, - int line) -{ - __uint64_t l0, l1; - - cur->bc_ops->trace_key(cur, key, &l0, &l1); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIK, - line, i, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - 0, 0, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for arguments, for record. - */ -void -xfs_btree_trace_argr( - const char *func, - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - int line) -{ - __uint64_t l0, l1, l2; - - cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2); - cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGR, - line, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - l2 >> 32, (int)l2, - 0, 0, 0, 0, 0); -} - -/* - * Add a trace buffer entry for the cursor/operation. 
- */ -void -xfs_btree_trace_cursor( - const char *func, - struct xfs_btree_cur *cur, - int type, - int line) -{ - __uint32_t s0; - __uint64_t l0, l1; - char *s; - - switch (type) { - case XBT_ARGS: - s = "args"; - break; - case XBT_ENTRY: - s = "entry"; - break; - case XBT_ERROR: - s = "error"; - break; - case XBT_EXIT: - s = "exit"; - break; - default: - s = "unknown"; - break; - } - - cur->bc_ops->trace_cursor(cur, &s0, &l0, &l1); - cur->bc_ops->trace_enter(cur, func, s, XFS_BTREE_KTRACE_CUR, line, - s0, - l0 >> 32, (int)l0, - l1 >> 32, (int)l1, - (__psunsigned_t)cur->bc_bufs[0], - (__psunsigned_t)cur->bc_bufs[1], - (__psunsigned_t)cur->bc_bufs[2], - (__psunsigned_t)cur->bc_bufs[3], - (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1], - (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]); -} diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h deleted file mode 100644 index 2d8a309873ea..000000000000 --- a/fs/xfs/xfs_btree_trace.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2008 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_BTREE_TRACE_H__ -#define __XFS_BTREE_TRACE_H__ - -struct xfs_btree_cur; -struct xfs_buf; - - -/* - * Trace hooks. - * i,j = integer (32 bit) - * b = btree block buffer (xfs_buf_t) - * p = btree ptr - * r = btree record - * k = btree key - */ - -#ifdef XFS_BTREE_TRACE - -/* - * Trace buffer entry types. - */ -#define XFS_BTREE_KTRACE_ARGBI 1 -#define XFS_BTREE_KTRACE_ARGBII 2 -#define XFS_BTREE_KTRACE_ARGFFFI 3 -#define XFS_BTREE_KTRACE_ARGI 4 -#define XFS_BTREE_KTRACE_ARGIPK 5 -#define XFS_BTREE_KTRACE_ARGIPR 6 -#define XFS_BTREE_KTRACE_ARGIK 7 -#define XFS_BTREE_KTRACE_ARGR 8 -#define XFS_BTREE_KTRACE_CUR 9 - -/* - * Sub-types for cursor traces. 
- */ -#define XBT_ARGS 0 -#define XBT_ENTRY 1 -#define XBT_ERROR 2 -#define XBT_EXIT 3 - -void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *, - struct xfs_buf *, int, int); -void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *, - struct xfs_buf *, int, int, int); -void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int); -void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int, - union xfs_btree_ptr, union xfs_btree_key *, int); -void xfs_btree_trace_argipr(const char *, struct xfs_btree_cur *, int, - union xfs_btree_ptr, union xfs_btree_rec *, int); -void xfs_btree_trace_argik(const char *, struct xfs_btree_cur *, int, - union xfs_btree_key *, int); -void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *, - union xfs_btree_rec *, int); -void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int); - -#define XFS_BTREE_TRACE_ARGBI(c, b, i) \ - xfs_btree_trace_argbi(__func__, c, b, i, __LINE__) -#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \ - xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__) -#define XFS_BTREE_TRACE_ARGI(c, i) \ - xfs_btree_trace_argi(__func__, c, i, __LINE__) -#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \ - xfs_btree_trace_argipk(__func__, c, i, p, k, __LINE__) -#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) \ - xfs_btree_trace_argipr(__func__, c, i, p, r, __LINE__) -#define XFS_BTREE_TRACE_ARGIK(c, i, k) \ - xfs_btree_trace_argik(__func__, c, i, k, __LINE__) -#define XFS_BTREE_TRACE_ARGR(c, r) \ - xfs_btree_trace_argr(__func__, c, r, __LINE__) -#define XFS_BTREE_TRACE_CURSOR(c, t) \ - xfs_btree_trace_cursor(__func__, c, t, __LINE__) -#else -#define XFS_BTREE_TRACE_ARGBI(c, b, i) -#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) -#define XFS_BTREE_TRACE_ARGI(c, i) -#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s) -#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) -#define XFS_BTREE_TRACE_ARGIK(c, i, k) -#define XFS_BTREE_TRACE_ARGR(c, r) -#define XFS_BTREE_TRACE_CURSOR(c, t) -#endif /* XFS_BTREE_TRACE */ - -#endif /* __XFS_BTREE_TRACE_H__ */ diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c index 16921f55c542..c6a75815aea0 100644 --- a/fs/xfs/xfs_ialloc_btree.c +++ b/fs/xfs/xfs_ialloc_btree.c @@ -31,7 +31,6 @@ #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_ialloc.h" #include "xfs_alloc.h" #include "xfs_error.h" @@ -205,72 +204,6 @@ xfs_inobt_recs_inorder( } #endif /* DEBUG */ -#ifdef XFS_BTREE_TRACE -ktrace_t *xfs_inobt_trace_buf; - -STATIC void -xfs_inobt_trace_enter( - struct xfs_btree_cur *cur, - const char *func, - char *s, - int type, - int line, - __psunsigned_t a0, - __psunsigned_t a1, - __psunsigned_t a2, - __psunsigned_t a3, - __psunsigned_t a4, - __psunsigned_t a5, - __psunsigned_t a6, - __psunsigned_t a7, - __psunsigned_t a8, - __psunsigned_t a9, - __psunsigned_t a10) -{ - ktrace_enter(xfs_inobt_trace_buf, (void *)(__psint_t)type, - (void *)func, (void *)s, NULL, (void *)cur, - (void *)a0, (void *)a1, (void *)a2, (void *)a3, - (void *)a4, (void *)a5, (void *)a6, (void *)a7, - (void *)a8, (void *)a9, (void *)a10); -} - -STATIC void -xfs_inobt_trace_cursor( - struct xfs_btree_cur *cur, - __uint32_t *s0, - __uint64_t *l0, - __uint64_t *l1) -{ - *s0 = cur->bc_private.a.agno; - *l0 = cur->bc_rec.i.ir_startino; - *l1 = cur->bc_rec.i.ir_free; -} - -STATIC void -xfs_inobt_trace_key( - struct xfs_btree_cur *cur, - union xfs_btree_key *key, - __uint64_t *l0, - __uint64_t *l1) -{ - *l0 = be32_to_cpu(key->inobt.ir_startino); - *l1 = 
0; -} - -STATIC void -xfs_inobt_trace_record( - struct xfs_btree_cur *cur, - union xfs_btree_rec *rec, - __uint64_t *l0, - __uint64_t *l1, - __uint64_t *l2) -{ - *l0 = be32_to_cpu(rec->inobt.ir_startino); - *l1 = be32_to_cpu(rec->inobt.ir_freecount); - *l2 = be64_to_cpu(rec->inobt.ir_free); -} -#endif /* XFS_BTREE_TRACE */ - static const struct xfs_btree_ops xfs_inobt_ops = { .rec_len = sizeof(xfs_inobt_rec_t), .key_len = sizeof(xfs_inobt_key_t), @@ -286,18 +219,10 @@ static const struct xfs_btree_ops xfs_inobt_ops = { .init_rec_from_cur = xfs_inobt_init_rec_from_cur, .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, .key_diff = xfs_inobt_key_diff, - #ifdef DEBUG .keys_inorder = xfs_inobt_keys_inorder, .recs_inorder = xfs_inobt_recs_inorder, #endif - -#ifdef XFS_BTREE_TRACE - .trace_enter = xfs_inobt_trace_enter, - .trace_cursor = xfs_inobt_trace_cursor, - .trace_key = xfs_inobt_trace_key, - .trace_record = xfs_inobt_trace_record, -#endif }; /* diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 3631783b2b53..7759812c1bbe 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -38,7 +38,6 @@ #include "xfs_trans_priv.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" -#include "xfs_btree_trace.h" #include "xfs_trace.h" diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 1314c83ded1c..3cc21ddf9f7e 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -37,7 +37,6 @@ #include "xfs_buf_item.h" #include "xfs_inode_item.h" #include "xfs_btree.h" -#include "xfs_btree_trace.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_bmap.h" -- cgit v1.2.3 From d0f9e8fb4cc6dd5d07c72eeecc2f332b6e85e221 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Jul 2011 13:43:50 +0200 Subject: xfs: remove the dead XFS_DABUF_DEBUG code Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder Reviewed-by: Dave Chinner --- fs/xfs/xfs_da_btree.c | 68 +++++++++------------------------------------------ fs/xfs/xfs_da_btree.h | 11 --------- 2 files changed, 12 insertions(+), 67 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c index 73b048f7eb71..2925726529f8 100644 --- a/fs/xfs/xfs_da_btree.c +++ b/fs/xfs/xfs_da_btree.c @@ -86,7 +86,7 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state, */ STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count); STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp); -STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra); +STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps); STATIC int xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk, xfs_da_state_blk_t *save_blk); @@ -1939,8 +1939,7 @@ xfs_da_do_buf( xfs_daddr_t *mappedbnop, xfs_dabuf_t **bpp, int whichfork, - int caller, - inst_t *ra) + int caller) { xfs_buf_t *bp = NULL; xfs_buf_t **bplist; @@ -2079,9 +2078,9 @@ xfs_da_do_buf( * Build a dabuf structure. 
*/ if (bplist) { - rbp = xfs_da_buf_make(nbplist, bplist, ra); + rbp = xfs_da_buf_make(nbplist, bplist); } else if (bp) - rbp = xfs_da_buf_make(1, &bp, ra); + rbp = xfs_da_buf_make(1, &bp); else rbp = NULL; /* @@ -2149,8 +2148,7 @@ xfs_da_get_buf( xfs_dabuf_t **bpp, int whichfork) { - return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0, - (inst_t *)__return_address); + return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0); } /* @@ -2165,8 +2163,7 @@ xfs_da_read_buf( xfs_dabuf_t **bpp, int whichfork) { - return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1, - (inst_t *)__return_address); + return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1); } /* @@ -2182,8 +2179,7 @@ xfs_da_reada_buf( xfs_daddr_t rval; rval = -1; - if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3, - (inst_t *)__return_address)) + if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3)) return -1; else return rval; @@ -2241,17 +2237,12 @@ xfs_da_state_free(xfs_da_state_t *state) kmem_zone_free(xfs_da_state_zone, state); } -#ifdef XFS_DABUF_DEBUG -xfs_dabuf_t *xfs_dabuf_global_list; -static DEFINE_SPINLOCK(xfs_dabuf_global_lock); -#endif - /* * Create a dabuf. */ /* ARGSUSED */ STATIC xfs_dabuf_t * -xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) +xfs_da_buf_make(int nbuf, xfs_buf_t **bps) { xfs_buf_t *bp; xfs_dabuf_t *dabuf; @@ -2263,11 +2254,6 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) else dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS); dabuf->dirty = 0; -#ifdef XFS_DABUF_DEBUG - dabuf->ra = ra; - dabuf->target = XFS_BUF_TARGET(bps[0]); - dabuf->blkno = XFS_BUF_ADDR(bps[0]); -#endif if (nbuf == 1) { dabuf->nbuf = 1; bp = bps[0]; @@ -2287,23 +2273,6 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra) XFS_BUF_COUNT(bp)); } } -#ifdef XFS_DABUF_DEBUG - { - xfs_dabuf_t *p; - - spin_lock(&xfs_dabuf_global_lock); - for (p = xfs_dabuf_global_list; p; p = p->next) { - ASSERT(p->blkno != dabuf->blkno || - p->target != dabuf->target); - } - dabuf->prev = NULL; - if (xfs_dabuf_global_list) - xfs_dabuf_global_list->prev = dabuf; - dabuf->next = xfs_dabuf_global_list; - xfs_dabuf_global_list = dabuf; - spin_unlock(&xfs_dabuf_global_lock); - } -#endif return dabuf; } @@ -2339,25 +2308,12 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf) ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]); if (dabuf->dirty) xfs_da_buf_clean(dabuf); - if (dabuf->nbuf > 1) + if (dabuf->nbuf > 1) { kmem_free(dabuf->data); -#ifdef XFS_DABUF_DEBUG - { - spin_lock(&xfs_dabuf_global_lock); - if (dabuf->prev) - dabuf->prev->next = dabuf->next; - else - xfs_dabuf_global_list = dabuf->next; - if (dabuf->next) - dabuf->next->prev = dabuf->prev; - spin_unlock(&xfs_dabuf_global_lock); - } - memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf)); -#endif - if (dabuf->nbuf == 1) - kmem_zone_free(xfs_dabuf_zone, dabuf); - else kmem_free(dabuf); + } else { + kmem_zone_free(xfs_dabuf_zone, dabuf); + } } /* diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h index 7b0a6ea59a04..dbf7c074ae73 100644 --- a/fs/xfs/xfs_da_btree.h +++ b/fs/xfs/xfs_da_btree.h @@ -145,22 +145,11 @@ typedef struct xfs_dabuf { short dirty; /* data needs to be copied back */ short bbcount; /* how large is data in bbs */ void *data; /* pointer for buffers' data */ -#ifdef XFS_DABUF_DEBUG - inst_t *ra; /* return address of caller to make */ - struct xfs_dabuf *next; /* next in global chain */ - struct xfs_dabuf *prev; /* previous in global chain */ - struct xfs_buftarg *target; /* device for 
buffer */ - xfs_daddr_t blkno; /* daddr first in bps[0] */ -#endif struct xfs_buf *bps[1]; /* actually nbuf of these */ } xfs_dabuf_t; #define XFS_DA_BUF_SIZE(n) \ (sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1)) -#ifdef XFS_DABUF_DEBUG -extern xfs_dabuf_t *xfs_dabuf_global_list; -#endif - /* * Storage for holding state during Btree searches and split/join ops. * -- cgit v1.2.3 From 380f7c65a7eb3288e4b6812acf3474a1de230707 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 14 Jul 2011 08:59:44 +0100 Subject: GFS2: Resolve inode eviction and ail list interaction bug This patch contains a few misc fixes which resolve a recently reported issue. This patch has been a real team effort and has received a lot of testing. The first issue is that the ail lock needs to be held over a few more operations. The lock thats added into gfs2_releasepage() may possibly be a candidate for replacing with RCU at some future point, but at this stage we've gone for the obvious fix. The second issue is that gfs2_write_inode() can end up calling a glock recursively when called from gfs2_evict_inode() via the syncing code, so it needs a guard added. The third issue is that we either need to not truncate the metadata pages of inodes which have zero link count, but which we cannot deallocate due to them still being in use by other nodes, or we need to ensure that those pages have all made it through the journal and ail lists first. This patch takes the former approach, but the latter has also been tested and there is nothing to choose between them performance-wise. So again, we could revise that decision in the future. Also, the inode eviction process is now better documented. Signed-off-by: Steven Whitehouse Tested-by: Bob Peterson Tested-by: Abhijith Das Reported-by: Barry J. 
Marson Reported-by: David Teigland --- fs/gfs2/aops.c | 3 +++ fs/gfs2/glops.c | 4 ++-- fs/gfs2/log.c | 1 + fs/gfs2/super.c | 36 ++++++++++++++++++++++++++++++------ 4 files changed, 36 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 802ac5eeba28..f9fbbe96c222 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -1069,6 +1069,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) return 0; gfs2_log_lock(sdp); + spin_lock(&sdp->sd_ail_lock); head = bh = page_buffers(page); do { if (atomic_read(&bh->b_count)) @@ -1080,6 +1081,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) goto not_possible; bh = bh->b_this_page; } while(bh != head); + spin_unlock(&sdp->sd_ail_lock); gfs2_log_unlock(sdp); head = bh = page_buffers(page); @@ -1112,6 +1114,7 @@ not_possible: /* Should never happen */ WARN_ON(buffer_dirty(bh)); WARN_ON(buffer_pinned(bh)); cannot_release: + spin_unlock(&sdp->sd_ail_lock); gfs2_log_unlock(sdp); return 0; } diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 712b722030a1..2cca29316bd6 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -47,10 +47,10 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl) bd_ail_gl_list); bh = bd->bd_bh; gfs2_remove_from_ail(bd); - spin_unlock(&sdp->sd_ail_lock); - bd->bd_bh = NULL; bh->b_private = NULL; + spin_unlock(&sdp->sd_ail_lock); + bd->bd_blkno = bh->b_blocknr; gfs2_log_lock(sdp); gfs2_assert_withdraw(sdp, !buffer_busy(bh)); diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 903115f2bb34..85c62923ee29 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -903,6 +903,7 @@ void gfs2_meta_syncfs(struct gfs2_sbd *sdp) if (gfs2_ail1_empty(sdp)) break; } + gfs2_log_flush(sdp, NULL); } static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index ed540e7018be..fb0edf735483 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -757,13 +757,17 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc) struct timespec atime; struct gfs2_dinode *di; int ret = -EAGAIN; + int unlock_required = 0; /* Skip timestamp update, if this is from a memalloc */ if (current->flags & PF_MEMALLOC) goto do_flush; - ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); - if (ret) - goto do_flush; + if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); + if (ret) + goto do_flush; + unlock_required = 1; + } ret = gfs2_trans_begin(sdp, RES_DINODE, 0); if (ret) goto do_unlock; @@ -780,7 +784,8 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc) } gfs2_trans_end(sdp); do_unlock: - gfs2_glock_dq_uninit(&gh); + if (unlock_required) + gfs2_glock_dq_uninit(&gh); do_flush: if (wbc->sync_mode == WB_SYNC_ALL) gfs2_log_flush(GFS2_SB(inode), ip->i_gl); @@ -1427,7 +1432,20 @@ out: return error; } -/* +/** + * gfs2_evict_inode - Remove an inode from cache + * @inode: The inode to evict + * + * There are three cases to consider: + * 1. i_nlink == 0, we are final opener (and must deallocate) + * 2. i_nlink == 0, we are not the final opener (and cannot deallocate) + * 3. i_nlink > 0 + * + * If the fs is read only, then we have to treat all cases as per #3 + * since we are unable to do any deallocation. 
The inode will be + * deallocated by the next read/write node to attempt an allocation + * in the same resource group + * * We have to (at the moment) hold the inodes main lock to cover * the gap between unlocking the shared lock on the iopen lock and * taking the exclusive lock. I'd rather do a shared -> exclusive @@ -1470,6 +1488,8 @@ static void gfs2_evict_inode(struct inode *inode) if (error) goto out_truncate; + /* Case 1 starts here */ + if (S_ISDIR(inode->i_mode) && (ip->i_diskflags & GFS2_DIF_EXHASH)) { error = gfs2_dir_exhash_dealloc(ip); @@ -1493,13 +1513,16 @@ static void gfs2_evict_inode(struct inode *inode) goto out_unlock; out_truncate: + /* Case 2 starts here */ error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); if (error) goto out_unlock; - gfs2_final_release_pages(ip); + /* Needs to be done before glock release & also in a transaction */ + truncate_inode_pages(&inode->i_data, 0); gfs2_trans_end(sdp); out_unlock: + /* Error path for case 1 */ if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) gfs2_glock_dq(&ip->i_iopen_gh); gfs2_holder_uninit(&ip->i_iopen_gh); @@ -1507,6 +1530,7 @@ out_unlock: if (error && error != GLR_TRYFAILED && error != -EROFS) fs_warn(sdp, "gfs2_evict_inode: %d\n", error); out: + /* Case 3 starts here */ truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); -- cgit v1.2.3 From 883ba74f43092823d0ed4c6b21f0171e9b334607 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Thu, 14 Jul 2011 12:31:49 -0500 Subject: dlm: remove deadlock debug print gfs2 recently began using this feature heavily, creating more debug output than we want to see. Signed-off-by: David Teigland --- fs/dlm/lock.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 9ebeaa6e6903..6772e5c5bcd6 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -2421,9 +2421,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) if (deadlk) { /* it's left on the granted queue */ - log_debug(r->res_ls, "deadlock %x node %d sts%d g%d r%d %s", - lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_status, - lkb->lkb_grmode, lkb->lkb_rqmode, r->res_name); revert_lock(r, lkb); queue_cast(r, lkb, -EDEADLK); error = -EDEADLK; -- cgit v1.2.3 From 1836750115f20b774e55c032a3893e8c5bdf41ed Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 12 Jul 2011 21:42:24 -0400 Subject: fix loop checks in d_materialise_unique() Both __d_unalias() and __d_materialise_dentry() need loop prevention. Grab rename_lock in caller, check for loops there... As a side benefit, we have dentry_lock_for_move() called only under rename_lock, which seriously reduces deadlock potential of the execrable "locking order" used for ->d_lock. Signed-off-by: Al Viro --- fs/dcache.c | 51 ++++++++++++++++++++++++++++++++++----------------- 1 file changed, 34 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 37f72ee5bf7c..6e4ea6d87774 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2213,14 +2213,15 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry, * The hash value has to match the hash queue that the dentry is on.. */ /* - * d_move - move a dentry + * __d_move - move a dentry * @dentry: entry to move * @target: new dentry * * Update the dcache to reflect the move of a file name. Negative - * dcache entries should not be moved in this way. + * dcache entries should not be moved in this way. Caller hold + * rename_lock. 
*/ -void d_move(struct dentry * dentry, struct dentry * target) +static void __d_move(struct dentry * dentry, struct dentry * target) { if (!dentry->d_inode) printk(KERN_WARNING "VFS: moving negative dcache entry\n"); @@ -2228,8 +2229,6 @@ void d_move(struct dentry * dentry, struct dentry * target) BUG_ON(d_ancestor(dentry, target)); BUG_ON(d_ancestor(target, dentry)); - write_seqlock(&rename_lock); - dentry_lock_for_move(dentry, target); write_seqcount_begin(&dentry->d_seq); @@ -2275,6 +2274,20 @@ void d_move(struct dentry * dentry, struct dentry * target) spin_unlock(&target->d_lock); fsnotify_d_move(dentry); spin_unlock(&dentry->d_lock); +} + +/* + * d_move - move a dentry + * @dentry: entry to move + * @target: new dentry + * + * Update the dcache to reflect the move of a file name. Negative + * dcache entries should not be moved in this way. + */ +void d_move(struct dentry *dentry, struct dentry *target) +{ + write_seqlock(&rename_lock); + __d_move(dentry, target); write_sequnlock(&rename_lock); } EXPORT_SYMBOL(d_move); @@ -2302,7 +2315,7 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) * This helper attempts to cope with remotely renamed directories * * It assumes that the caller is already holding - * dentry->d_parent->d_inode->i_mutex and the inode->i_lock + * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock * * Note: If ever the locking in lock_rename() changes, then please * remember to update this too... @@ -2317,11 +2330,6 @@ static struct dentry *__d_unalias(struct inode *inode, if (alias->d_parent == dentry->d_parent) goto out_unalias; - /* Check for loops */ - ret = ERR_PTR(-ELOOP); - if (d_ancestor(alias, dentry)) - goto out_err; - /* See lock_rename() */ ret = ERR_PTR(-EBUSY); if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) @@ -2331,7 +2339,7 @@ static struct dentry *__d_unalias(struct inode *inode, goto out_err; m2 = &alias->d_parent->d_inode->i_mutex; out_unalias: - d_move(alias, dentry); + __d_move(alias, dentry); ret = alias; out_err: spin_unlock(&inode->i_lock); @@ -2416,15 +2424,24 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) alias = __d_find_alias(inode, 0); if (alias) { actual = alias; - /* Is this an anonymous mountpoint that we could splice - * into our tree? */ - if (IS_ROOT(alias)) { + write_seqlock(&rename_lock); + + if (d_ancestor(alias, dentry)) { + /* Check for loops */ + actual = ERR_PTR(-ELOOP); + } else if (IS_ROOT(alias)) { + /* Is this an anonymous mountpoint that we + * could splice into our tree? */ __d_materialise_dentry(dentry, alias); + write_sequnlock(&rename_lock); __d_drop(alias); goto found; + } else { + /* Nope, but we must(!) avoid directory + * aliasing */ + actual = __d_unalias(inode, dentry, alias); } - /* Nope, but we must(!) avoid directory aliasing */ - actual = __d_unalias(inode, dentry, alias); + write_sequnlock(&rename_lock); if (IS_ERR(actual)) dput(alias); goto out_nolock; -- cgit v1.2.3 From 17d539f0499fa2c0321b7c260831cca2bb36d119 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Wed, 15 Jun 2011 10:29:37 +0100 Subject: GFS2: Cache dir hash table in a contiguous buffer This patch adds a cache for the hash table to the directory code in order to help simplify the way in which the hash table is accessed. This is intended to be a first step towards introducing some performance improvements in the directory code. There are two follow ups that I'm hoping to see fairly shortly. 
One is to simplify the hash table reading code now that we always read the complete hash table, whether we want one entry or all of them. The other is to introduce readahead on the heads of the hash chains which are referred to from the table. The hash table is a maximum of 128k in size, so it is not worth trying to read it in small chunks. Signed-off-by: Steven Whitehouse --- fs/gfs2/dir.c | 221 +++++++++++++++++++++++++++---------------------------- fs/gfs2/dir.h | 1 + fs/gfs2/glops.c | 2 + fs/gfs2/incore.h | 1 + fs/gfs2/main.c | 1 + fs/gfs2/super.c | 2 +- 6 files changed, 115 insertions(+), 113 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 091ee4779538..1cc2f8ec52a2 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -339,6 +339,67 @@ fail: return (copied) ? copied : error; } +/** + * gfs2_dir_get_hash_table - Get pointer to the dir hash table + * @ip: The inode in question + * + * Returns: The hash table or an error + */ + +static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip) +{ + struct inode *inode = &ip->i_inode; + int ret; + u32 hsize; + __be64 *hc; + + BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH)); + + hc = ip->i_hash_cache; + if (hc) + return hc; + + hsize = 1 << ip->i_depth; + hsize *= sizeof(__be64); + if (hsize != i_size_read(&ip->i_inode)) { + gfs2_consist_inode(ip); + return ERR_PTR(-EIO); + } + + hc = kmalloc(hsize, GFP_NOFS); + ret = -ENOMEM; + if (hc == NULL) + return ERR_PTR(-ENOMEM); + + ret = gfs2_dir_read_data(ip, (char *)hc, 0, hsize, 1); + if (ret < 0) { + kfree(hc); + return ERR_PTR(ret); + } + + spin_lock(&inode->i_lock); + if (ip->i_hash_cache) + kfree(hc); + else + ip->i_hash_cache = hc; + spin_unlock(&inode->i_lock); + + return ip->i_hash_cache; +} + +/** + * gfs2_dir_hash_inval - Invalidate dir hash + * @ip: The directory inode + * + * Must be called with an exclusive glock, or during glock invalidation. + */ +void gfs2_dir_hash_inval(struct gfs2_inode *ip) +{ + __be64 *hc = ip->i_hash_cache; + ip->i_hash_cache = NULL; + kfree(hc); +} + static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent) { return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0; @@ -686,17 +747,12 @@ static int get_leaf(struct gfs2_inode *dip, u64 leaf_no, static int get_leaf_nr(struct gfs2_inode *dip, u32 index, u64 *leaf_out) { - __be64 leaf_no; - int error; - - error = gfs2_dir_read_data(dip, (char *)&leaf_no, - index * sizeof(__be64), - sizeof(__be64), 0); - if (error != sizeof(u64)) - return (error < 0) ? 
error : -EIO; - - *leaf_out = be64_to_cpu(leaf_no); + __be64 *hash; + hash = gfs2_dir_get_hash_table(dip); + if (IS_ERR(hash)) + return PTR_ERR(hash); + *leaf_out = be64_to_cpu(*(hash + index)); return 0; } @@ -966,6 +1022,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) for (x = 0; x < half_len; x++) lp[x] = cpu_to_be64(bn); + gfs2_dir_hash_inval(dip); + error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64), half_len * sizeof(u64)); if (error != half_len * sizeof(u64)) { @@ -1052,70 +1110,54 @@ fail_brelse: static int dir_double_exhash(struct gfs2_inode *dip) { - struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct buffer_head *dibh; u32 hsize; - u64 *buf; - u64 *from, *to; - u64 block; - u64 disksize = i_size_read(&dip->i_inode); + u32 hsize_bytes; + __be64 *hc; + __be64 *hc2, *h; int x; int error = 0; hsize = 1 << dip->i_depth; - if (hsize * sizeof(u64) != disksize) { - gfs2_consist_inode(dip); - return -EIO; - } + hsize_bytes = hsize * sizeof(__be64); - /* Allocate both the "from" and "to" buffers in one big chunk */ + hc = gfs2_dir_get_hash_table(dip); + if (IS_ERR(hc)) + return PTR_ERR(hc); - buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS); - if (!buf) + h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS); + if (!hc2) return -ENOMEM; - for (block = disksize >> sdp->sd_hash_bsize_shift; block--;) { - error = gfs2_dir_read_data(dip, (char *)buf, - block * sdp->sd_hash_bsize, - sdp->sd_hash_bsize, 1); - if (error != sdp->sd_hash_bsize) { - if (error >= 0) - error = -EIO; - goto fail; - } - - from = buf; - to = (u64 *)((char *)buf + sdp->sd_hash_bsize); - - for (x = sdp->sd_hash_ptrs; x--; from++) { - *to++ = *from; /* No endianess worries */ - *to++ = *from; - } + error = gfs2_meta_inode_buffer(dip, &dibh); + if (error) + goto out_kfree; - error = gfs2_dir_write_data(dip, - (char *)buf + sdp->sd_hash_bsize, - block * sdp->sd_sb.sb_bsize, - sdp->sd_sb.sb_bsize); - if (error != sdp->sd_sb.sb_bsize) { - if (error >= 0) - error = -EIO; - goto fail; - } + for (x = 0; x < hsize; x++) { + *h++ = *hc; + *h++ = *hc; + hc++; } - kfree(buf); - - error = gfs2_meta_inode_buffer(dip, &dibh); - if (!gfs2_assert_withdraw(sdp, !error)) { - dip->i_depth++; - gfs2_dinode_out(dip, dibh->b_data); - brelse(dibh); - } + error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2); + if (error != (hsize_bytes * 2)) + goto fail; - return error; + gfs2_dir_hash_inval(dip); + dip->i_hash_cache = hc2; + dip->i_depth++; + gfs2_dinode_out(dip, dibh->b_data); + brelse(dibh); + return 0; fail: - kfree(buf); + /* Replace original hash table & size */ + gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes); + i_size_write(&dip->i_inode, hsize_bytes); + gfs2_dinode_out(dip, dibh->b_data); + brelse(dibh); +out_kfree: + kfree(hc2); return error; } @@ -1348,6 +1390,7 @@ out: return error; } + /** * dir_e_read - Reads the entries from a directory into a filldir buffer * @dip: dinode pointer @@ -1362,9 +1405,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, filldir_t filldir) { struct gfs2_inode *dip = GFS2_I(inode); - struct gfs2_sbd *sdp = GFS2_SB(inode); u32 hsize, len = 0; - u32 ht_offset, lp_offset, ht_offset_cur = -1; u32 hash, index; __be64 *lp; int copied = 0; @@ -1372,37 +1413,17 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, unsigned depth = 0; hsize = 1 << dip->i_depth; - if (hsize * sizeof(u64) != i_size_read(inode)) { - gfs2_consist_inode(dip); - return -EIO; - } - hash = gfs2_dir_offset2hash(*offset); index = hash >> (32 - 
dip->i_depth); - lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS); - if (!lp) - return -ENOMEM; + lp = gfs2_dir_get_hash_table(dip); + if (IS_ERR(lp)) + return PTR_ERR(lp); while (index < hsize) { - lp_offset = index & (sdp->sd_hash_ptrs - 1); - ht_offset = index - lp_offset; - - if (ht_offset_cur != ht_offset) { - error = gfs2_dir_read_data(dip, (char *)lp, - ht_offset * sizeof(__be64), - sdp->sd_hash_bsize, 1); - if (error != sdp->sd_hash_bsize) { - if (error >= 0) - error = -EIO; - goto out; - } - ht_offset_cur = ht_offset; - } - error = gfs2_dir_read_leaf(inode, offset, opaque, filldir, &copied, &depth, - be64_to_cpu(lp[lp_offset])); + be64_to_cpu(lp[index])); if (error) break; @@ -1410,8 +1431,6 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque, index = (index & ~(len - 1)) + len; } -out: - kfree(lp); if (error > 0) error = 0; return error; @@ -1914,43 +1933,22 @@ out: int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip) { - struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct buffer_head *bh; struct gfs2_leaf *leaf; u32 hsize, len; - u32 ht_offset, lp_offset, ht_offset_cur = -1; u32 index = 0, next_index; __be64 *lp; u64 leaf_no; int error = 0, last; hsize = 1 << dip->i_depth; - if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) { - gfs2_consist_inode(dip); - return -EIO; - } - lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS); - if (!lp) - return -ENOMEM; + lp = gfs2_dir_get_hash_table(dip); + if (IS_ERR(lp)) + return PTR_ERR(lp); while (index < hsize) { - lp_offset = index & (sdp->sd_hash_ptrs - 1); - ht_offset = index - lp_offset; - - if (ht_offset_cur != ht_offset) { - error = gfs2_dir_read_data(dip, (char *)lp, - ht_offset * sizeof(__be64), - sdp->sd_hash_bsize, 1); - if (error != sdp->sd_hash_bsize) { - if (error >= 0) - error = -EIO; - goto out; - } - ht_offset_cur = ht_offset; - } - - leaf_no = be64_to_cpu(lp[lp_offset]); + leaf_no = be64_to_cpu(lp[index]); if (leaf_no) { error = get_leaf(dip, leaf_no, &bh); if (error) @@ -1976,7 +1974,6 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip) } out: - kfree(lp); return error; } diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h index e686af11becd..ff5772fbf024 100644 --- a/fs/gfs2/dir.h +++ b/fs/gfs2/dir.h @@ -35,6 +35,7 @@ extern int gfs2_diradd_alloc_required(struct inode *dir, const struct qstr *filename); extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block, struct buffer_head **bhp); +extern void gfs2_dir_hash_inval(struct gfs2_inode *ip); static inline u32 gfs2_disk_hash(const char *data, int len) { diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 2cca29316bd6..95788ae436c6 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -26,6 +26,7 @@ #include "rgrp.h" #include "util.h" #include "trans.h" +#include "dir.h" /** * __gfs2_ail_flush - remove all buffers for a given lock from the AIL @@ -218,6 +219,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) if (ip) { set_bit(GIF_INVALID, &ip->i_flags); forget_all_cached_acls(&ip->i_inode); + gfs2_dir_hash_inval(ip); } } diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 81206e70cbf6..24cd55f60e60 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -285,6 +285,7 @@ struct gfs2_inode { u64 i_goal; /* goal block for allocations */ struct rw_semaphore i_rw_mutex; struct list_head i_trunc_list; + __be64 *i_hash_cache; u32 i_entries; u32 i_diskflags; u8 i_height; diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index c2b34cd2abe0..29e1ace7953d 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -41,6 +41,7 @@ static void 
gfs2_init_inode_once(void *foo) init_rwsem(&ip->i_rw_mutex); INIT_LIST_HEAD(&ip->i_trunc_list); ip->i_alloc = NULL; + ip->i_hash_cache = NULL; } static void gfs2_init_glock_once(void *foo) diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index fb0edf735483..b7beadd9ba4c 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1533,7 +1533,7 @@ out: /* Case 3 starts here */ truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); - + gfs2_dir_hash_inval(ip); ip->i_gl->gl_object = NULL; gfs2_glock_add_to_lru(ip->i_gl); gfs2_glock_put(ip->i_gl); -- cgit v1.2.3 From 7cf8dcd3b68a760d66fbc7f0d75d3fbb8f21775d Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Wed, 15 Jun 2011 11:41:48 -0400 Subject: GFS2: Automatically adjust glock min hold time This patch is a performance improvement for GFS2 in a clustered environment. It makes the glock hold time self-adjusting. Signed-off-by: Bob Peterson Signed-off-by: Steven Whitehouse --- fs/gfs2/glock.c | 39 +++++++++++++++++++++++++++++---------- fs/gfs2/glock.h | 6 ++++++ fs/gfs2/glops.c | 2 -- fs/gfs2/incore.h | 2 +- 4 files changed, 36 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 1c1336e7b3b2..88e8a23d0026 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -409,6 +409,10 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state) if (held1 && held2 && list_empty(&gl->gl_holders)) clear_bit(GLF_QUEUED, &gl->gl_flags); + if (new_state != gl->gl_target) + /* shorten our minimum hold time */ + gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, + GL_GLOCK_MIN_HOLD); gl->gl_state = new_state; gl->gl_tchange = jiffies; } @@ -668,7 +672,7 @@ static void glock_work_func(struct work_struct *work) gl->gl_demote_state != LM_ST_EXCLUSIVE) { unsigned long holdtime, now = jiffies; - holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; + holdtime = gl->gl_tchange + gl->gl_hold_time; if (time_before(now, holdtime)) delay = holdtime - now; @@ -679,9 +683,14 @@ static void glock_work_func(struct work_struct *work) } run_queue(gl, 0); spin_unlock(&gl->gl_spin); - if (!delay || - queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) + if (!delay) gfs2_glock_put(gl); + else { + if (gl->gl_name.ln_type != LM_TYPE_INODE) + delay = 0; + if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) + gfs2_glock_put(gl); + } if (drop_ref) gfs2_glock_put(gl); } @@ -743,6 +752,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, gl->gl_tchange = jiffies; gl->gl_object = NULL; gl->gl_sbd = sdp; + gl->gl_hold_time = GL_GLOCK_DFT_HOLD; INIT_DELAYED_WORK(&gl->gl_work, glock_work_func); INIT_WORK(&gl->gl_delete, delete_work_func); @@ -855,8 +865,15 @@ static int gfs2_glock_demote_wait(void *word) static void wait_on_holder(struct gfs2_holder *gh) { + unsigned long time1 = jiffies; + might_sleep(); wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE); + if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */ + /* Lengthen the minimum hold time. 
*/ + gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time + + GL_GLOCK_HOLD_INCR, + GL_GLOCK_MAX_HOLD); } static void wait_on_demote(struct gfs2_glock *gl) @@ -1093,8 +1110,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh) gfs2_glock_hold(gl); if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && - !test_bit(GLF_DEMOTE, &gl->gl_flags)) - delay = gl->gl_ops->go_min_hold_time; + !test_bit(GLF_DEMOTE, &gl->gl_flags) && + gl->gl_name.ln_type == LM_TYPE_INODE) + delay = gl->gl_hold_time; if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) gfs2_glock_put(gl); } @@ -1273,12 +1291,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) unsigned long now = jiffies; gfs2_glock_hold(gl); - holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; - if (test_bit(GLF_QUEUED, &gl->gl_flags)) { + holdtime = gl->gl_tchange + gl->gl_hold_time; + if (test_bit(GLF_QUEUED, &gl->gl_flags) && + gl->gl_name.ln_type == LM_TYPE_INODE) { if (time_before(now, holdtime)) delay = holdtime - now; if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) - delay = gl->gl_ops->go_min_hold_time; + delay = gl->gl_hold_time; } spin_lock(&gl->gl_spin); @@ -1667,7 +1686,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) dtime *= 1000000/HZ; /* demote time in uSec */ if (!test_bit(GLF_DEMOTE, &gl->gl_flags)) dtime = 0; - gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n", + gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n", state2str(gl->gl_state), gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, @@ -1676,7 +1695,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl) state2str(gl->gl_demote_state), dtime, atomic_read(&gl->gl_ail_count), atomic_read(&gl->gl_revokes), - atomic_read(&gl->gl_ref)); + atomic_read(&gl->gl_ref), gl->gl_hold_time); list_for_each_entry(gh, &gl->gl_holders, gh_list) { error = dump_holder(seq, gh); diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 6b2f757b9281..66707118af25 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -113,6 +113,12 @@ enum { #define GLR_TRYFAILED 13 +#define GL_GLOCK_MAX_HOLD (long)(HZ / 5) +#define GL_GLOCK_DFT_HOLD (long)(HZ / 5) +#define GL_GLOCK_MIN_HOLD (long)(10) +#define GL_GLOCK_HOLD_INCR (long)(HZ / 20) +#define GL_GLOCK_HOLD_DECR (long)(HZ / 40) + struct lm_lockops { const char *lm_proto_name; int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname); diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 95788ae436c6..a4222c3f5993 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -551,7 +551,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = { .go_lock = inode_go_lock, .go_dump = inode_go_dump, .go_type = LM_TYPE_INODE, - .go_min_hold_time = HZ / 5, .go_flags = GLOF_ASPACE, }; @@ -562,7 +561,6 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = { .go_unlock = rgrp_go_unlock, .go_dump = gfs2_rgrp_dump, .go_type = LM_TYPE_RGRP, - .go_min_hold_time = HZ / 5, .go_flags = GLOF_ASPACE, }; diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 24cd55f60e60..892ac37de8ae 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -163,7 +163,6 @@ struct gfs2_glock_operations { int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl); void (*go_callback) (struct gfs2_glock *gl); const int go_type; - const unsigned long go_min_hold_time; const unsigned long go_flags; #define GLOF_ASPACE 1 }; @@ -221,6 +220,7 @@ struct gfs2_glock { unsigned int gl_hash; unsigned long gl_demote_time; /* time of first demote request */ 
+ long gl_hold_time; struct list_head gl_holders; const struct gfs2_glock_operations *gl_ops; -- cgit v1.2.3 From 9964afbb797329e4c6e5d05ebc199c39af618c5f Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Thu, 16 Jun 2011 14:06:55 +0100 Subject: GFS2: Add S_NOSEC support This adds S_NOSEC support to GFS2. We set/reset the flag either when a user calls setattr or when we have just regained the glock from another node. The flag is only set if there are no xattrs on the inode and there is no suid bit set. Signed-off-by: Steven Whitehouse Reviewed-by: Andi Kleen Cc: Al Viro --- fs/gfs2/file.c | 4 +++- fs/gfs2/glops.c | 3 ++- fs/gfs2/ops_fstype.c | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index a9f5cbe45cd9..bc2590ef5fc1 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -174,7 +174,9 @@ void gfs2_set_inode_flags(struct inode *inode) struct gfs2_inode *ip = GFS2_I(inode); unsigned int flags = inode->i_flags; - flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC); + if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode)) + inode->i_flags |= S_NOSEC; if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) flags |= S_IMMUTABLE; if (ip->i_diskflags & GFS2_DIF_APPENDONLY) diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index a4222c3f5993..da21ecaafcc2 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -318,6 +318,8 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) ip->i_generation = be64_to_cpu(str->di_generation); ip->i_diskflags = be32_to_cpu(str->di_flags); + ip->i_eattr = be64_to_cpu(str->di_eattr); + /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */ gfs2_set_inode_flags(&ip->i_inode); height = be16_to_cpu(str->di_height); if (unlikely(height > GFS2_MAX_META_HEIGHT)) @@ -330,7 +332,6 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) ip->i_depth = (u8)depth; ip->i_entries = be32_to_cpu(str->di_entries); - ip->i_eattr = be64_to_cpu(str->di_eattr); if (S_ISREG(ip->i_inode.i_mode)) gfs2_set_aops(&ip->i_inode); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 2a77071fb7b6..516516e0c2a2 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1094,6 +1094,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent if (sdp->sd_args.ar_nobarrier) set_bit(SDF_NOBARRIERS, &sdp->sd_flags); + sb->s_flags |= MS_NOSEC; sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; sb->s_d_op = &gfs2_dops; -- cgit v1.2.3 From 46fcb2ed29ff7a9ec34c4ae67b631743eb7e756f Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 23 Jun 2011 10:39:34 -0500 Subject: GFS2: combine duplicated block freeing routines __gfs2_free_data and __gfs2_free_meta are almost identical, and can be trivially combined. [This is as per Eric's original patch minus gfs2_free_data() which had no callers left and plus the conversion of the bmap.c calls to these functions. 
All in all, a nice clean up] Signed-off-by: Eric Sandeen Signed-off-by: Steven Whitehouse --- fs/gfs2/bmap.c | 12 ++---------- fs/gfs2/rgrp.c | 52 +++++----------------------------------------------- fs/gfs2/rgrp.h | 4 +--- 3 files changed, 8 insertions(+), 60 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index e65493a8ac00..42e477f31223 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -854,11 +854,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, blen++; else { if (bstart) { - if (metadata) - __gfs2_free_meta(ip, bstart, blen); - else - __gfs2_free_data(ip, bstart, blen); - + __gfs2_free_blocks(ip, bstart, blen, metadata); btotal += blen; } @@ -870,11 +866,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, gfs2_add_inode_blocks(&ip->i_inode, -1); } if (bstart) { - if (metadata) - __gfs2_free_meta(ip, bstart, blen); - else - __gfs2_free_data(ip, bstart, blen); - + __gfs2_free_blocks(ip, bstart, blen, metadata); btotal += blen; } diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 9b780df3fd54..7f8af1eb02de 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -1607,14 +1607,15 @@ rgrp_error: } /** - * gfs2_free_data - free a contiguous run of data block(s) + * __gfs2_free_blocks - free a contiguous run of block(s) * @ip: the inode these blocks are being freed from * @bstart: first block of a run of contiguous blocks * @blen: the length of the block run + * @meta: 1 if the blocks represent metadata * */ -void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) +void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; @@ -1631,53 +1632,10 @@ void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) gfs2_trans_add_rg(rgd); /* Directories keep their data in the metadata address space */ - if (ip->i_depth) + if (meta || ip->i_depth) gfs2_meta_wipe(ip, bstart, blen); } -/** - * gfs2_free_data - free a contiguous run of data block(s) - * @ip: the inode these blocks are being freed from - * @bstart: first block of a run of contiguous blocks - * @blen: the length of the block run - * - */ - -void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) -{ - struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - - __gfs2_free_data(ip, bstart, blen); - gfs2_statfs_change(sdp, 0, +blen, 0); - gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); -} - -/** - * gfs2_free_meta - free a contiguous run of data block(s) - * @ip: the inode these blocks are being freed from - * @bstart: first block of a run of contiguous blocks - * @blen: the length of the block run - * - */ - -void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) -{ - struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct gfs2_rgrpd *rgd; - - rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); - if (!rgd) - return; - trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE); - rgd->rd_free += blen; - - gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); - gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); - - gfs2_trans_add_rg(rgd); - gfs2_meta_wipe(ip, bstart, blen); -} - /** * gfs2_free_meta - free a contiguous run of data block(s) * @ip: the inode these blocks are being freed from @@ -1690,7 +1648,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - __gfs2_free_meta(ip, bstart, blen); + __gfs2_free_blocks(ip, bstart, blen, 1); gfs2_statfs_change(sdp, 
0, +blen, 0); gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid); } diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index a80e3034ac47..d253f9a8c70e 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h @@ -52,9 +52,7 @@ extern int gfs2_ri_update(struct gfs2_inode *ip); extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n); extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation); -extern void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen); -extern void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen); -extern void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen); +extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta); extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen); extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip); extern void gfs2_unlink_di(struct inode *inode); -- cgit v1.2.3 From 23e8e1aaacb10d9f05e44a93e10ea4ee5b3838a5 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Tue, 5 Apr 2011 13:16:24 -0500 Subject: dlm: use workqueue for callbacks Instead of creating our own kthread (dlm_astd) to deliver callbacks for all lockspaces, use a per-lockspace workqueue to deliver the callbacks. This eliminates complications and slowdowns from many lockspaces sharing the same thread. Signed-off-by: David Teigland --- fs/dlm/ast.c | 262 ++++++++++++++++++++++---------------------------- fs/dlm/ast.h | 15 +-- fs/dlm/dlm_internal.h | 9 +- fs/dlm/lock.c | 24 ++--- fs/dlm/lockspace.c | 43 ++++----- fs/dlm/recoverd.c | 12 +-- fs/dlm/user.c | 12 +-- 7 files changed, 172 insertions(+), 205 deletions(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index abc49f292454..4f29add0e7d1 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -14,17 +14,9 @@ #include "dlm_internal.h" #include "lock.h" #include "user.h" -#include "ast.h" - -#define WAKE_ASTS 0 - -static uint64_t ast_seq_count; -static struct list_head ast_queue; -static spinlock_t ast_queue_lock; -static struct task_struct * astd_task; -static unsigned long astd_wakeflags; -static struct mutex astd_running; +static uint64_t dlm_cb_seq; +static spinlock_t dlm_cb_seq_spin; static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb) { @@ -57,21 +49,13 @@ static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb) } } -void dlm_del_ast(struct dlm_lkb *lkb) -{ - spin_lock(&ast_queue_lock); - if (!list_empty(&lkb->lkb_astqueue)) - list_del_init(&lkb->lkb_astqueue); - spin_unlock(&ast_queue_lock); -} - int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, uint32_t sbflags, uint64_t seq) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; uint64_t prev_seq; int prev_mode; - int i; + int i, rv; for (i = 0; i < DLM_CALLBACKS_SIZE; i++) { if (lkb->lkb_callbacks[i].seq) @@ -100,7 +84,8 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, mode, (unsigned long long)prev_seq, prev_mode); - return 0; + rv = 0; + goto out; } } @@ -109,6 +94,7 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, lkb->lkb_callbacks[i].mode = mode; lkb->lkb_callbacks[i].sb_status = status; lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF); + rv = 0; break; } @@ -117,21 +103,24 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, lkb->lkb_id, (unsigned long long)seq, flags, mode, status, sbflags); dlm_dump_lkb_callbacks(lkb); - return -1; + rv = -1; + goto out; } - - return 0; + out: + return rv; } int 
dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_callback *cb, int *resid) { - int i; + int i, rv; *resid = 0; - if (!lkb->lkb_callbacks[0].seq) - return -ENOENT; + if (!lkb->lkb_callbacks[0].seq) { + rv = -ENOENT; + goto out; + } /* oldest undelivered cb is callbacks[0] */ @@ -163,7 +152,8 @@ int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb, cb->mode, (unsigned long long)lkb->lkb_last_cast.seq, lkb->lkb_last_cast.mode); - return 0; + rv = 0; + goto out; } } @@ -176,171 +166,147 @@ int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb, memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback)); lkb->lkb_last_bast_time = ktime_get(); } - - return 0; + rv = 0; + out: + return rv; } -void dlm_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, - uint32_t sbflags) +void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, + uint32_t sbflags) { - uint64_t seq; + struct dlm_ls *ls = lkb->lkb_resource->res_ls; + uint64_t new_seq, prev_seq; int rv; - spin_lock(&ast_queue_lock); - - seq = ++ast_seq_count; + spin_lock(&dlm_cb_seq_spin); + new_seq = ++dlm_cb_seq; + spin_unlock(&dlm_cb_seq_spin); if (lkb->lkb_flags & DLM_IFL_USER) { - spin_unlock(&ast_queue_lock); - dlm_user_add_ast(lkb, flags, mode, status, sbflags, seq); + dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq); return; } - rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq); - if (rv < 0) { - spin_unlock(&ast_queue_lock); - return; - } + mutex_lock(&lkb->lkb_cb_mutex); + prev_seq = lkb->lkb_callbacks[0].seq; - if (list_empty(&lkb->lkb_astqueue)) { + rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq); + if (rv < 0) + goto out; + + if (!prev_seq) { kref_get(&lkb->lkb_ref); - list_add_tail(&lkb->lkb_astqueue, &ast_queue); - } - spin_unlock(&ast_queue_lock); - set_bit(WAKE_ASTS, &astd_wakeflags); - wake_up_process(astd_task); + if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) { + mutex_lock(&ls->ls_cb_mutex); + list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay); + mutex_unlock(&ls->ls_cb_mutex); + } else { + queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); + } + } + out: + mutex_unlock(&lkb->lkb_cb_mutex); } -static void process_asts(void) +void dlm_callback_work(struct work_struct *work) { - struct dlm_ls *ls = NULL; - struct dlm_rsb *r = NULL; - struct dlm_lkb *lkb; + struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work); + struct dlm_ls *ls = lkb->lkb_resource->res_ls; void (*castfn) (void *astparam); void (*bastfn) (void *astparam, int mode); struct dlm_callback callbacks[DLM_CALLBACKS_SIZE]; int i, rv, resid; -repeat: - spin_lock(&ast_queue_lock); - list_for_each_entry(lkb, &ast_queue, lkb_astqueue) { - r = lkb->lkb_resource; - ls = r->res_ls; + memset(&callbacks, 0, sizeof(callbacks)); - if (dlm_locking_stopped(ls)) - continue; - - /* we remove from astqueue list and remove everything in - lkb_callbacks before releasing the spinlock so empty - lkb_astqueue is always consistent with empty lkb_callbacks */ - - list_del_init(&lkb->lkb_astqueue); - - castfn = lkb->lkb_astfn; - bastfn = lkb->lkb_bastfn; + mutex_lock(&lkb->lkb_cb_mutex); + if (!lkb->lkb_callbacks[0].seq) { + /* no callback work exists, shouldn't happen */ + log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id); + dlm_print_lkb(lkb); + dlm_dump_lkb_callbacks(lkb); + } - memset(&callbacks, 0, sizeof(callbacks)); + for (i = 0; i < DLM_CALLBACKS_SIZE; i++) { + rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid); + if (rv < 0) + break; + 
} - for (i = 0; i < DLM_CALLBACKS_SIZE; i++) { - rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid); - if (rv < 0) - break; - } - spin_unlock(&ast_queue_lock); + if (resid) { + /* cbs remain, loop should have removed all, shouldn't happen */ + log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id, + resid); + dlm_print_lkb(lkb); + dlm_dump_lkb_callbacks(lkb); + } + mutex_unlock(&lkb->lkb_cb_mutex); - if (resid) { - /* shouldn't happen, for loop should have removed all */ - log_error(ls, "callback resid %d lkb %x", - resid, lkb->lkb_id); - } + castfn = lkb->lkb_astfn; + bastfn = lkb->lkb_bastfn; - for (i = 0; i < DLM_CALLBACKS_SIZE; i++) { - if (!callbacks[i].seq) - break; - if (callbacks[i].flags & DLM_CB_SKIP) { - continue; - } else if (callbacks[i].flags & DLM_CB_BAST) { - bastfn(lkb->lkb_astparam, callbacks[i].mode); - } else if (callbacks[i].flags & DLM_CB_CAST) { - lkb->lkb_lksb->sb_status = callbacks[i].sb_status; - lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags; - castfn(lkb->lkb_astparam); - } + for (i = 0; i < DLM_CALLBACKS_SIZE; i++) { + if (!callbacks[i].seq) + break; + if (callbacks[i].flags & DLM_CB_SKIP) { + continue; + } else if (callbacks[i].flags & DLM_CB_BAST) { + bastfn(lkb->lkb_astparam, callbacks[i].mode); + } else if (callbacks[i].flags & DLM_CB_CAST) { + lkb->lkb_lksb->sb_status = callbacks[i].sb_status; + lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags; + castfn(lkb->lkb_astparam); } - - /* removes ref for ast_queue, may cause lkb to be freed */ - dlm_put_lkb(lkb); - - cond_resched(); - goto repeat; } - spin_unlock(&ast_queue_lock); -} - -static inline int no_asts(void) -{ - int ret; - spin_lock(&ast_queue_lock); - ret = list_empty(&ast_queue); - spin_unlock(&ast_queue_lock); - return ret; + /* undo kref_get from dlm_add_callback, may cause lkb to be freed */ + dlm_put_lkb(lkb); } -static int dlm_astd(void *data) +int dlm_callback_start(struct dlm_ls *ls) { - while (!kthread_should_stop()) { - set_current_state(TASK_INTERRUPTIBLE); - if (!test_bit(WAKE_ASTS, &astd_wakeflags)) - schedule(); - set_current_state(TASK_RUNNING); - - mutex_lock(&astd_running); - if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags)) - process_asts(); - mutex_unlock(&astd_running); + ls->ls_callback_wq = alloc_workqueue("dlm_callback", + WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + if (!ls->ls_callback_wq) { + log_print("can't start dlm_callback workqueue"); + return -ENOMEM; } return 0; } -void dlm_astd_wake(void) +void dlm_callback_stop(struct dlm_ls *ls) { - if (!no_asts()) { - set_bit(WAKE_ASTS, &astd_wakeflags); - wake_up_process(astd_task); - } + if (ls->ls_callback_wq) + destroy_workqueue(ls->ls_callback_wq); } -int dlm_astd_start(void) +void dlm_callback_suspend(struct dlm_ls *ls) { - struct task_struct *p; - int error = 0; - - INIT_LIST_HEAD(&ast_queue); - spin_lock_init(&ast_queue_lock); - mutex_init(&astd_running); - - p = kthread_run(dlm_astd, NULL, "dlm_astd"); - if (IS_ERR(p)) - error = PTR_ERR(p); - else - astd_task = p; - return error; -} + set_bit(LSFL_CB_DELAY, &ls->ls_flags); -void dlm_astd_stop(void) -{ - kthread_stop(astd_task); + if (ls->ls_callback_wq) + flush_workqueue(ls->ls_callback_wq); } -void dlm_astd_suspend(void) +void dlm_callback_resume(struct dlm_ls *ls) { - mutex_lock(&astd_running); -} + struct dlm_lkb *lkb, *safe; + int count = 0; -void dlm_astd_resume(void) -{ - mutex_unlock(&astd_running); + clear_bit(LSFL_CB_DELAY, &ls->ls_flags); + + if (!ls->ls_callback_wq) + return; + + mutex_lock(&ls->ls_cb_mutex); + list_for_each_entry_safe(lkb, safe, 
&ls->ls_cb_delay, lkb_cb_list) { + list_del_init(&lkb->lkb_cb_list); + queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work); + count++; + } + mutex_unlock(&ls->ls_cb_mutex); + + log_debug(ls, "dlm_callback_resume %d", count); } diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h index 8aa89c9b5611..757b551c6820 100644 --- a/fs/dlm/ast.h +++ b/fs/dlm/ast.h @@ -18,14 +18,15 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, uint32_t sbflags, uint64_t seq); int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_callback *cb, int *resid); -void dlm_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, - uint32_t sbflags); +void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status, + uint32_t sbflags); -void dlm_astd_wake(void); -int dlm_astd_start(void); -void dlm_astd_stop(void); -void dlm_astd_suspend(void); -void dlm_astd_resume(void); +void dlm_callback_work(struct work_struct *work); +int dlm_callback_start(struct dlm_ls *ls); +void dlm_callback_stop(struct dlm_ls *ls); +void dlm_callback_suspend(struct dlm_ls *ls); +void dlm_callback_resume(struct dlm_ls *ls); #endif + diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index 6614f335e25d..fe2860c02449 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -246,13 +246,15 @@ struct dlm_lkb { struct list_head lkb_statequeue; /* rsb g/c/w list */ struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */ struct list_head lkb_wait_reply; /* waiting for remote reply */ - struct list_head lkb_astqueue; /* need ast to be sent */ struct list_head lkb_ownqueue; /* list of locks for a process */ struct list_head lkb_time_list; ktime_t lkb_timestamp; ktime_t lkb_wait_time; unsigned long lkb_timeout_cs; + struct mutex lkb_cb_mutex; + struct work_struct lkb_cb_work; + struct list_head lkb_cb_list; /* for ls_cb_delay or proc->asts */ struct dlm_callback lkb_callbacks[DLM_CALLBACKS_SIZE]; struct dlm_callback lkb_last_cast; struct dlm_callback lkb_last_bast; @@ -504,8 +506,12 @@ struct dlm_ls { struct miscdevice ls_device; + struct workqueue_struct *ls_callback_wq; + /* recovery related */ + struct mutex ls_cb_mutex; + struct list_head ls_cb_delay; /* save for queue_work later */ struct timer_list ls_timer; struct task_struct *ls_recoverd_task; struct mutex ls_recoverd_active; @@ -542,6 +548,7 @@ struct dlm_ls { #define LSFL_RCOM_WAIT 4 #define LSFL_UEVENT_WAIT 5 #define LSFL_TIMEWARN 6 +#define LSFL_CB_DELAY 7 /* much of this is just saving user space pointers associated with the lock that we pass back to the user lib with an ast */ diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 6772e5c5bcd6..83b5e32514e1 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -305,7 +305,7 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) rv = -EDEADLK; } - dlm_add_ast(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags); + dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags); } static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb) @@ -319,7 +319,7 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) if (is_master_copy(lkb)) { send_bast(r, lkb, rqmode); } else { - dlm_add_ast(lkb, DLM_CB_BAST, rqmode, 0, 0); + dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0); } } @@ -638,7 +638,9 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) INIT_LIST_HEAD(&lkb->lkb_ownqueue); INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); INIT_LIST_HEAD(&lkb->lkb_time_list); - INIT_LIST_HEAD(&lkb->lkb_astqueue); 
+ INIT_LIST_HEAD(&lkb->lkb_cb_list); + mutex_init(&lkb->lkb_cb_mutex); + INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work); retry: rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS); @@ -4010,8 +4012,6 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms) default: log_error(ls, "unknown message type %d", ms->m_type); } - - dlm_astd_wake(); } /* If the lockspace is in recovery mode (locking stopped), then normal @@ -4826,7 +4826,7 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, goto out_put; spin_lock(&ua->proc->locks_spin); - /* dlm_user_add_ast() may have already taken lkb off the proc list */ + /* dlm_user_add_cb() may have already taken lkb off the proc list */ if (!list_empty(&lkb->lkb_ownqueue)) list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); spin_unlock(&ua->proc->locks_spin); @@ -4963,7 +4963,7 @@ static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) /* We have to release clear_proc_locks mutex before calling unlock_proc_lock() (which does lock_rsb) due to deadlock with receiving a message that does - lock_rsb followed by dlm_user_add_ast() */ + lock_rsb followed by dlm_user_add_cb() */ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, struct dlm_user_proc *proc) @@ -4986,7 +4986,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, return lkb; } -/* The ls_clear_proc_locks mutex protects against dlm_user_add_asts() which +/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which 1) references lkb->ua which we free here and 2) adds lkbs to proc->asts, which we clear here. */ @@ -5028,10 +5028,10 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) dlm_put_lkb(lkb); } - list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) { + list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { memset(&lkb->lkb_callbacks, 0, sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE); - list_del_init(&lkb->lkb_astqueue); + list_del_init(&lkb->lkb_cb_list); dlm_put_lkb(lkb); } @@ -5070,10 +5070,10 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) spin_unlock(&proc->locks_spin); spin_lock(&proc->asts_spin); - list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) { + list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { memset(&lkb->lkb_callbacks, 0, sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE); - list_del_init(&lkb->lkb_astqueue); + list_del_init(&lkb->lkb_cb_list); dlm_put_lkb(lkb); } spin_unlock(&proc->asts_spin); diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 98a97762c893..a1d8f1af144b 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -15,7 +15,6 @@ #include "lockspace.h" #include "member.h" #include "recoverd.h" -#include "ast.h" #include "dir.h" #include "lowcomms.h" #include "config.h" @@ -24,6 +23,7 @@ #include "recover.h" #include "requestqueue.h" #include "user.h" +#include "ast.h" static int ls_count; static struct mutex ls_lock; @@ -359,17 +359,10 @@ static int threads_start(void) { int error; - /* Thread which process lock requests for all lockspace's */ - error = dlm_astd_start(); - if (error) { - log_print("cannot start dlm_astd thread %d", error); - goto fail; - } - error = dlm_scand_start(); if (error) { log_print("cannot start dlm_scand thread %d", error); - goto astd_fail; + goto fail; } /* Thread for sending/receiving messages for all lockspace's */ @@ -383,8 +376,6 @@ static int threads_start(void) scand_fail: dlm_scand_stop(); - astd_fail: - dlm_astd_stop(); fail: return error; } @@ -393,7 
+384,6 @@ static void threads_stop(void) { dlm_scand_stop(); dlm_lowcomms_stop(); - dlm_astd_stop(); } static int new_lockspace(const char *name, int namelen, void **lockspace, @@ -514,6 +504,9 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, init_completion(&ls->ls_members_done); ls->ls_members_result = -1; + mutex_init(&ls->ls_cb_mutex); + INIT_LIST_HEAD(&ls->ls_cb_delay); + ls->ls_recoverd_task = NULL; mutex_init(&ls->ls_recoverd_active); spin_lock_init(&ls->ls_recover_lock); @@ -547,18 +540,26 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, list_add(&ls->ls_list, &lslist); spin_unlock(&lslist_lock); + if (flags & DLM_LSFL_FS) { + error = dlm_callback_start(ls); + if (error) { + log_error(ls, "can't start dlm_callback %d", error); + goto out_delist; + } + } + /* needs to find ls in lslist */ error = dlm_recoverd_start(ls); if (error) { log_error(ls, "can't start dlm_recoverd %d", error); - goto out_delist; + goto out_callback; } ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); if (error) - goto out_stop; + goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); /* let kobject handle freeing of ls if there's an error */ @@ -572,7 +573,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, error = do_uevent(ls, 1); if (error) - goto out_stop; + goto out_recoverd; wait_for_completion(&ls->ls_members_done); error = ls->ls_members_result; @@ -589,8 +590,10 @@ static int new_lockspace(const char *name, int namelen, void **lockspace, do_uevent(ls, 0); dlm_clear_members(ls); kfree(ls->ls_node_array); - out_stop: + out_recoverd: dlm_recoverd_stop(ls); + out_callback: + dlm_callback_stop(ls); out_delist: spin_lock(&lslist_lock); list_del(&ls->ls_list); @@ -652,8 +655,6 @@ static int lkb_idr_free(int id, void *p, void *data) { struct dlm_lkb *lkb = p; - dlm_del_ast(lkb); - if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY) dlm_free_lvb(lkb->lkb_lvbptr); @@ -717,12 +718,12 @@ static int release_lockspace(struct dlm_ls *ls, int force) dlm_recoverd_stop(ls); + dlm_callback_stop(ls); + remove_lockspace(ls); dlm_delete_debug_file(ls); - dlm_astd_suspend(); - kfree(ls->ls_recover_buf); /* @@ -740,8 +741,6 @@ static int release_lockspace(struct dlm_ls *ls, int force) idr_remove_all(&ls->ls_lkbidr); idr_destroy(&ls->ls_lkbidr); - dlm_astd_resume(); - /* * Free all rsb's on rsbtbl[] lists */ diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index fd677c8c3d3b..774da3cf92c6 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -58,13 +58,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) mutex_lock(&ls->ls_recoverd_active); - /* - * Suspending and resuming dlm_astd ensures that no lkb's from this ls - * will be processed by dlm_astd during recovery. - */ - - dlm_astd_suspend(); - dlm_astd_resume(); + dlm_callback_suspend(ls); /* * Free non-master tossed rsb's. 
Master rsb's are kept on toss @@ -202,6 +196,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) dlm_adjust_timeouts(ls); + dlm_callback_resume(ls); + error = enable_locking(ls, rv->seq); if (error) { log_debug(ls, "enable_locking failed %d", error); @@ -222,8 +218,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) dlm_grant_after_purge(ls); - dlm_astd_wake(); - log_debug(ls, "recover %llx done: %u ms", (unsigned long long)rv->seq, jiffies_to_msecs(jiffies - start)); diff --git a/fs/dlm/user.c b/fs/dlm/user.c index e96bf3e9be88..d8ea60756403 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -213,9 +213,9 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, goto out; } - if (list_empty(&lkb->lkb_astqueue)) { + if (list_empty(&lkb->lkb_cb_list)) { kref_get(&lkb->lkb_ref); - list_add_tail(&lkb->lkb_astqueue, &proc->asts); + list_add_tail(&lkb->lkb_cb_list, &proc->asts); wake_up_interruptible(&proc->wait); } spin_unlock(&proc->asts_spin); @@ -832,24 +832,24 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, } /* if we empty lkb_callbacks, we don't want to unlock the spinlock - without removing lkb_astqueue; so empty lkb_astqueue is always + without removing lkb_cb_list; so empty lkb_cb_list is always consistent with empty lkb_callbacks */ - lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue); + lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list); rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid); if (rv < 0) { /* this shouldn't happen; lkb should have been removed from list when resid was zero */ log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id); - list_del_init(&lkb->lkb_astqueue); + list_del_init(&lkb->lkb_cb_list); spin_unlock(&proc->asts_spin); /* removes ref for proc->asts, may cause lkb to be freed */ dlm_put_lkb(lkb); goto try_another; } if (!resid) - list_del_init(&lkb->lkb_astqueue); + list_del_init(&lkb->lkb_cb_list); spin_unlock(&proc->asts_spin); if (cb.flags & DLM_CB_SKIP) { -- cgit v1.2.3 From dc137bf553dbb6855bd7efc34fedcd03102455f7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 16 Jul 2011 23:37:20 -0400 Subject: cifs: build_path_from_dentry() race fix deal with d_move() races properly; rename_lock read-retry loop, rcu_read_lock() held while walking to root, d_lock held over subtraction from namelen and copying the component to stabilize ->d_name. 
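In outline, the locking pattern being introduced is the following (a simplified sketch, not the cifs code itself; the helper name and the caller-supplied fixed-size buffer are illustrative): walk the parent chain under rcu_read_lock(), copy each component with that dentry's d_lock held so ->d_name cannot change mid-copy, and redo the whole walk if rename_lock reports a concurrent d_move().

	static char *build_path_suffix(struct dentry *dentry, char *buf, int buflen)
	{
		struct dentry *temp;
		char *end;
		unsigned seq;

	retry:
		end = buf + buflen;
		*--end = '\0';
		seq = read_seqbegin(&rename_lock);
		rcu_read_lock();
		for (temp = dentry; !IS_ROOT(temp); temp = temp->d_parent) {
			spin_lock(&temp->d_lock);
			if (end - buf <= temp->d_name.len) {
				/* buffer too small; caller deals with it */
				spin_unlock(&temp->d_lock);
				break;
			}
			end -= temp->d_name.len;
			memcpy(end, temp->d_name.name, temp->d_name.len);
			*--end = '/';
			spin_unlock(&temp->d_lock);
		}
		rcu_read_unlock();
		if (read_seqretry(&rename_lock, seq))
			goto retry;	/* raced with d_move(), rebuild the path */
		return end;
	}

The actual patch computes the length first and allocates exactly, but the retry-under-rename_lock / copy-under-d_lock structure is the same.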
Signed-off-by: Al Viro --- fs/cifs/dir.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 81914df47ef1..fa8c21d913bc 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -55,6 +55,7 @@ build_path_from_dentry(struct dentry *direntry) char dirsep; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); + unsigned seq; if (direntry == NULL) return NULL; /* not much we can do if dentry is freed and @@ -68,22 +69,29 @@ build_path_from_dentry(struct dentry *direntry) dfsplen = 0; cifs_bp_rename_retry: namelen = dfsplen; + seq = read_seqbegin(&rename_lock); + rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { namelen += (1 + temp->d_name.len); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); + rcu_read_unlock(); return NULL; } } + rcu_read_unlock(); full_path = kmalloc(namelen+1, GFP_KERNEL); if (full_path == NULL) return full_path; full_path[namelen] = 0; /* trailing null */ + rcu_read_lock(); for (temp = direntry; !IS_ROOT(temp);) { + spin_lock(&temp->d_lock); namelen -= 1 + temp->d_name.len; if (namelen < 0) { + spin_unlock(&temp->d_lock); break; } else { full_path[namelen] = dirsep; @@ -91,14 +99,17 @@ cifs_bp_rename_retry: temp->d_name.len); cFYI(0, "name: %s", full_path + namelen); } + spin_unlock(&temp->d_lock); temp = temp->d_parent; if (temp == NULL) { cERROR(1, "corrupt dentry"); + rcu_read_unlock(); kfree(full_path); return NULL; } } - if (namelen != dfsplen) { + rcu_read_unlock(); + if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { cERROR(1, "did not end path lookup where expected namelen is %d", namelen); /* presumably this is only possible if racing with a rename -- cgit v1.2.3 From 1b71fe2efa31cd18c865db474a4cd473b6ab5281 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 16 Jul 2011 23:43:58 -0400 Subject: ceph analog of cifs build_path_from_dentry() race fix ... unfortunately, cifs bug got copied. Fix is essentially the same. 
Signed-off-by: Al Viro --- fs/ceph/mds_client.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 79743d146be6..0c1d91756528 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1438,12 +1438,15 @@ char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, struct dentry *temp; char *path; int len, pos; + unsigned seq; if (dentry == NULL) return ERR_PTR(-EINVAL); retry: len = 0; + seq = read_seqbegin(&rename_lock); + rcu_read_lock(); for (temp = dentry; !IS_ROOT(temp);) { struct inode *inode = temp->d_inode; if (inode && ceph_snap(inode) == CEPH_SNAPDIR) @@ -1455,10 +1458,12 @@ retry: len += 1 + temp->d_name.len; temp = temp->d_parent; if (temp == NULL) { + rcu_read_unlock(); pr_err("build_path corrupt dentry %p\n", dentry); return ERR_PTR(-EINVAL); } } + rcu_read_unlock(); if (len) len--; /* no leading '/' */ @@ -1467,9 +1472,12 @@ retry: return ERR_PTR(-ENOMEM); pos = len; path[pos] = 0; /* trailing null */ + rcu_read_lock(); for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { - struct inode *inode = temp->d_inode; + struct inode *inode; + spin_lock(&temp->d_lock); + inode = temp->d_inode; if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { dout("build_path path+%d: %p SNAPDIR\n", pos, temp); @@ -1478,21 +1486,26 @@ retry: break; } else { pos -= temp->d_name.len; - if (pos < 0) + if (pos < 0) { + spin_unlock(&temp->d_lock); break; + } strncpy(path + pos, temp->d_name.name, temp->d_name.len); } + spin_unlock(&temp->d_lock); if (pos) path[--pos] = '/'; temp = temp->d_parent; if (temp == NULL) { + rcu_read_unlock(); pr_err("build_path corrupt dentry\n"); kfree(path); return ERR_PTR(-EINVAL); } } - if (pos != 0) { + rcu_read_unlock(); + if (pos != 0 || read_seqretry(&rename_lock, seq)) { pr_err("build_path did not end path lookup where " "expected, namelen is %d, pos is %d\n", len, pos); /* presumably this is only possible if racing with a -- cgit v1.2.3 From a803b8067e317832d6a251c5b0486e36a4f81922 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 8 Jul 2011 20:56:55 -0400 Subject: fix exofs ->get_parent() NULL is not a possible return value for that method, TYVM... Signed-off-by: Al Viro --- fs/exofs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 06065bd37fc3..c57beddcc217 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -913,7 +913,7 @@ struct dentry *exofs_get_parent(struct dentry *child) unsigned long ino = exofs_parent_ino(child); if (!ino) - return NULL; + return ERR_PTR(-ESTALE); return d_obtain_alias(exofs_iget(child->d_inode->i_sb, ino)); } -- cgit v1.2.3 From 642c937b4ed2e51d2f2e4c46ab7cd8b5bddf268b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 10:07:34 -0400 Subject: ufs should use d_splice_alias() it's NFS-exportable, so... 
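The point being: an NFS-exportable filesystem can acquire disconnected aliases for its inodes through the export code, and d_splice_alias() reuses such an alias where d_add() would create a second dentry. A minimal sketch of the resulting ->lookup() shape (the foo_* helpers are placeholders for the filesystem's own name-to-inode lookup, not real functions):

	static struct dentry *foo_lookup(struct inode *dir, struct dentry *dentry,
					 struct nameidata *nd)
	{
		struct inode *inode = NULL;
		ino_t ino;

		ino = foo_inode_by_name(dir, &dentry->d_name);	/* fs-specific */
		if (ino)
			inode = foo_iget(dir->i_sb, ino);	/* may return ERR_PTR() */
		if (IS_ERR(inode))
			return ERR_CAST(inode);
		/* reuses a preexisting (possibly disconnected) alias if one exists */
		return d_splice_alias(inode, dentry);
	}

With inode == NULL this degenerates to the d_add()-and-return-NULL case, so nothing is lost for negative lookups.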
Signed-off-by: Al Viro --- fs/ufs/namei.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 29309e25417f..b57aab9a1184 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -56,16 +56,12 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru lock_ufs(dir->i_sb); ino = ufs_inode_by_name(dir, &dentry->d_name); - if (ino) { + if (ino) inode = ufs_iget(dir->i_sb, ino); - if (IS_ERR(inode)) { - unlock_ufs(dir->i_sb); - return ERR_CAST(inode); - } - } unlock_ufs(dir->i_sb); - d_add(dentry, inode); - return NULL; + if (IS_ERR(inode)) + return ERR_CAST(inode); + return d_splice_alias(inode, dentry); } /* -- cgit v1.2.3 From 0577d1ba411f9c40693b8b3e4aa7e0892cd03091 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 19:04:14 -0400 Subject: cramfs: get_cramfs_inode() returns ERR_PTR() on failure ... and we want to report these failures in ->lookup() anyway. Signed-off-by: Al Viro --- fs/cramfs/inode.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index e141939080f0..739fb59bcdc2 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -37,7 +37,7 @@ static DEFINE_MUTEX(read_mutex); /* These macros may change in future, to provide better st_ino semantics. */ #define OFFSET(x) ((x)->i_ino) -static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset) +static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset) { if (!cino->offset) return offset + 1; @@ -61,7 +61,7 @@ static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset) } static struct inode *get_cramfs_inode(struct super_block *sb, - struct cramfs_inode *cramfs_inode, unsigned int offset) + const struct cramfs_inode *cramfs_inode, unsigned int offset) { struct inode *inode; static struct timespec zerotime; @@ -317,7 +317,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) /* Set it all up.. 
*/ sb->s_op = &cramfs_ops; root = get_cramfs_inode(sb, &super.root, 0); - if (!root) + if (IS_ERR(root)) goto out; sb->s_root = d_alloc_root(root); if (!sb->s_root) { @@ -423,6 +423,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { unsigned int offset = 0; + struct inode *inode = NULL; int sorted; mutex_lock(&read_mutex); @@ -449,8 +450,8 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s for (;;) { if (!namelen) { - mutex_unlock(&read_mutex); - return ERR_PTR(-EIO); + inode = ERR_PTR(-EIO); + goto out; } if (name[namelen-1]) break; @@ -462,17 +463,18 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s if (retval > 0) continue; if (!retval) { - struct cramfs_inode entry = *de; - mutex_unlock(&read_mutex); - d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off)); - return NULL; + inode = get_cramfs_inode(dir->i_sb, de, dir_off); + break; } /* else (retval < 0) */ if (sorted) break; } +out: mutex_unlock(&read_mutex); - d_add(dentry, NULL); + if (IS_ERR(inode)) + return ERR_CAST(inode); + d_add(dentry, inode); return NULL; } -- cgit v1.2.3 From 3cc0658e35124ace881f6942839dcae877c3eaed Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 22:24:15 -0400 Subject: hppfs: fix dentry leak Signed-off-by: Al Viro --- fs/hppfs/hppfs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 87ed48e0343d..7d6a0e92bcf0 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -174,13 +174,11 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, err = -ENOMEM; inode = get_inode(ino->i_sb, proc_dentry); if (!inode) - goto out_dput; + goto out; d_add(dentry, inode); return NULL; - out_dput: - dput(proc_dentry); out: return ERR_PTR(err); } @@ -690,8 +688,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) struct inode *proc_ino = dentry->d_inode; struct inode *inode = new_inode(sb); - if (!inode) + if (!inode) { + dput(dentry); return ERR_PTR(-ENOMEM); + } if (S_ISDIR(dentry->d_inode->i_mode)) { inode->i_op = &hppfs_dir_iops; @@ -704,7 +704,7 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) inode->i_fop = &hppfs_file_fops; } - HPPFS_I(inode)->proc_dentry = dget(dentry); + HPPFS_I(inode)->proc_dentry = dentry; inode->i_uid = proc_ino->i_uid; inode->i_gid = proc_ino->i_gid; @@ -737,7 +737,7 @@ static int hppfs_fill_super(struct super_block *sb, void *d, int silent) sb->s_fs_info = proc_mnt; err = -ENOMEM; - root_inode = get_inode(sb, proc_mnt->mnt_sb->s_root); + root_inode = get_inode(sb, dget(proc_mnt->mnt_sb->s_root)); if (!root_inode) goto out_mntput; -- cgit v1.2.3 From 0916a5e45fbd2604a303c8cc18e6b2b7c815e4c9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 22:27:22 -0400 Subject: hppfs_lookup(): don't open-code lookup_one_len() ... and it's getting it wrong, too - missing ->d_revalidate() calls when it's dealing with filesystem (procfs) that has non-trivial ->d_revalidate()... 
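Roughly, the replacement pattern is the one below (variable names are illustrative, not the exact hppfs code): with the parent's i_mutex held, lookup_one_len() does the dcache lookup, the d_alloc()/->lookup() fallback and the ->d_revalidate() call that the open-coded version was missing, and hands back either a dentry or an ERR_PTR().

	struct qstr *name = &dentry->d_name;
	struct dentry *child;

	mutex_lock(&parent->d_inode->i_mutex);
	child = lookup_one_len(name->name, parent, name->len);
	mutex_unlock(&parent->d_inode->i_mutex);
	if (IS_ERR(child))
		return ERR_CAST(child);	/* allocation or ->lookup() failure */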
Signed-off-by: Al Viro --- fs/hppfs/hppfs.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 7d6a0e92bcf0..85c098a499f3 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -139,7 +139,8 @@ static int file_removed(struct dentry *dentry, const char *file) static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, struct nameidata *nd) { - struct dentry *proc_dentry, *new, *parent; + struct dentry *proc_dentry, *parent; + struct qstr *name = &dentry->d_name; struct inode *inode; int err, deleted; @@ -149,23 +150,9 @@ static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, else if (deleted) return ERR_PTR(-ENOENT); - err = -ENOMEM; parent = HPPFS_I(ino)->proc_dentry; mutex_lock(&parent->d_inode->i_mutex); - proc_dentry = d_lookup(parent, &dentry->d_name); - if (proc_dentry == NULL) { - proc_dentry = d_alloc(parent, &dentry->d_name); - if (proc_dentry == NULL) { - mutex_unlock(&parent->d_inode->i_mutex); - goto out; - } - new = (*parent->d_inode->i_op->lookup)(parent->d_inode, - proc_dentry, NULL); - if (new) { - dput(proc_dentry); - proc_dentry = new; - } - } + proc_dentry = lookup_one_len(name->name, parent, name->len); mutex_unlock(&parent->d_inode->i_mutex); if (IS_ERR(proc_dentry)) -- cgit v1.2.3 From fec11dd9a0109fe52fd631e5c510778d6cbff6cc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 18 Jul 2011 13:50:40 -0400 Subject: Fix cifs_get_root() Add missing ->i_mutex, convert to lookup_one_len() instead of (broken) open-coded analog, cope with getting something like a//b as relative pathname. Simplify the hell out of it, while we are there... Signed-off-by: Al Viro Reviewed-by: Jeff Layton --- fs/cifs/cifsfs.c | 100 ++++++++++++++++--------------------------------------- 1 file changed, 29 insertions(+), 71 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3e2989976297..bc4b12ca537b 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include "cifsfs.h" #include "cifspdu.h" @@ -542,14 +543,12 @@ static const struct super_operations cifs_super_ops = { static struct dentry * cifs_get_root(struct smb_vol *vol, struct super_block *sb) { - int xid, rc; - struct inode *inode; - struct qstr name; - struct dentry *dparent = NULL, *dchild = NULL, *alias; + struct dentry *dentry; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - unsigned int i, full_len, len; - char *full_path = NULL, *pstart; + char *full_path = NULL; + char *s, *p; char sep; + int xid; full_path = cifs_build_path_to_root(vol, cifs_sb, cifs_sb_master_tcon(cifs_sb)); @@ -560,73 +559,32 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) xid = GetXid(); sep = CIFS_DIR_SEP(cifs_sb); - dparent = dget(sb->s_root); - full_len = strlen(full_path); - full_path[full_len] = sep; - pstart = full_path + 1; - - for (i = 1, len = 0; i <= full_len; i++) { - if (full_path[i] != sep || !len) { - len++; - continue; - } - - full_path[i] = 0; - cFYI(1, "get dentry for %s", pstart); - - name.name = pstart; - name.len = len; - name.hash = full_name_hash(pstart, len); - dchild = d_lookup(dparent, &name); - if (dchild == NULL) { - cFYI(1, "not exists"); - dchild = d_alloc(dparent, &name); - if (dchild == NULL) { - dput(dparent); - dparent = ERR_PTR(-ENOMEM); - goto out; - } - } - - cFYI(1, "get inode"); - if (dchild->d_inode == NULL) { - cFYI(1, "not exists"); - inode = NULL; - if 
(cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext) - rc = cifs_get_inode_info_unix(&inode, full_path, - sb, xid); - else - rc = cifs_get_inode_info(&inode, full_path, - NULL, sb, xid, NULL); - if (rc) { - dput(dchild); - dput(dparent); - dparent = ERR_PTR(rc); - goto out; - } - alias = d_materialise_unique(dchild, inode); - if (alias != NULL) { - dput(dchild); - if (IS_ERR(alias)) { - dput(dparent); - dparent = ERR_PTR(-EINVAL); /* XXX */ - goto out; - } - dchild = alias; - } - } - cFYI(1, "parent %p, child %p", dparent, dchild); - - dput(dparent); - dparent = dchild; - len = 0; - pstart = full_path + i + 1; - full_path[i] = sep; - } -out: + dentry = dget(sb->s_root); + p = s = full_path; + + do { + struct inode *dir = dentry->d_inode; + struct dentry *child; + + /* skip separators */ + while (*s == sep) + s++; + if (!*s) + break; + p = s++; + /* next separator */ + while (*s && *s != sep) + s++; + + mutex_lock(&dir->i_mutex); + child = lookup_one_len(p, dentry, s - p); + mutex_unlock(&dir->i_mutex); + dput(dentry); + dentry = child; + } while (!IS_ERR(dentry)); _FreeXid(xid); kfree(full_path); - return dparent; + return dentry; } static int cifs_set_super(struct super_block *sb, void *data) -- cgit v1.2.3 From 10d1459fafbb02a18e7bc8f2c384a9e973560b00 Mon Sep 17 00:00:00 2001 From: David Teigland Date: Mon, 18 Jul 2011 15:28:34 -0500 Subject: dlm: don't limit active work items Allow multiple workqueue items (locks with callbacks) to be processed concurrently. There should be no reason not to take advantage of this workqueue feature. Signed-off-by: David Teigland --- fs/dlm/ast.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c index 4f29add0e7d1..90e5997262ea 100644 --- a/fs/dlm/ast.c +++ b/fs/dlm/ast.c @@ -267,7 +267,10 @@ void dlm_callback_work(struct work_struct *work) int dlm_callback_start(struct dlm_ls *ls) { ls->ls_callback_wq = alloc_workqueue("dlm_callback", - WQ_UNBOUND | WQ_MEM_RECLAIM, 1); + WQ_UNBOUND | + WQ_MEM_RECLAIM | + WQ_NON_REENTRANT, + 0); if (!ls->ls_callback_wq) { log_print("can't start dlm_callback workqueue"); return -ENOMEM; -- cgit v1.2.3 From 59430262401bec02d415179c43dbe5b8819c09ce Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 18 Jul 2011 15:43:29 -0700 Subject: vfs: fix race in rcu lookup of pruned dentry Don't update *inode in __follow_mount_rcu() until we'd verified that there is mountpoint there. Kudos to Hugh Dickins for catching that one in the first place and eventually figuring out the solution (and catching a braino in the earlier version of patch). Signed-off-by: Linus Torvalds Signed-off-by: Al Viro --- fs/namei.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 5c867dd1c0b3..14ab8d3f2f0c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -942,7 +942,6 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, * Don't forget we might have a non-mountpoint managed dentry * that wants to block transit. */ - *inode = path->dentry->d_inode; if (unlikely(managed_dentry_might_block(path->dentry))) return false; @@ -955,6 +954,12 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, path->mnt = mounted; path->dentry = mounted->mnt_root; nd->seq = read_seqcount_begin(&path->dentry->d_seq); + /* + * Update the inode too. We don't need to re-check the + * dentry sequence number here after this d_inode read, + * because a mount-point is always pinned. 
+ */ + *inode = path->dentry->d_inode; } return true; } -- cgit v1.2.3 From f7b88631a89757d70192044c9d9f2e8d2fc02f2c Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 19 Jul 2011 08:49:25 -0700 Subject: fs/libfs.c: fix simple_attr_write() on 32bit machines Assume that /sys/kernel/debug/dummy64 is debugfs file created by debugfs_create_x64(). # cd /sys/kernel/debug # echo 0x1234567812345678 > dummy64 # cat dummy64 0x0000000012345678 # echo 0x80000000 > dummy64 # cat dummy64 0xffffffff80000000 A value larger than INT_MAX cannot be written to the debugfs file created by debugfs_create_u64 or debugfs_create_x64 on 32bit machine. Because simple_attr_write() uses simple_strtol() for the conversion. To fix this, use simple_strtoll() instead. Signed-off-by: Akinobu Mita Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/libfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/libfs.c b/fs/libfs.c index c88eab55aec9..275ca4749a2e 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -822,7 +822,7 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, goto out; attr->set_buf[size] = '\0'; - val = simple_strtol(attr->set_buf, NULL, 0); + val = simple_strtoll(attr->set_buf, NULL, 0); ret = attr->set(attr->data, val); if (ret == 0) ret = len; /* on success, claim we got the whole input */ -- cgit v1.2.3 From 44396f4b5cb8566f7118aec55eeac99be7ad94cb Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 31 May 2011 11:58:49 -0400 Subject: fs: add a DCACHE_NEED_LOOKUP flag for d_flags Btrfs (and I'd venture most other fs's) stores its indexes in nice disk order for readdir, but unfortunately in the case of anything that stats the files in order that readdir spits back (like oh say ls) that means we still have to do the normal lookup of the file, which means looking up our other index and then looking up the inode. What I want is a way to create dummy dentries when we find them in readdir so that when ls or anything else subsequently does a stat(), we already have the location information in the dentry and can go straight to the inode itself. The lookup stuff just assumes that if it finds a dentry it is done, it doesn't perform a lookup. So add a DCACHE_NEED_LOOKUP flag so that the lookup code knows it still needs to run i_op->lookup() on the parent to get the inode for the dentry. I have tested this with btrfs and I went from something that looks like this http://people.redhat.com/jwhiter/ls-noreada.png To this http://people.redhat.com/jwhiter/ls-good.png Thats a savings of 1300 seconds, or 22 minutes. That is a significant savings. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/dcache.c | 34 ++++++++++++++++++++++++++++++++-- fs/namei.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/dcache.h | 7 +++++++ 3 files changed, 88 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 6e4ea6d87774..d3902139b533 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -343,6 +343,24 @@ void d_drop(struct dentry *dentry) } EXPORT_SYMBOL(d_drop); +/* + * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag + * @dentry: dentry to drop + * + * This is called when we do a lookup on a placeholder dentry that needed to be + * looked up. The dentry should have been hashed in order for it to be found by + * the lookup code, but now needs to be unhashed while we do the actual lookup + * and clear the DCACHE_NEED_LOOKUP flag. 
+ */ +void d_clear_need_lookup(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + __d_drop(dentry); + dentry->d_flags &= ~DCACHE_NEED_LOOKUP; + spin_unlock(&dentry->d_lock); +} +EXPORT_SYMBOL(d_clear_need_lookup); + /* * Finish off a dentry we've decided to kill. * dentry->d_lock must be held, returns with it unlocked. @@ -432,8 +450,13 @@ repeat: if (d_unhashed(dentry)) goto kill_it; - /* Otherwise leave it cached and ensure it's on the LRU */ - dentry->d_flags |= DCACHE_REFERENCED; + /* + * If this dentry needs lookup, don't set the referenced flag so that it + * is more likely to be cleaned up by the dcache shrinker in case of + * memory pressure. + */ + if (!d_need_lookup(dentry)) + dentry->d_flags |= DCACHE_REFERENCED; dentry_lru_add(dentry); dentry->d_count--; @@ -1707,6 +1730,13 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, return found; } + /* + * We are going to instantiate this dentry, unhash it and clear the + * lookup flag so we can do that. + */ + if (unlikely(d_need_lookup(found))) + d_clear_need_lookup(found); + /* * Negative dentry: instantiate it unless the inode is a directory and * already has a dentry. diff --git a/fs/namei.c b/fs/namei.c index 14ab8d3f2f0c..5ba42c453e31 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1133,6 +1133,30 @@ static struct dentry *d_alloc_and_lookup(struct dentry *parent, return dentry; } +/* + * We already have a dentry, but require a lookup to be performed on the parent + * directory to fill in d_inode. Returns the new dentry, or ERR_PTR on error. + * parent->d_inode->i_mutex must be held. d_lookup must have verified that no + * child exists while under i_mutex. + */ +static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentry, + struct nameidata *nd) +{ + struct inode *inode = parent->d_inode; + struct dentry *old; + + /* Don't create child dentry for a dead directory. */ + if (unlikely(IS_DEADDIR(inode))) + return ERR_PTR(-ENOENT); + + old = inode->i_op->lookup(inode, dentry, nd); + if (unlikely(old)) { + dput(dentry); + dentry = old; + } + return dentry; +} + /* * It's more convoluted than I'd like it to be, but... it's still fairly * small and for now I'd prefer to have fast path as straight as possible. @@ -1172,6 +1196,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, goto unlazy; } } + if (unlikely(d_need_lookup(dentry))) + goto unlazy; path->mnt = mnt; path->dentry = dentry; if (unlikely(!__follow_mount_rcu(nd, path, inode))) @@ -1186,6 +1212,10 @@ unlazy: dentry = __d_lookup(parent, name); } + if (dentry && unlikely(d_need_lookup(dentry))) { + dput(dentry); + dentry = NULL; + } retry: if (unlikely(!dentry)) { struct inode *dir = parent->d_inode; @@ -1202,6 +1232,15 @@ retry: /* known good */ need_reval = 0; status = 1; + } else if (unlikely(d_need_lookup(dentry))) { + dentry = d_inode_lookup(parent, dentry, nd); + if (IS_ERR(dentry)) { + mutex_unlock(&dir->i_mutex); + return PTR_ERR(dentry); + } + /* known good */ + need_reval = 0; + status = 1; } mutex_unlock(&dir->i_mutex); } @@ -1683,6 +1722,16 @@ static struct dentry *__lookup_hash(struct qstr *name, */ dentry = d_lookup(base, name); + if (dentry && d_need_lookup(dentry)) { + /* + * __lookup_hash is called with the parent dir's i_mutex already + * held, so we are good to go here. 
+ */ + dentry = d_inode_lookup(base, dentry, nd); + if (IS_ERR(dentry)) + return dentry; + } + if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) dentry = do_revalidate(dentry, nd); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 19d90a55541d..5fa5bd33b979 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -216,6 +216,7 @@ struct dentry_operations { #define DCACHE_MOUNTED 0x10000 /* is a mountpoint */ #define DCACHE_NEED_AUTOMOUNT 0x20000 /* handle automount on this dir */ #define DCACHE_MANAGE_TRANSIT 0x40000 /* manage transit from this dirent */ +#define DCACHE_NEED_LOOKUP 0x80000 /* dentry requires i_op->lookup */ #define DCACHE_MANAGED_DENTRY \ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) @@ -416,6 +417,12 @@ static inline bool d_mountpoint(struct dentry *dentry) return dentry->d_flags & DCACHE_MOUNTED; } +static inline bool d_need_lookup(struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_NEED_LOOKUP; +} + +extern void d_clear_need_lookup(struct dentry *dentry); extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); extern int sysctl_vfs_cache_pressure; -- cgit v1.2.3 From 43e15cdbefea4ce6d68113de98d4f61c0cf45687 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 3 Jun 2011 20:16:57 -0400 Subject: new helper: iterate_supers_type() Call the given function for all superblocks of given type. Function gets a superblock (with s_umount locked shared) and (void *) argument supplied by caller of iterator. Signed-off-by: Al Viro --- fs/super.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/fs.h | 2 ++ 2 files changed, 38 insertions(+) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index ab3d672db0de..444da9579068 100644 --- a/fs/super.c +++ b/fs/super.c @@ -451,6 +451,42 @@ void iterate_supers(void (*f)(struct super_block *, void *), void *arg) spin_unlock(&sb_lock); } +/** + * iterate_supers_type - call function for superblocks of given type + * @type: fs type + * @f: function to call + * @arg: argument to pass to it + * + * Scans the superblock list and calls given function, passing it + * locked superblock and given argument. 
+ */ +void iterate_supers_type(struct file_system_type *type, + void (*f)(struct super_block *, void *), void *arg) +{ + struct super_block *sb, *p = NULL; + + spin_lock(&sb_lock); + list_for_each_entry(sb, &type->fs_supers, s_instances) { + sb->s_count++; + spin_unlock(&sb_lock); + + down_read(&sb->s_umount); + if (sb->s_root) + f(sb, arg); + up_read(&sb->s_umount); + + spin_lock(&sb_lock); + if (p) + __put_super(p); + p = sb; + } + if (p) + __put_super(p); + spin_unlock(&sb_lock); +} + +EXPORT_SYMBOL(iterate_supers_type); + /** * get_super - get the superblock of a device * @bdev: device to get the superblock for diff --git a/include/linux/fs.h b/include/linux/fs.h index b5b979247863..a8735e7e1b35 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2432,6 +2432,8 @@ extern struct super_block *get_active_super(struct block_device *bdev); extern struct super_block *user_get_super(dev_t); extern void drop_super(struct super_block *sb); extern void iterate_supers(void (*)(struct super_block *, void *), void *); +extern void iterate_supers_type(struct file_system_type *, + void (*)(struct super_block *, void *), void *); extern int dcache_dir_open(struct inode *, struct file *); extern int dcache_dir_close(struct inode *, struct file *); -- cgit v1.2.3 From 4cf27141cbe0239f48ec6f0b37bad347d51d1785 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Jun 2011 01:50:08 -0400 Subject: make exec_permission(dir) really equivalent to inode_permission(dir, MAY_EXEC) capability overrides apply only to the default case; if fs has ->permission() that does _not_ call generic_permission(), we have no business doing them. Moreover, if it has ->permission() that does call generic_permission(), we have no need to recheck capabilities. Besides, the capability overrides should apply only if we got EACCES from acl_permission_check(); any other value (-EIO, etc.) should be returned to caller, capabilities or not capabilities. 
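Spelled out in straight-line form, the intended flow is roughly this (a condensed sketch of the logic, not the exact patch text; ns is the inode's user namespace as in the function itself): capability overrides are consulted only in the generic acl_permission_check() path, only after it returned -EACCES, and never on top of a filesystem's own ->permission().

	if (inode->i_op->permission) {
		ret = inode->i_op->permission(inode, MAY_EXEC, flags);
		/* the fs did its own check: no capability override on top of it */
	} else {
		ret = acl_permission_check(inode, MAY_EXEC, flags,
					   inode->i_op->check_acl);
		if (ret == -EACCES &&
		    (ns_capable(ns, CAP_DAC_OVERRIDE) ||
		     ns_capable(ns, CAP_DAC_READ_SEARCH)))
			ret = 0;	/* DAC override: generic path only */
		/* -EIO, -ECHILD, etc. go back to the caller untouched */
	}
	if (!ret)
		ret = security_inode_exec_permission(inode, flags);
	return ret;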
Signed-off-by: Al Viro --- fs/namei.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 5ba42c453e31..7c8a93042e63 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -584,19 +584,19 @@ static inline int exec_permission(struct inode *inode, unsigned int flags) if (inode->i_op->permission) { ret = inode->i_op->permission(inode, MAY_EXEC, flags); + if (likely(!ret)) + goto ok; } else { ret = acl_permission_check(inode, MAY_EXEC, flags, inode->i_op->check_acl); + if (likely(!ret)) + goto ok; + if (ret != -EACCES) + return ret; + if (ns_capable(ns, CAP_DAC_OVERRIDE) || + ns_capable(ns, CAP_DAC_READ_SEARCH)) + goto ok; } - if (likely(!ret)) - goto ok; - if (ret == -ECHILD) - return ret; - - if (ns_capable(ns, CAP_DAC_OVERRIDE) || - ns_capable(ns, CAP_DAC_READ_SEARCH)) - goto ok; - return ret; ok: return security_inode_exec_permission(inode, flags); -- cgit v1.2.3 From 6f2861097467852f2271c2b40f9c3d1d01757048 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Jun 2011 11:49:08 -0400 Subject: switch udf_ioctl() to inode_permission() Signed-off-by: Al Viro --- fs/udf/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/udf/file.c b/fs/udf/file.c index 2a346bb1d9f5..d8ffa7cc661d 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -150,7 +150,7 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) long old_block, new_block; int result = -EINVAL; - if (file_permission(filp, MAY_READ) != 0) { + if (inode_permission(inode, MAY_READ) != 0) { udf_debug("no permission to access inode %lu\n", inode->i_ino); result = -EPERM; goto out; -- cgit v1.2.3 From 78f32a9b479e9b9f1ce2bf620a7602c1cdbc4c8e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Jun 2011 11:54:42 -0400 Subject: switch path_init() to exec_permission() Signed-off-by: Al Viro --- fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 7c8a93042e63..cf2554635a1c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1554,7 +1554,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (!S_ISDIR(dentry->d_inode->i_mode)) goto fput_fail; - retval = file_permission(file, MAY_EXEC); + retval = exec_permission(dentry->d_inode, 0); if (retval) goto fput_fail; } -- cgit v1.2.3 From 1b5d783c94c328d406e801566f161adcfb018dda Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Jun 2011 12:49:47 -0400 Subject: consolidate BINPRM_FLAGS_ENFORCE_NONDUMP handling new helper: would_dump(bprm, file). Checks if we are allowed to read the file and if we are not - sets ENFORCE_NODUMP. Exported, used in places that previously open-coded the same logics. Signed-off-by: Al Viro --- fs/binfmt_elf.c | 3 +-- fs/binfmt_elf_fdpic.c | 3 +-- fs/binfmt_misc.c | 3 +-- fs/exec.c | 14 +++++++++++--- include/linux/binfmts.h | 1 + 5 files changed, 15 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 303983fabfd6..dd0fdfc56d38 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -668,8 +668,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) * mm->dumpable = 0 regardless of the interpreter's * permissions. 
*/ - if (file_permission(interpreter, MAY_READ) < 0) - bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; + would_dump(bprm, interpreter); retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE); diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 2bc5dc644b4c..30745f459faf 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -245,8 +245,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, * mm->dumpable = 0 regardless of the interpreter's * permissions. */ - if (file_permission(interpreter, MAY_READ) < 0) - bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; + would_dump(bprm, interpreter); retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE); diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 1befe2ec8186..ba1a1ae4a18a 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -149,8 +149,7 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* if the binary is not readable than enforce mm->dumpable=0 regardless of the interpreter's permissions */ - if (file_permission(bprm->file, MAY_READ)) - bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; + would_dump(bprm, bprm->file); allow_write_access(bprm->file); bprm->file = NULL; diff --git a/fs/exec.c b/fs/exec.c index 6075a1e727ae..f9f12ad299af 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1105,6 +1105,13 @@ out: } EXPORT_SYMBOL(flush_old_exec); +void would_dump(struct linux_binprm *bprm, struct file *file) +{ + if (inode_permission(file->f_path.dentry->d_inode, MAY_READ) < 0) + bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; +} +EXPORT_SYMBOL(would_dump); + void setup_new_exec(struct linux_binprm * bprm) { int i, ch; @@ -1144,9 +1151,10 @@ void setup_new_exec(struct linux_binprm * bprm) if (bprm->cred->uid != current_euid() || bprm->cred->gid != current_egid()) { current->pdeath_signal = 0; - } else if (file_permission(bprm->file, MAY_READ) || - bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) { - set_dumpable(current->mm, suid_dumpable); + } else { + would_dump(bprm, bprm->file); + if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP) + set_dumpable(current->mm, suid_dumpable); } /* diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8845613fd7e3..fd88a3945aa1 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -111,6 +111,7 @@ extern int __must_check remove_arg_zero(struct linux_binprm *); extern int search_binary_handler(struct linux_binprm *, struct pt_regs *); extern int flush_old_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); +extern void would_dump(struct linux_binprm *, struct file *); extern int suid_dumpable; #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ -- cgit v1.2.3 From 3bfa784a6539f91a27d7ffdd408efdb638e3bebd Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Jun 2011 12:55:10 -0400 Subject: kill file_permission() completely convert the last remaining caller to inode_permission() Signed-off-by: Al Viro --- fs/namei.c | 18 ------------------ include/linux/fs.h | 1 - kernel/cgroup.c | 3 ++- 3 files changed, 2 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index cf2554635a1c..4ad2b781a65c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -303,23 +303,6 @@ int inode_permission(struct inode *inode, int mask) return security_inode_permission(inode, mask); } -/** - * file_permission - check for additional access rights to a given file - * @file: file to check access rights for - * @mask: right to check for (%MAY_READ, 
%MAY_WRITE, %MAY_EXEC) - * - * Used to check for read/write/execute permissions on an already opened - * file. - * - * Note: - * Do not use this function in new code. All access checks should - * be done using inode_permission(). - */ -int file_permission(struct file *file, int mask) -{ - return inode_permission(file->f_path.dentry->d_inode, mask); -} - /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. @@ -3405,7 +3388,6 @@ EXPORT_SYMBOL(kern_path_parent); EXPORT_SYMBOL(kern_path); EXPORT_SYMBOL(vfs_path_lookup); EXPORT_SYMBOL(inode_permission); -EXPORT_SYMBOL(file_permission); EXPORT_SYMBOL(unlock_rename); EXPORT_SYMBOL(vfs_create); EXPORT_SYMBOL(vfs_follow_link); diff --git a/include/linux/fs.h b/include/linux/fs.h index a8735e7e1b35..d04e55586a17 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1490,7 +1490,6 @@ extern void dentry_unhash(struct dentry *dentry); /* * VFS file helper functions. */ -extern int file_permission(struct file *, int); extern void inode_init_owner(struct inode *inode, const struct inode *dir, mode_t mode); /* diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2731d115d725..e1c72c0f512b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -3542,7 +3542,8 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft, } /* the process need read permission on control file */ - ret = file_permission(cfile, MAY_READ); + /* AV: shouldn't we check that it's been opened for read instead? */ + ret = inode_permission(cfile->f_path.dentry->d_inode, MAY_READ); if (ret < 0) goto fail; -- cgit v1.2.3 From f4d6ff89d8e54b68a4322388d26d518d6133fa4e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Jun 2011 13:14:21 -0400 Subject: move exec_permission() up to the rest of permission-related functions ... and convert the comment before it into linuxdoc form. Signed-off-by: Al Viro --- fs/namei.c | 72 +++++++++++++++++++++++++++++++++----------------------------- 1 file changed, 38 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 4ad2b781a65c..e04f15ae4b07 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -303,6 +303,44 @@ int inode_permission(struct inode *inode, int mask) return security_inode_permission(inode, mask); } +/** + * exec_permission - check for right to do lookups in a given directory + * @inode: inode to check permission on + * @flags: IPERM_FLAG_ flags. + * + * Short-cut version of inode_permission(), for calling on directories + * during pathname resolution. Combines parts of inode_permission() + * and generic_permission(), and tests ONLY for MAY_EXEC permission. + * + * If appropriate, check DAC only. If not appropriate, or + * short-cut DAC fails, then call ->permission() to do more + * complete permission check. + */ +static inline int exec_permission(struct inode *inode, unsigned int flags) +{ + int ret; + struct user_namespace *ns = inode_userns(inode); + + if (inode->i_op->permission) { + ret = inode->i_op->permission(inode, MAY_EXEC, flags); + if (likely(!ret)) + goto ok; + } else { + ret = acl_permission_check(inode, MAY_EXEC, flags, + inode->i_op->check_acl); + if (likely(!ret)) + goto ok; + if (ret != -EACCES) + return ret; + if (ns_capable(ns, CAP_DAC_OVERRIDE) || + ns_capable(ns, CAP_DAC_READ_SEARCH)) + goto ok; + } + return ret; +ok: + return security_inode_exec_permission(inode, flags); +} + /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. 
@@ -551,40 +589,6 @@ static int complete_walk(struct nameidata *nd) return status; } -/* - * Short-cut version of permission(), for calling on directories - * during pathname resolution. Combines parts of permission() - * and generic_permission(), and tests ONLY for MAY_EXEC permission. - * - * If appropriate, check DAC only. If not appropriate, or - * short-cut DAC fails, then call ->permission() to do more - * complete permission check. - */ -static inline int exec_permission(struct inode *inode, unsigned int flags) -{ - int ret; - struct user_namespace *ns = inode_userns(inode); - - if (inode->i_op->permission) { - ret = inode->i_op->permission(inode, MAY_EXEC, flags); - if (likely(!ret)) - goto ok; - } else { - ret = acl_permission_check(inode, MAY_EXEC, flags, - inode->i_op->check_acl); - if (likely(!ret)) - goto ok; - if (ret != -EACCES) - return ret; - if (ns_capable(ns, CAP_DAC_OVERRIDE) || - ns_capable(ns, CAP_DAC_READ_SEARCH)) - goto ok; - } - return ret; -ok: - return security_inode_exec_permission(inode, flags); -} - static __always_inline void set_root(struct nameidata *nd) { if (!nd->root.mnt) -- cgit v1.2.3 From 07b8ce1ee87d291ff564c02cf878fae973317a52 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 10:52:57 -0400 Subject: lockless get_write_access/deny_write_access new helpers: atomic_inc_unless_negative()/atomic_dec_unless_positive() Signed-off-by: Al Viro --- fs/namei.c | 46 ---------------------------------------------- include/linux/atomic.h | 26 ++++++++++++++++++++++++++ include/linux/fs.h | 29 ++++++++++++++++++++++++++--- 3 files changed, 52 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index e04f15ae4b07..d286cbc3f3e5 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -341,52 +341,6 @@ ok: return security_inode_exec_permission(inode, flags); } -/* - * get_write_access() gets write permission for a file. - * put_write_access() releases this write permission. - * This is used for regular files. - * We cannot support write (and maybe mmap read-write shared) accesses and - * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode - * can have the following values: - * 0: no writers, no VM_DENYWRITE mappings - * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist - * > 0: (i_writecount) users are writing to the file. - * - * Normally we operate on that counter with atomic_{inc,dec} and it's safe - * except for the cases where we don't hold i_writecount yet. Then we need to - * use {get,deny}_write_access() - these functions check the sign and refuse - * to do the change if sign is wrong. Exclusion between them is provided by - * the inode->i_lock spinlock. 
- */ - -int get_write_access(struct inode * inode) -{ - spin_lock(&inode->i_lock); - if (atomic_read(&inode->i_writecount) < 0) { - spin_unlock(&inode->i_lock); - return -ETXTBSY; - } - atomic_inc(&inode->i_writecount); - spin_unlock(&inode->i_lock); - - return 0; -} - -int deny_write_access(struct file * file) -{ - struct inode *inode = file->f_path.dentry->d_inode; - - spin_lock(&inode->i_lock); - if (atomic_read(&inode->i_writecount) > 0) { - spin_unlock(&inode->i_lock); - return -ETXTBSY; - } - atomic_dec(&inode->i_writecount); - spin_unlock(&inode->i_lock); - - return 0; -} - /** * path_get - get a reference to a path * @path: path to get the reference to diff --git a/include/linux/atomic.h b/include/linux/atomic.h index ee456c79b0e6..bc6615d4132b 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -34,6 +34,32 @@ static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) } #endif +#ifndef atomic_inc_unless_negative +static inline int atomic_inc_unless_negative(atomic_t *p) +{ + int v, v1; + for (v = 0; v >= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v + 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + +#ifndef atomic_dec_unless_positive +static inline int atomic_dec_unless_positive(atomic_t *p) +{ + int v, v1; + for (v = 0; v <= 0; v = v1) { + v1 = atomic_cmpxchg(p, v, v - 1); + if (likely(v1 == v)) + return 1; + } + return 0; +} +#endif + #ifndef CONFIG_ARCH_HAS_ATOMIC_OR static inline void atomic_or(int i, atomic_t *v) { diff --git a/include/linux/fs.h b/include/linux/fs.h index d04e55586a17..8c84ed930389 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -392,8 +392,8 @@ struct inodes_stat_t { #include #include #include +#include -#include #include struct export_operations; @@ -2195,8 +2195,31 @@ static inline bool execute_ok(struct inode *inode) return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode); } -extern int get_write_access(struct inode *); -extern int deny_write_access(struct file *); +/* + * get_write_access() gets write permission for a file. + * put_write_access() releases this write permission. + * This is used for regular files. + * We cannot support write (and maybe mmap read-write shared) accesses and + * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode + * can have the following values: + * 0: no writers, no VM_DENYWRITE mappings + * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist + * > 0: (i_writecount) users are writing to the file. + * + * Normally we operate on that counter with atomic_{inc,dec} and it's safe + * except for the cases where we don't hold i_writecount yet. Then we need to + * use {get,deny}_write_access() - these functions check the sign and refuse + * to do the change if sign is wrong. + */ +static inline int get_write_access(struct inode *inode) +{ + return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -ETXTBSY; +} +static inline int deny_write_access(struct file *file) +{ + struct inode *inode = file->f_path.dentry->d_inode; + return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY; +} static inline void put_write_access(struct inode * inode) { atomic_dec(&inode->i_writecount); -- cgit v1.2.3 From 178ea73521d64ba41d7aa5488fb9f549c6d4507d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 11:31:30 -0400 Subject: kill check_acl callback of generic_permission() its value depends only on inode and does not change; we might as well store it in ->i_op->check_acl and be done with that. 
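The shape of the conversion, in miniature (foo_* names are placeholders; foo_check_acl is assumed to have the int (*)(struct inode *, int, unsigned int) signature): instead of every call site passing the ACL callback to generic_permission(), the filesystem publishes it once in its inode_operations, where acl_permission_check() picks it up via inode->i_op->check_acl.

	/* previously: return generic_permission(inode, mask, flags, foo_check_acl); */
	static int foo_permission(struct inode *inode, int mask, unsigned int flags)
	{
		return generic_permission(inode, mask, flags);
	}

	static const struct inode_operations foo_file_iops = {
		.permission = foo_permission,
		.check_acl  = foo_check_acl,	/* found by acl_permission_check() */
	};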
Signed-off-by: Al Viro --- fs/afs/security.c | 2 +- fs/btrfs/inode.c | 7 ++++++- fs/ceph/inode.c | 2 +- fs/cifs/cifsfs.c | 2 +- fs/fuse/dir.c | 4 ++-- fs/gfs2/inode.c | 5 ++++- fs/hostfs/hostfs_kern.c | 2 +- fs/hpfs/namei.c | 2 +- fs/namei.c | 17 +++++++---------- fs/nfs/dir.c | 2 +- fs/nilfs2/inode.c | 2 +- fs/ocfs2/file.c | 4 +++- fs/ocfs2/namei.c | 1 + fs/proc/base.c | 2 +- fs/reiserfs/file.c | 1 + fs/reiserfs/namei.c | 4 +++- fs/reiserfs/xattr.c | 18 ++++++++---------- fs/sysfs/inode.c | 2 +- include/linux/fs.h | 3 +-- include/linux/reiserfs_xattr.h | 2 ++ 20 files changed, 47 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/afs/security.c b/fs/afs/security.c index f44b9d355377..745ee654165f 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -350,7 +350,7 @@ int afs_permission(struct inode *inode, int mask, unsigned int flags) } key_put(key); - ret = generic_permission(inode, mask, flags, NULL); + ret = generic_permission(inode, mask, flags); _leave(" = %d", ret); return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3601f0aebddf..f0bd87371566 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7339,7 +7339,7 @@ static int btrfs_permission(struct inode *inode, int mask, unsigned int flags) return -EROFS; if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) return -EACCES; - return generic_permission(inode, mask, flags, btrfs_check_acl); + return generic_permission(inode, mask, flags); } static const struct inode_operations btrfs_dir_inode_operations = { @@ -7359,10 +7359,12 @@ static const struct inode_operations btrfs_dir_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .permission = btrfs_permission, + .check_acl = btrfs_check_acl, }; static const struct inode_operations btrfs_dir_ro_inode_operations = { .lookup = btrfs_lookup, .permission = btrfs_permission, + .check_acl = btrfs_check_acl, }; static const struct file_operations btrfs_dir_file_operations = { @@ -7431,6 +7433,7 @@ static const struct inode_operations btrfs_file_inode_operations = { .removexattr = btrfs_removexattr, .permission = btrfs_permission, .fiemap = btrfs_fiemap, + .check_acl = btrfs_check_acl, }; static const struct inode_operations btrfs_special_inode_operations = { .getattr = btrfs_getattr, @@ -7440,6 +7443,7 @@ static const struct inode_operations btrfs_special_inode_operations = { .getxattr = btrfs_getxattr, .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, + .check_acl = btrfs_check_acl, }; static const struct inode_operations btrfs_symlink_inode_operations = { .readlink = generic_readlink, @@ -7451,6 +7455,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = { .getxattr = btrfs_getxattr, .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, + .check_acl = btrfs_check_acl, }; const struct dentry_operations btrfs_dentry_operations = { diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index d8858e96ab18..beb5d55d6fd2 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1805,7 +1805,7 @@ int ceph_permission(struct inode *inode, int mask, unsigned int flags) err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); if (!err) - err = generic_permission(inode, mask, flags, NULL); + err = generic_permission(inode, mask, flags); return err; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index bc4b12ca537b..b79804fa410f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -239,7 +239,7 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags) on the 
client (above and beyond ACL on servers) for servers which do not support setting and viewing mode bits, so allowing client to check permissions is useful */ - return generic_permission(inode, mask, flags, NULL); + return generic_permission(inode, mask, flags); } static struct kmem_cache *cifs_inode_cachep; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index d50160714595..0a2fcd860ad6 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1018,7 +1018,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) } if (fc->flags & FUSE_DEFAULT_PERMISSIONS) { - err = generic_permission(inode, mask, flags, NULL); + err = generic_permission(inode, mask, flags); /* If permission is denied, try to refresh file attributes. This is also needed, because the root @@ -1027,7 +1027,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) err = fuse_perm_getattr(inode, flags); if (!err) err = generic_permission(inode, mask, - flags, NULL); + flags); } /* Note: the opposite of the above test does not diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 03e0c529063e..d5f0f4ea25dc 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -1564,7 +1564,7 @@ int gfs2_permission(struct inode *inode, int mask, unsigned int flags) if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode)) error = -EACCES; else - error = generic_permission(inode, mask, flags, gfs2_check_acl); + error = generic_permission(inode, mask, flags); if (unlock) gfs2_glock_dq_uninit(&i_gh); @@ -1854,6 +1854,7 @@ const struct inode_operations gfs2_file_iops = { .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, .fiemap = gfs2_fiemap, + .check_acl = gfs2_check_acl, }; const struct inode_operations gfs2_dir_iops = { @@ -1874,6 +1875,7 @@ const struct inode_operations gfs2_dir_iops = { .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, .fiemap = gfs2_fiemap, + .check_acl = gfs2_check_acl, }; const struct inode_operations gfs2_symlink_iops = { @@ -1888,5 +1890,6 @@ const struct inode_operations gfs2_symlink_iops = { .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, .fiemap = gfs2_fiemap, + .check_acl = gfs2_check_acl, }; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 2638c834ed28..a98d0d1aef65 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -770,7 +770,7 @@ int hostfs_permission(struct inode *ino, int desired, unsigned int flags) err = access_file(name, r, w, x); __putname(name); if (!err) - err = generic_permission(ino, desired, flags, NULL); + err = generic_permission(ino, desired, flags); return err; } diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index acf95dab2aac..bd2ce7dd8df3 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -398,7 +398,7 @@ again: hpfs_unlock(dir->i_sb); return -ENOSPC; } - if (generic_permission(inode, MAY_WRITE, 0, NULL) || + if (generic_permission(inode, MAY_WRITE, 0) || !S_ISREG(inode->i_mode) || get_write_access(inode)) { d_rehash(dentry); diff --git a/fs/namei.c b/fs/namei.c index d286cbc3f3e5..c5af0f37e67d 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -176,9 +176,9 @@ EXPORT_SYMBOL(putname); /* * This does basic POSIX ACL permission checking */ -static int acl_permission_check(struct inode *inode, int mask, unsigned int flags, - int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) +static int acl_permission_check(struct inode *inode, int mask, unsigned int flags) { + int (*check_acl)(struct inode *inode, int mask, unsigned int flags); unsigned int mode = inode->i_mode; mask &= MAY_READ | 
MAY_WRITE | MAY_EXEC; @@ -189,6 +189,7 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag if (current_fsuid() == inode->i_uid) mode >>= 6; else { + check_acl = inode->i_op->check_acl; if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { int error = check_acl(inode, mask, flags); if (error != -EAGAIN) @@ -212,7 +213,6 @@ other_perms: * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) - * @check_acl: optional callback to check for Posix ACLs * @flags: IPERM_FLAG_ flags. * * Used to check for read/write/execute permissions on a file. @@ -224,15 +224,14 @@ other_perms: * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. */ -int generic_permission(struct inode *inode, int mask, unsigned int flags, - int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) +int generic_permission(struct inode *inode, int mask, unsigned int flags) { int ret; /* * Do the basic POSIX ACL permission checks. */ - ret = acl_permission_check(inode, mask, flags, check_acl); + ret = acl_permission_check(inode, mask, flags); if (ret != -EACCES) return ret; @@ -290,8 +289,7 @@ int inode_permission(struct inode *inode, int mask) if (inode->i_op->permission) retval = inode->i_op->permission(inode, mask, 0); else - retval = generic_permission(inode, mask, 0, - inode->i_op->check_acl); + retval = generic_permission(inode, mask, 0); if (retval) return retval; @@ -326,8 +324,7 @@ static inline int exec_permission(struct inode *inode, unsigned int flags) if (likely(!ret)) goto ok; } else { - ret = acl_permission_check(inode, MAY_EXEC, flags, - inode->i_op->check_acl); + ret = acl_permission_check(inode, MAY_EXEC, flags); if (likely(!ret)) goto ok; if (ret != -EACCES) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ededdbd0db38..0485dca34fb1 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2328,7 +2328,7 @@ out: out_notsup: res = nfs_revalidate_inode(NFS_SERVER(inode), inode); if (res == 0) - res = generic_permission(inode, mask, flags, NULL); + res = generic_permission(inode, mask, flags); goto out; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index b9b45fc2903e..650aa7755003 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -806,7 +806,7 @@ int nilfs_permission(struct inode *inode, int mask, unsigned int flags) root->cno != NILFS_CPTREE_CURRENT_CNO) return -EROFS; /* snapshot is not writable */ - return generic_permission(inode, mask, flags, NULL); + return generic_permission(inode, mask, flags); } int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index b1e35a392ca5..d058cb7e12d4 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1293,7 +1293,7 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags) goto out; } - ret = generic_permission(inode, mask, flags, ocfs2_check_acl); + ret = generic_permission(inode, mask, flags); ocfs2_inode_unlock(inode, 0); out: @@ -2593,12 +2593,14 @@ const struct inode_operations ocfs2_file_iops = { .listxattr = ocfs2_listxattr, .removexattr = generic_removexattr, .fiemap = ocfs2_fiemap, + .check_acl = ocfs2_check_acl, }; const struct inode_operations ocfs2_special_file_iops = { .setattr = ocfs2_setattr, .getattr = ocfs2_getattr, .permission = ocfs2_permission, + .check_acl = ocfs2_check_acl, }; /* diff --git a/fs/ocfs2/namei.c 
b/fs/ocfs2/namei.c index e5d738cd9cc0..33889dc52dd7 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -2498,4 +2498,5 @@ const struct inode_operations ocfs2_dir_iops = { .listxattr = ocfs2_listxattr, .removexattr = generic_removexattr, .fiemap = ocfs2_fiemap, + .check_acl = ocfs2_check_acl, }; diff --git a/fs/proc/base.c b/fs/proc/base.c index fc5bc2767692..8b8470113576 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2169,7 +2169,7 @@ static const struct file_operations proc_fd_operations = { */ static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) { - int rv = generic_permission(inode, mask, flags, NULL); + int rv = generic_permission(inode, mask, flags); if (rv == 0) return 0; if (task_pid(current) == proc_pid(inode)) diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 91f080cc76c8..bbf31003d308 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -312,4 +312,5 @@ const struct inode_operations reiserfs_file_inode_operations = { .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, + .check_acl = reiserfs_check_acl, }; diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 118662690cdf..551f1b79dbc4 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -1529,6 +1529,7 @@ const struct inode_operations reiserfs_dir_inode_operations = { .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, + .check_acl = reiserfs_check_acl, }; /* @@ -1545,6 +1546,7 @@ const struct inode_operations reiserfs_symlink_inode_operations = { .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, + .check_acl = reiserfs_check_acl, }; @@ -1558,5 +1560,5 @@ const struct inode_operations reiserfs_special_inode_operations = { .listxattr = reiserfs_listxattr, .removexattr = reiserfs_removexattr, .permission = reiserfs_permission, - + .check_acl = reiserfs_check_acl, }; diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index d78089690965..ddc5301d2986 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -868,11 +868,17 @@ out: return err; } -static int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags) +int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags) { struct posix_acl *acl; int error = -EAGAIN; /* do regular unix permission checks by default */ + /* + * Stat data v1 doesn't support ACLs. + */ + if (get_inode_sd_version(inode) == STAT_DATA_V1) + return -EAGAIN; + if (flags & IPERM_FLAG_RCU) return -ECHILD; @@ -961,15 +967,7 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) if (IS_PRIVATE(inode)) return 0; -#ifdef CONFIG_REISERFS_FS_XATTR - /* - * Stat data v1 doesn't support ACLs. 
- */ - if (get_inode_sd_version(inode) != STAT_DATA_V1) - return generic_permission(inode, mask, flags, - reiserfs_check_acl); -#endif - return generic_permission(inode, mask, flags, NULL); + return generic_permission(inode, mask, flags); } static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 0a12eb89cd32..a37165c64757 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -362,5 +362,5 @@ int sysfs_permission(struct inode *inode, int mask, unsigned int flags) sysfs_refresh_inode(sd, inode); mutex_unlock(&sysfs_mutex); - return generic_permission(inode, mask, flags, NULL); + return generic_permission(inode, mask, flags); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 8c84ed930389..0c15d5e459d5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2187,8 +2187,7 @@ extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *); extern int inode_permission(struct inode *, int); -extern int generic_permission(struct inode *, int, unsigned int, - int (*check_acl)(struct inode *, int, unsigned int)); +extern int generic_permission(struct inode *, int, unsigned int); static inline bool execute_ok(struct inode *inode) { diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 6deef5dc95fb..1a3ca8f80200 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -45,6 +45,7 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags); #ifdef CONFIG_REISERFS_FS_XATTR #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) +int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags); ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); int reiserfs_setxattr(struct dentry *dentry, const char *name, @@ -122,6 +123,7 @@ static inline void reiserfs_init_xattr_rwsem(struct inode *inode) #define reiserfs_setxattr NULL #define reiserfs_listxattr NULL #define reiserfs_removexattr NULL +#define reiserfs_check_acl NULL static inline void reiserfs_init_xattr_rwsem(struct inode *inode) { -- cgit v1.2.3 From 1fc0f78ca9f311c6277e2f1b7655bb4d43ceb311 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 18:59:02 -0400 Subject: ->permission() sanitizing: MAY_NOT_BLOCK Duplicate the flags argument into mask bitmap. 
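
A standalone sketch of the idea, not part of the patch (constants are illustrative; MAY_NOT_BLOCK mirrors the value added to linux/fs.h below, IPERM_FLAG_RCU here is just a stand-in bit): the rcu-walk hint is copied from the old flags word into the permission mask, so callees can test a single bitmap.

/*
 * Standalone sketch, not from the patch: duplicate the rcu-walk flag
 * into the mask, the way exec_permission() does in the diff below.
 */
#include <stdio.h>

#define MAY_EXEC        0x01
#define MAY_NOT_BLOCK   0x80   /* 128, as in the fs.h hunk below */
#define IPERM_FLAG_RCU  0x01   /* illustrative stand-in for the old flag */

static int build_mask(int mask, unsigned int flags)
{
	if (flags & IPERM_FLAG_RCU)	/* fold the hint into the bitmap */
		mask |= MAY_NOT_BLOCK;
	return mask;
}

int main(void)
{
	printf("rcu-walk mask: %#x\n", build_mask(MAY_EXEC, IPERM_FLAG_RCU));
	printf("ref-walk mask: %#x\n", build_mask(MAY_EXEC, 0));
	return 0;
}
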
Signed-off-by: Al Viro --- fs/namei.c | 7 +++++-- fs/proc/proc_sysctl.c | 2 +- include/linux/fs.h | 1 + 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index c5af0f37e67d..723a3fe4bc40 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -318,13 +318,16 @@ static inline int exec_permission(struct inode *inode, unsigned int flags) { int ret; struct user_namespace *ns = inode_userns(inode); + int mask = MAY_EXEC; + if (flags & IPERM_FLAG_RCU) + mask |= MAY_NOT_BLOCK; if (inode->i_op->permission) { - ret = inode->i_op->permission(inode, MAY_EXEC, flags); + ret = inode->i_op->permission(inode, mask, flags); if (likely(!ret)) goto ok; } else { - ret = acl_permission_check(inode, MAY_EXEC, flags); + ret = acl_permission_check(inode, mask, flags); if (likely(!ret)) goto ok; if (ret != -EACCES) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index d167de365a8d..349b22f434d8 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -316,7 +316,7 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags) if (!table) /* global root - r-xr-xr-x */ error = mask & MAY_WRITE ? -EACCES : 0; else /* Use the permissions on the sysctl table entry */ - error = sysctl_perm(head->root, table, mask); + error = sysctl_perm(head->root, table, mask & ~MAY_NOT_BLOCK); sysctl_head_finish(head); return error; diff --git a/include/linux/fs.h b/include/linux/fs.h index 0c15d5e459d5..60c1fe65bb2d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -63,6 +63,7 @@ struct inodes_stat_t { #define MAY_ACCESS 16 #define MAY_OPEN 32 #define MAY_CHDIR 64 +#define MAY_NOT_BLOCK 128 /* called from RCU mode, don't block */ /* * flags in file.f_mode. Note that FMODE_READ and FMODE_WRITE must correspond -- cgit v1.2.3 From 9c2c703929e4c41210cfa6e3f599514421bab8dc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 19:06:22 -0400 Subject: ->permission() sanitizing: pass MAY_NOT_BLOCK to ->check_acl() Signed-off-by: Al Viro --- fs/9p/acl.c | 2 +- fs/btrfs/acl.c | 3 +-- fs/ext2/acl.c | 2 +- fs/ext3/acl.c | 2 +- fs/ext4/acl.c | 2 +- fs/generic_acl.c | 2 +- fs/gfs2/acl.c | 2 +- fs/jffs2/acl.c | 2 +- fs/jfs/acl.c | 2 +- fs/namei.c | 4 ++-- fs/ocfs2/acl.c | 2 +- fs/reiserfs/xattr.c | 2 +- fs/xfs/linux-2.6/xfs_acl.c | 2 +- 13 files changed, 14 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/9p/acl.c b/fs/9p/acl.c index 535ab6eccb1a..94af68b092af 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -101,7 +101,7 @@ int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags) struct posix_acl *acl; struct v9fs_session_info *v9ses; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; v9ses = v9fs_inode2v9ses(inode); diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index f66fc9959733..a25a4a2e0df2 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -199,10 +199,9 @@ int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags) { int error = -EAGAIN; - if (flags & IPERM_FLAG_RCU) { + if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) error = -ECHILD; - } else { struct posix_acl *acl; acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index abea5a17c764..6b9442d1be52 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c @@ -236,7 +236,7 @@ ext2_check_acl(struct inode *inode, int mask, unsigned int flags) { struct posix_acl *acl; - if (flags & IPERM_FLAG_RCU) { + if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) 
return -ECHILD; return -EAGAIN; diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c index 9d021c0d472a..0a6940d6c30c 100644 --- a/fs/ext3/acl.c +++ b/fs/ext3/acl.c @@ -244,7 +244,7 @@ ext3_check_acl(struct inode *inode, int mask, unsigned int flags) { struct posix_acl *acl; - if (flags & IPERM_FLAG_RCU) { + if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) return -ECHILD; return -EAGAIN; diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 21eacd7b7d79..4f54252e439e 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -242,7 +242,7 @@ ext4_check_acl(struct inode *inode, int mask, unsigned int flags) { struct posix_acl *acl; - if (flags & IPERM_FLAG_RCU) { + if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) return -ECHILD; return -EAGAIN; diff --git a/fs/generic_acl.c b/fs/generic_acl.c index 8f26d1a58912..5976bb1fa4ca 100644 --- a/fs/generic_acl.c +++ b/fs/generic_acl.c @@ -192,7 +192,7 @@ generic_acl_chmod(struct inode *inode) int generic_check_acl(struct inode *inode, int mask, unsigned int flags) { - if (flags & IPERM_FLAG_RCU) { + if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) return -ECHILD; } else { diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index cbc07155b1a0..4d97352d39a1 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -80,7 +80,7 @@ int gfs2_check_acl(struct inode *inode, int mask, unsigned int flags) struct posix_acl *acl; int error; - if (flags & IPERM_FLAG_RCU) { + if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) return -ECHILD; return -EAGAIN; diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 828a0e1ea438..952afb59e6f1 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c @@ -264,7 +264,7 @@ int jffs2_check_acl(struct inode *inode, int mask, unsigned int flags) struct posix_acl *acl; int rc; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS); diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index e5de9422fa32..859ae5a92166 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -118,7 +118,7 @@ int jfs_check_acl(struct inode *inode, int mask, unsigned int flags) { struct posix_acl *acl; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; acl = jfs_get_acl(inode, ACL_TYPE_ACCESS); diff --git a/fs/namei.c b/fs/namei.c index 723a3fe4bc40..e0624e2f0bba 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -181,7 +181,7 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag int (*check_acl)(struct inode *inode, int mask, unsigned int flags); unsigned int mode = inode->i_mode; - mask &= MAY_READ | MAY_WRITE | MAY_EXEC; + mask &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK; if (current_user_ns() != inode_userns(inode)) goto other_perms; @@ -204,7 +204,7 @@ other_perms: /* * If the DACs are ok we don't need any capability check. 
*/ - if ((mask & ~mode) == 0) + if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) return 0; return -EACCES; } diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index e913ad130fdd..4b683ccc4506 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -297,7 +297,7 @@ int ocfs2_check_acl(struct inode *inode, int mask, unsigned int flags) struct posix_acl *acl; int ret = -EAGAIN; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; osb = OCFS2_SB(inode->i_sb); diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index ddc5301d2986..6747470ec103 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -879,7 +879,7 @@ int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags) if (get_inode_sd_version(inode) == STAT_DATA_V1) return -EAGAIN; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS); diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c index 39f4f809bb68..278e6736135a 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/linux-2.6/xfs_acl.c @@ -235,7 +235,7 @@ xfs_check_acl(struct inode *inode, int mask, unsigned int flags) if (!XFS_IFORK_Q(ip)) return -EAGAIN; - if (flags & IPERM_FLAG_RCU) { + if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) return -ECHILD; return -EAGAIN; -- cgit v1.2.3 From 7e40145eb111a5192e6d819f764db9d6828d1abb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 19:12:17 -0400 Subject: ->permission() sanitizing: don't pass flags to ->check_acl() not used in the instances anymore. Signed-off-by: Al Viro --- Documentation/filesystems/Locking | 2 +- Documentation/filesystems/vfs.txt | 2 +- fs/9p/acl.c | 2 +- fs/9p/acl.h | 2 +- fs/btrfs/acl.c | 2 +- fs/btrfs/ctree.h | 2 +- fs/ext2/acl.c | 2 +- fs/ext2/acl.h | 2 +- fs/ext3/acl.c | 2 +- fs/ext3/acl.h | 2 +- fs/ext4/acl.c | 2 +- fs/ext4/acl.h | 2 +- fs/generic_acl.c | 2 +- fs/gfs2/acl.c | 2 +- fs/gfs2/acl.h | 2 +- fs/jffs2/acl.c | 2 +- fs/jffs2/acl.h | 2 +- fs/jfs/acl.c | 2 +- fs/jfs/jfs_acl.h | 2 +- fs/namei.c | 10 +++++----- fs/ocfs2/acl.c | 2 +- fs/ocfs2/acl.h | 2 +- fs/reiserfs/xattr.c | 2 +- fs/xfs/linux-2.6/xfs_acl.c | 2 +- fs/xfs/xfs_acl.h | 2 +- include/linux/fs.h | 2 +- include/linux/generic_acl.h | 2 +- include/linux/reiserfs_xattr.h | 2 +- 28 files changed, 32 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 57d827d6071d..9b6ed7c9f34f 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -52,7 +52,7 @@ ata *); void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); int (*permission) (struct inode *, int, unsigned int); - int (*check_acl)(struct inode *, int, unsigned int); + int (*check_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 88b9f5519af9..8b4c8e04d879 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -334,7 +334,7 @@ struct inode_operations { void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); int (*permission) (struct inode *, int, unsigned int); - int (*check_acl)(struct inode *, int, unsigned int); + int (*check_acl)(struct 
inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); diff --git a/fs/9p/acl.c b/fs/9p/acl.c index 94af68b092af..e98f56d3787d 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -96,7 +96,7 @@ static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type) return acl; } -int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags) +int v9fs_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; struct v9fs_session_info *v9ses; diff --git a/fs/9p/acl.h b/fs/9p/acl.h index 7ef3ac9f6d95..59e18c2e8c7e 100644 --- a/fs/9p/acl.h +++ b/fs/9p/acl.h @@ -16,7 +16,7 @@ #ifdef CONFIG_9P_FS_POSIX_ACL extern int v9fs_get_acl(struct inode *, struct p9_fid *); -extern int v9fs_check_acl(struct inode *inode, int mask, unsigned int flags); +extern int v9fs_check_acl(struct inode *inode, int mask); extern int v9fs_acl_chmod(struct dentry *); extern int v9fs_set_create_acl(struct dentry *, struct posix_acl *, struct posix_acl *); diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index a25a4a2e0df2..9f62ab2a7282 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -195,7 +195,7 @@ out: return ret; } -int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags) +int btrfs_check_acl(struct inode *inode, int mask) { int error = -EAGAIN; diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3b859a3e6a0e..9864cec801ed 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2642,7 +2642,7 @@ do { \ /* acl.c */ #ifdef CONFIG_BTRFS_FS_POSIX_ACL -int btrfs_check_acl(struct inode *inode, int mask, unsigned int flags); +int btrfs_check_acl(struct inode *inode, int mask); #else #define btrfs_check_acl NULL #endif diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index 6b9442d1be52..bfe651f9ae16 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c @@ -232,7 +232,7 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) } int -ext2_check_acl(struct inode *inode, int mask, unsigned int flags) +ext2_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; diff --git a/fs/ext2/acl.h b/fs/ext2/acl.h index c939b7b12099..3ff6cbb9ac44 100644 --- a/fs/ext2/acl.h +++ b/fs/ext2/acl.h @@ -54,7 +54,7 @@ static inline int ext2_acl_count(size_t size) #ifdef CONFIG_EXT2_FS_POSIX_ACL /* acl.c */ -extern int ext2_check_acl (struct inode *, int, unsigned int); +extern int ext2_check_acl (struct inode *, int); extern int ext2_acl_chmod (struct inode *); extern int ext2_init_acl (struct inode *, struct inode *); diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c index 0a6940d6c30c..edfeb293d4cb 100644 --- a/fs/ext3/acl.c +++ b/fs/ext3/acl.c @@ -240,7 +240,7 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, } int -ext3_check_acl(struct inode *inode, int mask, unsigned int flags) +ext3_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; diff --git a/fs/ext3/acl.h b/fs/ext3/acl.h index 5faf8048e906..597334626de9 100644 --- a/fs/ext3/acl.h +++ b/fs/ext3/acl.h @@ -54,7 +54,7 @@ static inline int ext3_acl_count(size_t size) #ifdef CONFIG_EXT3_FS_POSIX_ACL /* acl.c */ -extern int ext3_check_acl (struct inode *, int, unsigned int); +extern int ext3_check_acl (struct inode *, int); extern int ext3_acl_chmod (struct inode *); extern int ext3_init_acl (handle_t *, struct inode *, struct inode *); diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 4f54252e439e..60d900fcc3db 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -238,7 +238,7 @@ 
ext4_set_acl(handle_t *handle, struct inode *inode, int type, } int -ext4_check_acl(struct inode *inode, int mask, unsigned int flags) +ext4_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; diff --git a/fs/ext4/acl.h b/fs/ext4/acl.h index dec821168fd4..9d843d5deac4 100644 --- a/fs/ext4/acl.h +++ b/fs/ext4/acl.h @@ -54,7 +54,7 @@ static inline int ext4_acl_count(size_t size) #ifdef CONFIG_EXT4_FS_POSIX_ACL /* acl.c */ -extern int ext4_check_acl(struct inode *, int, unsigned int); +extern int ext4_check_acl(struct inode *, int); extern int ext4_acl_chmod(struct inode *); extern int ext4_init_acl(handle_t *, struct inode *, struct inode *); diff --git a/fs/generic_acl.c b/fs/generic_acl.c index 5976bb1fa4ca..70e90b4974ce 100644 --- a/fs/generic_acl.c +++ b/fs/generic_acl.c @@ -190,7 +190,7 @@ generic_acl_chmod(struct inode *inode) } int -generic_check_acl(struct inode *inode, int mask, unsigned int flags) +generic_check_acl(struct inode *inode, int mask) { if (mask & MAY_NOT_BLOCK) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 4d97352d39a1..8ef1079f1665 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -75,7 +75,7 @@ static struct posix_acl *gfs2_acl_get(struct gfs2_inode *ip, int type) * Returns: errno */ -int gfs2_check_acl(struct inode *inode, int mask, unsigned int flags) +int gfs2_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; int error; diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h index a93907c8159b..b522b0cb39ea 100644 --- a/fs/gfs2/acl.h +++ b/fs/gfs2/acl.h @@ -16,7 +16,7 @@ #define GFS2_POSIX_ACL_DEFAULT "posix_acl_default" #define GFS2_ACL_MAX_ENTRIES 25 -extern int gfs2_check_acl(struct inode *inode, int mask, unsigned int); +extern int gfs2_check_acl(struct inode *inode, int mask); extern int gfs2_acl_create(struct gfs2_inode *dip, struct inode *inode); extern int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr); extern const struct xattr_handler gfs2_xattr_system_handler; diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 952afb59e6f1..3675b3cdee89 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c @@ -259,7 +259,7 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) return rc; } -int jffs2_check_acl(struct inode *inode, int mask, unsigned int flags) +int jffs2_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; int rc; diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h index 3119f59253d3..5e42de8d9541 100644 --- a/fs/jffs2/acl.h +++ b/fs/jffs2/acl.h @@ -26,7 +26,7 @@ struct jffs2_acl_header { #ifdef CONFIG_JFFS2_FS_POSIX_ACL -extern int jffs2_check_acl(struct inode *, int, unsigned int); +extern int jffs2_check_acl(struct inode *, int); extern int jffs2_acl_chmod(struct inode *); extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *); extern int jffs2_init_acl_post(struct inode *); diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 859ae5a92166..8a0a0666d5a6 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -114,7 +114,7 @@ out: return rc; } -int jfs_check_acl(struct inode *inode, int mask, unsigned int flags) +int jfs_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; diff --git a/fs/jfs/jfs_acl.h b/fs/jfs/jfs_acl.h index f9285c4900fa..54e07559878d 100644 --- a/fs/jfs/jfs_acl.h +++ b/fs/jfs/jfs_acl.h @@ -20,7 +20,7 @@ #ifdef CONFIG_JFS_POSIX_ACL -int jfs_check_acl(struct inode *, int, unsigned int flags); +int jfs_check_acl(struct inode *, int); int jfs_init_acl(tid_t, struct inode *, struct inode *); int 
jfs_acl_chmod(struct inode *inode); diff --git a/fs/namei.c b/fs/namei.c index e0624e2f0bba..560fd1dff1d0 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -176,9 +176,9 @@ EXPORT_SYMBOL(putname); /* * This does basic POSIX ACL permission checking */ -static int acl_permission_check(struct inode *inode, int mask, unsigned int flags) +static int acl_permission_check(struct inode *inode, int mask) { - int (*check_acl)(struct inode *inode, int mask, unsigned int flags); + int (*check_acl)(struct inode *inode, int mask); unsigned int mode = inode->i_mode; mask &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK; @@ -191,7 +191,7 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag else { check_acl = inode->i_op->check_acl; if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { - int error = check_acl(inode, mask, flags); + int error = check_acl(inode, mask); if (error != -EAGAIN) return error; } @@ -231,7 +231,7 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags) /* * Do the basic POSIX ACL permission checks. */ - ret = acl_permission_check(inode, mask, flags); + ret = acl_permission_check(inode, mask); if (ret != -EACCES) return ret; @@ -327,7 +327,7 @@ static inline int exec_permission(struct inode *inode, unsigned int flags) if (likely(!ret)) goto ok; } else { - ret = acl_permission_check(inode, mask, flags); + ret = acl_permission_check(inode, mask); if (likely(!ret)) goto ok; if (ret != -EACCES) diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 4b683ccc4506..1cee970eb55a 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -290,7 +290,7 @@ static int ocfs2_set_acl(handle_t *handle, return ret; } -int ocfs2_check_acl(struct inode *inode, int mask, unsigned int flags) +int ocfs2_check_acl(struct inode *inode, int mask) { struct ocfs2_super *osb; struct buffer_head *di_bh = NULL; diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h index 4fe7c9cf4bfb..5c5d31f05853 100644 --- a/fs/ocfs2/acl.h +++ b/fs/ocfs2/acl.h @@ -26,7 +26,7 @@ struct ocfs2_acl_entry { __le32 e_id; }; -extern int ocfs2_check_acl(struct inode *, int, unsigned int); +extern int ocfs2_check_acl(struct inode *, int); extern int ocfs2_acl_chmod(struct inode *); extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, struct buffer_head *, struct buffer_head *, diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 6747470ec103..6ee3c11aa8d9 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -868,7 +868,7 @@ out: return err; } -int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags) +int reiserfs_check_acl(struct inode *inode, int mask) { struct posix_acl *acl; int error = -EAGAIN; /* do regular unix permission checks by default */ diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c index 278e6736135a..a5dcd6a0f1b5 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/linux-2.6/xfs_acl.c @@ -219,7 +219,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl) } int -xfs_check_acl(struct inode *inode, int mask, unsigned int flags) +xfs_check_acl(struct inode *inode, int mask) { struct xfs_inode *ip; struct posix_acl *acl; diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h index 11dd72070cbb..0135e2a669d7 100644 --- a/fs/xfs/xfs_acl.h +++ b/fs/xfs/xfs_acl.h @@ -42,7 +42,7 @@ struct xfs_acl { #define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1) #ifdef CONFIG_XFS_POSIX_ACL -extern int xfs_check_acl(struct inode *inode, int mask, unsigned int flags); +extern int xfs_check_acl(struct inode *inode, int mask); 
extern struct posix_acl *xfs_get_acl(struct inode *inode, int type); extern int xfs_inherit_acl(struct inode *inode, struct posix_acl *default_acl); extern int xfs_acl_chmod(struct inode *inode); diff --git a/include/linux/fs.h b/include/linux/fs.h index 60c1fe65bb2d..f218b42718aa 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1579,7 +1579,7 @@ struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); void * (*follow_link) (struct dentry *, struct nameidata *); int (*permission) (struct inode *, int, unsigned int); - int (*check_acl)(struct inode *, int, unsigned int); + int (*check_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); void (*put_link) (struct dentry *, struct nameidata *, void *); diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h index 0437e377b555..574bea4013b6 100644 --- a/include/linux/generic_acl.h +++ b/include/linux/generic_acl.h @@ -10,6 +10,6 @@ extern const struct xattr_handler generic_acl_default_handler; int generic_acl_init(struct inode *, struct inode *); int generic_acl_chmod(struct inode *); -int generic_check_acl(struct inode *inode, int mask, unsigned int flags); +int generic_check_acl(struct inode *inode, int mask); #endif /* LINUX_GENERIC_ACL_H */ diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 1a3ca8f80200..424ba6416e7c 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -45,7 +45,7 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags); #ifdef CONFIG_REISERFS_FS_XATTR #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) -int reiserfs_check_acl(struct inode *inode, int mask, unsigned int flags); +int reiserfs_check_acl(struct inode *inode, int mask); ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); int reiserfs_setxattr(struct dentry *dentry, const char *name, -- cgit v1.2.3 From 2830ba7f34ebb27c4e5b8b6ef408cd6d74860890 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 19:16:29 -0400 Subject: ->permission() sanitizing: don't pass flags to generic_permission() redundant; all callers get it duplicated in mask & MAY_NOT_BLOCK and none of them removes that bit. 
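
A standalone sketch of the redundancy being removed, not part of the patch (constants are illustrative): because no caller clears MAY_NOT_BLOCK before calling in, the old flags word is fully determined by the mask, so the extra parameter carries no information and can be dropped.

/*
 * Standalone sketch, not from the patch: with the rcu-walk hint kept in
 * the mask, the flags word every caller passed alongside it can be
 * reconstructed from the mask alone.
 */
#include <assert.h>

#define MAY_READ        0x04   /* illustrative values */
#define MAY_NOT_BLOCK   0x80
#define IPERM_FLAG_RCU  0x01

static unsigned int flags_from_mask(int mask)
{
	return (mask & MAY_NOT_BLOCK) ? IPERM_FLAG_RCU : 0;
}

int main(void)
{
	/* rcu-walk caller: the hint already rides in the mask */
	assert(flags_from_mask(MAY_READ | MAY_NOT_BLOCK) == IPERM_FLAG_RCU);
	/* ref-walk caller */
	assert(flags_from_mask(MAY_READ) == 0);
	return 0;
}
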
Signed-off-by: Al Viro --- fs/afs/security.c | 2 +- fs/btrfs/inode.c | 2 +- fs/ceph/inode.c | 2 +- fs/cifs/cifsfs.c | 2 +- fs/fuse/dir.c | 5 ++--- fs/gfs2/inode.c | 2 +- fs/hostfs/hostfs_kern.c | 2 +- fs/hpfs/namei.c | 2 +- fs/namei.c | 4 ++-- fs/nfs/dir.c | 2 +- fs/nilfs2/inode.c | 2 +- fs/ocfs2/file.c | 2 +- fs/proc/base.c | 2 +- fs/reiserfs/xattr.c | 2 +- fs/sysfs/inode.c | 2 +- include/linux/fs.h | 2 +- 16 files changed, 18 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/afs/security.c b/fs/afs/security.c index 745ee654165f..ab6b3147f05f 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -350,7 +350,7 @@ int afs_permission(struct inode *inode, int mask, unsigned int flags) } key_put(key); - ret = generic_permission(inode, mask, flags); + ret = generic_permission(inode, mask); _leave(" = %d", ret); return ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f0bd87371566..b61b0477a8ac 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7339,7 +7339,7 @@ static int btrfs_permission(struct inode *inode, int mask, unsigned int flags) return -EROFS; if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) return -EACCES; - return generic_permission(inode, mask, flags); + return generic_permission(inode, mask); } static const struct inode_operations btrfs_dir_inode_operations = { diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index beb5d55d6fd2..9c169741b416 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1805,7 +1805,7 @@ int ceph_permission(struct inode *inode, int mask, unsigned int flags) err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); if (!err) - err = generic_permission(inode, mask, flags); + err = generic_permission(inode, mask); return err; } diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index b79804fa410f..b285b5bcf711 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -239,7 +239,7 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags) on the client (above and beyond ACL on servers) for servers which do not support setting and viewing mode bits, so allowing client to check permissions is useful */ - return generic_permission(inode, mask, flags); + return generic_permission(inode, mask); } static struct kmem_cache *cifs_inode_cachep; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 0a2fcd860ad6..0df56b6c26e2 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1018,7 +1018,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) } if (fc->flags & FUSE_DEFAULT_PERMISSIONS) { - err = generic_permission(inode, mask, flags); + err = generic_permission(inode, mask); /* If permission is denied, try to refresh file attributes. 
This is also needed, because the root @@ -1026,8 +1026,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) if (err == -EACCES && !refreshed) { err = fuse_perm_getattr(inode, flags); if (!err) - err = generic_permission(inode, mask, - flags); + err = generic_permission(inode, mask); } /* Note: the opposite of the above test does not diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index d5f0f4ea25dc..b776ec8f9c19 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -1564,7 +1564,7 @@ int gfs2_permission(struct inode *inode, int mask, unsigned int flags) if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode)) error = -EACCES; else - error = generic_permission(inode, mask, flags); + error = generic_permission(inode, mask); if (unlock) gfs2_glock_dq_uninit(&i_gh); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index a98d0d1aef65..b1bc31bde833 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -770,7 +770,7 @@ int hostfs_permission(struct inode *ino, int desired, unsigned int flags) err = access_file(name, r, w, x); __putname(name); if (!err) - err = generic_permission(ino, desired, flags); + err = generic_permission(ino, desired); return err; } diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index bd2ce7dd8df3..2df69e2f07cf 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c @@ -398,7 +398,7 @@ again: hpfs_unlock(dir->i_sb); return -ENOSPC; } - if (generic_permission(inode, MAY_WRITE, 0) || + if (generic_permission(inode, MAY_WRITE) || !S_ISREG(inode->i_mode) || get_write_access(inode)) { d_rehash(dentry); diff --git a/fs/namei.c b/fs/namei.c index 560fd1dff1d0..684e0f30cf4c 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -224,7 +224,7 @@ other_perms: * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. 
*/ -int generic_permission(struct inode *inode, int mask, unsigned int flags) +int generic_permission(struct inode *inode, int mask) { int ret; @@ -289,7 +289,7 @@ int inode_permission(struct inode *inode, int mask) if (inode->i_op->permission) retval = inode->i_op->permission(inode, mask, 0); else - retval = generic_permission(inode, mask, 0); + retval = generic_permission(inode, mask); if (retval) return retval; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 0485dca34fb1..16cf84b4afb9 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2328,7 +2328,7 @@ out: out_notsup: res = nfs_revalidate_inode(NFS_SERVER(inode), inode); if (res == 0) - res = generic_permission(inode, mask, flags); + res = generic_permission(inode, mask); goto out; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 650aa7755003..0df6de58bbcb 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -806,7 +806,7 @@ int nilfs_permission(struct inode *inode, int mask, unsigned int flags) root->cno != NILFS_CPTREE_CURRENT_CNO) return -EROFS; /* snapshot is not writable */ - return generic_permission(inode, mask, flags); + return generic_permission(inode, mask); } int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index d058cb7e12d4..ecb52b028964 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1293,7 +1293,7 @@ int ocfs2_permission(struct inode *inode, int mask, unsigned int flags) goto out; } - ret = generic_permission(inode, mask, flags); + ret = generic_permission(inode, mask); ocfs2_inode_unlock(inode, 0); out: diff --git a/fs/proc/base.c b/fs/proc/base.c index 8b8470113576..53a1a961b25f 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2169,7 +2169,7 @@ static const struct file_operations proc_fd_operations = { */ static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) { - int rv = generic_permission(inode, mask, flags); + int rv = generic_permission(inode, mask); if (rv == 0) return 0; if (task_pid(current) == proc_pid(inode)) diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 6ee3c11aa8d9..f17319613a85 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -967,7 +967,7 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) if (IS_PRIVATE(inode)) return 0; - return generic_permission(inode, mask, flags); + return generic_permission(inode, mask); } static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index a37165c64757..04c81e5ba6f7 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -362,5 +362,5 @@ int sysfs_permission(struct inode *inode, int mask, unsigned int flags) sysfs_refresh_inode(sd, inode); mutex_unlock(&sysfs_mutex); - return generic_permission(inode, mask, flags); + return generic_permission(inode, mask); } diff --git a/include/linux/fs.h b/include/linux/fs.h index f218b42718aa..a1689c13eb77 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2188,7 +2188,7 @@ extern sector_t bmap(struct inode *, sector_t); #endif extern int notify_change(struct dentry *, struct iattr *); extern int inode_permission(struct inode *, int); -extern int generic_permission(struct inode *, int, unsigned int); +extern int generic_permission(struct inode *, int); static inline bool execute_ok(struct inode *inode) { -- cgit v1.2.3 From 10556cb21a0d0b24d95f00ea6df16f599a3345b2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 19:28:19 -0400 Subject: ->permission() sanitizing: don't 
pass flags to ->permission() not used by the instances anymore. Signed-off-by: Al Viro --- Documentation/filesystems/vfs.txt | 4 ++-- fs/afs/internal.h | 2 +- fs/afs/security.c | 4 ++-- fs/bad_inode.c | 2 +- fs/btrfs/inode.c | 2 +- fs/ceph/inode.c | 4 ++-- fs/ceph/super.h | 2 +- fs/cifs/cifsfs.c | 2 +- fs/coda/coda_linux.h | 2 +- fs/coda/dir.c | 4 ++-- fs/coda/pioctl.c | 4 ++-- fs/ecryptfs/inode.c | 4 ++-- fs/fuse/dir.c | 14 +++++++------- fs/gfs2/file.c | 2 +- fs/gfs2/inode.c | 16 ++++++++-------- fs/gfs2/inode.h | 2 +- fs/hostfs/hostfs_kern.c | 4 ++-- fs/namei.c | 4 ++-- fs/nfs/dir.c | 4 ++-- fs/nilfs2/inode.c | 2 +- fs/nilfs2/nilfs.h | 2 +- fs/ocfs2/file.c | 4 ++-- fs/ocfs2/file.h | 2 +- fs/proc/base.c | 2 +- fs/proc/proc_sysctl.c | 2 +- fs/reiserfs/xattr.c | 2 +- fs/sysfs/inode.c | 4 ++-- fs/sysfs/sysfs.h | 2 +- include/linux/fs.h | 2 +- include/linux/nfs_fs.h | 2 +- include/linux/reiserfs_xattr.h | 2 +- 31 files changed, 55 insertions(+), 55 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 8b4c8e04d879..d56151fe77dc 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -333,7 +333,7 @@ struct inode_operations { void * (*follow_link) (struct dentry *, struct nameidata *); void (*put_link) (struct dentry *, struct nameidata *, void *); void (*truncate) (struct inode *); - int (*permission) (struct inode *, int, unsigned int); + int (*permission) (struct inode *, int); int (*check_acl)(struct inode *, int); int (*setattr) (struct dentry *, struct iattr *); int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); @@ -423,7 +423,7 @@ otherwise noted. permission: called by the VFS to check for access rights on a POSIX-like filesystem. - May be called in rcu-walk mode (flags & IPERM_FLAG_RCU). If in rcu-walk + May be called in rcu-walk mode (mask & MAY_NOT_BLOCK). If in rcu-walk mode, the filesystem must check the permission without blocking or storing to the inode. 
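
A standalone sketch of the convention that documentation hunk describes and that the instances below follow, not part of the patch (example_fs_permission is a made-up name; the constant is illustrative): an instance that cannot check without blocking bails out with -ECHILD so the VFS retries in ref-walk mode.

/*
 * Standalone sketch, not from the patch: the pattern repeated by the
 * ->permission() instances in the diffs below.
 */
#include <errno.h>
#include <stdio.h>

#define MAY_NOT_BLOCK 0x80

static int example_fs_permission(int mask)
{
	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;	/* would need locks or I/O; retry in ref-walk */
	/* blocking checks (cluster locks, server round-trips, ...) go here */
	return 0;
}

int main(void)
{
	printf("rcu-walk: %d, ref-walk: %d\n",
	       example_fs_permission(MAY_NOT_BLOCK), example_fs_permission(0));
	return 0;
}
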
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 5a9b6843bac1..f396d337b817 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -627,7 +627,7 @@ extern void afs_clear_permits(struct afs_vnode *); extern void afs_cache_permit(struct afs_vnode *, struct key *, long); extern void afs_zap_permits(struct rcu_head *); extern struct key *afs_request_key(struct afs_cell *); -extern int afs_permission(struct inode *, int, unsigned int); +extern int afs_permission(struct inode *, int); /* * server.c diff --git a/fs/afs/security.c b/fs/afs/security.c index ab6b3147f05f..8d010422dc89 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -285,14 +285,14 @@ static int afs_check_permit(struct afs_vnode *vnode, struct key *key, * - AFS ACLs are attached to directories only, and a file is controlled by its * parent directory's ACL */ -int afs_permission(struct inode *inode, int mask, unsigned int flags) +int afs_permission(struct inode *inode, int mask) { struct afs_vnode *vnode = AFS_FS_I(inode); afs_access_t uninitialized_var(access); struct key *key; int ret; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; _enter("{{%x:%u},%lx},%x,", diff --git a/fs/bad_inode.c b/fs/bad_inode.c index bfcb18feb1df..f024d8aaddef 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -229,7 +229,7 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer, return -EIO; } -static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags) +static int bad_inode_permission(struct inode *inode, int mask) { return -EIO; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b61b0477a8ac..cb170ca51c64 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7331,7 +7331,7 @@ static int btrfs_set_page_dirty(struct page *page) return __set_page_dirty_nobuffers(page); } -static int btrfs_permission(struct inode *inode, int mask, unsigned int flags) +static int btrfs_permission(struct inode *inode, int mask) { struct btrfs_root *root = BTRFS_I(inode)->root; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 9c169741b416..dfb2831d8d85 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1795,11 +1795,11 @@ int ceph_do_getattr(struct inode *inode, int mask) * Check inode permissions. We verify we have a valid value for * the AUTH cap, then call the generic handler. 
*/ -int ceph_permission(struct inode *inode, int mask, unsigned int flags) +int ceph_permission(struct inode *inode, int mask) { int err; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index f5cabefa98dc..56c41ef47cad 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -692,7 +692,7 @@ extern void ceph_queue_invalidate(struct inode *inode); extern void ceph_queue_writeback(struct inode *inode); extern int ceph_do_getattr(struct inode *inode, int mask); -extern int ceph_permission(struct inode *inode, int mask, unsigned int flags); +extern int ceph_permission(struct inode *inode, int mask); extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index b285b5bcf711..cbbb55e781d3 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -224,7 +224,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -static int cifs_permission(struct inode *inode, int mask, unsigned int flags) +static int cifs_permission(struct inode *inode, int mask) { struct cifs_sb_info *cifs_sb; diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h index 9b0c5323890b..44e17e9c21ae 100644 --- a/fs/coda/coda_linux.h +++ b/fs/coda/coda_linux.h @@ -39,7 +39,7 @@ extern const struct file_operations coda_ioctl_operations; /* operations shared over more than one file */ int coda_open(struct inode *i, struct file *f); int coda_release(struct inode *i, struct file *f); -int coda_permission(struct inode *inode, int mask, unsigned int flags); +int coda_permission(struct inode *inode, int mask); int coda_revalidate_inode(struct dentry *); int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); int coda_setattr(struct dentry *, struct iattr *); diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 2b8dae4d121e..cd5532398c15 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -132,11 +132,11 @@ exit: } -int coda_permission(struct inode *inode, int mask, unsigned int flags) +int coda_permission(struct inode *inode, int mask) { int error; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; mask &= MAY_READ | MAY_WRITE | MAY_EXEC; diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index cb140ef293e4..ee0981f1375b 100644 --- a/fs/coda/pioctl.c +++ b/fs/coda/pioctl.c @@ -24,7 +24,7 @@ #include "coda_linux.h" /* pioctl ops */ -static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags); +static int coda_ioctl_permission(struct inode *inode, int mask); static long coda_pioctl(struct file *filp, unsigned int cmd, unsigned long user_data); @@ -41,7 +41,7 @@ const struct file_operations coda_ioctl_operations = { }; /* the coda pioctl inode ops */ -static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) +static int coda_ioctl_permission(struct inode *inode, int mask) { return (mask & MAY_EXEC) ? 
-EACCES : 0; } diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 7349ade17de6..bec75f8e91ac 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -942,9 +942,9 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) } static int -ecryptfs_permission(struct inode *inode, int mask, unsigned int flags) +ecryptfs_permission(struct inode *inode, int mask) { - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; return inode_permission(ecryptfs_inode_to_lower(inode), mask); } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 0df56b6c26e2..e2b14001cea5 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -971,9 +971,9 @@ static int fuse_access(struct inode *inode, int mask) return err; } -static int fuse_perm_getattr(struct inode *inode, int flags) +static int fuse_perm_getattr(struct inode *inode, int mask) { - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; return fuse_do_getattr(inode, NULL, NULL); @@ -992,7 +992,7 @@ static int fuse_perm_getattr(struct inode *inode, int flags) * access request is sent. Execute permission is still checked * locally based on file mode. */ -static int fuse_permission(struct inode *inode, int mask, unsigned int flags) +static int fuse_permission(struct inode *inode, int mask) { struct fuse_conn *fc = get_fuse_conn(inode); bool refreshed = false; @@ -1011,7 +1011,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) if (fi->i_time < get_jiffies_64()) { refreshed = true; - err = fuse_perm_getattr(inode, flags); + err = fuse_perm_getattr(inode, mask); if (err) return err; } @@ -1024,7 +1024,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) attributes. This is also needed, because the root node will at first have no permissions */ if (err == -EACCES && !refreshed) { - err = fuse_perm_getattr(inode, flags); + err = fuse_perm_getattr(inode, mask); if (!err) err = generic_permission(inode, mask); } @@ -1034,7 +1034,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) noticed immediately, only after the attribute timeout has expired */ } else if (mask & (MAY_ACCESS | MAY_CHDIR)) { - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; err = fuse_access(inode, mask); @@ -1043,7 +1043,7 @@ static int fuse_permission(struct inode *inode, int mask, unsigned int flags) if (refreshed) return -EACCES; - err = fuse_perm_getattr(inode, flags); + err = fuse_perm_getattr(inode, mask); if (!err && !(inode->i_mode & S_IXUGO)) return -EACCES; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index a9f5cbe45cd9..89c39e53760d 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -243,7 +243,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) !capable(CAP_LINUX_IMMUTABLE)) goto out; if (!IS_IMMUTABLE(inode)) { - error = gfs2_permission(inode, MAY_WRITE, 0); + error = gfs2_permission(inode, MAY_WRITE); if (error) goto out; } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index b776ec8f9c19..b1090d66a6fd 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -307,7 +307,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, } if (!is_root) { - error = gfs2_permission(dir, MAY_EXEC, 0); + error = gfs2_permission(dir, MAY_EXEC); if (error) goto out; } @@ -337,7 +337,7 @@ static int create_ok(struct gfs2_inode *dip, const struct qstr *name, { int error; - error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0); + error = gfs2_permission(&dip->i_inode, MAY_WRITE | 
MAY_EXEC); if (error) return error; @@ -857,7 +857,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink == 0) goto out_gunlock; - error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC, 0); + error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC); if (error) goto out_gunlock; @@ -990,7 +990,7 @@ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, if (IS_APPEND(&dip->i_inode)) return -EPERM; - error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, 0); + error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC); if (error) return error; @@ -1336,7 +1336,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, } } } else { - error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC, 0); + error = gfs2_permission(ndir, MAY_WRITE | MAY_EXEC); if (error) goto out_gunlock; @@ -1371,7 +1371,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, /* Check out the dir to be renamed */ if (dir_rename) { - error = gfs2_permission(odentry->d_inode, MAY_WRITE, 0); + error = gfs2_permission(odentry->d_inode, MAY_WRITE); if (error) goto out_gunlock; } @@ -1543,7 +1543,7 @@ static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p) * Returns: errno */ -int gfs2_permission(struct inode *inode, int mask, unsigned int flags) +int gfs2_permission(struct inode *inode, int mask) { struct gfs2_inode *ip; struct gfs2_holder i_gh; @@ -1553,7 +1553,7 @@ int gfs2_permission(struct inode *inode, int mask, unsigned int flags) ip = GFS2_I(inode); if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) { - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); if (error) diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 31606076f701..8d90e0c07672 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -108,7 +108,7 @@ extern int gfs2_inode_refresh(struct gfs2_inode *ip); extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, int is_root); -extern int gfs2_permission(struct inode *inode, int mask, unsigned int flags); +extern int gfs2_permission(struct inode *inode, int mask); extern int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index b1bc31bde833..6e449c599b9d 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -748,12 +748,12 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from, return err; } -int hostfs_permission(struct inode *ino, int desired, unsigned int flags) +int hostfs_permission(struct inode *ino, int desired) { char *name; int r = 0, w = 0, x = 0, err; - if (flags & IPERM_FLAG_RCU) + if (desired & MAY_NOT_BLOCK) return -ECHILD; if (desired & MAY_READ) r = 1; diff --git a/fs/namei.c b/fs/namei.c index 684e0f30cf4c..c5c382620a86 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -287,7 +287,7 @@ int inode_permission(struct inode *inode, int mask) } if (inode->i_op->permission) - retval = inode->i_op->permission(inode, mask, 0); + retval = inode->i_op->permission(inode, mask); else retval = generic_permission(inode, mask); @@ -323,7 +323,7 @@ static inline int exec_permission(struct inode *inode, unsigned int flags) mask |= MAY_NOT_BLOCK; if (inode->i_op->permission) { - ret = inode->i_op->permission(inode, mask, flags); + ret = 
inode->i_op->permission(inode, mask); if (likely(!ret)) goto ok; } else { diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 16cf84b4afb9..a86acd6d0cb4 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2273,12 +2273,12 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags) return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags)); } -int nfs_permission(struct inode *inode, int mask, unsigned int flags) +int nfs_permission(struct inode *inode, int mask) { struct rpc_cred *cred; int res = 0; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; nfs_inc_stats(inode, NFSIOS_VFSACCESS); diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 0df6de58bbcb..be8664c6a825 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -799,7 +799,7 @@ out_err: return err; } -int nilfs_permission(struct inode *inode, int mask, unsigned int flags) +int nilfs_permission(struct inode *inode, int mask) { struct nilfs_root *root = NILFS_I(inode)->i_root; if ((mask & MAY_WRITE) && root && diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index f02b9ad43a21..6fb7511c0328 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -264,7 +264,7 @@ extern void nilfs_update_inode(struct inode *, struct buffer_head *); extern void nilfs_truncate(struct inode *); extern void nilfs_evict_inode(struct inode *); extern int nilfs_setattr(struct dentry *, struct iattr *); -int nilfs_permission(struct inode *inode, int mask, unsigned int flags); +int nilfs_permission(struct inode *inode, int mask); int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh); extern int nilfs_inode_dirty(struct inode *); int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index ecb52b028964..1406c37a5722 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1279,11 +1279,11 @@ bail: return err; } -int ocfs2_permission(struct inode *inode, int mask, unsigned int flags) +int ocfs2_permission(struct inode *inode, int mask) { int ret; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; ret = ocfs2_inode_lock(inode, NULL, 0); diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index f5afbbef6703..97bf761c9e7c 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -61,7 +61,7 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); -int ocfs2_permission(struct inode *inode, int mask, unsigned int flags); +int ocfs2_permission(struct inode *inode, int mask); int ocfs2_should_update_atime(struct inode *inode, struct vfsmount *vfsmnt); diff --git a/fs/proc/base.c b/fs/proc/base.c index 53a1a961b25f..be1ff932033b 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2167,7 +2167,7 @@ static const struct file_operations proc_fd_operations = { * /proc/pid/fd needs a special permission handler so that a process can still * access /proc/self/fd after it has executed a setuid(). 
*/ -static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) +static int proc_fd_permission(struct inode *inode, int mask) { int rv = generic_permission(inode, mask); if (rv == 0) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 349b22f434d8..1a77dbef226f 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -294,7 +294,7 @@ out: return ret; } -static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags) +static int proc_sys_permission(struct inode *inode, int mask) { /* * sysctl entries that are not writeable, diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index f17319613a85..4ea2ab41fdee 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -958,7 +958,7 @@ static int xattr_mount_check(struct super_block *s) return 0; } -int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) +int reiserfs_permission(struct inode *inode, int mask) { /* * We don't do permission checks on the internal objects. diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 04c81e5ba6f7..e3f091a81c72 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -349,11 +349,11 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const cha return -ENOENT; } -int sysfs_permission(struct inode *inode, int mask, unsigned int flags) +int sysfs_permission(struct inode *inode, int mask) { struct sysfs_dirent *sd; - if (flags & IPERM_FLAG_RCU) + if (mask & MAY_NOT_BLOCK) return -ECHILD; sd = inode->i_private; diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index 2ed2404f3113..845ab3ad229d 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -201,7 +201,7 @@ static inline void __sysfs_put(struct sysfs_dirent *sd) struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd); void sysfs_evict_inode(struct inode *inode); int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr); -int sysfs_permission(struct inode *inode, int mask, unsigned int flags); +int sysfs_permission(struct inode *inode, int mask); int sysfs_setattr(struct dentry *dentry, struct iattr *iattr); int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value, diff --git a/include/linux/fs.h b/include/linux/fs.h index a1689c13eb77..1fdefe3995fd 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1578,7 +1578,7 @@ struct file_operations { struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); void * (*follow_link) (struct dentry *, struct nameidata *); - int (*permission) (struct inode *, int, unsigned int); + int (*permission) (struct inode *, int); int (*check_acl)(struct inode *, int); int (*readlink) (struct dentry *, char __user *,int); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 1b93b9c60e55..86014811a0fb 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -360,7 +360,7 @@ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); -extern int nfs_permission(struct inode *, int, unsigned int); +extern int nfs_permission(struct inode *, int); extern int nfs_open(struct inode *, struct file *); extern int nfs_release(struct inode *, struct file *); extern int 
nfs_attribute_timeout(struct inode *inode); diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 424ba6416e7c..57958c0e1d38 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -41,7 +41,7 @@ int reiserfs_xattr_init(struct super_block *sb, int mount_flags); int reiserfs_lookup_privroot(struct super_block *sb); int reiserfs_delete_xattrs(struct inode *inode); int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); -int reiserfs_permission(struct inode *inode, int mask, unsigned int flags); +int reiserfs_permission(struct inode *inode, int mask); #ifdef CONFIG_REISERFS_FS_XATTR #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) -- cgit v1.2.3 From eecdd358b467405a084d400d5ec571bbdbfe97a3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 19:48:41 -0400 Subject: ->permission() sanitizing: don't pass flags to exec_permission() pass mask instead; kill security_inode_exec_permission() since we can use security_inode_permission() instead. Signed-off-by: Al Viro --- fs/namei.c | 17 +++++++---------- include/linux/security.h | 7 ------- security/security.c | 10 ---------- 3 files changed, 7 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index c5c382620a86..21eba95368f2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -304,7 +304,7 @@ int inode_permission(struct inode *inode, int mask) /** * exec_permission - check for right to do lookups in a given directory * @inode: inode to check permission on - * @flags: IPERM_FLAG_ flags. + * @mask: MAY_EXEC and possibly MAY_NOT_BLOCK flags. * * Short-cut version of inode_permission(), for calling on directories * during pathname resolution. Combines parts of inode_permission() @@ -314,13 +314,10 @@ int inode_permission(struct inode *inode, int mask) * short-cut DAC fails, then call ->permission() to do more * complete permission check. 
*/ -static inline int exec_permission(struct inode *inode, unsigned int flags) +static inline int exec_permission(struct inode *inode, int mask) { int ret; struct user_namespace *ns = inode_userns(inode); - int mask = MAY_EXEC; - if (flags & IPERM_FLAG_RCU) - mask |= MAY_NOT_BLOCK; if (inode->i_op->permission) { ret = inode->i_op->permission(inode, mask); @@ -338,7 +335,7 @@ static inline int exec_permission(struct inode *inode, unsigned int flags) } return ret; ok: - return security_inode_exec_permission(inode, flags); + return security_inode_permission(inode, mask); } /** @@ -1214,13 +1211,13 @@ retry: static inline int may_lookup(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { - int err = exec_permission(nd->inode, IPERM_FLAG_RCU); + int err = exec_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; if (unlazy_walk(nd, NULL)) return -ECHILD; } - return exec_permission(nd->inode, 0); + return exec_permission(nd->inode, MAY_EXEC); } static inline int handle_dots(struct nameidata *nd, int type) @@ -1495,7 +1492,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (!S_ISDIR(dentry->d_inode->i_mode)) goto fput_fail; - retval = exec_permission(dentry->d_inode, 0); + retval = exec_permission(dentry->d_inode, MAY_EXEC); if (retval) goto fput_fail; } @@ -1652,7 +1649,7 @@ static struct dentry *__lookup_hash(struct qstr *name, struct dentry *dentry; int err; - err = exec_permission(inode, 0); + err = exec_permission(inode, MAY_EXEC); if (err) return ERR_PTR(err); diff --git a/include/linux/security.h b/include/linux/security.h index ca02f1716736..ebd2a53a3d07 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1720,7 +1720,6 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); int security_inode_permission(struct inode *inode, int mask); -int security_inode_exec_permission(struct inode *inode, unsigned int flags); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); int security_inode_setxattr(struct dentry *dentry, const char *name, @@ -2113,12 +2112,6 @@ static inline int security_inode_permission(struct inode *inode, int mask) return 0; } -static inline int security_inode_exec_permission(struct inode *inode, - unsigned int flags) -{ - return 0; -} - static inline int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { diff --git a/security/security.c b/security/security.c index db3b750da353..0e4fccfef12c 100644 --- a/security/security.c +++ b/security/security.c @@ -521,16 +521,6 @@ int security_inode_permission(struct inode *inode, int mask) return security_ops->inode_permission(inode, mask); } -int security_inode_exec_permission(struct inode *inode, unsigned int flags) -{ - int mask = MAY_EXEC; - if (unlikely(IS_PRIVATE(inode))) - return 0; - if (flags) - mask |= MAY_NOT_BLOCK; - return security_ops->inode_permission(inode, mask); -} - int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { if (unlikely(IS_PRIVATE(dentry->d_inode))) -- cgit v1.2.3 From d594e7ec4da6c1a527dea4965eee37cd785792b8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 19:55:42 -0400 Subject: massage generic_permission() to treat directories on a separate path Signed-off-by: Al Viro --- fs/namei.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 
deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 21eba95368f2..758bae739305 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -235,12 +235,21 @@ int generic_permission(struct inode *inode, int mask) if (ret != -EACCES) return ret; + if (S_ISDIR(inode->i_mode)) { + /* DACs are overridable for directories */ + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) + return 0; + if (!(mask & MAY_WRITE)) + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) + return 0; + return -EACCES; + } /* * Read/write DACs are always overridable. - * Executable DACs are overridable for all directories and - * for non-directories that have least one exec bit set. + * Executable DACs are overridable when there is + * at least one exec bit set. */ - if (!(mask & MAY_EXEC) || execute_ok(inode)) + if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) return 0; @@ -248,7 +257,7 @@ int generic_permission(struct inode *inode, int mask) * Searching includes executable on directories, else just read. */ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) + if (mask == MAY_READ) if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) return 0; -- cgit v1.2.3 From 4ad5abb3d01a2c10854969b00982fadb130784a6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 19:57:03 -0400 Subject: no reason to keep exec_permission() separate now cache footprint alone makes it a bad idea... Signed-off-by: Al Viro --- fs/namei.c | 45 ++++----------------------------------------- 1 file changed, 4 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 758bae739305..b3c1a975c834 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -310,43 +310,6 @@ int inode_permission(struct inode *inode, int mask) return security_inode_permission(inode, mask); } -/** - * exec_permission - check for right to do lookups in a given directory - * @inode: inode to check permission on - * @mask: MAY_EXEC and possibly MAY_NOT_BLOCK flags. - * - * Short-cut version of inode_permission(), for calling on directories - * during pathname resolution. Combines parts of inode_permission() - * and generic_permission(), and tests ONLY for MAY_EXEC permission. - * - * If appropriate, check DAC only. If not appropriate, or - * short-cut DAC fails, then call ->permission() to do more - * complete permission check. 
- */ -static inline int exec_permission(struct inode *inode, int mask) -{ - int ret; - struct user_namespace *ns = inode_userns(inode); - - if (inode->i_op->permission) { - ret = inode->i_op->permission(inode, mask); - if (likely(!ret)) - goto ok; - } else { - ret = acl_permission_check(inode, mask); - if (likely(!ret)) - goto ok; - if (ret != -EACCES) - return ret; - if (ns_capable(ns, CAP_DAC_OVERRIDE) || - ns_capable(ns, CAP_DAC_READ_SEARCH)) - goto ok; - } - return ret; -ok: - return security_inode_permission(inode, mask); -} - /** * path_get - get a reference to a path * @path: path to get the reference to @@ -1220,13 +1183,13 @@ retry: static inline int may_lookup(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { - int err = exec_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); + int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; if (unlazy_walk(nd, NULL)) return -ECHILD; } - return exec_permission(nd->inode, MAY_EXEC); + return inode_permission(nd->inode, MAY_EXEC); } static inline int handle_dots(struct nameidata *nd, int type) @@ -1501,7 +1464,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, if (!S_ISDIR(dentry->d_inode->i_mode)) goto fput_fail; - retval = exec_permission(dentry->d_inode, MAY_EXEC); + retval = inode_permission(dentry->d_inode, MAY_EXEC); if (retval) goto fput_fail; } @@ -1658,7 +1621,7 @@ static struct dentry *__lookup_hash(struct qstr *name, struct dentry *dentry; int err; - err = exec_permission(inode, MAY_EXEC); + err = inode_permission(inode, MAY_EXEC); if (err) return ERR_PTR(err); -- cgit v1.2.3 From d2d9e9fbc2f8f492dae373482da61d34475c53c1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Jun 2011 10:55:26 -0400 Subject: merge do_revalidate() into its only caller Signed-off-by: Al Viro --- fs/namei.c | 42 ++++++++++++++++++------------------------ 1 file changed, 18 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index b3c1a975c834..42ccb97cc260 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -436,28 +436,6 @@ static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) return dentry->d_op->d_revalidate(dentry, nd); } -static struct dentry * -do_revalidate(struct dentry *dentry, struct nameidata *nd) -{ - int status = d_revalidate(dentry, nd); - if (unlikely(status <= 0)) { - /* - * The dentry failed validation. - * If d_revalidate returned 0 attempt to invalidate - * the dentry otherwise d_revalidate is asking us - * to return a fail status. - */ - if (status < 0) { - dput(dentry); - dentry = ERR_PTR(status); - } else if (!d_invalidate(dentry)) { - dput(dentry); - dentry = NULL; - } - } - return dentry; -} - /** * complete_walk - successful completion of path walk * @nd: pointer nameidata @@ -1642,8 +1620,24 @@ static struct dentry *__lookup_hash(struct qstr *name, return dentry; } - if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) - dentry = do_revalidate(dentry, nd); + if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE)) { + int status = d_revalidate(dentry, nd); + if (unlikely(status <= 0)) { + /* + * The dentry failed validation. + * If d_revalidate returned 0 attempt to invalidate + * the dentry otherwise d_revalidate is asking us + * to return a fail status. 
+ */ + if (status < 0) { + dput(dentry); + return ERR_PTR(status); + } else if (!d_invalidate(dentry)) { + dput(dentry); + dentry = NULL; + } + } + } if (!dentry) dentry = d_alloc_and_lookup(base, name, nd); -- cgit v1.2.3 From beefebf1aa611242e022e71bae87034f415d3314 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Jun 2011 01:01:59 -0400 Subject: ecryptfs_inode_permission() doesn't need to bail out on RCU ... now that inode_permission() can take MAY_NOT_BLOCK and handle it properly. Signed-off-by: Al Viro --- fs/ecryptfs/inode.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index bec75f8e91ac..799e01055f22 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -944,8 +944,6 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length) static int ecryptfs_permission(struct inode *inode, int mask) { - if (mask & MAY_NOT_BLOCK) - return -ECHILD; return inode_permission(ecryptfs_inode_to_lower(inode), mask); } -- cgit v1.2.3 From 7c97c200e2c5aa8b1067bebb99df0a7c2e12ebf3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Jun 2011 08:51:28 -0400 Subject: cifs: fix the type of cifs_demultiplex_thread() ... and get rid of a bogus typecast, while we are at it; it's not just that we want a function returning int and not void, but cast to pointer to function taking void * and returning void would be (void (*)(void *)) and not (void *)(void *), TYVM... Signed-off-by: Al Viro --- fs/cifs/connect.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ccc1afa0bf3b..e66297bad412 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -320,9 +320,10 @@ requeue_echo: } static int -cifs_demultiplex_thread(struct TCP_Server_Info *server) +cifs_demultiplex_thread(void *p) { int length; + struct TCP_Server_Info *server = p; unsigned int pdu_length, total_read; struct smb_hdr *smb_buffer = NULL; struct smb_hdr *bigbuf = NULL; @@ -1791,7 +1792,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) * this will succeed. No need for try_module_get(). */ __module_get(THIS_MODULE); - tcp_ses->tsk = kthread_run((void *)(void *)cifs_demultiplex_thread, + tcp_ses->tsk = kthread_run(cifs_demultiplex_thread, tcp_ses, "cifsd"); if (IS_ERR(tcp_ses->tsk)) { rc = PTR_ERR(tcp_ses->tsk); -- cgit v1.2.3 From 643168c2dc0d04cb884ae7bcd1ee19d9ecdff6a8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Jun 2011 18:20:23 -0400 Subject: nfs4_closedata doesn't need to mess with struct path instead of path_get()/path_put(), we can just use nfs_sb_{,de}active() to pin the superblock down. 
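The point of that change is to stop pinning a struct path (dentry plus vfsmount) just to keep the filesystem alive while the asynchronous CLOSE is in flight, and to hold a superblock-level active reference instead. A rough standalone sketch of that lifetime pattern follows; every name in it (sb_model, closedata_model, the *_active/_deactive helpers) is invented for illustration and is not the real NFS API, whose nfs_sb_active()/nfs_sb_deactive() do considerably more than bump a counter.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel objects; not the real NFS types. */
struct sb_model        { int s_active; };
struct closedata_model { struct sb_model *sb; };

static void sb_model_active(struct sb_model *sb)   { sb->s_active++; } /* ~ nfs_sb_active()   */
static void sb_model_deactive(struct sb_model *sb) { sb->s_active--; } /* ~ nfs_sb_deactive() */

static struct closedata_model *closedata_alloc(struct sb_model *sb)
{
        struct closedata_model *c = malloc(sizeof(*c));
        if (!c)
                return NULL;
        c->sb = sb;
        sb_model_active(sb);      /* replaces path_get() on the open file's path */
        return c;
}

static void closedata_free(struct closedata_model *c)
{
        struct sb_model *sb = c->sb;
        free(c);
        sb_model_deactive(sb);    /* replaces path_put() in nfs4_free_closedata() */
}

int main(void)
{
        struct sb_model sb = { 1 };
        struct closedata_model *c = closedata_alloc(&sb);

        if (!c)
                return 1;
        printf("pinned:   s_active = %d\n", sb.s_active);  /* 2 */
        closedata_free(c);
        printf("released: s_active = %d\n", sb.s_active);  /* 1 */
        return 0;
}
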
Signed-off-by: Al Viro --- fs/nfs/nfs4_fs.h | 10 +++++----- fs/nfs/nfs4proc.c | 21 ++++++++++----------- fs/nfs/nfs4state.c | 12 ++++++------ 3 files changed, 21 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index c4a69833dd0d..b788f2eb1ba0 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -238,7 +238,7 @@ extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); -extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); +extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, struct nfs4_fs_locations *fs_locations, struct page *page); @@ -341,8 +341,8 @@ extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struc extern void nfs4_put_state_owner(struct nfs4_state_owner *); extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); extern void nfs4_put_open_state(struct nfs4_state *); -extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); -extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); +extern void nfs4_close_state(struct nfs4_state *, fmode_t); +extern void nfs4_close_sync(struct nfs4_state *, fmode_t); extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); extern void nfs4_schedule_lease_recovery(struct nfs_client *); extern void nfs4_schedule_state_manager(struct nfs_client *); @@ -373,8 +373,8 @@ extern struct svc_version nfs4_callback_version4; #else -#define nfs4_close_state(a, b, c) do { } while (0) -#define nfs4_close_sync(a, b, c) do { } while (0) +#define nfs4_close_state(a, b) do { } while (0) +#define nfs4_close_sync(a, b) do { } while (0) #endif /* CONFIG_NFS_V4 */ #endif /* __LINUX_FS_NFS_NFS4_FS.H */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 5879b23e0c99..e99d674149ae 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1154,7 +1154,7 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod newstate = nfs4_opendata_to_nfs4_state(opendata); if (IS_ERR(newstate)) return PTR_ERR(newstate); - nfs4_close_state(&opendata->path, newstate, fmode); + nfs4_close_state(newstate, fmode); *res = newstate; return 0; } @@ -1352,7 +1352,7 @@ static void nfs4_open_confirm_release(void *calldata) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) - nfs4_close_state(&data->path, state, data->o_arg.fmode); + nfs4_close_state(state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } @@ -1497,7 +1497,7 @@ static void nfs4_open_release(void *calldata) goto out_free; state = nfs4_opendata_to_nfs4_state(data); if (!IS_ERR(state)) - nfs4_close_state(&data->path, state, data->o_arg.fmode); + nfs4_close_state(state, data->o_arg.fmode); out_free: nfs4_opendata_put(data); } @@ -1873,7 +1873,6 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, } struct nfs4_closedata { - struct path path; struct inode *inode; struct nfs4_state *state; struct nfs_closeargs arg; @@ -1888,13 +1887,14 @@ static void nfs4_free_closedata(void *data) { struct nfs4_closedata *calldata = data; 
struct nfs4_state_owner *sp = calldata->state->owner; + struct super_block *sb = calldata->state->inode->i_sb; if (calldata->roc) pnfs_roc_release(calldata->state->inode); nfs4_put_open_state(calldata->state); nfs_free_seqid(calldata->arg.seqid); nfs4_put_state_owner(sp); - path_put(&calldata->path); + nfs_sb_deactive(sb); kfree(calldata); } @@ -2014,7 +2014,7 @@ static const struct rpc_call_ops nfs4_close_ops = { * * NOTE: Caller must be holding the sp->so_owner semaphore! */ -int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) +int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_closedata *calldata; @@ -2050,8 +2050,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i calldata->res.seqid = calldata->arg.seqid; calldata->res.server = server; calldata->roc = roc; - path_get(path); - calldata->path = *path; + nfs_sb_active(calldata->inode->i_sb); msg.rpc_argp = &calldata->arg; msg.rpc_resp = &calldata->res; @@ -2092,9 +2091,9 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) if (ctx->state == NULL) return; if (is_sync) - nfs4_close_sync(&ctx->path, ctx->state, ctx->mode); + nfs4_close_sync(ctx->state, ctx->mode); else - nfs4_close_state(&ctx->path, ctx->state, ctx->mode); + nfs4_close_state(ctx->state, ctx->mode); } static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) @@ -2642,7 +2641,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, if (ctx != NULL) ctx->state = state; else - nfs4_close_sync(path, state, fmode); + nfs4_close_sync(state, fmode); out: return status; } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e97dd219f84f..7acfe8843626 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -641,7 +641,7 @@ void nfs4_put_open_state(struct nfs4_state *state) /* * Close the current file. 
*/ -static void __nfs4_close(struct path *path, struct nfs4_state *state, +static void __nfs4_close(struct nfs4_state *state, fmode_t fmode, gfp_t gfp_mask, int wait) { struct nfs4_state_owner *owner = state->owner; @@ -685,18 +685,18 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, } else { bool roc = pnfs_roc(state->inode); - nfs4_do_close(path, state, gfp_mask, wait, roc); + nfs4_do_close(state, gfp_mask, wait, roc); } } -void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode) +void nfs4_close_state(struct nfs4_state *state, fmode_t fmode) { - __nfs4_close(path, state, fmode, GFP_NOFS, 0); + __nfs4_close(state, fmode, GFP_NOFS, 0); } -void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode) +void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode) { - __nfs4_close(path, state, fmode, GFP_KERNEL, 1); + __nfs4_close(state, fmode, GFP_KERNEL, 1); } /* -- cgit v1.2.3 From 82a2c1b77ada3b81911deeb871719ec5ff77f83c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Jun 2011 18:30:55 -0400 Subject: nfs4_opendata doesn't need struct path either Signed-off-by: Al Viro --- fs/nfs/nfs4proc.c | 45 ++++++++++++++++++++++----------------------- 1 file changed, 22 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e99d674149ae..aef22fa0a1c0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -763,8 +763,8 @@ struct nfs4_opendata { struct nfs_open_confirmres c_res; struct nfs_fattr f_attr; struct nfs_fattr dir_attr; - struct path path; struct dentry *dir; + struct dentry *dentry; struct nfs4_state_owner *owner; struct nfs4_state *state; struct iattr attrs; @@ -786,12 +786,12 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p) nfs_fattr_init(&p->dir_attr); } -static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, +static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, struct nfs4_state_owner *sp, fmode_t fmode, int flags, const struct iattr *attrs, gfp_t gfp_mask) { - struct dentry *parent = dget_parent(path->dentry); + struct dentry *parent = dget_parent(dentry); struct inode *dir = parent->d_inode; struct nfs_server *server = NFS_SERVER(dir); struct nfs4_opendata *p; @@ -802,8 +802,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); if (p->o_arg.seqid == NULL) goto err_free; - path_get(path); - p->path = *path; + nfs_sb_active(dentry->d_sb); + p->dentry = dget(dentry); p->dir = parent; p->owner = sp; atomic_inc(&sp->so_count); @@ -812,7 +812,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct path *path, p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); p->o_arg.clientid = server->nfs_client->cl_clientid; p->o_arg.id = sp->so_owner_id.id; - p->o_arg.name = &p->path.dentry->d_name; + p->o_arg.name = &dentry->d_name; p->o_arg.server = server; p->o_arg.bitmask = server->attr_bitmask; p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; @@ -842,13 +842,15 @@ static void nfs4_opendata_free(struct kref *kref) { struct nfs4_opendata *p = container_of(kref, struct nfs4_opendata, kref); + struct super_block *sb = p->dentry->d_sb; nfs_free_seqid(p->o_arg.seqid); if (p->state != NULL) nfs4_put_open_state(p->state); nfs4_put_state_owner(p->owner); dput(p->dir); - path_put(&p->path); + dput(p->dentry); + nfs_sb_deactive(sb); kfree(p); } @@ -1130,7 +1132,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context { struct nfs4_opendata 
*opendata; - opendata = nfs4_opendata_alloc(&ctx->path, state->owner, 0, 0, NULL, GFP_NOFS); + opendata = nfs4_opendata_alloc(ctx->path.dentry, state->owner, 0, 0, NULL, GFP_NOFS); if (opendata == NULL) return ERR_PTR(-ENOMEM); opendata->state = state; @@ -1706,7 +1708,7 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct /* * Returns a referenced nfs4_state */ -static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) +static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) { struct nfs4_state_owner *sp; struct nfs4_state *state = NULL; @@ -1723,15 +1725,15 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in status = nfs4_recover_expired_lease(server); if (status != 0) goto err_put_state_owner; - if (path->dentry->d_inode != NULL) - nfs4_return_incompatible_delegation(path->dentry->d_inode, fmode); + if (dentry->d_inode != NULL) + nfs4_return_incompatible_delegation(dentry->d_inode, fmode); status = -ENOMEM; - opendata = nfs4_opendata_alloc(path, sp, fmode, flags, sattr, GFP_KERNEL); + opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL); if (opendata == NULL) goto err_put_state_owner; - if (path->dentry->d_inode != NULL) - opendata->state = nfs4_get_open_state(path->dentry->d_inode, sp); + if (dentry->d_inode != NULL) + opendata->state = nfs4_get_open_state(dentry->d_inode, sp); status = _nfs4_proc_open(opendata); if (status != 0) @@ -1769,14 +1771,14 @@ out_err: } -static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) +static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) { struct nfs4_exception exception = { }; struct nfs4_state *res; int status; do { - status = _nfs4_do_open(dir, path, fmode, flags, sattr, cred, &res); + status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res); if (status == 0) break; /* NOTE: BAD_SEQID means the server and client disagree about the @@ -2079,7 +2081,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags struct nfs4_state *state; /* Protect against concurrent sillydeletes */ - state = nfs4_do_open(dir, &ctx->path, ctx->mode, open_flags, attr, ctx->cred); + state = nfs4_do_open(dir, ctx->path.dentry, ctx->mode, open_flags, attr, ctx->cred); if (IS_ERR(state)) return ERR_CAST(state); ctx->state = state; @@ -2615,10 +2617,7 @@ static int nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, int flags, struct nfs_open_context *ctx) { - struct path my_path = { - .dentry = dentry, - }; - struct path *path = &my_path; + struct dentry *de = dentry; struct nfs4_state *state; struct rpc_cred *cred = NULL; fmode_t fmode = 0; @@ -2626,11 +2625,11 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, if (ctx != NULL) { cred = ctx->cred; - path = &ctx->path; + de = ctx->path.dentry; fmode = ctx->mode; } sattr->ia_mode &= ~current_umask(); - state = nfs4_do_open(dir, path, fmode, flags, sattr, cred); + state = nfs4_do_open(dir, de, fmode, flags, sattr, cred); d_drop(dentry); if (IS_ERR(state)) { status = PTR_ERR(state); -- cgit v1.2.3 From 3d4ff43d895c50319af45eb4bf04a4618eccdf76 Mon Sep 17 
00:00:00 2001 From: Al Viro Date: Wed, 22 Jun 2011 18:40:12 -0400 Subject: nfs_open_context doesn't need struct path either just dentry, please... Signed-off-by: Al Viro --- fs/nfs/dir.c | 14 +++++--------- fs/nfs/direct.c | 4 ++-- fs/nfs/inode.c | 20 +++++++++++--------- fs/nfs/nfs4proc.c | 10 +++++----- fs/nfs/pagelist.c | 4 ++-- fs/nfs/read.c | 8 ++++---- fs/nfs/write.c | 22 +++++++++++----------- include/linux/nfs_fs.h | 4 ++-- 8 files changed, 42 insertions(+), 44 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a86acd6d0cb4..a0693f3f11c0 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1345,10 +1345,6 @@ static int is_atomic_open(struct nameidata *nd) static struct nfs_open_context *nameidata_to_nfs_open_context(struct dentry *dentry, struct nameidata *nd) { - struct path path = { - .mnt = nd->path.mnt, - .dentry = dentry, - }; struct nfs_open_context *ctx; struct rpc_cred *cred; fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); @@ -1356,7 +1352,7 @@ static struct nfs_open_context *nameidata_to_nfs_open_context(struct dentry *den cred = rpc_lookup_cred(); if (IS_ERR(cred)) return ERR_CAST(cred); - ctx = alloc_nfs_open_context(&path, cred, fmode); + ctx = alloc_nfs_open_context(dentry, cred, fmode); put_rpccred(cred); if (ctx == NULL) return ERR_PTR(-ENOMEM); @@ -1376,13 +1372,13 @@ static int nfs_intent_set_file(struct nameidata *nd, struct nfs_open_context *ct /* If the open_intent is for execute, we have an extra check to make */ if (ctx->mode & FMODE_EXEC) { - ret = nfs_may_open(ctx->path.dentry->d_inode, + ret = nfs_may_open(ctx->dentry->d_inode, ctx->cred, nd->intent.open.flags); if (ret < 0) goto out; } - filp = lookup_instantiate_filp(nd, ctx->path.dentry, do_open); + filp = lookup_instantiate_filp(nd, ctx->dentry, do_open); if (IS_ERR(filp)) ret = PTR_ERR(filp); else @@ -1463,8 +1459,8 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry res = d_add_unique(dentry, inode); nfs_unblock_sillyrename(dentry->d_parent); if (res != NULL) { - dput(ctx->path.dentry); - ctx->path.dentry = dget(res); + dput(ctx->dentry); + ctx->dentry = dget(res); dentry = res; } err = nfs_intent_set_file(nd, ctx); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 8eea25366717..b35d25b98da6 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -284,7 +284,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, loff_t pos) { struct nfs_open_context *ctx = dreq->ctx; - struct inode *inode = ctx->path.dentry->d_inode; + struct inode *inode = ctx->dentry->d_inode; unsigned long user_addr = (unsigned long)iov->iov_base; size_t count = iov->iov_len; size_t rsize = NFS_SERVER(inode)->rsize; @@ -715,7 +715,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, loff_t pos, int sync) { struct nfs_open_context *ctx = dreq->ctx; - struct inode *inode = ctx->path.dentry->d_inode; + struct inode *inode = ctx->dentry->d_inode; unsigned long user_addr = (unsigned long)iov->iov_base; size_t count = iov->iov_len; struct rpc_task *task; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 6f4850deb272..fe1203797b2b 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -567,7 +567,7 @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx) { struct nfs_lock_context *res, *new = NULL; - struct inode *inode = ctx->path.dentry->d_inode; + struct inode *inode = ctx->dentry->d_inode; 
spin_lock(&inode->i_lock); res = __nfs_find_lock_context(ctx); @@ -594,7 +594,7 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx) void nfs_put_lock_context(struct nfs_lock_context *l_ctx) { struct nfs_open_context *ctx = l_ctx->open_context; - struct inode *inode = ctx->path.dentry->d_inode; + struct inode *inode = ctx->dentry->d_inode; if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock)) return; @@ -620,7 +620,7 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync) return; if (!is_sync) return; - inode = ctx->path.dentry->d_inode; + inode = ctx->dentry->d_inode; if (!list_empty(&NFS_I(inode)->open_files)) return; server = NFS_SERVER(inode); @@ -629,14 +629,14 @@ void nfs_close_context(struct nfs_open_context *ctx, int is_sync) nfs_revalidate_inode(server, inode); } -struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred, fmode_t f_mode) +struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode) { struct nfs_open_context *ctx; ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (ctx != NULL) { - ctx->path = *path; - path_get(&ctx->path); + nfs_sb_active(dentry->d_sb); + ctx->dentry = dget(dentry); ctx->cred = get_rpccred(cred); ctx->state = NULL; ctx->mode = f_mode; @@ -658,7 +658,8 @@ struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx) static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync) { - struct inode *inode = ctx->path.dentry->d_inode; + struct inode *inode = ctx->dentry->d_inode; + struct super_block *sb = ctx->dentry->d_sb; if (!list_empty(&ctx->list)) { if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock)) @@ -671,7 +672,8 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync) NFS_PROTO(inode)->close_context(ctx, is_sync); if (ctx->cred != NULL) put_rpccred(ctx->cred); - path_put(&ctx->path); + dput(ctx->dentry); + nfs_sb_deactive(sb); kfree(ctx); } @@ -741,7 +743,7 @@ int nfs_open(struct inode *inode, struct file *filp) cred = rpc_lookup_cred(); if (IS_ERR(cred)) return PTR_ERR(cred); - ctx = alloc_nfs_open_context(&filp->f_path, cred, filp->f_mode); + ctx = alloc_nfs_open_context(filp->f_path.dentry, cred, filp->f_mode); put_rpccred(cred); if (ctx == NULL) return -ENOMEM; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index aef22fa0a1c0..26bece8f3083 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1132,7 +1132,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context { struct nfs4_opendata *opendata; - opendata = nfs4_opendata_alloc(ctx->path.dentry, state->owner, 0, 0, NULL, GFP_NOFS); + opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS); if (opendata == NULL) return ERR_PTR(-ENOMEM); opendata->state = state; @@ -1650,7 +1650,7 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s return PTR_ERR(opendata); ret = nfs4_open_recover(opendata, state); if (ret == -ESTALE) - d_drop(ctx->path.dentry); + d_drop(ctx->dentry); nfs4_opendata_put(opendata); return ret; } @@ -2081,7 +2081,7 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags struct nfs4_state *state; /* Protect against concurrent sillydeletes */ - state = nfs4_do_open(dir, ctx->path.dentry, ctx->mode, open_flags, attr, ctx->cred); + state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred); if (IS_ERR(state)) return ERR_CAST(state); ctx->state = state; @@ 
-2625,7 +2625,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, if (ctx != NULL) { cred = ctx->cred; - de = ctx->path.dentry; + de = ctx->dentry; fmode = ctx->mode; } sattr->ia_mode &= ~current_umask(); @@ -4292,7 +4292,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) memcpy(data->lsp->ls_stateid.data, data->res.stateid.data, sizeof(data->lsp->ls_stateid.data)); data->lsp->ls_flags |= NFS_LOCK_INITIALIZED; - renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp); + renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); } out: dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 009855716286..18449f43c568 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -114,7 +114,7 @@ int nfs_set_page_tag_locked(struct nfs_page *req) if (!nfs_lock_request_dontget(req)) return 0; if (test_bit(PG_MAPPED, &req->wb_flags)) - radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); + radix_tree_tag_set(&NFS_I(req->wb_context->dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); return 1; } @@ -124,7 +124,7 @@ int nfs_set_page_tag_locked(struct nfs_page *req) void nfs_clear_page_tag_locked(struct nfs_page *req) { if (test_bit(PG_MAPPED, &req->wb_flags)) { - struct inode *inode = req->wb_context->path.dentry->d_inode; + struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 20a7f952e244..a68679f538fc 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -144,7 +144,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, static void nfs_readpage_release(struct nfs_page *req) { - struct inode *d_inode = req->wb_context->path.dentry->d_inode; + struct inode *d_inode = req->wb_context->dentry->d_inode; if (PageUptodate(req->wb_page)) nfs_readpage_to_fscache(d_inode, req->wb_page, 0); @@ -152,8 +152,8 @@ static void nfs_readpage_release(struct nfs_page *req) unlock_page(req->wb_page); dprintk("NFS: read done (%s/%Ld %d@%Ld)\n", - req->wb_context->path.dentry->d_inode->i_sb->s_id, - (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), + req->wb_context->dentry->d_inode->i_sb->s_id, + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); nfs_release_request(req); @@ -207,7 +207,7 @@ static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data, unsigned int count, unsigned int offset, struct pnfs_layout_segment *lseg) { - struct inode *inode = req->wb_context->path.dentry->d_inode; + struct inode *inode = req->wb_context->dentry->d_inode; data->req = req; data->inode = inode; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 727168059684..08579312c57b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -409,7 +409,7 @@ out: */ static void nfs_inode_remove_request(struct nfs_page *req) { - struct inode *inode = req->wb_context->path.dentry->d_inode; + struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = NFS_I(inode); BUG_ON (!NFS_WBACK_BUSY(req)); @@ -438,7 +438,7 @@ nfs_mark_request_dirty(struct nfs_page *req) static void nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) { - struct inode *inode = req->wb_context->path.dentry->d_inode; + struct inode *inode = req->wb_context->dentry->d_inode; struct nfs_inode *nfsi = 
NFS_I(inode); spin_lock(&inode->i_lock); @@ -852,13 +852,13 @@ static int nfs_write_rpcsetup(struct nfs_page *req, struct pnfs_layout_segment *lseg, int how) { - struct inode *inode = req->wb_context->path.dentry->d_inode; + struct inode *inode = req->wb_context->dentry->d_inode; /* Set up the RPC argument and reply structs * NB: take care not to mess about with data->commit et al. */ data->req = req; - data->inode = inode = req->wb_context->path.dentry->d_inode; + data->inode = inode = req->wb_context->dentry->d_inode; data->cred = req->wb_context->cred; data->lseg = get_lseg(lseg); @@ -1053,9 +1053,9 @@ static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata) dprintk("NFS: %5u write(%s/%lld %d@%lld)", task->tk_pid, - data->req->wb_context->path.dentry->d_inode->i_sb->s_id, + data->req->wb_context->dentry->d_inode->i_sb->s_id, (long long) - NFS_FILEID(data->req->wb_context->path.dentry->d_inode), + NFS_FILEID(data->req->wb_context->dentry->d_inode), data->req->wb_bytes, (long long)req_offset(data->req)); nfs_writeback_done(task, data); @@ -1148,8 +1148,8 @@ static void nfs_writeback_release_full(void *calldata) dprintk("NFS: %5u write (%s/%lld %d@%lld)", data->task.tk_pid, - req->wb_context->path.dentry->d_inode->i_sb->s_id, - (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), + req->wb_context->dentry->d_inode->i_sb->s_id, + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); @@ -1347,7 +1347,7 @@ void nfs_init_commit(struct nfs_write_data *data, struct pnfs_layout_segment *lseg) { struct nfs_page *first = nfs_list_entry(head->next); - struct inode *inode = first->wb_context->path.dentry->d_inode; + struct inode *inode = first->wb_context->dentry->d_inode; /* Set up the RPC argument and reply structs * NB: take care not to mess about with data->commit et al. 
*/ @@ -1435,8 +1435,8 @@ void nfs_commit_release_pages(struct nfs_write_data *data) nfs_clear_request_commit(req); dprintk("NFS: commit (%s/%lld %d@%lld)", - req->wb_context->path.dentry->d_inode->i_sb->s_id, - (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), + req->wb_context->dentry->d_sb->s_id, + (long long)NFS_FILEID(req->wb_context->dentry->d_inode), req->wb_bytes, (long long)req_offset(req)); if (status < 0) { diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 86014811a0fb..8b579beb6358 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -85,7 +85,7 @@ struct nfs_lock_context { struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context; - struct path path; + struct dentry *dentry; struct rpc_cred *cred; struct nfs4_state *state; fmode_t mode; @@ -372,7 +372,7 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); -extern struct nfs_open_context *alloc_nfs_open_context(struct path *path, struct rpc_cred *cred, fmode_t f_mode); +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred, fmode_t f_mode); extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx); extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); -- cgit v1.2.3 From 511415980ace0ceecd71088dbe1c7ce4ca7c79fe Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Jun 2011 18:47:28 -0400 Subject: nameidata_to_nfs_open_context() doesn't need nameidata, actually... 
just open flags; switched to passing just those and renamed to create_nfs_open_context() Signed-off-by: Al Viro --- fs/nfs/dir.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index a0693f3f11c0..cc613c354bb8 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1343,11 +1343,11 @@ static int is_atomic_open(struct nameidata *nd) return 1; } -static struct nfs_open_context *nameidata_to_nfs_open_context(struct dentry *dentry, struct nameidata *nd) +static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags) { struct nfs_open_context *ctx; struct rpc_cred *cred; - fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); + fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); cred = rpc_lookup_cred(); if (IS_ERR(cred)) @@ -1416,12 +1416,13 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry goto out; } - ctx = nameidata_to_nfs_open_context(dentry, nd); + open_flags = nd->intent.open.flags; + + ctx = create_nfs_open_context(dentry, open_flags); res = ERR_CAST(ctx); if (IS_ERR(ctx)) goto out; - open_flags = nd->intent.open.flags; if (nd->flags & LOOKUP_CREATE) { attr.ia_mode = nd->intent.open.create_mode; attr.ia_valid = ATTR_MODE; @@ -1513,7 +1514,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) /* We can't create new files, or truncate existing ones here */ openflags &= ~(O_CREAT|O_EXCL|O_TRUNC); - ctx = nameidata_to_nfs_open_context(dentry, nd); + ctx = create_nfs_open_context(dentry, openflags); ret = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; @@ -1577,7 +1578,7 @@ static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, if ((nd->flags & LOOKUP_CREATE) != 0) { open_flags = nd->intent.open.flags; - ctx = nameidata_to_nfs_open_context(dentry, nd); + ctx = create_nfs_open_context(dentry, open_flags); error = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out_err_drop; -- cgit v1.2.3 From f7c85868fcacc331dd3454a4f08f006d7942521f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Jun 2011 18:53:18 -0400 Subject: fix mknod() on nfs4 (hopefully) a) check the right flags in ->create() (LOOKUP_OPEN, not LOOKUP_CREATE) b) default (!LOOKUP_OPEN) open_flags is O_CREAT|O_EXCL|FMODE_READ, not 0 c) lookup_instantiate_filp() should be done only with LOOKUP_OPEN; otherwise we need to issue CLOSE, lest we leak stateid on server. 
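Point (a) is the core of the fix: LOOKUP_CREATE can be set on lookups that carry no open intent at all (such as the mknod() path this fix targets), so nd->intent.open.flags may only be trusted when LOOKUP_OPEN is set. Below is a standalone model of that gate, with made-up flag values; the patch's actual fallback is O_CREAT|O_EXCL|FMODE_READ, which the sketch trims to O_CREAT|O_EXCL (as a later patch in this series does anyway).

#include <stdio.h>
#include <fcntl.h>

/* Illustrative stand-ins for the nameidata lookup flags. */
#define LOOKUP_OPEN_M   0x0100
#define LOOKUP_CREATE_M 0x0200

struct nd_model { int flags; int intent_open_flags; };

/* Mirrors the corrected test: only an open intent has usable open flags;
 * a bare create (e.g. via mknod()) falls back to a create-only default. */
static int pick_open_flags(const struct nd_model *nd)
{
        if (nd && (nd->flags & LOOKUP_OPEN_M))
                return nd->intent_open_flags;
        return O_CREAT | O_EXCL;
}

int main(void)
{
        struct nd_model from_open  = { LOOKUP_OPEN_M | LOOKUP_CREATE_M, O_WRONLY | O_CREAT };
        struct nd_model from_mknod = { LOOKUP_CREATE_M, 0 };  /* no open intent */

        printf("open(2) path : %#x\n", (unsigned int)pick_open_flags(&from_open));
        printf("mknod(2) path: %#x\n", (unsigned int)pick_open_flags(&from_mknod));
        return 0;
}
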
Signed-off-by: Al Viro --- fs/nfs/dir.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index cc613c354bb8..1f4625749038 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1567,7 +1567,7 @@ static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, struct nfs_open_context *ctx = NULL; struct iattr attr; int error; - int open_flags = 0; + int open_flags = O_CREAT|O_EXCL|FMODE_READ; dfprintk(VFS, "NFS: create(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); @@ -1575,27 +1575,27 @@ static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; - if ((nd->flags & LOOKUP_CREATE) != 0) { + if (nd && (nd->flags & LOOKUP_OPEN) != 0) open_flags = nd->intent.open.flags; - ctx = create_nfs_open_context(dentry, open_flags); - error = PTR_ERR(ctx); - if (IS_ERR(ctx)) - goto out_err_drop; - } + ctx = create_nfs_open_context(dentry, open_flags); + error = PTR_ERR(ctx); + if (IS_ERR(ctx)) + goto out_err_drop; error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, ctx); if (error != 0) goto out_put_ctx; - if (ctx != NULL) { + if (nd && (nd->flags & LOOKUP_OPEN) != 0) { error = nfs_intent_set_file(nd, ctx); if (error < 0) goto out_err; + } else { + put_nfs_open_context(ctx); } return 0; out_put_ctx: - if (ctx != NULL) - put_nfs_open_context(ctx); + put_nfs_open_context(ctx); out_err_drop: d_drop(dentry); out_err: @@ -1657,7 +1657,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, { struct iattr attr; int error; - int open_flags = 0; + int open_flags = O_CREAT|O_EXCL|FMODE_READ; dfprintk(VFS, "NFS: create(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); @@ -1665,7 +1665,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; - if ((nd->flags & LOOKUP_CREATE) != 0) + if (nd && (nd->flags & LOOKUP_OPEN) != 0) open_flags = nd->intent.open.flags; error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL); -- cgit v1.2.3 From 554a8b9f54cd7ca2b89f5dc227df08be082fae0d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 23 Jun 2011 12:35:50 -0400 Subject: Don't pass nameidata when calling vfs_create() from mknod() All instances can cope with that now (and ceph one actually starts working properly). Signed-off-by: Al Viro --- fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 42ccb97cc260..94fd0fa2d647 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2411,7 +2411,7 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode, goto out_drop_write; switch (mode & S_IFMT) { case 0: case S_IFREG: - error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd); + error = vfs_create(nd.path.dentry->d_inode,dentry,mode,NULL); break; case S_IFCHR: case S_IFBLK: error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode, -- cgit v1.2.3 From 8a5e929dd2e05ab4d3d89f58c5e8fca596af8f3a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 19:15:54 -0400 Subject: don't transliterate lower bits of ->intent.open.flags to FMODE_... ->create() instances are much happier that way... 
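Until now the low two bits of nd->intent.open.flags reached filesystems already transliterated into FMODE-style read/write bits (the old open_to_namei_flags() in the hunk below added 1 to them); after this patch they stay as plain O_ACCMODE values and NFS derives the fmode_t itself in the new flags_to_mode() helper visible in the fs/nfs/dir.c hunk below. Here is a small userspace model of that mapping, with invented FMODE_*_M bit values; the real helper also carries the __FMODE_EXEC bit, omitted for brevity.

#include <stdio.h>
#include <fcntl.h>

/* Stand-ins for the kernel's fmode_t bits; the values are illustrative. */
#define FMODE_READ_M  0x1u
#define FMODE_WRITE_M 0x2u

/* Models the logic of flags_to_mode(): everything except O_WRONLY implies
 * read, everything except O_RDONLY implies write, so O_RDWR maps to both. */
static unsigned int flags_to_mode_model(int flags)
{
        unsigned int res = 0;

        if ((flags & O_ACCMODE) != O_WRONLY)
                res |= FMODE_READ_M;
        if ((flags & O_ACCMODE) != O_RDONLY)
                res |= FMODE_WRITE_M;
        return res;
}

int main(void)
{
        printf("O_RDONLY -> %#x\n", flags_to_mode_model(O_RDONLY)); /* read       */
        printf("O_WRONLY -> %#x\n", flags_to_mode_model(O_WRONLY)); /* write      */
        printf("O_RDWR   -> %#x\n", flags_to_mode_model(O_RDWR));   /* read|write */
        return 0;
}
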
Signed-off-by: Al Viro --- drivers/staging/pohmelfs/dir.c | 2 +- fs/9p/vfs_inode.c | 2 +- fs/9p/vfs_inode_dotl.c | 2 +- fs/ceph/file.c | 2 +- fs/fuse/dir.c | 2 +- fs/namei.c | 21 ++------------------- fs/nfs/dir.c | 24 +++++++++++++++++------- 7 files changed, 24 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c index 9732a9666cc4..7598e77672a5 100644 --- a/drivers/staging/pohmelfs/dir.c +++ b/drivers/staging/pohmelfs/dir.c @@ -512,7 +512,7 @@ struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct int err, lock_type = POHMELFS_READ_LOCK, need_lock = 1; struct qstr str = dentry->d_name; - if ((nd->intent.open.flags & O_ACCMODE) > 1) + if ((nd->intent.open.flags & O_ACCMODE) != O_RDONLY) lock_type = POHMELFS_WRITE_LOCK; if (test_bit(NETFS_INODE_OWNED, &parent->state)) { diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 7f6c67703195..47f71eb66b32 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -634,7 +634,7 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode, v9ses = v9fs_inode2v9ses(dir); perm = unixmode2p9mode(v9ses, mode); if (nd && nd->flags & LOOKUP_OPEN) - flags = nd->intent.open.flags - 1; + flags = nd->intent.open.flags; else flags = O_RDWR; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 691c78f58bef..d148e69f21b5 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -174,7 +174,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, v9ses = v9fs_inode2v9ses(dir); if (nd && nd->flags & LOOKUP_OPEN) - flags = nd->intent.open.flags - 1; + flags = nd->intent.open.flags; else { /* * create call without LOOKUP_OPEN is due diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 4698a5c553dc..0a292459c895 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -226,7 +226,7 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry); struct ceph_mds_request *req; int err; - int flags = nd->intent.open.flags - 1; /* silly vfs! */ + int flags = nd->intent.open.flags; dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n", dentry, dentry->d_name.len, dentry->d_name.name, flags, mode); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index e2b14001cea5..47559dd33193 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -382,7 +382,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, struct fuse_entry_out outentry; struct fuse_file *ff; struct file *file; - int flags = nd->intent.open.flags - 1; + int flags = nd->intent.open.flags; if (fc->no_create) return -ENOSYS; diff --git a/fs/namei.c b/fs/namei.c index 94fd0fa2d647..5e65f67ee926 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1965,27 +1965,10 @@ static int handle_truncate(struct file *filp) return error; } -/* - * Note that while the flag value (low two bits) for sys_open means: - * 00 - read-only - * 01 - write-only - * 10 - read-write - * 11 - special - * it is changed into - * 00 - no permissions needed - * 01 - read-permission - * 10 - write-permission - * 11 - read-write - * for the internal routines (ie open_namei()/follow_link() etc) - * This is more logical, and also allows the 00 "no perm needed" - * to be used for symlinks (where the permissions are checked - * later). 
- * -*/ static inline int open_to_namei_flags(int flag) { - if ((flag+1) & O_ACCMODE) - flag++; + if ((flag & O_ACCMODE) == 3) + flag--; return flag; } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 1f4625749038..b5f63a50fa7f 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1338,16 +1338,26 @@ static int is_atomic_open(struct nameidata *nd) return 0; /* Are we trying to write to a read only partition? */ if (__mnt_is_readonly(nd->path.mnt) && - (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE))) + (nd->intent.open.flags & (O_CREAT|O_TRUNC|O_ACCMODE))) return 0; return 1; } +static fmode_t flags_to_mode(int flags) +{ + fmode_t res = (__force fmode_t)flags & FMODE_EXEC; + if ((flags & O_ACCMODE) != O_WRONLY) + res |= FMODE_READ; + if ((flags & O_ACCMODE) != O_RDONLY) + res |= FMODE_WRITE; + return res; +} + static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags) { struct nfs_open_context *ctx; struct rpc_cred *cred; - fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); + fmode_t fmode = flags_to_mode(open_flags); cred = rpc_lookup_cred(); if (IS_ERR(cred)) @@ -1567,7 +1577,7 @@ static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, struct nfs_open_context *ctx = NULL; struct iattr attr; int error; - int open_flags = O_CREAT|O_EXCL|FMODE_READ; + int open_flags = O_CREAT|O_EXCL; dfprintk(VFS, "NFS: create(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); @@ -1657,7 +1667,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, { struct iattr attr; int error; - int open_flags = O_CREAT|O_EXCL|FMODE_READ; + int open_flags = O_CREAT|O_EXCL; dfprintk(VFS, "NFS: create(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); @@ -2256,11 +2266,11 @@ static int nfs_open_permission_mask(int openflags) { int mask = 0; - if (openflags & FMODE_READ) + if ((openflags & O_ACCMODE) != O_WRONLY) mask |= MAY_READ; - if (openflags & FMODE_WRITE) + if ((openflags & O_ACCMODE) != O_RDONLY) mask |= MAY_WRITE; - if (openflags & FMODE_EXEC) + if (openflags & __FMODE_EXEC) mask |= MAY_EXEC; return mask; } -- cgit v1.2.3 From bf6c7f6c7bd0ea779757d35b5fdc9f9157f056b3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:08:31 -0400 Subject: don't pass nameidata to vfs_create() from ecryptfs_create() Instead of playing with removal of LOOKUP_OPEN, mangling (and restoring) nd->path, just pass NULL to vfs_create(). The whole point of what's being done there is to suppress any attempts to open file by underlying fs, which is what nd == NULL indicates. Signed-off-by: Al Viro --- fs/ecryptfs/inode.c | 33 +++++---------------------------- 1 file changed, 5 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 799e01055f22..340c657a108c 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -147,7 +147,6 @@ static int ecryptfs_interpose(struct dentry *lower_dentry, * @lower_dir_inode: inode of the parent in the lower fs of the new file * @dentry: New file's dentry * @mode: The mode of the new file - * @nd: nameidata of ecryptfs' parent's dentry & vfsmount * * Creates the file in the lower file system. 
* @@ -155,31 +154,10 @@ static int ecryptfs_interpose(struct dentry *lower_dentry, */ static int ecryptfs_create_underlying_file(struct inode *lower_dir_inode, - struct dentry *dentry, int mode, - struct nameidata *nd) + struct dentry *dentry, int mode) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); - struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); - struct dentry *dentry_save; - struct vfsmount *vfsmount_save; - unsigned int flags_save; - int rc; - - if (nd) { - dentry_save = nd->path.dentry; - vfsmount_save = nd->path.mnt; - flags_save = nd->flags; - nd->path.dentry = lower_dentry; - nd->path.mnt = lower_mnt; - nd->flags &= ~LOOKUP_OPEN; - } - rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); - if (nd) { - nd->path.dentry = dentry_save; - nd->path.mnt = vfsmount_save; - nd->flags = flags_save; - } - return rc; + return vfs_create(lower_dir_inode, lower_dentry, mode, NULL); } /** @@ -197,8 +175,7 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, */ static int ecryptfs_do_create(struct inode *directory_inode, - struct dentry *ecryptfs_dentry, int mode, - struct nameidata *nd) + struct dentry *ecryptfs_dentry, int mode) { int rc; struct dentry *lower_dentry; @@ -213,7 +190,7 @@ ecryptfs_do_create(struct inode *directory_inode, goto out; } rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode, - ecryptfs_dentry, mode, nd); + ecryptfs_dentry, mode); if (rc) { printk(KERN_ERR "%s: Failure to create dentry in lower fs; " "rc = [%d]\n", __func__, rc); @@ -294,7 +271,7 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry, int rc; /* ecryptfs_do_create() calls ecryptfs_interpose() */ - rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode, nd); + rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode); if (unlikely(rc)) { ecryptfs_printk(KERN_WARNING, "Failed to create file in" "lower filesystem\n"); -- cgit v1.2.3 From dd7dd556e45133ef13f2c4bddc0e0b1ac23bc0e4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:17:17 -0400 Subject: no need to check for LOOKUP_OPEN in ->create() instances ... it will be set in nd->flag for all cases with non-NULL nd (i.e. when called from do_last()). 
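To illustrate the pattern being simplified (a minimal sketch, not code from any of the filesystems touched below; the examplefs_* names are hypothetical): since do_last() is the only caller that passes a non-NULL nameidata to ->create() and it has LOOKUP_OPEN set by that point, an instance can test the pointer alone.

    /* hypothetical ->create() instance; examplefs_* names are illustrative */
    static int examplefs_create(struct inode *dir, struct dentry *dentry,
                                int mode, struct nameidata *nd)
    {
            int flags;

            if (nd)                 /* was: if (nd && (nd->flags & LOOKUP_OPEN)) */
                    flags = nd->intent.open.flags;
            else
                    flags = O_RDWR; /* plain create, no open intent */

            return examplefs_do_create(dir, dentry, mode, flags);
    }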
Signed-off-by: Al Viro --- fs/9p/vfs_inode.c | 4 ++-- fs/9p/vfs_inode_dotl.c | 2 +- fs/cifs/dir.c | 6 +++--- fs/fuse/dir.c | 2 +- fs/nfs/dir.c | 6 +++--- 5 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 47f71eb66b32..7f9976a866e9 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -633,7 +633,7 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode, fid = NULL; v9ses = v9fs_inode2v9ses(dir); perm = unixmode2p9mode(v9ses, mode); - if (nd && nd->flags & LOOKUP_OPEN) + if (nd) flags = nd->intent.open.flags; else flags = O_RDWR; @@ -649,7 +649,7 @@ v9fs_vfs_create(struct inode *dir, struct dentry *dentry, int mode, v9fs_invalidate_inode_attr(dir); /* if we are opening a file, assign the open fid to the file */ - if (nd && nd->flags & LOOKUP_OPEN) { + if (nd) { v9inode = V9FS_I(dentry->d_inode); mutex_lock(&v9inode->v_mutex); if (v9ses->cache && !v9inode->writeback_fid && diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index d148e69f21b5..32bbbe5aa689 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -173,7 +173,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, struct posix_acl *pacl = NULL, *dacl = NULL; v9ses = v9fs_inode2v9ses(dir); - if (nd && nd->flags & LOOKUP_OPEN) + if (nd) flags = nd->intent.open.flags; else { /* diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index fa8c21d913bc..8766149f6300 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -179,7 +179,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, if (oplockEnabled) oplock = REQ_OPLOCK; - if (nd && (nd->flags & LOOKUP_OPEN)) + if (nd) oflags = nd->intent.open.file->f_flags; else oflags = O_RDONLY | O_CREAT; @@ -214,7 +214,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, which should be rare for path not covered on files) */ } - if (nd && (nd->flags & LOOKUP_OPEN)) { + if (nd) { /* if the file is going to stay open, then we need to set the desired access properly */ desiredAccess = 0; @@ -328,7 +328,7 @@ cifs_create_set_dentry: else cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); - if (newinode && nd && (nd->flags & LOOKUP_OPEN)) { + if (newinode && nd) { struct cifsFileInfo *pfile_info; struct file *filp; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 47559dd33193..02063dde2728 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -576,7 +576,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, int mode, static int fuse_create(struct inode *dir, struct dentry *entry, int mode, struct nameidata *nd) { - if (nd && (nd->flags & LOOKUP_OPEN)) { + if (nd) { int err = fuse_create_open(dir, entry, mode, nd); if (err != -ENOSYS) return err; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index b5f63a50fa7f..77ae95f15497 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1585,7 +1585,7 @@ static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; - if (nd && (nd->flags & LOOKUP_OPEN) != 0) + if (nd) open_flags = nd->intent.open.flags; ctx = create_nfs_open_context(dentry, open_flags); @@ -1596,7 +1596,7 @@ static int nfs_open_create(struct inode *dir, struct dentry *dentry, int mode, error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, ctx); if (error != 0) goto out_put_ctx; - if (nd && (nd->flags & LOOKUP_OPEN) != 0) { + if (nd) { error = nfs_intent_set_file(nd, ctx); if (error < 0) goto out_err; @@ -1675,7 +1675,7 @@ static int nfs_create(struct 
inode *dir, struct dentry *dentry, int mode, attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; - if (nd && (nd->flags & LOOKUP_OPEN) != 0) + if (nd) open_flags = nd->intent.open.flags; error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL); -- cgit v1.2.3 From 407938e79edcadba1b5a17cf928584d8a191a8d7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:37:18 -0400 Subject: LOOKUP_CREATE and LOOKUP_RENAME_TARGET can be set only on the last step Signed-off-by: Al Viro --- fs/cifs/dir.c | 6 ++---- fs/fat/namei_vfat.c | 6 ++---- fs/jfs/namei.c | 6 ++---- 3 files changed, 6 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 8766149f6300..251c2ca569d3 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -663,10 +663,8 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) * case sensitive name which is specified by user if this is * for creation. */ - if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { - if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) - return 0; - } + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + return 0; if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled) return 0; diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 20b4ea53fdc4..bb3f29c3557b 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -82,10 +82,8 @@ static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) * case sensitive name which is specified by user if this is * for creation. */ - if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { - if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) - return 0; - } + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + return 0; return vfat_revalidate_shortname(dentry); } diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index eaaf2b511e89..7a2e8e5152fd 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1624,10 +1624,8 @@ static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) * case sensitive name which is specified by user if this is * for creation. */ - if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { - if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) - return 0; - } + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + return 0; return 1; } -- cgit v1.2.3 From 5c0f360b083fb33d05d1bff4b138b82d715eb419 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:41:09 -0400 Subject: jfs_ci_revalidate() is safe from RCU mode Signed-off-by: Al Viro --- fs/jfs/namei.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 7a2e8e5152fd..1da0dc799286 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1597,8 +1597,6 @@ out: static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) { - if (nd && nd->flags & LOOKUP_RCU) - return -ECHILD; /* * This is not negative dentry. Always valid. * -- cgit v1.2.3 From a127e0af59ab610e8b37ac2dfa4cdb2ec8c8f604 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:43:56 -0400 Subject: ceph: LOOKUP_OPEN is set only when it's the last component Signed-off-by: Al Viro --- fs/ceph/dir.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index ef8f08c343e8..b6d436f3c1ae 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -566,7 +566,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, /* open (but not create!) intent? 
*/ if (nd && (nd->flags & LOOKUP_OPEN) && - (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */ !(nd->intent.open.flags & O_CREAT)) { int mode = nd->intent.open.create_mode & ~current->fs->umask; return ceph_lookup_open(dir, dentry, nd, mode, 1); -- cgit v1.2.3 From 4352780386139ff33d2203868b392e6535deff61 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:45:21 -0400 Subject: cifs_lookup(): LOOKUP_OPEN is set only on the last component Signed-off-by: Al Viro --- fs/cifs/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 251c2ca569d3..14d602f178c2 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -568,7 +568,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, * reduction in network traffic in the other paths. */ if (pTcon->unix_ext) { - if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) && + if (nd && !(nd->flags & LOOKUP_DIRECTORY) && (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && (nd->intent.open.file->f_flags & O_CREAT)) { rc = cifs_posix_open(full_path, &newInode, -- cgit v1.2.3 From 8aeb376ca0fe61038166c3b8243c678addb80abf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:48:43 -0400 Subject: nfs: LOOKUP_{OPEN,CREATE,EXCL} is set only on the last step Signed-off-by: Al Viro --- fs/nfs/dir.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 77ae95f15497..8a45e6d1f6a4 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -997,14 +997,12 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry) * Return the intent data that applies to this particular path component * * Note that the current set of intents only apply to the very last - * component of the path. - * We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT. + * component of the path and none of them is set before that last + * component. */ static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, unsigned int mask) { - if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT)) - return 0; return nd->flags & mask; } -- cgit v1.2.3 From 49084c3bb2055c401f3493c13edae14d49128ca0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Jun 2011 21:59:52 -0400 Subject: kill LOOKUP_CONTINUE LOOKUP_PARENT is equivalent to it now Signed-off-by: Al Viro --- fs/namei.c | 11 +++-------- include/linux/namei.h | 1 - 2 files changed, 3 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 5e65f67ee926..f49d6abfa799 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -664,7 +664,7 @@ static int follow_automount(struct path *path, unsigned flags, /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT * and this is the terminal part of the path. */ - if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE)) + if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) return -EISDIR; /* we actually want to stop here */ /* We want to mount if someone is trying to open/create a file of any @@ -676,7 +676,7 @@ static int follow_automount(struct path *path, unsigned flags, * appended a '/' to the name. 
*/ if (!(flags & LOOKUP_FOLLOW) && - !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY | + !(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE))) return -EISDIR; @@ -695,7 +695,7 @@ static int follow_automount(struct path *path, unsigned flags, * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. */ - if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_CONTINUE)) + if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT)) return -EREMOTE; return PTR_ERR(mnt); } @@ -1281,7 +1281,6 @@ static int link_path_walk(const char *name, struct nameidata *nd) { struct path next; int err; - unsigned int lookup_flags = nd->flags; while (*name=='/') name++; @@ -1295,8 +1294,6 @@ static int link_path_walk(const char *name, struct nameidata *nd) unsigned int c; int type; - nd->flags |= LOOKUP_CONTINUE; - err = may_lookup(nd); if (err) break; @@ -1358,8 +1355,6 @@ static int link_path_walk(const char *name, struct nameidata *nd) /* here ends the main loop */ last_component: - /* Clear LOOKUP_CONTINUE iff it was previously unset */ - nd->flags &= lookup_flags | ~LOOKUP_CONTINUE; nd->last = this; nd->last_type = type; return 0; diff --git a/include/linux/namei.h b/include/linux/namei.h index eba45ea10298..3439ab862e1d 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -48,7 +48,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; */ #define LOOKUP_FOLLOW 0x0001 #define LOOKUP_DIRECTORY 0x0002 -#define LOOKUP_CONTINUE 0x0004 #define LOOKUP_PARENT 0x0010 #define LOOKUP_REVAL 0x0020 -- cgit v1.2.3 From dae6ad8f37529963ae7df52baaccf056b38f210e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 26 Jun 2011 11:50:15 -0400 Subject: new helpers: kern_path_create/user_path_create combination of kern_path_parent() and lookup_create(). Does *not* expose struct nameidata to caller. Syscalls converted to that... 
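The calling convention the conversions below follow, in outline (distilled from the mknodat()/mkdirat() hunks in this patch rather than an additional caller): the helper resolves the parent, returns the dentry for the last component with the parent's i_mutex held, and fills a struct path that the caller drops when done.

    struct path path;
    struct dentry *dentry;

    dentry = user_path_create(dfd, filename, &path, 0);     /* 0 = not a directory */
    if (IS_ERR(dentry))
            return PTR_ERR(dentry);

    /* ... vfs_create()/vfs_mknod()/vfs_mkdir() against path.dentry->d_inode ... */

    dput(dentry);
    mutex_unlock(&path.dentry->d_inode->i_mutex);
    path_put(&path);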
Signed-off-by: Al Viro --- fs/namei.c | 154 ++++++++++++++++++++++++------------------------ fs/ocfs2/refcounttree.c | 49 ++++----------- include/linux/namei.h | 2 + net/unix/af_unix.c | 38 ++++++------ 4 files changed, 106 insertions(+), 137 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index f49d6abfa799..b292eb03d9d2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2311,6 +2311,35 @@ fail: } EXPORT_SYMBOL_GPL(lookup_create); +struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir) +{ + struct nameidata nd; + struct dentry *res; + int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); + if (error) + return ERR_PTR(error); + res = lookup_create(&nd, is_dir); + if (IS_ERR(res)) { + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + path_put(&nd.path); + } + *path = nd.path; + return res; +} +EXPORT_SYMBOL(kern_path_create); + +struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir) +{ + char *tmp = getname(pathname); + struct dentry *res; + if (IS_ERR(tmp)) + return ERR_CAST(tmp); + res = kern_path_create(dfd, tmp, path, is_dir); + putname(tmp); + return res; +} +EXPORT_SYMBOL(user_path_create); + int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) { int error = may_create(dir, dentry); @@ -2359,54 +2388,46 @@ static int may_mknod(mode_t mode) SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode, unsigned, dev) { - int error; - char *tmp; struct dentry *dentry; - struct nameidata nd; + struct path path; + int error; if (S_ISDIR(mode)) return -EPERM; - error = user_path_parent(dfd, filename, &nd, &tmp); - if (error) - return error; + dentry = user_path_create(dfd, filename, &path, 0); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); - dentry = lookup_create(&nd, 0); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_unlock; - } - if (!IS_POSIXACL(nd.path.dentry->d_inode)) + if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); error = may_mknod(mode); if (error) goto out_dput; - error = mnt_want_write(nd.path.mnt); + error = mnt_want_write(path.mnt); if (error) goto out_dput; - error = security_path_mknod(&nd.path, dentry, mode, dev); + error = security_path_mknod(&path, dentry, mode, dev); if (error) goto out_drop_write; switch (mode & S_IFMT) { case 0: case S_IFREG: - error = vfs_create(nd.path.dentry->d_inode,dentry,mode,NULL); + error = vfs_create(path.dentry->d_inode,dentry,mode,NULL); break; case S_IFCHR: case S_IFBLK: - error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode, + error = vfs_mknod(path.dentry->d_inode,dentry,mode, new_decode_dev(dev)); break; case S_IFIFO: case S_IFSOCK: - error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0); + error = vfs_mknod(path.dentry->d_inode,dentry,mode,0); break; } out_drop_write: - mnt_drop_write(nd.path.mnt); + mnt_drop_write(path.mnt); out_dput: dput(dentry); -out_unlock: - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - path_put(&nd.path); - putname(tmp); + mutex_unlock(&path.dentry->d_inode->i_mutex); + path_put(&path); return error; } @@ -2439,38 +2460,29 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode) { - int error = 0; - char * tmp; struct dentry *dentry; - struct nameidata nd; - - error = user_path_parent(dfd, pathname, &nd, &tmp); - if (error) - goto out_err; + struct path path; + int error; - dentry = lookup_create(&nd, 1); - error = PTR_ERR(dentry); + 
dentry = user_path_create(dfd, pathname, &path, 1); if (IS_ERR(dentry)) - goto out_unlock; + return PTR_ERR(dentry); - if (!IS_POSIXACL(nd.path.dentry->d_inode)) + if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); - error = mnt_want_write(nd.path.mnt); + error = mnt_want_write(path.mnt); if (error) goto out_dput; - error = security_path_mkdir(&nd.path, dentry, mode); + error = security_path_mkdir(&path, dentry, mode); if (error) goto out_drop_write; - error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); out_drop_write: - mnt_drop_write(nd.path.mnt); + mnt_drop_write(path.mnt); out_dput: dput(dentry); -out_unlock: - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - path_put(&nd.path); - putname(tmp); -out_err: + mutex_unlock(&path.dentry->d_inode->i_mutex); + path_put(&path); return error; } @@ -2730,38 +2742,31 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, { int error; char *from; - char *to; struct dentry *dentry; - struct nameidata nd; + struct path path; from = getname(oldname); if (IS_ERR(from)) return PTR_ERR(from); - error = user_path_parent(newdfd, newname, &nd, &to); - if (error) - goto out_putname; - - dentry = lookup_create(&nd, 0); + dentry = user_path_create(newdfd, newname, &path, 0); error = PTR_ERR(dentry); if (IS_ERR(dentry)) - goto out_unlock; + goto out_putname; - error = mnt_want_write(nd.path.mnt); + error = mnt_want_write(path.mnt); if (error) goto out_dput; - error = security_path_symlink(&nd.path, dentry, from); + error = security_path_symlink(&path, dentry, from); if (error) goto out_drop_write; - error = vfs_symlink(nd.path.dentry->d_inode, dentry, from); + error = vfs_symlink(path.dentry->d_inode, dentry, from); out_drop_write: - mnt_drop_write(nd.path.mnt); + mnt_drop_write(path.mnt); out_dput: dput(dentry); -out_unlock: - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - path_put(&nd.path); - putname(to); + mutex_unlock(&path.dentry->d_inode->i_mutex); + path_put(&path); out_putname: putname(from); return error; @@ -2826,11 +2831,9 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags) { struct dentry *new_dentry; - struct nameidata nd; - struct path old_path; + struct path old_path, new_path; int how = 0; int error; - char *to; if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; @@ -2852,32 +2855,27 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, if (error) return error; - error = user_path_parent(newdfd, newname, &nd, &to); - if (error) - goto out; - error = -EXDEV; - if (old_path.mnt != nd.path.mnt) - goto out_release; - new_dentry = lookup_create(&nd, 0); + new_dentry = user_path_create(newdfd, newname, &new_path, 0); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) - goto out_unlock; - error = mnt_want_write(nd.path.mnt); + goto out; + + error = -EXDEV; + if (old_path.mnt != new_path.mnt) + goto out_dput; + error = mnt_want_write(new_path.mnt); if (error) goto out_dput; - error = security_path_link(old_path.dentry, &nd.path, new_dentry); + error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) goto out_drop_write; - error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry); + error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry); out_drop_write: - mnt_drop_write(nd.path.mnt); + mnt_drop_write(new_path.mnt); out_dput: dput(new_dentry); -out_unlock: - 
mutex_unlock(&nd.path.dentry->d_inode->i_mutex); -out_release: - path_put(&nd.path); - putname(to); + mutex_unlock(&new_path.dentry->d_inode->i_mutex); + path_put(&new_path); out: path_put(&old_path); diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index ebfd3825f12a..cf7823382664 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4368,25 +4368,6 @@ static inline int ocfs2_may_create(struct inode *dir, struct dentry *child) return inode_permission(dir, MAY_WRITE | MAY_EXEC); } -/* copied from user_path_parent. */ -static int ocfs2_user_path_parent(const char __user *path, - struct nameidata *nd, char **name) -{ - char *s = getname(path); - int error; - - if (IS_ERR(s)) - return PTR_ERR(s); - - error = kern_path_parent(s, nd); - if (error) - putname(s); - else - *name = s; - - return error; -} - /** * ocfs2_vfs_reflink - Create a reference-counted link * @@ -4460,10 +4441,8 @@ int ocfs2_reflink_ioctl(struct inode *inode, bool preserve) { struct dentry *new_dentry; - struct nameidata nd; - struct path old_path; + struct path old_path, new_path; int error; - char *to = NULL; if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; @@ -4474,39 +4453,33 @@ int ocfs2_reflink_ioctl(struct inode *inode, return error; } - error = ocfs2_user_path_parent(newname, &nd, &to); - if (error) { + new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0); + error = PTR_ERR(new_dentry); + if (IS_ERR(new_dentry)) { mlog_errno(error); goto out; } error = -EXDEV; - if (old_path.mnt != nd.path.mnt) - goto out_release; - new_dentry = lookup_create(&nd, 0); - error = PTR_ERR(new_dentry); - if (IS_ERR(new_dentry)) { + if (old_path.mnt != new_path.mnt) { mlog_errno(error); - goto out_unlock; + goto out_dput; } - error = mnt_want_write(nd.path.mnt); + error = mnt_want_write(new_path.mnt); if (error) { mlog_errno(error); goto out_dput; } error = ocfs2_vfs_reflink(old_path.dentry, - nd.path.dentry->d_inode, + new_path.dentry->d_inode, new_dentry, preserve); - mnt_drop_write(nd.path.mnt); + mnt_drop_write(new_path.mnt); out_dput: dput(new_dentry); -out_unlock: - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); -out_release: - path_put(&nd.path); - putname(to); + mutex_unlock(&new_path.dentry->d_inode->i_mutex); + path_put(&new_path); out: path_put(&old_path); diff --git a/include/linux/namei.h b/include/linux/namei.h index 3439ab862e1d..b8cea804d31a 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -74,6 +74,8 @@ extern int user_path_at(int, const char __user *, unsigned, struct path *); extern int kern_path(const char *, unsigned, struct path *); +extern struct dentry *kern_path_create(int, const char *, struct path *, int); +extern struct dentry *user_path_create(int, const char __user *, struct path *, int); extern int kern_path_parent(const char *, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct nameidata *); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 0722a25a3a33..ec68e1c05b85 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -808,8 +808,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct net *net = sock_net(sk); struct unix_sock *u = unix_sk(sk); struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; + char *sun_path = sunaddr->sun_path; struct dentry *dentry = NULL; - struct nameidata nd; + struct path path; int err; unsigned hash; struct unix_address *addr; @@ -845,48 +846,44 @@ static int unix_bind(struct 
socket *sock, struct sockaddr *uaddr, int addr_len) addr->hash = hash ^ sk->sk_type; atomic_set(&addr->refcnt, 1); - if (sunaddr->sun_path[0]) { + if (sun_path[0]) { unsigned int mode; err = 0; /* * Get the parent directory, calculate the hash for last * component. */ - err = kern_path_parent(sunaddr->sun_path, &nd); - if (err) - goto out_mknod_parent; - - dentry = lookup_create(&nd, 0); + dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0); err = PTR_ERR(dentry); if (IS_ERR(dentry)) - goto out_mknod_unlock; + goto out_mknod_parent; /* * All right, let's create it. */ mode = S_IFSOCK | (SOCK_INODE(sock)->i_mode & ~current_umask()); - err = mnt_want_write(nd.path.mnt); + err = mnt_want_write(path.mnt); if (err) goto out_mknod_dput; - err = security_path_mknod(&nd.path, dentry, mode, 0); + err = security_path_mknod(&path, dentry, mode, 0); if (err) goto out_mknod_drop_write; - err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); + err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0); out_mknod_drop_write: - mnt_drop_write(nd.path.mnt); + mnt_drop_write(path.mnt); if (err) goto out_mknod_dput; - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - dput(nd.path.dentry); - nd.path.dentry = dentry; + mutex_unlock(&path.dentry->d_inode->i_mutex); + dput(path.dentry); + path.dentry = dentry; addr->hash = UNIX_HASH_SIZE; } spin_lock(&unix_table_lock); - if (!sunaddr->sun_path[0]) { + if (!sun_path[0]) { err = -EADDRINUSE; if (__unix_find_socket_byname(net, sunaddr, addr_len, sk->sk_type, hash)) { @@ -897,8 +894,8 @@ out_mknod_drop_write: list = &unix_socket_table[addr->hash]; } else { list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)]; - u->dentry = nd.path.dentry; - u->mnt = nd.path.mnt; + u->dentry = path.dentry; + u->mnt = path.mnt; } err = 0; @@ -915,9 +912,8 @@ out: out_mknod_dput: dput(dentry); -out_mknod_unlock: - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - path_put(&nd.path); + mutex_unlock(&path.dentry->d_inode->i_mutex); + path_put(&path); out_mknod_parent: if (err == -EEXIST) err = -EADDRINUSE; -- cgit v1.2.3 From ed75e95de574c99575e5f3e1d9ca59ea8c12a9cb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 27 Jun 2011 16:53:43 -0400 Subject: kill lookup_create() folded into the only caller (kern_path_create()) Signed-off-by: Al Viro --- fs/namei.c | 54 +++++++++++++++++--------------------------------- include/linux/dcache.h | 1 - 2 files changed, 18 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index b292eb03d9d2..b45a039216c7 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2258,35 +2258,29 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, return file; } -/** - * lookup_create - lookup a dentry, creating it if it doesn't exist - * @nd: nameidata info - * @is_dir: directory flag - * - * Simple function to lookup and return a dentry and create it - * if it doesn't exist. Is SMP-safe. - * - * Returns with nd->path.dentry->d_inode->i_mutex locked. - */ -struct dentry *lookup_create(struct nameidata *nd, int is_dir) +struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir) { struct dentry *dentry = ERR_PTR(-EEXIST); + struct nameidata nd; + int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); + if (error) + return ERR_PTR(error); - mutex_lock_nested(&nd->path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); /* * Yucky last component or no last component at all? 
* (foo/., foo/.., /////) */ - if (nd->last_type != LAST_NORM) - goto fail; - nd->flags &= ~LOOKUP_PARENT; - nd->flags |= LOOKUP_CREATE | LOOKUP_EXCL; - nd->intent.open.flags = O_EXCL; + if (nd.last_type != LAST_NORM) + goto out; + nd.flags &= ~LOOKUP_PARENT; + nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL; + nd.intent.open.flags = O_EXCL; /* * Do the final lookup. */ - dentry = lookup_hash(nd); + mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_hash(&nd); if (IS_ERR(dentry)) goto fail; @@ -2298,34 +2292,22 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir) * all is fine. Let's be bastards - you had / on the end, you've * been asking for (non-existent) directory. -ENOENT for you. */ - if (unlikely(!is_dir && nd->last.name[nd->last.len])) { + if (unlikely(!is_dir && nd.last.name[nd.last.len])) { dput(dentry); dentry = ERR_PTR(-ENOENT); + goto fail; } + *path = nd.path; return dentry; eexist: dput(dentry); dentry = ERR_PTR(-EEXIST); fail: + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); +out: + path_put(&nd.path); return dentry; } -EXPORT_SYMBOL_GPL(lookup_create); - -struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir) -{ - struct nameidata nd; - struct dentry *res; - int error = do_path_lookup(dfd, pathname, LOOKUP_PARENT, &nd); - if (error) - return ERR_PTR(error); - res = lookup_create(&nd, is_dir); - if (IS_ERR(res)) { - mutex_unlock(&nd.path.dentry->d_inode->i_mutex); - path_put(&nd.path); - } - *path = nd.path; - return res; -} EXPORT_SYMBOL(kern_path_create); struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, int is_dir) diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 5fa5bd33b979..3f22d8d6d8a3 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -423,7 +423,6 @@ static inline bool d_need_lookup(struct dentry *dentry) } extern void d_clear_need_lookup(struct dentry *dentry); -extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); extern int sysctl_vfs_cache_pressure; -- cgit v1.2.3 From e0a0124936171af6156b80fe8ac8799f039e767f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 27 Jun 2011 17:00:37 -0400 Subject: switch vfs_path_lookup() to struct path Signed-off-by: Al Viro --- fs/namei.c | 16 +++++++++++----- fs/nfs/cache_lib.c | 9 ++++----- fs/nfs/super.c | 16 +++++----------- include/linux/namei.h | 2 +- net/sunrpc/clnt.c | 11 +++++------ 5 files changed, 26 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index b45a039216c7..7e6ba8c80e77 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1575,16 +1575,22 @@ int kern_path(const char *name, unsigned int flags, struct path *path) * @mnt: pointer to vfs mount of the base directory * @name: pointer to file name * @flags: lookup flags - * @nd: pointer to nameidata + * @path: pointer to struct path to fill */ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, const char *name, unsigned int flags, - struct nameidata *nd) + struct path *path) { - nd->root.dentry = dentry; - nd->root.mnt = mnt; + struct nameidata nd; + int err; + nd.root.dentry = dentry; + nd.root.mnt = mnt; + BUG_ON(flags & LOOKUP_PARENT); /* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */ - return do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, nd); + err = do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, &nd); + if (!err) + *path = nd.path; + return err; } static struct dentry *__lookup_hash(struct qstr *name, diff --git 
a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c index 84690319e625..c98b439332fc 100644 --- a/fs/nfs/cache_lib.c +++ b/fs/nfs/cache_lib.c @@ -113,19 +113,18 @@ int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq) int nfs_cache_register(struct cache_detail *cd) { - struct nameidata nd; struct vfsmount *mnt; + struct path path; int ret; mnt = rpc_get_mount(); if (IS_ERR(mnt)) return PTR_ERR(mnt); - ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &nd); + ret = vfs_path_lookup(mnt->mnt_root, mnt, "/cache", 0, &path); if (ret) goto err; - ret = sunrpc_cache_register_pipefs(nd.path.dentry, - cd->name, 0600, cd); - path_put(&nd.path); + ret = sunrpc_cache_register_pipefs(path.dentry, cd->name, 0600, cd); + path_put(&path); if (!ret) return ret; err: diff --git a/fs/nfs/super.c b/fs/nfs/super.c index ce40e5c568ba..b961ceac66b4 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2773,16 +2773,12 @@ static void nfs_referral_loop_unprotect(void) static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, const char *export_path) { - struct nameidata *nd = NULL; struct mnt_namespace *ns_private; struct super_block *s; struct dentry *dentry; + struct path path; int ret; - nd = kmalloc(sizeof(*nd), GFP_KERNEL); - if (nd == NULL) - return ERR_PTR(-ENOMEM); - ns_private = create_mnt_ns(root_mnt); ret = PTR_ERR(ns_private); if (IS_ERR(ns_private)) @@ -2793,7 +2789,7 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, goto out_put_mnt_ns; ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, - export_path, LOOKUP_FOLLOW, nd); + export_path, LOOKUP_FOLLOW, &path); nfs_referral_loop_unprotect(); put_mnt_ns(ns_private); @@ -2801,12 +2797,11 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, if (ret != 0) goto out_err; - s = nd->path.mnt->mnt_sb; + s = path.mnt->mnt_sb; atomic_inc(&s->s_active); - dentry = dget(nd->path.dentry); + dentry = dget(path.dentry); - path_put(&nd->path); - kfree(nd); + path_put(&path); down_write(&s->s_umount); return dentry; out_put_mnt_ns: @@ -2814,7 +2809,6 @@ out_put_mnt_ns: out_mntput: mntput(root_mnt); out_err: - kfree(nd); return ERR_PTR(ret); } diff --git a/include/linux/namei.h b/include/linux/namei.h index b8cea804d31a..76fe2c62ae71 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -78,7 +78,7 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, int); extern struct dentry *user_path_create(int, const char __user *, struct path *, int); extern int kern_path_parent(const char *, struct nameidata *); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, - const char *, unsigned int, struct nameidata *); + const char *, unsigned int, struct path *); extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, int (*open)(struct inode *, struct file *)); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 8c9141583d6f..304f403a0411 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -97,8 +97,7 @@ static int rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) { static uint32_t clntid; - struct nameidata nd; - struct path path; + struct path path, dir; char name[15]; struct qstr q = { .name = name, @@ -113,7 +112,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) path.mnt = rpc_get_mount(); if (IS_ERR(path.mnt)) return PTR_ERR(path.mnt); - error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd); + error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &dir); if (error) goto err; @@ 
-121,7 +120,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); name[sizeof(name) - 1] = '\0'; q.hash = full_name_hash(q.name, q.len); - path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt); + path.dentry = rpc_create_client_dir(dir.dentry, &q, clnt); if (!IS_ERR(path.dentry)) break; error = PTR_ERR(path.dentry); @@ -132,11 +131,11 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) goto err_path_put; } } - path_put(&nd.path); + path_put(&dir); clnt->cl_path = path; return 0; err_path_put: - path_put(&nd.path); + path_put(&dir); err: rpc_put_mount(); return error; -- cgit v1.2.3 From e3c3d9c838d48c0341c40ea45ee087e3d8c8ea39 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 27 Jun 2011 17:14:56 -0400 Subject: unexport kern_path_parent() Signed-off-by: Al Viro --- fs/namei.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 7e6ba8c80e77..0d188d95f244 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3269,7 +3269,6 @@ EXPORT_SYMBOL(page_readlink); EXPORT_SYMBOL(__page_symlink); EXPORT_SYMBOL(page_symlink); EXPORT_SYMBOL(page_symlink_inode_operations); -EXPORT_SYMBOL(kern_path_parent); EXPORT_SYMBOL(kern_path); EXPORT_SYMBOL(vfs_path_lookup); EXPORT_SYMBOL(inode_permission); -- cgit v1.2.3 From a4464dbc0ca6a3ab8e9d1206bc05059dae2a559d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 7 Jul 2011 15:03:58 -0400 Subject: Make ->d_sb assign-once and always non-NULL New helper (non-exported, fs/internal.h-only): __d_alloc(sb, name). Allocates dentry, sets its ->d_sb to given superblock and sets ->d_op accordingly. Old d_alloc(NULL, name) callers are converted to that (all of them know what superblock they want). d_alloc() itself is left only for parent != NULl case; uses __d_alloc(), inserts result into the list of parent's children. Note that now ->d_sb is assign-once and never NULL *and* ->d_parent is never NULL either. Signed-off-by: Al Viro --- fs/dcache.c | 75 +++++++++++++++++++++++++++++++---------------------------- fs/internal.h | 5 ++++ fs/libfs.c | 6 ++--- 3 files changed, 47 insertions(+), 39 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index d3902139b533..c61edd0318c3 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1275,8 +1275,8 @@ static struct shrinker dcache_shrinker = { }; /** - * d_alloc - allocate a dcache entry - * @parent: parent of entry to allocate + * __d_alloc - allocate a dcache entry + * @sb: filesystem it will belong to * @name: qstr of the name * * Allocates a dentry. It returns %NULL if there is insufficient memory @@ -1284,7 +1284,7 @@ static struct shrinker dcache_shrinker = { * copied and the copy passed in may be reused after this call. 
*/ -struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) +struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) { struct dentry *dentry; char *dname; @@ -1314,8 +1314,8 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) spin_lock_init(&dentry->d_lock); seqcount_init(&dentry->d_seq); dentry->d_inode = NULL; - dentry->d_parent = NULL; - dentry->d_sb = NULL; + dentry->d_parent = dentry; + dentry->d_sb = sb; dentry->d_op = NULL; dentry->d_fsdata = NULL; INIT_HLIST_BL_NODE(&dentry->d_hash); @@ -1323,36 +1323,47 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) INIT_LIST_HEAD(&dentry->d_subdirs); INIT_LIST_HEAD(&dentry->d_alias); INIT_LIST_HEAD(&dentry->d_u.d_child); - - if (parent) { - spin_lock(&parent->d_lock); - /* - * don't need child lock because it is not subject - * to concurrency here - */ - __dget_dlock(parent); - dentry->d_parent = parent; - dentry->d_sb = parent->d_sb; - d_set_d_op(dentry, dentry->d_sb->s_d_op); - list_add(&dentry->d_u.d_child, &parent->d_subdirs); - spin_unlock(&parent->d_lock); - } + d_set_d_op(dentry, dentry->d_sb->s_d_op); this_cpu_inc(nr_dentry); return dentry; } + +/** + * d_alloc - allocate a dcache entry + * @parent: parent of entry to allocate + * @name: qstr of the name + * + * Allocates a dentry. It returns %NULL if there is insufficient memory + * available. On a success the dentry is returned. The name passed in is + * copied and the copy passed in may be reused after this call. + */ +struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) +{ + struct dentry *dentry = __d_alloc(parent->d_sb, name); + if (!dentry) + return NULL; + + spin_lock(&parent->d_lock); + /* + * don't need child lock because it is not subject + * to concurrency here + */ + __dget_dlock(parent); + dentry->d_parent = parent; + list_add(&dentry->d_u.d_child, &parent->d_subdirs); + spin_unlock(&parent->d_lock); + + return dentry; +} EXPORT_SYMBOL(d_alloc); struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) { - struct dentry *dentry = d_alloc(NULL, name); - if (dentry) { - dentry->d_sb = sb; - d_set_d_op(dentry, dentry->d_sb->s_d_op); - dentry->d_parent = dentry; + struct dentry *dentry = __d_alloc(sb, name); + if (dentry) dentry->d_flags |= DCACHE_DISCONNECTED; - } return dentry; } EXPORT_SYMBOL(d_alloc_pseudo); @@ -1522,13 +1533,9 @@ struct dentry * d_alloc_root(struct inode * root_inode) if (root_inode) { static const struct qstr name = { .name = "/", .len = 1 }; - res = d_alloc(NULL, &name); - if (res) { - res->d_sb = root_inode->i_sb; - d_set_d_op(res, res->d_sb->s_d_op); - res->d_parent = res; + res = __d_alloc(root_inode->i_sb, &name); + if (res) d_instantiate(res, root_inode); - } } return res; } @@ -1589,13 +1596,11 @@ struct dentry *d_obtain_alias(struct inode *inode) if (res) goto out_iput; - tmp = d_alloc(NULL, &anonstring); + tmp = __d_alloc(inode->i_sb, &anonstring); if (!tmp) { res = ERR_PTR(-ENOMEM); goto out_iput; } - tmp->d_parent = tmp; /* make sure dput doesn't croak */ - spin_lock(&inode->i_lock); res = __d_find_any_alias(inode); @@ -1607,8 +1612,6 @@ struct dentry *d_obtain_alias(struct inode *inode) /* attach a disconnected dentry */ spin_lock(&tmp->d_lock); - tmp->d_sb = inode->i_sb; - d_set_d_op(tmp, tmp->d_sb->s_d_op); tmp->d_inode = inode; tmp->d_flags |= DCACHE_DISCONNECTED; list_add(&tmp->d_alias, &inode->i_dentry); diff --git a/fs/internal.h b/fs/internal.h index b29c46e4e32f..ae47c48bedde 100644 --- a/fs/internal.h +++ 
b/fs/internal.h @@ -135,3 +135,8 @@ extern void inode_wb_list_del(struct inode *inode); extern int get_nr_dirty_inodes(void); extern void evict_inodes(struct super_block *); extern int invalidate_inodes(struct super_block *, bool); + +/* + * dcache.c + */ +extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); diff --git a/fs/libfs.c b/fs/libfs.c index 275ca4749a2e..bd50b11f92da 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -16,6 +16,8 @@ #include +#include "internal.h" + static inline int simple_positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); @@ -246,13 +248,11 @@ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, root->i_ino = 1; root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR; root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME; - dentry = d_alloc(NULL, &d_name); + dentry = __d_alloc(s, &d_name); if (!dentry) { iput(root); goto Enomem; } - dentry->d_sb = s; - dentry->d_parent = dentry; d_instantiate(dentry, root); s->s_root = dentry; s->s_d_op = dops; -- cgit v1.2.3 From fb408e6ccc32404a05783911b6f3fed56bd17b06 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 7 Jul 2011 15:12:51 -0400 Subject: get rid of pointless checks for dentry->sb == NULL it never is... Signed-off-by: Al Viro --- fs/cachefiles/bind.c | 1 - security/tomoyo/realpath.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c index a2603e7c0bb5..e10c4415e8c3 100644 --- a/fs/cachefiles/bind.c +++ b/fs/cachefiles/bind.c @@ -129,7 +129,6 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache) !root->d_inode->i_op->mkdir || !root->d_inode->i_op->setxattr || !root->d_inode->i_op->getxattr || - !root->d_sb || !root->d_sb->s_op || !root->d_sb->s_op->statfs || !root->d_sb->s_op->sync_fs) diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index d1e05b047715..8d95e91c9fc4 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -103,7 +103,7 @@ char *tomoyo_realpath_from_path(struct path *path) if (!buf) break; /* Get better name for socket. */ - if (dentry->d_sb && dentry->d_sb->s_magic == SOCKFS_MAGIC) { + if (dentry->d_sb->s_magic == SOCKFS_MAGIC) { struct inode *inode = dentry->d_inode; struct socket *sock = inode ? SOCKET_I(inode) : NULL; struct sock *sk = sock ? sock->sk : NULL; -- cgit v1.2.3 From 0ee5dc676a5f8fadede608c7281dfedb1ae714ea Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 7 Jul 2011 15:44:25 -0400 Subject: btrfs: kill magical embedded struct superblock Signed-off-by: Al Viro --- fs/btrfs/ctree.h | 2 +- fs/btrfs/disk-io.c | 15 ++++----------- fs/btrfs/inode.c | 2 +- fs/super.c | 32 +++++++++++++++++++++++--------- include/linux/fs.h | 2 ++ 5 files changed, 31 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9864cec801ed..9552afc24ef7 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1219,7 +1219,7 @@ struct btrfs_root { * right now this just gets used so that a root has its own devid * for stat. 
It may be used for more later */ - struct super_block anon_super; + dev_t anon_dev; }; struct btrfs_ioctl_defrag_range_args { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1ac8db5dc0a3..b231ae13b269 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1077,12 +1077,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, init_completion(&root->kobj_unregister); root->defrag_running = 0; root->root_key.objectid = objectid; - root->anon_super.s_root = NULL; - root->anon_super.s_dev = 0; - INIT_LIST_HEAD(&root->anon_super.s_list); - INIT_LIST_HEAD(&root->anon_super.s_instances); - init_rwsem(&root->anon_super.s_umount); - + root->anon_dev = 0; return 0; } @@ -1311,7 +1306,7 @@ again: spin_lock_init(&root->cache_lock); init_waitqueue_head(&root->cache_wait); - ret = set_anon_super(&root->anon_super, NULL); + ret = get_anon_bdev(&root->anon_dev); if (ret) goto fail; @@ -2393,10 +2388,8 @@ static void free_fs_root(struct btrfs_root *root) { iput(root->cache_inode); WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); - if (root->anon_super.s_dev) { - down_write(&root->anon_super.s_umount); - kill_anon_super(&root->anon_super); - } + if (root->anon_dev) + free_anon_bdev(root->anon_dev); free_extent_buffer(root->node); free_extent_buffer(root->commit_root); kfree(root->free_ino_ctl); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cb170ca51c64..ecf0fac712d6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6900,7 +6900,7 @@ static int btrfs_getattr(struct vfsmount *mnt, { struct inode *inode = dentry->d_inode; generic_fillattr(inode, stat); - stat->dev = BTRFS_I(inode)->root->anon_super.s_dev; + stat->dev = BTRFS_I(inode)->root->anon_dev; stat->blksize = PAGE_CACHE_SIZE; stat->blocks = (inode_get_bytes(inode) + BTRFS_I(inode)->delalloc_bytes) >> 9; diff --git a/fs/super.c b/fs/super.c index 444da9579068..263edeb9f0e9 100644 --- a/fs/super.c +++ b/fs/super.c @@ -693,7 +693,7 @@ static DEFINE_IDA(unnamed_dev_ida); static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */ static int unnamed_dev_start = 0; /* don't bother trying below it */ -int set_anon_super(struct super_block *s, void *data) +int get_anon_bdev(dev_t *p) { int dev; int error; @@ -720,24 +720,38 @@ int set_anon_super(struct super_block *s, void *data) spin_unlock(&unnamed_dev_lock); return -EMFILE; } - s->s_dev = MKDEV(0, dev & MINORMASK); - s->s_bdi = &noop_backing_dev_info; + *p = MKDEV(0, dev & MINORMASK); return 0; } +EXPORT_SYMBOL(get_anon_bdev); -EXPORT_SYMBOL(set_anon_super); - -void kill_anon_super(struct super_block *sb) +void free_anon_bdev(dev_t dev) { - int slot = MINOR(sb->s_dev); - - generic_shutdown_super(sb); + int slot = MINOR(dev); spin_lock(&unnamed_dev_lock); ida_remove(&unnamed_dev_ida, slot); if (slot < unnamed_dev_start) unnamed_dev_start = slot; spin_unlock(&unnamed_dev_lock); } +EXPORT_SYMBOL(free_anon_bdev); + +int set_anon_super(struct super_block *s, void *data) +{ + int error = get_anon_bdev(&s->s_dev); + if (!error) + s->s_bdi = &noop_backing_dev_info; + return error; +} + +EXPORT_SYMBOL(set_anon_super); + +void kill_anon_super(struct super_block *sb) +{ + dev_t dev = sb->s_dev; + generic_shutdown_super(sb); + free_anon_bdev(dev); +} EXPORT_SYMBOL(kill_anon_super); diff --git a/include/linux/fs.h b/include/linux/fs.h index 8494aac189f0..a0011aef4338 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1835,6 +1835,8 @@ void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); void deactivate_locked_super(struct 
super_block *sb); int set_anon_super(struct super_block *s, void *data); +int get_anon_bdev(dev_t *); +void free_anon_bdev(dev_t); struct super_block *sget(struct file_system_type *type, int (*test)(struct super_block *,void *), int (*set)(struct super_block *,void *), -- cgit v1.2.3 From e7f59097071f2e193e900093742a4be85839f3d9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 7 Jul 2011 15:45:59 -0400 Subject: kill useless checks for sb->s_op == NULL never is... Signed-off-by: Al Viro --- drivers/block/pktcdvd.c | 2 +- fs/cachefiles/bind.c | 1 - fs/inode.c | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 07a382eaf0a8..e133f094ab08 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -1206,7 +1206,7 @@ static int pkt_start_recovery(struct packet_data *pkt) if (!sb) return 0; - if (!sb->s_op || !sb->s_op->relocate_blocks) + if (!sb->s_op->relocate_blocks) goto out; old_block = pkt->sector / (CD_FRAMESIZE >> 9); diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c index e10c4415e8c3..622f4696e484 100644 --- a/fs/cachefiles/bind.c +++ b/fs/cachefiles/bind.c @@ -129,7 +129,6 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache) !root->d_inode->i_op->mkdir || !root->d_inode->i_op->setxattr || !root->d_inode->i_op->getxattr || - !root->d_sb->s_op || !root->d_sb->s_op->statfs || !root->d_sb->s_op->sync_fs) goto error_unsupported; diff --git a/fs/inode.c b/fs/inode.c index 43566d17d1b8..cbdcab88105f 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1331,7 +1331,7 @@ static void iput_final(struct inode *inode) WARN_ON(inode->i_state & I_NEW); - if (op && op->drop_inode) + if (op->drop_inode) drop = op->drop_inode(inode); else drop = generic_drop_inode(inode); -- cgit v1.2.3 From 5b4b299cc7b6adfb9401bf7f826a80f190b971be Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 7 Jul 2011 18:43:21 -0400 Subject: nfsd4_list_rec_dir(): don't bother with reopening rec_file just rewind it to the beginning before vfs_readdir() and be done with that... 
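In outline, the change amounts to rewinding the already-open recovery directory before each scan instead of dentry_open()ing a fresh struct file (a condensed sketch of the hunk below, not additional code):

    status = vfs_llseek(rec_file, 0, SEEK_SET);
    if (status < 0)
            return status;          /* the patch also resets creds here */

    status = vfs_readdir(rec_file, nfsd4_build_namelist, &names);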
Signed-off-by: Al Viro --- fs/nfsd/nfs4recover.c | 52 +++++++++++++++++++++------------------------------ 1 file changed, 21 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index ffb59ef6f82f..29d77f60585b 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -191,52 +191,42 @@ nfsd4_build_namelist(void *arg, const char *name, int namlen, } static int -nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f) +nfsd4_list_rec_dir(recdir_func *f) { const struct cred *original_cred; - struct file *filp; + struct dentry *dir = rec_file->f_path.dentry; LIST_HEAD(names); - struct name_list *entry; - struct dentry *dentry; int status; - if (!rec_file) - return 0; - status = nfs4_save_creds(&original_cred); if (status < 0) return status; - filp = dentry_open(dget(dir), mntget(rec_file->f_path.mnt), O_RDONLY, - current_cred()); - status = PTR_ERR(filp); - if (IS_ERR(filp)) - goto out; - status = vfs_readdir(filp, nfsd4_build_namelist, &names); - fput(filp); + status = vfs_llseek(rec_file, 0, SEEK_SET); + if (status < 0) { + nfs4_reset_creds(original_cred); + return status; + } + + status = vfs_readdir(rec_file, nfsd4_build_namelist, &names); mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT); while (!list_empty(&names)) { + struct name_list *entry; entry = list_entry(names.next, struct name_list, list); - - dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1); - if (IS_ERR(dentry)) { - status = PTR_ERR(dentry); - break; + if (!status) { + struct dentry *dentry; + dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1); + if (IS_ERR(dentry)) { + status = PTR_ERR(dentry); + break; + } + status = f(dir, dentry); + dput(dentry); } - status = f(dir, dentry); - dput(dentry); - if (status) - break; list_del(&entry->list); kfree(entry); } mutex_unlock(&dir->d_inode->i_mutex); -out: - while (!list_empty(&names)) { - entry = list_entry(names.next, struct name_list, list); - list_del(&entry->list); - kfree(entry); - } nfs4_reset_creds(original_cred); return status; } @@ -322,7 +312,7 @@ nfsd4_recdir_purge_old(void) { status = mnt_want_write(rec_file->f_path.mnt); if (status) goto out; - status = nfsd4_list_rec_dir(rec_file->f_path.dentry, purge_old); + status = nfsd4_list_rec_dir(purge_old); if (status == 0) vfs_fsync(rec_file, 0); mnt_drop_write(rec_file->f_path.mnt); @@ -352,7 +342,7 @@ nfsd4_recdir_load(void) { if (!rec_file) return 0; - status = nfsd4_list_rec_dir(rec_file->f_path.dentry, load_recdir); + status = nfsd4_list_rec_dir(load_recdir); if (status) printk("nfsd4: failed loading clients from recovery" " directory %s\n", rec_file->f_path.dentry->d_name.name); -- cgit v1.2.3 From 0c1aa9a952c3608eb17bf990466f1491d1ee8b6c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 8 Jul 2011 20:57:47 -0400 Subject: deuglify squashfs_lookup() d_splice_alias(NULL, dentry) is equivalent to d_add(dentry, NULL), NULL so no need for that if (inode) ... 
in there (or ERR_PTR(0), for that matter) Signed-off-by: Al Viro --- fs/squashfs/namei.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c index 4bc63ac64bc0..51b36958492b 100644 --- a/fs/squashfs/namei.c +++ b/fs/squashfs/namei.c @@ -232,10 +232,7 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, exit_lookup: kfree(dire); - if (inode) - return d_splice_alias(inode, dentry); - d_add(dentry, inode); - return ERR_PTR(0); + return d_splice_alias(inode, dentry); data_error: err = -EIO; -- cgit v1.2.3 From a9049376ee05bf966bfe2b081b5071326856890a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 8 Jul 2011 21:20:11 -0400 Subject: make d_splice_alias(ERR_PTR(err), dentry) = ERR_PTR(err) ... and simplify the living hell out of callers Signed-off-by: Al Viro --- fs/btrfs/inode.c | 8 +------- fs/dcache.c | 3 +++ fs/efs/namei.c | 7 ++----- fs/exofs/namei.c | 7 +------ fs/ext2/namei.c | 14 +++++--------- fs/ext3/namei.c | 14 +++++--------- fs/ext4/namei.c | 14 +++++--------- fs/fat/namei_msdos.c | 29 ++++++++++------------------- fs/isofs/namei.c | 11 +++-------- fs/jffs2/dir.c | 4 +--- fs/jfs/namei.c | 4 +--- fs/logfs/dir.c | 4 +--- fs/nilfs2/namei.c | 7 +------ fs/squashfs/namei.c | 5 ----- fs/ufs/namei.c | 2 -- 15 files changed, 39 insertions(+), 94 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ecf0fac712d6..bcb20a9a3b93 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4079,13 +4079,7 @@ static int btrfs_dentry_delete(const struct dentry *dentry) static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - struct inode *inode; - - inode = btrfs_lookup_dentry(dir, dentry); - if (IS_ERR(inode)) - return ERR_CAST(inode); - - return d_splice_alias(inode, dentry); + return d_splice_alias(btrfs_lookup_dentry(dir, dentry), dentry); } unsigned char btrfs_filetype_table[] = { diff --git a/fs/dcache.c b/fs/dcache.c index c61edd0318c3..41e2085d430b 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1652,6 +1652,9 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) { struct dentry *new = NULL; + if (IS_ERR(inode)) + return ERR_CAST(inode); + if (inode && S_ISDIR(inode->i_mode)) { spin_lock(&inode->i_lock); new = __d_find_alias(inode, 1); diff --git a/fs/efs/namei.c b/fs/efs/namei.c index 1511bf9e5f80..832b10ded82f 100644 --- a/fs/efs/namei.c +++ b/fs/efs/namei.c @@ -60,14 +60,11 @@ static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len) struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { efs_ino_t inodenum; - struct inode * inode = NULL; + struct inode *inode = NULL; inodenum = efs_find_entry(dir, dentry->d_name.name, dentry->d_name.len); - if (inodenum) { + if (inodenum) inode = efs_iget(dir->i_sb, inodenum); - if (IS_ERR(inode)) - return ERR_CAST(inode); - } return d_splice_alias(inode, dentry); } diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 4d70db110cfc..b54c43775f17 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -55,12 +55,7 @@ static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(-ENAMETOOLONG); ino = exofs_inode_by_name(dir, dentry); - inode = NULL; - if (ino) { - inode = exofs_iget(dir->i_sb, ino); - if (IS_ERR(inode)) - return ERR_CAST(inode); - } + inode = ino ? 
exofs_iget(dir->i_sb, ino) : NULL; return d_splice_alias(inode, dentry); } diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index ed5c5d496ee9..d60b7099e2db 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -67,15 +67,11 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str inode = NULL; if (ino) { inode = ext2_iget(dir->i_sb, ino); - if (IS_ERR(inode)) { - if (PTR_ERR(inode) == -ESTALE) { - ext2_error(dir->i_sb, __func__, - "deleted inode referenced: %lu", - (unsigned long) ino); - return ERR_PTR(-EIO); - } else { - return ERR_CAST(inode); - } + if (inode == ERR_PTR(-ESTALE)) { + ext2_error(dir->i_sb, __func__, + "deleted inode referenced: %lu", + (unsigned long) ino); + return ERR_PTR(-EIO); } } return d_splice_alias(inode, dentry); diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 34b6d9bfc48a..c095cf5640c7 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1038,15 +1038,11 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str return ERR_PTR(-EIO); } inode = ext3_iget(dir->i_sb, ino); - if (IS_ERR(inode)) { - if (PTR_ERR(inode) == -ESTALE) { - ext3_error(dir->i_sb, __func__, - "deleted inode referenced: %lu", - ino); - return ERR_PTR(-EIO); - } else { - return ERR_CAST(inode); - } + if (inode == ERR_PTR(-ESTALE)) { + ext3_error(dir->i_sb, __func__, + "deleted inode referenced: %lu", + ino); + return ERR_PTR(-EIO); } } return d_splice_alias(inode, dentry); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index b754b7721f51..707d605bf769 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1037,15 +1037,11 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru return ERR_PTR(-EIO); } inode = ext4_iget(dir->i_sb, ino); - if (IS_ERR(inode)) { - if (PTR_ERR(inode) == -ESTALE) { - EXT4_ERROR_INODE(dir, - "deleted inode referenced: %u", - ino); - return ERR_PTR(-EIO); - } else { - return ERR_CAST(inode); - } + if (inode == ERR_PTR(-ESTALE)) { + EXT4_ERROR_INODE(dir, + "deleted inode referenced: %u", + ino); + return ERR_PTR(-EIO); } } return d_splice_alias(inode, dentry); diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 3b222dafd15b..66e83b845455 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -209,29 +209,20 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry, int err; lock_super(sb); - err = msdos_find(dir, dentry->d_name.name, dentry->d_name.len, &sinfo); - if (err) { - if (err == -ENOENT) { - inode = NULL; - goto out; - } - goto error; - } - - inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); - brelse(sinfo.bh); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto error; + switch (err) { + case -ENOENT: + inode = NULL; + break; + case 0: + inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos); + brelse(sinfo.bh); + break; + default: + inode = ERR_PTR(err); } -out: unlock_super(sb); return d_splice_alias(inode, dentry); - -error: - unlock_super(sb); - return ERR_PTR(err); } /***** Creates a directory entry (name is already formatted). */ diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 4fb3e8074fd4..68fa503d877e 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -183,14 +183,9 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam 1024 + page_address(page)); __free_page(page); - inode = NULL; - if (found) { - inode = isofs_iget(dir->i_sb, block, offset); - if (IS_ERR(inode)) { - mutex_unlock(&sbi->s_mutex); - return ERR_CAST(inode); - } - } + inode = found ? 
isofs_iget(dir->i_sb, block, offset) : NULL; + mutex_unlock(&sbi->s_mutex); + return d_splice_alias(inode, dentry); } diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 4bca6a2e5c07..8f40ce4f1777 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -102,10 +102,8 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, mutex_unlock(&dir_f->sem); if (ino) { inode = jffs2_iget(dir_i->i_sb, ino); - if (IS_ERR(inode)) { + if (IS_ERR(inode)) printk(KERN_WARNING "iget() failed for ino #%u\n", ino); - return ERR_CAST(inode); - } } return d_splice_alias(inode, target); diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 1da0dc799286..247331551992 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1481,10 +1481,8 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc } ip = jfs_iget(dip->i_sb, inum); - if (IS_ERR(ip)) { + if (IS_ERR(ip)) jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum); - return ERR_CAST(ip); - } return d_splice_alias(ip, dentry); } diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 1afae26cf236..b3ff3d894165 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -371,11 +371,9 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, page_cache_release(page); inode = logfs_iget(dir->i_sb, ino); - if (IS_ERR(inode)) { + if (IS_ERR(inode)) printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)n", ino, dir->i_ino, index); - return ERR_CAST(inode); - } return d_splice_alias(inode, dentry); } diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 546849b3e88f..a3141990061e 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -72,12 +72,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) return ERR_PTR(-ENAMETOOLONG); ino = nilfs_inode_by_name(dir, &dentry->d_name); - inode = NULL; - if (ino) { - inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino); - if (IS_ERR(inode)) - return ERR_CAST(inode); - } + inode = ino ? nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino) : NULL; return d_splice_alias(inode, dentry); } diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c index 51b36958492b..0682b38d7e31 100644 --- a/fs/squashfs/namei.c +++ b/fs/squashfs/namei.c @@ -220,11 +220,6 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, blk, off, ino_num); inode = squashfs_iget(dir->i_sb, ino, ino_num); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto failed; - } - goto exit_lookup; } } diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index b57aab9a1184..639d49162241 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -59,8 +59,6 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru if (ino) inode = ufs_iget(dir->i_sb, ino); unlock_ufs(dir->i_sb); - if (IS_ERR(inode)) - return ERR_CAST(inode); return d_splice_alias(inode, dentry); } -- cgit v1.2.3 From fcb94f72d3e0f4f34b326c2986da8e5996daf72c Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:38 +1000 Subject: inode: convert inode_stat.nr_unused to per-cpu counters Before we split up the inode_lru_lock, the unused inode counter needs to be made independent of the global inode_lru_lock. Convert it to per-cpu counters to do this. 
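[Illustrative sketch, not part of the patch: the per-cpu counter idiom being applied here. Writers touch only their CPU-local counter, so the inode hot paths stop sharing a cacheline for statistics; readers sum over all possible CPUs and clamp at zero, since concurrent increments and decrements on different CPUs can make the instantaneous sum go negative. get_nr_inodes_unused() below matches the patch; the inc/dec wrappers are only for illustration -- the patch open-codes this_cpu_inc()/this_cpu_dec() at the LRU add/remove sites.]

	static DEFINE_PER_CPU(unsigned int, nr_unused);

	/* fast path: no shared cacheline, no lock */
	static inline void nr_unused_inc(void) { this_cpu_inc(nr_unused); }
	static inline void nr_unused_dec(void) { this_cpu_dec(nr_unused); }

	/* slow path: approximate global value, good enough for statistics */
	static inline int get_nr_inodes_unused(void)
	{
		int i;
		int sum = 0;

		for_each_possible_cpu(i)
			sum += per_cpu(nr_unused, i);
		return sum < 0 ? 0 : sum;
	}

The summed value is only approximate while writers are active, which is fine for the inodes_stat reporting it feeds.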
Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- fs/inode.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index cbdcab88105f..9a0361121712 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -95,6 +95,7 @@ EXPORT_SYMBOL(empty_aops); struct inodes_stat_t inodes_stat; static DEFINE_PER_CPU(unsigned int, nr_inodes); +static DEFINE_PER_CPU(unsigned int, nr_unused); static struct kmem_cache *inode_cachep __read_mostly; @@ -109,7 +110,11 @@ static int get_nr_inodes(void) static inline int get_nr_inodes_unused(void) { - return inodes_stat.nr_unused; + int i; + int sum = 0; + for_each_possible_cpu(i) + sum += per_cpu(nr_unused, i); + return sum < 0 ? 0 : sum; } int get_nr_dirty_inodes(void) @@ -127,6 +132,7 @@ int proc_nr_inodes(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { inodes_stat.nr_inodes = get_nr_inodes(); + inodes_stat.nr_unused = get_nr_inodes_unused(); return proc_dointvec(table, write, buffer, lenp, ppos); } #endif @@ -340,7 +346,7 @@ static void inode_lru_list_add(struct inode *inode) spin_lock(&inode_lru_lock); if (list_empty(&inode->i_lru)) { list_add(&inode->i_lru, &inode_lru); - inodes_stat.nr_unused++; + this_cpu_inc(nr_unused); } spin_unlock(&inode_lru_lock); } @@ -350,7 +356,7 @@ static void inode_lru_list_del(struct inode *inode) spin_lock(&inode_lru_lock); if (!list_empty(&inode->i_lru)) { list_del_init(&inode->i_lru); - inodes_stat.nr_unused--; + this_cpu_dec(nr_unused); } spin_unlock(&inode_lru_lock); } @@ -656,7 +662,7 @@ static void prune_icache(int nr_to_scan) (inode->i_state & ~I_REFERENCED)) { list_del_init(&inode->i_lru); spin_unlock(&inode->i_lock); - inodes_stat.nr_unused--; + this_cpu_dec(nr_unused); continue; } @@ -693,7 +699,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode->i_lock); list_move(&inode->i_lru, &freeable); - inodes_stat.nr_unused--; + this_cpu_dec(nr_unused); } if (current_is_kswapd()) __count_vm_events(KSWAPD_INODESTEAL, reap); -- cgit v1.2.3 From 98b745c647a5a90c3c21ea43cbfad9a47b0dfad7 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:39 +1000 Subject: inode: Make unused inode LRU per superblock The inode unused list is currently a global LRU. This does not match the other global filesystem cache - the dentry cache - which uses per-superblock LRU lists. Hence we have related filesystem object types using different LRU reclaimation schemes. To enable a per-superblock filesystem cache shrinker, both of these caches need to have per-sb unused object LRU lists. Hence this patch converts the global inode LRU to per-sb LRUs. The patch only does rudimentary per-sb propotioning in the shrinker infrastructure, as this gets removed when the per-sb shrinker callouts are introduced later on. 
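[Illustrative sketch, not part of the patch: the rudimentary proportioning that prune_icache() gains here, and that the later per-sb shrinker patch in this series deletes again. Stripped of the sb refcounting, s_umount trylock and locking visible in the diff, each superblock is scanned in proportion to its share of the system-wide unused-inode count; names follow the patch.]

	int unused = inodes_stat.nr_unused;
	int prune_ratio = (count >= unused) ? 1 : unused / count;
	struct super_block *sb;

	list_for_each_entry(sb, &super_blocks, s_list) {
		int w_count, pruned;

		if (sb->s_nr_inodes_unused == 0)
			continue;
		/* this sb's slice of the global scan budget */
		if (prune_ratio != 1)
			w_count = (sb->s_nr_inodes_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_inodes_unused;
		pruned = w_count;
		shrink_icache_sb(sb, &w_count);	/* unscanned budget comes back in w_count */
		pruned -= w_count;
		count -= pruned;
		if (count <= 0)
			break;
	}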
Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- fs/inode.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++------- fs/super.c | 1 + include/linux/fs.h | 4 +++ 3 files changed, 85 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 9a0361121712..8c3491302e0c 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -34,7 +34,7 @@ * inode->i_lock protects: * inode->i_state, inode->i_hash, __iget() * inode_lru_lock protects: - * inode_lru, inode->i_lru + * inode->i_sb->s_inode_lru, inode->i_lru * inode_sb_list_lock protects: * sb->s_inodes, inode->i_sb_list * inode_wb_list_lock protects: @@ -64,7 +64,6 @@ static unsigned int i_hash_shift __read_mostly; static struct hlist_head *inode_hashtable __read_mostly; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); -static LIST_HEAD(inode_lru); static DEFINE_SPINLOCK(inode_lru_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); @@ -345,7 +344,8 @@ static void inode_lru_list_add(struct inode *inode) { spin_lock(&inode_lru_lock); if (list_empty(&inode->i_lru)) { - list_add(&inode->i_lru, &inode_lru); + list_add(&inode->i_lru, &inode->i_sb->s_inode_lru); + inode->i_sb->s_nr_inodes_unused++; this_cpu_inc(nr_unused); } spin_unlock(&inode_lru_lock); @@ -356,6 +356,7 @@ static void inode_lru_list_del(struct inode *inode) spin_lock(&inode_lru_lock); if (!list_empty(&inode->i_lru)) { list_del_init(&inode->i_lru); + inode->i_sb->s_nr_inodes_unused--; this_cpu_dec(nr_unused); } spin_unlock(&inode_lru_lock); @@ -628,21 +629,20 @@ static int can_unuse(struct inode *inode) * LRU does not have strict ordering. Hence we don't want to reclaim inodes * with this flag set because they are the inodes that are out of order. */ -static void prune_icache(int nr_to_scan) +static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan) { LIST_HEAD(freeable); int nr_scanned; unsigned long reap = 0; - down_read(&iprune_sem); spin_lock(&inode_lru_lock); - for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { + for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) { struct inode *inode; - if (list_empty(&inode_lru)) + if (list_empty(&sb->s_inode_lru)) break; - inode = list_entry(inode_lru.prev, struct inode, i_lru); + inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru); /* * we are inverting the inode_lru_lock/inode->i_lock here, @@ -650,7 +650,7 @@ static void prune_icache(int nr_to_scan) * inode to the back of the list so we don't spin on it. 
*/ if (!spin_trylock(&inode->i_lock)) { - list_move(&inode->i_lru, &inode_lru); + list_move(&inode->i_lru, &sb->s_inode_lru); continue; } @@ -662,6 +662,7 @@ static void prune_icache(int nr_to_scan) (inode->i_state & ~I_REFERENCED)) { list_del_init(&inode->i_lru); spin_unlock(&inode->i_lock); + sb->s_nr_inodes_unused--; this_cpu_dec(nr_unused); continue; } @@ -669,7 +670,7 @@ static void prune_icache(int nr_to_scan) /* recently referenced inodes get one more pass */ if (inode->i_state & I_REFERENCED) { inode->i_state &= ~I_REFERENCED; - list_move(&inode->i_lru, &inode_lru); + list_move(&inode->i_lru, &sb->s_inode_lru); spin_unlock(&inode->i_lock); continue; } @@ -683,7 +684,7 @@ static void prune_icache(int nr_to_scan) iput(inode); spin_lock(&inode_lru_lock); - if (inode != list_entry(inode_lru.next, + if (inode != list_entry(sb->s_inode_lru.next, struct inode, i_lru)) continue; /* wrong inode or list_empty */ /* avoid lock inversions with trylock */ @@ -699,6 +700,7 @@ static void prune_icache(int nr_to_scan) spin_unlock(&inode->i_lock); list_move(&inode->i_lru, &freeable); + sb->s_nr_inodes_unused--; this_cpu_dec(nr_unused); } if (current_is_kswapd()) @@ -706,8 +708,75 @@ static void prune_icache(int nr_to_scan) else __count_vm_events(PGINODESTEAL, reap); spin_unlock(&inode_lru_lock); + *nr_to_scan = nr_scanned; dispose_list(&freeable); +} + +static void prune_icache(int count) +{ + struct super_block *sb, *p = NULL; + int w_count; + int unused = inodes_stat.nr_unused; + int prune_ratio; + int pruned; + + if (unused == 0 || count == 0) + return; + down_read(&iprune_sem); + if (count >= unused) + prune_ratio = 1; + else + prune_ratio = unused / count; + spin_lock(&sb_lock); + list_for_each_entry(sb, &super_blocks, s_list) { + if (list_empty(&sb->s_instances)) + continue; + if (sb->s_nr_inodes_unused == 0) + continue; + sb->s_count++; + /* Now, we reclaim unused dentrins with fairness. + * We reclaim them same percentage from each superblock. + * We calculate number of dentries to scan on this sb + * as follows, but the implementation is arranged to avoid + * overflows: + * number of dentries to scan on this sb = + * count * (number of dentries on this sb / + * number of dentries in the machine) + */ + spin_unlock(&sb_lock); + if (prune_ratio != 1) + w_count = (sb->s_nr_inodes_unused / prune_ratio) + 1; + else + w_count = sb->s_nr_inodes_unused; + pruned = w_count; + /* + * We need to be sure this filesystem isn't being unmounted, + * otherwise we could race with generic_shutdown_super(), and + * end up holding a reference to an inode while the filesystem + * is unmounted. So we try to get s_umount, and make sure + * s_root isn't NULL. + */ + if (down_read_trylock(&sb->s_umount)) { + if ((sb->s_root != NULL) && + (!list_empty(&sb->s_dentry_lru))) { + shrink_icache_sb(sb, &w_count); + pruned -= w_count; + } + up_read(&sb->s_umount); + } + spin_lock(&sb_lock); + if (p) + __put_super(p); + count -= pruned; + p = sb; + /* more work left to do? 
*/ + if (count <= 0) + break; + } + if (p) + __put_super(p); + spin_unlock(&sb_lock); up_read(&iprune_sem); } diff --git a/fs/super.c b/fs/super.c index 263edeb9f0e9..e8e6dbfefe8c 100644 --- a/fs/super.c +++ b/fs/super.c @@ -77,6 +77,7 @@ static struct super_block *alloc_super(struct file_system_type *type) INIT_HLIST_BL_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); INIT_LIST_HEAD(&s->s_dentry_lru); + INIT_LIST_HEAD(&s->s_inode_lru); init_rwsem(&s->s_umount); mutex_init(&s->s_lock); lockdep_set_class(&s->s_umount, &type->s_umount_key); diff --git a/include/linux/fs.h b/include/linux/fs.h index a0011aef4338..9724f0a48742 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1397,6 +1397,10 @@ struct super_block { struct list_head s_dentry_lru; /* unused dentry lru */ int s_nr_dentry_unused; /* # of dentry on lru */ + /* inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */ + struct list_head s_inode_lru; /* unused inode lru */ + int s_nr_inodes_unused; /* # of inodes on lru */ + struct block_device *s_bdev; struct backing_dev_info *s_bdi; struct mtd_info *s_mtd; -- cgit v1.2.3 From 09cc9fc7a7c3d872065426d7fb0f0ad6d3eb90fc Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:40 +1000 Subject: inode: move to per-sb LRU locks With the inode LRUs moving to per-sb structures, there is no longer a need for a global inode_lru_lock. The locking can be made more fine-grained by moving to a per-sb LRU lock, isolating the LRU operations of different filesytsems completely from each other. Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- fs/inode.c | 27 +++++++++++++-------------- fs/super.c | 1 + include/linux/fs.h | 3 ++- 3 files changed, 16 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 8c3491302e0c..0450e25aeda0 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -33,7 +33,7 @@ * * inode->i_lock protects: * inode->i_state, inode->i_hash, __iget() - * inode_lru_lock protects: + * inode->i_sb->s_inode_lru_lock protects: * inode->i_sb->s_inode_lru, inode->i_lru * inode_sb_list_lock protects: * sb->s_inodes, inode->i_sb_list @@ -46,7 +46,7 @@ * * inode_sb_list_lock * inode->i_lock - * inode_lru_lock + * inode->i_sb->s_inode_lru_lock * * inode_wb_list_lock * inode->i_lock @@ -64,8 +64,6 @@ static unsigned int i_hash_shift __read_mostly; static struct hlist_head *inode_hashtable __read_mostly; static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); -static DEFINE_SPINLOCK(inode_lru_lock); - __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock); @@ -342,24 +340,24 @@ EXPORT_SYMBOL(ihold); static void inode_lru_list_add(struct inode *inode) { - spin_lock(&inode_lru_lock); + spin_lock(&inode->i_sb->s_inode_lru_lock); if (list_empty(&inode->i_lru)) { list_add(&inode->i_lru, &inode->i_sb->s_inode_lru); inode->i_sb->s_nr_inodes_unused++; this_cpu_inc(nr_unused); } - spin_unlock(&inode_lru_lock); + spin_unlock(&inode->i_sb->s_inode_lru_lock); } static void inode_lru_list_del(struct inode *inode) { - spin_lock(&inode_lru_lock); + spin_lock(&inode->i_sb->s_inode_lru_lock); if (!list_empty(&inode->i_lru)) { list_del_init(&inode->i_lru); inode->i_sb->s_nr_inodes_unused--; this_cpu_dec(nr_unused); } - spin_unlock(&inode_lru_lock); + spin_unlock(&inode->i_sb->s_inode_lru_lock); } /** @@ -615,7 +613,8 @@ static int can_unuse(struct inode *inode) /* * Scan `goal' inodes on the unused list for freeable ones. 
They are moved to a - * temporary list and then are freed outside inode_lru_lock by dispose_list(). + * temporary list and then are freed outside sb->s_inode_lru_lock by + * dispose_list(). * * Any inodes which are pinned purely because of attached pagecache have their * pagecache removed. If the inode has metadata buffers attached to @@ -635,7 +634,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan) int nr_scanned; unsigned long reap = 0; - spin_lock(&inode_lru_lock); + spin_lock(&sb->s_inode_lru_lock); for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) { struct inode *inode; @@ -645,7 +644,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan) inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru); /* - * we are inverting the inode_lru_lock/inode->i_lock here, + * we are inverting the sb->s_inode_lru_lock/inode->i_lock here, * so use a trylock. If we fail to get the lock, just move the * inode to the back of the list so we don't spin on it. */ @@ -677,12 +676,12 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan) if (inode_has_buffers(inode) || inode->i_data.nrpages) { __iget(inode); spin_unlock(&inode->i_lock); - spin_unlock(&inode_lru_lock); + spin_unlock(&sb->s_inode_lru_lock); if (remove_inode_buffers(inode)) reap += invalidate_mapping_pages(&inode->i_data, 0, -1); iput(inode); - spin_lock(&inode_lru_lock); + spin_lock(&sb->s_inode_lru_lock); if (inode != list_entry(sb->s_inode_lru.next, struct inode, i_lru)) @@ -707,7 +706,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan) __count_vm_events(KSWAPD_INODESTEAL, reap); else __count_vm_events(PGINODESTEAL, reap); - spin_unlock(&inode_lru_lock); + spin_unlock(&sb->s_inode_lru_lock); *nr_to_scan = nr_scanned; dispose_list(&freeable); diff --git a/fs/super.c b/fs/super.c index e8e6dbfefe8c..73ab9f9b3571 100644 --- a/fs/super.c +++ b/fs/super.c @@ -78,6 +78,7 @@ static struct super_block *alloc_super(struct file_system_type *type) INIT_LIST_HEAD(&s->s_inodes); INIT_LIST_HEAD(&s->s_dentry_lru); INIT_LIST_HEAD(&s->s_inode_lru); + spin_lock_init(&s->s_inode_lru_lock); init_rwsem(&s->s_umount); mutex_init(&s->s_lock); lockdep_set_class(&s->s_umount, &type->s_umount_key); diff --git a/include/linux/fs.h b/include/linux/fs.h index 9724f0a48742..460d2cc21ec6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1397,7 +1397,8 @@ struct super_block { struct list_head s_dentry_lru; /* unused dentry lru */ int s_nr_dentry_unused; /* # of dentry on lru */ - /* inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */ + /* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */ + spinlock_t s_inode_lru_lock ____cacheline_aligned_in_smp; struct list_head s_inode_lru; /* unused inode lru */ int s_nr_inodes_unused; /* # of inodes on lru */ -- cgit v1.2.3 From 12ad3ab66103e6582ca69c0c9de18b13487eaaef Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:41 +1000 Subject: superblock: move pin_sb_for_writeback() to fs/super.c The per-sb shrinker has the same requirement as the writeback threads of ensuring that the superblock is usable and pinned for the time it takes to run the work. Both need to take a passive reference to the sb, take a read lock on the s_umount lock and then only continue if an unmount is not in progress. pin_sb_for_writeback() does this exactly, so move it to fs/super.c and rename it to grab_super_passive() and exporting it via fs/internal.h for all the VFS code to be able to use. 
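[Illustrative sketch, not part of the patch: the calling convention for the renamed helper. On success grab_super_passive() returns with a passive sb reference taken and s_umount held for read, so the caller must drop both; drop_super() does exactly up_read(&sb->s_umount) followed by put_super(sb). On failure the superblock is being unmounted (or is already gone) and must simply be skipped. work_on_sb() is a placeholder for the caller's real work, e.g. writing back its inodes or shrinking its caches.]

	if (!grab_super_passive(sb)) {
		/* sb is on its way out; leave it alone */
		return;
	}

	/* sb->s_root is valid and the fs cannot be unmounted under us */
	work_on_sb(sb);

	drop_super(sb);	/* up_read(&sb->s_umount) + put_super(sb) */

This is the same pattern writeback_inodes_wb() already used with pin_sb_for_writeback(); the patch only moves and renames the helper so the upcoming per-sb cache shrinker can share it.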
Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- fs/fs-writeback.c | 28 +--------------------------- fs/internal.h | 1 + fs/super.c | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 35 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 0f015a0468de..b8c507ca42f7 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -460,32 +460,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) return ret; } -/* - * For background writeback the caller does not have the sb pinned - * before calling writeback. So make sure that we do pin it, so it doesn't - * go away while we are writing inodes from it. - */ -static bool pin_sb_for_writeback(struct super_block *sb) -{ - spin_lock(&sb_lock); - if (list_empty(&sb->s_instances)) { - spin_unlock(&sb_lock); - return false; - } - - sb->s_count++; - spin_unlock(&sb_lock); - - if (down_read_trylock(&sb->s_umount)) { - if (sb->s_root) - return true; - up_read(&sb->s_umount); - } - - put_super(sb); - return false; -} - /* * Write a portion of b_io inodes which belong to @sb. * @@ -585,7 +559,7 @@ void writeback_inodes_wb(struct bdi_writeback *wb, struct inode *inode = wb_inode(wb->b_io.prev); struct super_block *sb = inode->i_sb; - if (!pin_sb_for_writeback(sb)) { + if (!grab_super_passive(sb)) { requeue_io(inode); continue; } diff --git a/fs/internal.h b/fs/internal.h index ae47c48bedde..fe327c20af83 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -97,6 +97,7 @@ extern struct file *get_empty_filp(void); * super.c */ extern int do_remount_sb(struct super_block *, int, void *, int); +extern bool grab_super_passive(struct super_block *sb); extern void __put_super(struct super_block *sb); extern void put_super(struct super_block *sb); extern struct dentry *mount_fs(struct file_system_type *, diff --git a/fs/super.c b/fs/super.c index 73ab9f9b3571..e63c754447ce 100644 --- a/fs/super.c +++ b/fs/super.c @@ -242,6 +242,39 @@ static int grab_super(struct super_block *s) __releases(sb_lock) return 0; } +/* + * grab_super_passive - acquire a passive reference + * @s: reference we are trying to grab + * + * Tries to acquire a passive reference. This is used in places where we + * cannot take an active reference but we need to ensure that the + * superblock does not go away while we are working on it. It returns + * false if a reference was not gained, and returns true with the s_umount + * lock held in read mode if a reference is gained. On successful return, + * the caller must drop the s_umount lock and the passive reference when + * done. + */ +bool grab_super_passive(struct super_block *sb) +{ + spin_lock(&sb_lock); + if (list_empty(&sb->s_instances)) { + spin_unlock(&sb_lock); + return false; + } + + sb->s_count++; + spin_unlock(&sb_lock); + + if (down_read_trylock(&sb->s_umount)) { + if (sb->s_root) + return true; + up_read(&sb->s_umount); + } + + put_super(sb); + return false; +} + /* * Superblock locking. We really ought to get rid of these two. */ -- cgit v1.2.3 From adab0f67d1cdaf468bbc311bce4d61f17626a536 Mon Sep 17 00:00:00 2001 From: Chandra Seetharaman Date: Wed, 29 Jun 2011 22:10:14 +0000 Subject: xfs: Remove the second parameter to xfs_sb_count() Remove the second parameter to xfs_sb_count() since all callers of the function set them. Also, fix the header comment regarding it being called periodically. 
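[Illustrative note, not part of the patch: with the flag gone the transaction is always marked synchronous inside xfs_log_sbcount(), so call sites collapse to the plain form below -- the shape the unmount and quiesce paths in the diff end up with.]

	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "Unable to update superblock counters. "
			     "Freespace may not be correct on next mount.");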
Signed-off-by: Chandra Seetharaman Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_sync.c | 2 +- fs/xfs/xfs_mount.c | 15 +++++---------- fs/xfs/xfs_mount.h | 2 +- 3 files changed, 7 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index f54e8ee1ed62..5cc158e52d4c 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -434,7 +434,7 @@ xfs_quiesce_attr( WARN_ON(atomic_read(&mp->m_active_trans) != 0); /* Push the superblock and write an unmount record */ - error = xfs_log_sbcount(mp, 1); + error = xfs_log_sbcount(mp); if (error) xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. " "Frozen image may not be consistent."); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 1fe643aa0efd..7f25245da289 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -1521,7 +1521,7 @@ xfs_unmountfs( xfs_warn(mp, "Unable to free reserved block pool. " "Freespace may not be correct on next mount."); - error = xfs_log_sbcount(mp, 1); + error = xfs_log_sbcount(mp); if (error) xfs_warn(mp, "Unable to update superblock counters. " "Freespace may not be correct on next mount."); @@ -1557,18 +1557,14 @@ xfs_fs_writable(xfs_mount_t *mp) /* * xfs_log_sbcount * - * Called either periodically to keep the on disk superblock values - * roughly up to date or from unmount to make sure the values are - * correct on a clean unmount. + * Sync the superblock counters to disk. * * Note this code can be called during the process of freezing, so - * we may need to use the transaction allocator which does not not + * we may need to use the transaction allocator which does not * block when the transaction subsystem is in its frozen state. */ int -xfs_log_sbcount( - xfs_mount_t *mp, - uint sync) +xfs_log_sbcount(xfs_mount_t *mp) { xfs_trans_t *tp; int error; @@ -1594,8 +1590,7 @@ xfs_log_sbcount( } xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); - if (sync) - xfs_trans_set_sync(tp); + xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); return error; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 3d68bb267c5f..bb24dac42a25 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -371,7 +371,7 @@ typedef struct xfs_mod_sb { int64_t msb_delta; /* Change to make to specified field */ } xfs_mod_sb_t; -extern int xfs_log_sbcount(xfs_mount_t *, uint); +extern int xfs_log_sbcount(xfs_mount_t *); extern __uint64_t xfs_default_resblks(xfs_mount_t *mp); extern int xfs_mountfs(xfs_mount_t *mp); -- cgit v1.2.3 From ad1a2c878ca70829874b4fcc83223cccb4e26dab Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 14 Jul 2011 20:50:36 +0000 Subject: xfs: failure mapping nfs fh to inode should return ESTALE On xfs exports, nfsd is incorrectly returning ENOENT instead of ESTALE on attempts to use a filehandle of a deleted file (spotted with pynfs test PUTFH3). The ENOENT was coming from xfs_iget. (It's tempting to wonder whether we should just map all xfs_iget errors to ESTALE, but I don't believe so--xfs_iget can also return ENOMEM at least, which we wouldn't want mapped to ESTALE.) While we're at it, the other return of ENOENT in xfs_nfs_get_inode() also looks wrong. Signed-off-by: J. 
Bruce Fields Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_export.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index f4f878fc0083..75e5d322e48f 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c @@ -151,14 +151,14 @@ xfs_nfs_get_inode( * We don't use ESTALE directly down the chain to not * confuse applications using bulkstat that expect EINVAL. */ - if (error == EINVAL) + if (error == EINVAL || error == ENOENT) error = ESTALE; return ERR_PTR(-error); } if (ip->i_d.di_gen != generation) { IRELE(ip); - return ERR_PTR(-ENOENT); + return ERR_PTR(-ESTALE); } return VFS_I(ip); -- cgit v1.2.3 From 1d8c95a363bf8cd4d4182dd19c01693b635311c2 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 18 Jul 2011 03:40:16 +0000 Subject: xfs: use a cursor for bulk AIL insertion Delayed logging can insert tens of thousands of log items into the AIL at the same LSN. When the committing of log commit records occur, we can get insertions occurring at an LSN that is not at the end of the AIL. If there are thousands of items in the AIL on the tail LSN, each insertion has to walk the AIL to find the correct place to insert the new item into the AIL. This can consume large amounts of CPU time and block other operations from occurring while the traversals are in progress. To avoid this repeated walk, use a AIL cursor to record where we should be inserting the new items into the AIL without having to repeat the walk. The cursor infrastructure already provides this functionality for push walks, so is a simple extension of existing code. While this will not avoid the initial walk, it will avoid repeating it tens of thousands of times during a single checkpoint commit. This version includes logic improvements from Christoph Hellwig. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_trans.c | 27 ++++++++++-- fs/xfs/xfs_trans_ail.c | 109 ++++++++++++++++++++++++++++++++++++++---------- fs/xfs/xfs_trans_priv.h | 10 +++-- 3 files changed, 118 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index c83f63b33aae..efc147f0e9b6 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1426,6 +1426,7 @@ xfs_trans_committed( static inline void xfs_log_item_batch_insert( struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, struct xfs_log_item **log_items, int nr_items, xfs_lsn_t commit_lsn) @@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert( spin_lock(&ailp->xa_lock); /* xfs_trans_ail_update_bulk drops ailp->xa_lock */ - xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn); + xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn); for (i = 0; i < nr_items; i++) IOP_UNPIN(log_items[i], 0); @@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert( * as an iclog write error even though we haven't started any IO yet. Hence in * this case all we need to do is IOP_COMMITTED processing, followed by an * IOP_UNPIN(aborted) call. + * + * The AIL cursor is used to optimise the insert process. If commit_lsn is not + * at the end of the AIL, the insert cursor avoids the need to walk + * the AIL to find the insertion point on every xfs_log_item_batch_insert() + * call. This saves a lot of needless list walking and is a net win, even + * though it slightly increases that amount of AIL lock traffic to set it up + * and tear it down. 
*/ void xfs_trans_committed_bulk( @@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk( #define LOG_ITEM_BATCH_SIZE 32 struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE]; struct xfs_log_vec *lv; + struct xfs_ail_cursor cur; int i = 0; + spin_lock(&ailp->xa_lock); + xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn); + spin_unlock(&ailp->xa_lock); + /* unpin all the log items */ for (lv = log_vector; lv; lv = lv->lv_next ) { struct xfs_log_item *lip = lv->lv_item; @@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk( /* * Not a bulk update option due to unusual item_lsn. * Push into AIL immediately, rechecking the lsn once - * we have the ail lock. Then unpin the item. + * we have the ail lock. Then unpin the item. This does + * not affect the AIL cursor the bulk insert path is + * using. */ spin_lock(&ailp->xa_lock); if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) @@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk( /* Item is a candidate for bulk AIL insert. */ log_items[i++] = lv->lv_item; if (i >= LOG_ITEM_BATCH_SIZE) { - xfs_log_item_batch_insert(ailp, log_items, + xfs_log_item_batch_insert(ailp, &cur, log_items, LOG_ITEM_BATCH_SIZE, commit_lsn); i = 0; } @@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk( /* make sure we insert the remainder! */ if (i) - xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn); + xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn); + + spin_lock(&ailp->xa_lock); + xfs_trans_ail_cursor_done(ailp, &cur); + spin_unlock(&ailp->xa_lock); } /* diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 5fc2380092c8..9a69dc06ea86 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -272,9 +272,9 @@ xfs_trans_ail_cursor_clear( } /* - * Return the item in the AIL with the current lsn. - * Return the current tree generation number for use - * in calls to xfs_trans_next_ail(). + * Initialise the cursor to the first item in the AIL with the given @lsn. + * This searches the list from lowest LSN to highest. Pass a @lsn of zero + * to initialise the cursor to the first item in the AIL. */ xfs_log_item_t * xfs_trans_ail_cursor_first( @@ -300,31 +300,97 @@ out: } /* - * splice the log item list into the AIL at the given LSN. + * Initialise the cursor to the last item in the AIL with the given @lsn. + * This searches the list from highest LSN to lowest. If there is no item with + * the value of @lsn, then it sets the cursor to the last item with an LSN lower + * than @lsn. + */ +static struct xfs_log_item * +__xfs_trans_ail_cursor_last( + struct xfs_ail *ailp, + xfs_lsn_t lsn) +{ + xfs_log_item_t *lip; + + list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) { + if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0) + return lip; + } + return NULL; +} + +/* + * Initialise the cursor to the last item in the AIL with the given @lsn. + * This searches the list from highest LSN to lowest. + */ +struct xfs_log_item * +xfs_trans_ail_cursor_last( + struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, + xfs_lsn_t lsn) +{ + xfs_trans_ail_cursor_init(ailp, cur); + cur->item = __xfs_trans_ail_cursor_last(ailp, lsn); + return cur->item; +} + +/* + * splice the log item list into the AIL at the given LSN. We splice to the + * tail of the given LSN to maintain insert order for push traversals. The + * cursor is optional, allowing repeated updates to the same LSN to avoid + * repeated traversals. 
*/ static void xfs_ail_splice( - struct xfs_ail *ailp, - struct list_head *list, - xfs_lsn_t lsn) + struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, + struct list_head *list, + xfs_lsn_t lsn) { - xfs_log_item_t *next_lip; + struct xfs_log_item *lip = cur ? cur->item : NULL; + struct xfs_log_item *next_lip; - /* If the list is empty, just insert the item. */ - if (list_empty(&ailp->xa_ail)) { - list_splice(list, &ailp->xa_ail); - return; + /* + * Get a new cursor if we don't have a placeholder or the existing one + * has been invalidated. + */ + if (!lip || (__psint_t)lip & 1) { + lip = __xfs_trans_ail_cursor_last(ailp, lsn); + + if (!lip) { + /* The list is empty, so just splice and return. */ + if (cur) + cur->item = NULL; + list_splice(list, &ailp->xa_ail); + return; + } } - list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { - if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0) - break; + /* + * Our cursor points to the item we want to insert _after_, so we have + * to update the cursor to point to the end of the list we are splicing + * in so that it points to the correct location for the next splice. + * i.e. before the splice + * + * lsn -> lsn -> lsn + x -> lsn + x ... + * ^ + * | cursor points here + * + * After the splice we have: + * + * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ... + * ^ ^ + * | cursor points here | needs to move here + * + * So we set the cursor to the last item in the list to be spliced + * before we execute the splice, resulting in the cursor pointing to + * the correct item after the splice occurs. + */ + if (cur) { + next_lip = list_entry(list->prev, struct xfs_log_item, li_ail); + cur->item = next_lip; } - - ASSERT(&next_lip->li_ail == &ailp->xa_ail || - XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0); - - list_splice_init(list, &next_lip->li_ail); + list_splice(list, &lip->li_ail); } /* @@ -645,6 +711,7 @@ xfs_trans_unlocked_item( void xfs_trans_ail_update_bulk( struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, struct xfs_log_item **log_items, int nr_items, xfs_lsn_t lsn) __releases(ailp->xa_lock) @@ -674,7 +741,7 @@ xfs_trans_ail_update_bulk( list_add(&lip->li_ail, &tmp); } - xfs_ail_splice(ailp, &tmp, lsn); + xfs_ail_splice(ailp, cur, &tmp, lsn); if (!mlip_changed) { spin_unlock(&ailp->xa_lock); diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 6b164e9e9a1f..c0cb40890329 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -82,6 +82,7 @@ struct xfs_ail { extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, struct xfs_log_item **log_items, int nr_items, xfs_lsn_t lsn) __releases(ailp->xa_lock); static inline void @@ -90,7 +91,7 @@ xfs_trans_ail_update( struct xfs_log_item *lip, xfs_lsn_t lsn) __releases(ailp->xa_lock) { - xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn); + xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn); } void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp, @@ -111,10 +112,13 @@ xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp); void xfs_trans_unlocked_item(struct xfs_ail *, xfs_log_item_t *); -struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp, +struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp, struct xfs_ail_cursor *cur, xfs_lsn_t lsn); -struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp, +struct xfs_log_item * xfs_trans_ail_cursor_last(struct xfs_ail *ailp, + struct xfs_ail_cursor *cur, + xfs_lsn_t lsn); +struct xfs_log_item * 
xfs_trans_ail_cursor_next(struct xfs_ail *ailp, struct xfs_ail_cursor *cur); void xfs_trans_ail_cursor_done(struct xfs_ail *ailp, struct xfs_ail_cursor *cur); -- cgit v1.2.3 From 16b5902943c793e632cf8c9526619f59893bdc29 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 18 Jul 2011 03:40:17 +0000 Subject: xfs: remove confusing ail cursor wrapper xfs_trans_ail_cursor_set() doesn't set the cursor to the current log item, it sets it to the next item. There is already a function for doing this - xfs_trans_ail_cursor_next() - and the _set function is simply a two line wrapper. Remove it and open code the setting of the cursor in the two locations that call it to remove the confusion. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_trans_ail.c | 50 +++++++++++++++++++------------------------------- 1 file changed, 19 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 9a69dc06ea86..7908c798a02b 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -188,20 +188,6 @@ xfs_trans_ail_cursor_init( ailp->xa_cursors.next = cur; } -/* - * Set the cursor to the next item, because when we look - * up the cursor the current item may have been freed. - */ -STATIC void -xfs_trans_ail_cursor_set( - struct xfs_ail *ailp, - struct xfs_ail_cursor *cur, - struct xfs_log_item *lip) -{ - if (lip) - cur->item = xfs_ail_next(ailp, lip); -} - /* * Get the next item in the traversal and advance the cursor. * If the cursor was invalidated (inidicated by a lip of 1), @@ -216,7 +202,8 @@ xfs_trans_ail_cursor_next( if ((__psint_t)lip & 1) lip = xfs_ail_min(ailp); - xfs_trans_ail_cursor_set(ailp, cur, lip); + if (lip) + cur->item = xfs_ail_next(ailp, lip); return lip; } @@ -272,9 +259,10 @@ xfs_trans_ail_cursor_clear( } /* - * Initialise the cursor to the first item in the AIL with the given @lsn. - * This searches the list from lowest LSN to highest. Pass a @lsn of zero - * to initialise the cursor to the first item in the AIL. + * Find the first item in the AIL with the given @lsn by searching in ascending + * LSN order and initialise the cursor to point to the next item for a + * ascending traversal. Pass a @lsn of zero to initialise the cursor to the + * first item in the AIL. Returns NULL if the list is empty. */ xfs_log_item_t * xfs_trans_ail_cursor_first( @@ -285,26 +273,24 @@ xfs_trans_ail_cursor_first( xfs_log_item_t *lip; xfs_trans_ail_cursor_init(ailp, cur); - lip = xfs_ail_min(ailp); - if (lsn == 0) + + if (lsn == 0) { + lip = xfs_ail_min(ailp); goto out; + } list_for_each_entry(lip, &ailp->xa_ail, li_ail) { if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0) goto out; } - lip = NULL; + return NULL; + out: - xfs_trans_ail_cursor_set(ailp, cur, lip); + if (lip) + cur->item = xfs_ail_next(ailp, lip); return lip; } -/* - * Initialise the cursor to the last item in the AIL with the given @lsn. - * This searches the list from highest LSN to lowest. If there is no item with - * the value of @lsn, then it sets the cursor to the last item with an LSN lower - * than @lsn. - */ static struct xfs_log_item * __xfs_trans_ail_cursor_last( struct xfs_ail *ailp, @@ -320,8 +306,10 @@ __xfs_trans_ail_cursor_last( } /* - * Initialise the cursor to the last item in the AIL with the given @lsn. - * This searches the list from highest LSN to lowest. + * Find the last item in the AIL with the given @lsn by searching in descending + * LSN order and initialise the cursor to point to that item. 
If there is no + * item with the value of @lsn, then it sets the cursor to the last item with an + * LSN lower than @lsn. Returns NULL if the list is empty. */ struct xfs_log_item * xfs_trans_ail_cursor_last( @@ -335,7 +323,7 @@ xfs_trans_ail_cursor_last( } /* - * splice the log item list into the AIL at the given LSN. We splice to the + * Splice the log item list into the AIL at the given LSN. We splice to the * tail of the given LSN to maintain insert order for push traversals. The * cursor is optional, allowing repeated updates to the same LSN to avoid * repeated traversals. -- cgit v1.2.3 From af3e40228fb2dbc18d94fbd5103f07344a720ae7 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 18 Jul 2011 03:40:18 +0000 Subject: xfs: convert AIL cursors to use struct list_head The list of active AIL cursors uses a roll-your-own linked list with special casing for the AIL push cursor. Simplify this code by replacing the list with standard struct list_head lists, and use a separate list_head to track the active cursors. This allows us to treat the AIL push cursor as a generic cursor rather than as a special case, further simplifying the code. Further, fix the duplicate push cursor initialisation that the special case handling was hiding, and clean up all the comments around the active cursor list handling. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_trans_ail.c | 79 ++++++++++++++++--------------------------------- fs/xfs/xfs_trans_priv.h | 4 +-- 2 files changed, 28 insertions(+), 55 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 7908c798a02b..43233e92f0f6 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -163,17 +163,11 @@ xfs_ail_max_lsn( } /* - * AIL traversal cursor initialisation. - * - * The cursor keeps track of where our current traversal is up - * to by tracking the next ƣtem in the list for us. However, for - * this to be safe, removing an object from the AIL needs to invalidate - * any cursor that points to it. hence the traversal cursor needs to - * be linked to the struct xfs_ail so that deletion can search all the - * active cursors for invalidation. - * - * We don't link the push cursor because it is embedded in the struct - * xfs_ail and hence easily findable. + * The cursor keeps track of where our current traversal is up to by tracking + * the next item in the list for us. However, for this to be safe, removing an + * object from the AIL needs to invalidate any cursor that points to it. hence + * the traversal cursor needs to be linked to the struct xfs_ail so that + * deletion can search all the active cursors for invalidation. */ STATIC void xfs_trans_ail_cursor_init( @@ -181,17 +175,12 @@ xfs_trans_ail_cursor_init( struct xfs_ail_cursor *cur) { cur->item = NULL; - if (cur == &ailp->xa_cursors) - return; - - cur->next = ailp->xa_cursors.next; - ailp->xa_cursors.next = cur; + list_add_tail(&cur->list, &ailp->xa_cursors); } /* - * Get the next item in the traversal and advance the cursor. - * If the cursor was invalidated (inidicated by a lip of 1), - * restart the traversal. + * Get the next item in the traversal and advance the cursor. If the cursor + * was invalidated (indicated by a lip of 1), restart the traversal. */ struct xfs_log_item * xfs_trans_ail_cursor_next( @@ -208,40 +197,25 @@ xfs_trans_ail_cursor_next( } /* - * Now that the traversal is complete, we need to remove the cursor - * from the list of traversing cursors. 
Avoid removing the embedded - * push cursor, but use the fact it is always present to make the - * list deletion simple. + * When the traversal is complete, we need to remove the cursor from the list + * of traversing cursors. */ void xfs_trans_ail_cursor_done( struct xfs_ail *ailp, - struct xfs_ail_cursor *done) + struct xfs_ail_cursor *cur) { - struct xfs_ail_cursor *prev = NULL; - struct xfs_ail_cursor *cur; - - done->item = NULL; - if (done == &ailp->xa_cursors) - return; - prev = &ailp->xa_cursors; - for (cur = prev->next; cur; prev = cur, cur = prev->next) { - if (cur == done) { - prev->next = cur->next; - break; - } - } - ASSERT(cur); + cur->item = NULL; + list_del_init(&cur->list); } /* - * Invalidate any cursor that is pointing to this item. This is - * called when an item is removed from the AIL. Any cursor pointing - * to this object is now invalid and the traversal needs to be - * terminated so it doesn't reference a freed object. We set the - * cursor item to a value of 1 so we can distinguish between an - * invalidation and the end of the list when getting the next item - * from the cursor. + * Invalidate any cursor that is pointing to this item. This is called when an + * item is removed from the AIL. Any cursor pointing to this object is now + * invalid and the traversal needs to be terminated so it doesn't reference a + * freed object. We set the low bit of the cursor item pointer so we can + * distinguish between an invalidation and the end of the list when getting the + * next item from the cursor. */ STATIC void xfs_trans_ail_cursor_clear( @@ -250,8 +224,7 @@ xfs_trans_ail_cursor_clear( { struct xfs_ail_cursor *cur; - /* need to search all cursors */ - for (cur = &ailp->xa_cursors; cur; cur = cur->next) { + list_for_each_entry(cur, &ailp->xa_cursors, list) { if (cur->item == lip) cur->item = (struct xfs_log_item *) ((__psint_t)cur->item | 1); @@ -405,7 +378,7 @@ xfs_ail_worker( struct xfs_ail *ailp = container_of(to_delayed_work(work), struct xfs_ail, xa_work); xfs_mount_t *mp = ailp->xa_mount; - struct xfs_ail_cursor *cur = &ailp->xa_cursors; + struct xfs_ail_cursor cur; xfs_log_item_t *lip; xfs_lsn_t lsn; xfs_lsn_t target; @@ -417,13 +390,12 @@ xfs_ail_worker( spin_lock(&ailp->xa_lock); target = ailp->xa_target; - xfs_trans_ail_cursor_init(ailp, cur); - lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn); + lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn); if (!lip || XFS_FORCED_SHUTDOWN(mp)) { /* * AIL is empty or our push has reached the end. */ - xfs_trans_ail_cursor_done(ailp, cur); + xfs_trans_ail_cursor_done(ailp, &cur); spin_unlock(&ailp->xa_lock); goto out_done; } @@ -511,12 +483,12 @@ xfs_ail_worker( if (stuck > 100) break; - lip = xfs_trans_ail_cursor_next(ailp, cur); + lip = xfs_trans_ail_cursor_next(ailp, &cur); if (lip == NULL) break; lsn = lip->li_lsn; } - xfs_trans_ail_cursor_done(ailp, cur); + xfs_trans_ail_cursor_done(ailp, &cur); spin_unlock(&ailp->xa_lock); if (flush_log) { @@ -848,6 +820,7 @@ xfs_trans_ail_init( ailp->xa_mount = mp; INIT_LIST_HEAD(&ailp->xa_ail); + INIT_LIST_HEAD(&ailp->xa_cursors); spin_lock_init(&ailp->xa_lock); INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); mp->m_ail = ailp; diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index c0cb40890329..212946b97239 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -53,7 +53,7 @@ void xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv, * of the list to trigger traversal restarts. 
*/ struct xfs_ail_cursor { - struct xfs_ail_cursor *next; + struct list_head list; struct xfs_log_item *item; }; @@ -66,7 +66,7 @@ struct xfs_ail { struct xfs_mount *xa_mount; struct list_head xa_ail; xfs_lsn_t xa_target; - struct xfs_ail_cursor xa_cursors; + struct list_head xa_cursors; spinlock_t xa_lock; struct delayed_work xa_work; xfs_lsn_t xa_last_pushed_lsn; -- cgit v1.2.3 From 55fb25d5b39320242b41af894921f5cef0c7e293 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 18 Jul 2011 03:40:19 +0000 Subject: xfs: add size update tracepoint to IO completion For improving insight into IO completion behaviour. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 1 + fs/xfs/linux-2.6/xfs_trace.h | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 3447733fa897..26384fe3f26d 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -181,6 +181,7 @@ xfs_setfilesize( isize = xfs_ioend_new_eof(ioend); if (isize) { + trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); ip->i_d.di_size = isize; xfs_mark_inode_dirty(ip); } diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 3bdfcb9f52b7..fda0708ef2ea 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -998,7 +998,8 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class, TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) - __field(loff_t, size) + __field(loff_t, isize) + __field(loff_t, disize) __field(loff_t, new_size) __field(loff_t, offset) __field(size_t, count) @@ -1006,16 +1007,18 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class, TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; - __entry->size = ip->i_d.di_size; + __entry->isize = ip->i_size; + __entry->disize = ip->i_d.di_size; __entry->new_size = ip->i_new_size; __entry->offset = offset; __entry->count = count; ), - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " + TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx " "offset 0x%llx count %zd", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, - __entry->size, + __entry->isize, + __entry->disize, __entry->new_size, __entry->offset, __entry->count) @@ -1028,6 +1031,7 @@ DEFINE_EVENT(xfs_simple_io_class, name, \ DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound); +DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize); DECLARE_EVENT_CLASS(xfs_itrunc_class, TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), -- cgit v1.2.3 From b0d40c92adafde7c2d81203ce7c1c69275f41140 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:42 +1000 Subject: superblock: introduce per-sb cache shrinker infrastructure With context based shrinkers, we can implement a per-superblock shrinker that shrinks the caches attached to the superblock. We currently have global shrinkers for the inode and dentry caches that split up into per-superblock operations via a coarse proportioning method that does not batch very well. The global shrinkers also have a dependency - dentries pin inodes - so we have to be very careful about how we register the global shrinkers so that the implicit call order is always correct. 
With a per-sb shrinker callout, we can encode this dependency directly into the per-sb shrinker, hence avoiding the need for strictly ordering shrinker registrations. We also have no need for any proportioning code for the shrinker subsystem already provides this functionality across all shrinkers. Allowing the shrinker to operate on a single superblock at a time means that we do less superblock list traversals and locking and reclaim should batch more effectively. This should result in less CPU overhead for reclaim and potentially faster reclaim of items from each filesystem. Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- fs/dcache.c | 121 +++++------------------------------------------ fs/inode.c | 117 ++++----------------------------------------- fs/super.c | 51 +++++++++++++++++++- include/linux/fs.h | 7 +++ include/linux/mm.h | 40 +--------------- include/linux/shrinker.h | 42 ++++++++++++++++ 6 files changed, 121 insertions(+), 257 deletions(-) create mode 100644 include/linux/shrinker.h (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 41e2085d430b..2762804a140d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -743,13 +743,11 @@ static void shrink_dentry_list(struct list_head *list) * * If flags contains DCACHE_REFERENCED reference dentries will not be pruned. */ -static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags) +static void __shrink_dcache_sb(struct super_block *sb, int count, int flags) { - /* called from prune_dcache() and shrink_dcache_parent() */ struct dentry *dentry; LIST_HEAD(referenced); LIST_HEAD(tmp); - int cnt = *count; relock: spin_lock(&dcache_lru_lock); @@ -777,7 +775,7 @@ relock: } else { list_move_tail(&dentry->d_lru, &tmp); spin_unlock(&dentry->d_lock); - if (!--cnt) + if (!--count) break; } cond_resched_lock(&dcache_lru_lock); @@ -787,83 +785,22 @@ relock: spin_unlock(&dcache_lru_lock); shrink_dentry_list(&tmp); - - *count = cnt; } /** - * prune_dcache - shrink the dcache - * @count: number of entries to try to free + * prune_dcache_sb - shrink the dcache + * @nr_to_scan: number of entries to try to free * - * Shrink the dcache. This is done when we need more memory, or simply when we - * need to unmount something (at which point we need to unuse all dentries). + * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is + * done when we need more memory an called from the superblock shrinker + * function. * - * This function may fail to free any resources if all the dentries are in use. + * This function may fail to free any resources if all the dentries are in + * use. */ -static void prune_dcache(int count) +void prune_dcache_sb(struct super_block *sb, int nr_to_scan) { - struct super_block *sb, *p = NULL; - int w_count; - int unused = dentry_stat.nr_unused; - int prune_ratio; - int pruned; - - if (unused == 0 || count == 0) - return; - if (count >= unused) - prune_ratio = 1; - else - prune_ratio = unused / count; - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - if (list_empty(&sb->s_instances)) - continue; - if (sb->s_nr_dentry_unused == 0) - continue; - sb->s_count++; - /* Now, we reclaim unused dentrins with fairness. - * We reclaim them same percentage from each superblock. 
- * We calculate number of dentries to scan on this sb - * as follows, but the implementation is arranged to avoid - * overflows: - * number of dentries to scan on this sb = - * count * (number of dentries on this sb / - * number of dentries in the machine) - */ - spin_unlock(&sb_lock); - if (prune_ratio != 1) - w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1; - else - w_count = sb->s_nr_dentry_unused; - pruned = w_count; - /* - * We need to be sure this filesystem isn't being unmounted, - * otherwise we could race with generic_shutdown_super(), and - * end up holding a reference to an inode while the filesystem - * is unmounted. So we try to get s_umount, and make sure - * s_root isn't NULL. - */ - if (down_read_trylock(&sb->s_umount)) { - if ((sb->s_root != NULL) && - (!list_empty(&sb->s_dentry_lru))) { - __shrink_dcache_sb(sb, &w_count, - DCACHE_REFERENCED); - pruned -= w_count; - } - up_read(&sb->s_umount); - } - spin_lock(&sb_lock); - if (p) - __put_super(p); - count -= pruned; - p = sb; - /* more work left to do? */ - if (count <= 0) - break; - } - if (p) - __put_super(p); - spin_unlock(&sb_lock); + __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED); } /** @@ -1238,42 +1175,10 @@ void shrink_dcache_parent(struct dentry * parent) int found; while ((found = select_parent(parent)) != 0) - __shrink_dcache_sb(sb, &found, 0); + __shrink_dcache_sb(sb, found, 0); } EXPORT_SYMBOL(shrink_dcache_parent); -/* - * Scan `sc->nr_slab_to_reclaim' dentries and return the number which remain. - * - * We need to avoid reentering the filesystem if the caller is performing a - * GFP_NOFS allocation attempt. One example deadlock is: - * - * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache-> - * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode-> - * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK. - * - * In this case we return -1 to tell the caller that we baled. - */ -static int shrink_dcache_memory(struct shrinker *shrink, - struct shrink_control *sc) -{ - int nr = sc->nr_to_scan; - gfp_t gfp_mask = sc->gfp_mask; - - if (nr) { - if (!(gfp_mask & __GFP_FS)) - return -1; - prune_dcache(nr); - } - - return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure; -} - -static struct shrinker dcache_shrinker = { - .shrink = shrink_dcache_memory, - .seeks = DEFAULT_SEEKS, -}; - /** * __d_alloc - allocate a dcache entry * @sb: filesystem it will belong to @@ -3083,8 +2988,6 @@ static void __init dcache_init(void) */ dentry_cache = KMEM_CACHE(dentry, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD); - - register_shrinker(&dcache_shrinker); /* Hash may have been set up in dcache_init_early */ if (!hashdist) diff --git a/fs/inode.c b/fs/inode.c index 0450e25aeda0..1fdbb64a952f 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -73,7 +73,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock); * * We don't actually need it to protect anything in the umount path, * but only need to cycle through it to make sure any inode that - * prune_icache took off the LRU list has been fully torn down by the + * prune_icache_sb took off the LRU list has been fully torn down by the * time we are past evict_inodes. */ static DECLARE_RWSEM(iprune_sem); @@ -544,7 +544,7 @@ void evict_inodes(struct super_block *sb) dispose_list(&dispose); /* - * Cycle through iprune_sem to make sure any inode that prune_icache + * Cycle through iprune_sem to make sure any inode that prune_icache_sb * moved off the list before we took the lock has been fully torn * down. 
*/ @@ -612,9 +612,10 @@ static int can_unuse(struct inode *inode) } /* - * Scan `goal' inodes on the unused list for freeable ones. They are moved to a - * temporary list and then are freed outside sb->s_inode_lru_lock by - * dispose_list(). + * Walk the superblock inode LRU for freeable inodes and attempt to free them. + * This is called from the superblock shrinker function with a number of inodes + * to trim from the LRU. Inodes to be freed are moved to a temporary list and + * then are freed outside inode_lock by dispose_list(). * * Any inodes which are pinned purely because of attached pagecache have their * pagecache removed. If the inode has metadata buffers attached to @@ -628,14 +629,15 @@ static int can_unuse(struct inode *inode) * LRU does not have strict ordering. Hence we don't want to reclaim inodes * with this flag set because they are the inodes that are out of order. */ -static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan) +void prune_icache_sb(struct super_block *sb, int nr_to_scan) { LIST_HEAD(freeable); int nr_scanned; unsigned long reap = 0; + down_read(&iprune_sem); spin_lock(&sb->s_inode_lru_lock); - for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) { + for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) { struct inode *inode; if (list_empty(&sb->s_inode_lru)) @@ -707,111 +709,11 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan) else __count_vm_events(PGINODESTEAL, reap); spin_unlock(&sb->s_inode_lru_lock); - *nr_to_scan = nr_scanned; dispose_list(&freeable); -} - -static void prune_icache(int count) -{ - struct super_block *sb, *p = NULL; - int w_count; - int unused = inodes_stat.nr_unused; - int prune_ratio; - int pruned; - - if (unused == 0 || count == 0) - return; - down_read(&iprune_sem); - if (count >= unused) - prune_ratio = 1; - else - prune_ratio = unused / count; - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - if (list_empty(&sb->s_instances)) - continue; - if (sb->s_nr_inodes_unused == 0) - continue; - sb->s_count++; - /* Now, we reclaim unused dentrins with fairness. - * We reclaim them same percentage from each superblock. - * We calculate number of dentries to scan on this sb - * as follows, but the implementation is arranged to avoid - * overflows: - * number of dentries to scan on this sb = - * count * (number of dentries on this sb / - * number of dentries in the machine) - */ - spin_unlock(&sb_lock); - if (prune_ratio != 1) - w_count = (sb->s_nr_inodes_unused / prune_ratio) + 1; - else - w_count = sb->s_nr_inodes_unused; - pruned = w_count; - /* - * We need to be sure this filesystem isn't being unmounted, - * otherwise we could race with generic_shutdown_super(), and - * end up holding a reference to an inode while the filesystem - * is unmounted. So we try to get s_umount, and make sure - * s_root isn't NULL. - */ - if (down_read_trylock(&sb->s_umount)) { - if ((sb->s_root != NULL) && - (!list_empty(&sb->s_dentry_lru))) { - shrink_icache_sb(sb, &w_count); - pruned -= w_count; - } - up_read(&sb->s_umount); - } - spin_lock(&sb_lock); - if (p) - __put_super(p); - count -= pruned; - p = sb; - /* more work left to do? */ - if (count <= 0) - break; - } - if (p) - __put_super(p); - spin_unlock(&sb_lock); up_read(&iprune_sem); } -/* - * shrink_icache_memory() will attempt to reclaim some unused inodes. 
Here, - * "unused" means that no dentries are referring to the inodes: the files are - * not open and the dcache references to those inodes have already been - * reclaimed. - * - * This function is passed the number of inodes to scan, and it returns the - * total number of remaining possibly-reclaimable inodes. - */ -static int shrink_icache_memory(struct shrinker *shrink, - struct shrink_control *sc) -{ - int nr = sc->nr_to_scan; - gfp_t gfp_mask = sc->gfp_mask; - - if (nr) { - /* - * Nasty deadlock avoidance. We may hold various FS locks, - * and we don't want to recurse into the FS that called us - * in clear_inode() and friends.. - */ - if (!(gfp_mask & __GFP_FS)) - return -1; - prune_icache(nr); - } - return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure; -} - -static struct shrinker icache_shrinker = { - .shrink = shrink_icache_memory, - .seeks = DEFAULT_SEEKS, -}; - static void __wait_on_freeing_inode(struct inode *inode); /* * Called with the inode lock held. @@ -1691,7 +1593,6 @@ void __init inode_init(void) (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC| SLAB_MEM_SPREAD), init_once); - register_shrinker(&icache_shrinker); /* Hash may have been set up in inode_init_early */ if (!hashdist) diff --git a/fs/super.c b/fs/super.c index e63c754447ce..37a75410079e 100644 --- a/fs/super.c +++ b/fs/super.c @@ -38,6 +38,48 @@ LIST_HEAD(super_blocks); DEFINE_SPINLOCK(sb_lock); +/* + * One thing we have to be careful of with a per-sb shrinker is that we don't + * drop the last active reference to the superblock from within the shrinker. + * If that happens we could trigger unregistering the shrinker from within the + * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we + * take a passive reference to the superblock to avoid this from occurring. + */ +static int prune_super(struct shrinker *shrink, struct shrink_control *sc) +{ + struct super_block *sb; + int count; + + sb = container_of(shrink, struct super_block, s_shrink); + + /* + * Deadlock avoidance. We may hold various FS locks, and we don't want + * to recurse into the FS that called us in clear_inode() and friends.. + */ + if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS)) + return -1; + + if (!grab_super_passive(sb)) + return -1; + + if (sc->nr_to_scan) { + /* proportion the scan between the two caches */ + int total; + + total = sb->s_nr_dentry_unused + sb->s_nr_inodes_unused + 1; + count = (sc->nr_to_scan * sb->s_nr_dentry_unused) / total; + + /* prune dcache first as icache is pinned by it */ + prune_dcache_sb(sb, count); + prune_icache_sb(sb, sc->nr_to_scan - count); + } + + count = ((sb->s_nr_dentry_unused + sb->s_nr_inodes_unused) / 100) + * sysctl_vfs_cache_pressure; + drop_super(sb); + return count; +} + /** * alloc_super - create new superblock * @type: filesystem type superblock should belong to @@ -116,6 +158,9 @@ static struct super_block *alloc_super(struct file_system_type *type) s->s_op = &default_op; s->s_time_gran = 1000000000; s->cleancache_poolid = -1; + + s->s_shrink.seeks = DEFAULT_SEEKS; + s->s_shrink.shrink = prune_super; } out: return s; @@ -183,6 +228,10 @@ void deactivate_locked_super(struct super_block *s) if (atomic_dec_and_test(&s->s_active)) { cleancache_flush_fs(s); fs->kill_sb(s); + + /* caches are now gone, we can safely kill the shrinker now */ + unregister_shrinker(&s->s_shrink); + /* * We need to call rcu_barrier so all the delayed rcu free * inodes are flushed before we release the fs module. 
@@ -311,7 +360,6 @@ void generic_shutdown_super(struct super_block *sb) { const struct super_operations *sop = sb->s_op; - if (sb->s_root) { shrink_dcache_for_umount(sb); sync_filesystem(sb); @@ -399,6 +447,7 @@ retry: list_add(&s->s_instances, &type->fs_supers); spin_unlock(&sb_lock); get_filesystem(type); + register_shrinker(&s->s_shrink); return s; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 460d2cc21ec6..d7f35e90b84a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -393,6 +393,7 @@ struct inodes_stat_t { #include #include #include +#include #include #include @@ -1444,8 +1445,14 @@ struct super_block { * Saved pool identifier for cleancache (-1 means none) */ int cleancache_poolid; + + struct shrinker s_shrink; /* per-sb shrinker handle */ }; +/* superblock cache pruning functions */ +extern void prune_icache_sb(struct super_block *sb, int nr_to_scan); +extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan); + extern struct timespec current_fs_time(struct super_block *sb); /* diff --git a/include/linux/mm.h b/include/linux/mm.h index 9b9777ac726d..e3a1a9eec0b1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -15,6 +15,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -1121,45 +1122,6 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm) } #endif -/* - * This struct is used to pass information from page reclaim to the shrinkers. - * We consolidate the values for easier extention later. - */ -struct shrink_control { - gfp_t gfp_mask; - - /* How many slab objects shrinker() should scan and try to reclaim */ - unsigned long nr_to_scan; -}; - -/* - * A callback you can register to apply pressure to ageable caches. - * - * 'sc' is passed shrink_control which includes a count 'nr_to_scan' - * and a 'gfpmask'. It should look through the least-recently-used - * 'nr_to_scan' entries and attempt to free them up. It should return - * the number of objects which remain in the cache. If it returns -1, it means - * it cannot do any scanning at this time (eg. there is a risk of deadlock). - * - * The 'gfpmask' refers to the allocation we are currently trying to - * fulfil. - * - * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is - * querying the cache size, so a fastpath for that case is appropriate. - */ -struct shrinker { - int (*shrink)(struct shrinker *, struct shrink_control *sc); - int seeks; /* seeks to recreate an obj */ - long batch; /* reclaim batch size, 0 = default */ - - /* These are for internal use */ - struct list_head list; - long nr; /* objs pending delete */ -}; -#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ -extern void register_shrinker(struct shrinker *); -extern void unregister_shrinker(struct shrinker *); - int vma_wants_writenotify(struct vm_area_struct *vma); extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h new file mode 100644 index 000000000000..790651b4e5ba --- /dev/null +++ b/include/linux/shrinker.h @@ -0,0 +1,42 @@ +#ifndef _LINUX_SHRINKER_H +#define _LINUX_SHRINKER_H + +/* + * This struct is used to pass information from page reclaim to the shrinkers. + * We consolidate the values for easier extention later. + */ +struct shrink_control { + gfp_t gfp_mask; + + /* How many slab objects shrinker() should scan and try to reclaim */ + unsigned long nr_to_scan; +}; + +/* + * A callback you can register to apply pressure to ageable caches. 
+ * + * 'sc' is passed shrink_control which includes a count 'nr_to_scan' + * and a 'gfpmask'. It should look through the least-recently-used + * 'nr_to_scan' entries and attempt to free them up. It should return + * the number of objects which remain in the cache. If it returns -1, it means + * it cannot do any scanning at this time (eg. there is a risk of deadlock). + * + * The 'gfpmask' refers to the allocation we are currently trying to + * fulfil. + * + * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is + * querying the cache size, so a fastpath for that case is appropriate. + */ +struct shrinker { + int (*shrink)(struct shrinker *, struct shrink_control *sc); + int seeks; /* seeks to recreate an obj */ + long batch; /* reclaim batch size, 0 = default */ + + /* These are for internal use */ + struct list_head list; + long nr; /* objs pending delete */ +}; +#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */ +extern void register_shrinker(struct shrinker *); +extern void unregister_shrinker(struct shrinker *); +#endif -- cgit v1.2.3 From 4f8c19fdf3f97402b68f058b1c72a6c7166c9e59 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:43 +1000 Subject: inode: remove iprune_sem Now that we have per-sb shrinkers with a lifecycle that is a subset of the superblock lifecycle and can reliably detect a filesystem being unmounted, there is not longer any race condition for the iprune_sem to protect against. Hence we can remove it. Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- fs/inode.c | 21 --------------------- 1 file changed, 21 deletions(-) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 1fdbb64a952f..cf81baf1898a 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -67,17 +67,6 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock); __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock); -/* - * iprune_sem provides exclusion between the icache shrinking and the - * umount path. - * - * We don't actually need it to protect anything in the umount path, - * but only need to cycle through it to make sure any inode that - * prune_icache_sb took off the LRU list has been fully torn down by the - * time we are past evict_inodes. - */ -static DECLARE_RWSEM(iprune_sem); - /* * Empty aops. Can be used for the cases where the user does not * define any of the address_space operations. @@ -542,14 +531,6 @@ void evict_inodes(struct super_block *sb) spin_unlock(&inode_sb_list_lock); dispose_list(&dispose); - - /* - * Cycle through iprune_sem to make sure any inode that prune_icache_sb - * moved off the list before we took the lock has been fully torn - * down. 
- */ - down_write(&iprune_sem); - up_write(&iprune_sem); } /** @@ -635,7 +616,6 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan) int nr_scanned; unsigned long reap = 0; - down_read(&iprune_sem); spin_lock(&sb->s_inode_lru_lock); for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) { struct inode *inode; @@ -711,7 +691,6 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan) spin_unlock(&sb->s_inode_lru_lock); dispose_list(&freeable); - up_read(&iprune_sem); } static void __wait_on_freeing_inode(struct inode *inode); -- cgit v1.2.3 From 0e1fdafd93980eac62e778798549ce0f6073905c Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:44 +1000 Subject: superblock: add filesystem shrinker operations Now we have a per-superblock shrinker implementation, we can add a filesystem specific callout to it to allow filesystem internal caches to be shrunk by the superblock shrinker. Rather than perpetuate the multipurpose shrinker callback API (i.e. nr_to_scan == 0 meaning "tell me how many objects freeable in the cache), two operations will be added. The first will return the number of objects that are freeable, the second is the actual shrinker call. Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- Documentation/filesystems/vfs.txt | 16 ++++++++++++++ fs/super.c | 45 ++++++++++++++++++++++++++++----------- include/linux/fs.h | 2 ++ 3 files changed, 51 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index d56151fe77dc..fd24f34f120f 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -229,6 +229,8 @@ struct super_operations { ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); + int (*nr_cached_objects)(struct super_block *); + void (*free_cached_objects)(struct super_block *, int); }; All methods are called without any locks being held, unless otherwise @@ -301,6 +303,20 @@ or bottom half). quota_write: called by the VFS to write to filesystem quota file. + nr_cached_objects: called by the sb cache shrinking function for the + filesystem to return the number of freeable cached objects it contains. + Optional. + + free_cache_objects: called by the sb cache shrinking function for the + filesystem to scan the number of objects indicated to try to free them. + Optional, but any filesystem implementing this method needs to also + implement ->nr_cached_objects for it to be called correctly. + + We can't do anything with any errors that the filesystem might + encountered, hence the void return type. This will never be called if + the VM is trying to reclaim under GFP_NOFS conditions, hence this + method does not need to handle that situation itself. + Whoever sets up the inode is responsible for filling in the "i_op" field. This is a pointer to a "struct inode_operations" which describes the methods that can be performed on individual inodes. 
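
As a concrete illustration of the two callouts documented above, a filesystem with a private reclaimable cache wires them up through its super_operations roughly as in the sketch below. This is only a sketch: the foofs_* names and the foofs_count_reclaimable()/foofs_reclaim() helpers are hypothetical and not part of this series; the real conversion of an existing filesystem (XFS) follows later in the series.

static int foofs_nr_cached_objects(struct super_block *sb)
{
	/* cheap count of private objects that could be freed right now */
	return foofs_count_reclaimable(sb->s_fs_info);
}

static void foofs_free_cached_objects(struct super_block *sb, int nr_to_scan)
{
	/* free up to nr_to_scan objects; errors cannot be reported back */
	foofs_reclaim(sb->s_fs_info, nr_to_scan);
}

static const struct super_operations foofs_super_operations = {
	/* ... the usual alloc_inode/destroy_inode/statfs/etc. ... */
	.nr_cached_objects	= foofs_nr_cached_objects,
	.free_cached_objects	= foofs_free_cached_objects,
};
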
diff --git a/fs/super.c b/fs/super.c index 37a75410079e..5101f0544960 100644 --- a/fs/super.c +++ b/fs/super.c @@ -48,7 +48,8 @@ DEFINE_SPINLOCK(sb_lock); static int prune_super(struct shrinker *shrink, struct shrink_control *sc) { struct super_block *sb; - int count; + int fs_objects = 0; + int total_objects; sb = container_of(shrink, struct super_block, s_shrink); @@ -62,22 +63,42 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc) if (!grab_super_passive(sb)) return -1; - if (sc->nr_to_scan) { - /* proportion the scan between the two caches */ - int total; + if (sb->s_op && sb->s_op->nr_cached_objects) + fs_objects = sb->s_op->nr_cached_objects(sb); + + total_objects = sb->s_nr_dentry_unused + + sb->s_nr_inodes_unused + fs_objects + 1; - total = sb->s_nr_dentry_unused + sb->s_nr_inodes_unused + 1; - count = (sc->nr_to_scan * sb->s_nr_dentry_unused) / total; + if (sc->nr_to_scan) { + int dentries; + int inodes; + + /* proportion the scan between the caches */ + dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) / + total_objects; + inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) / + total_objects; + if (fs_objects) + fs_objects = (sc->nr_to_scan * fs_objects) / + total_objects; + /* + * prune the dcache first as the icache is pinned by it, then + * prune the icache, followed by the filesystem specific caches + */ + prune_dcache_sb(sb, dentries); + prune_icache_sb(sb, inodes); - /* prune dcache first as icache is pinned by it */ - prune_dcache_sb(sb, count); - prune_icache_sb(sb, sc->nr_to_scan - count); + if (fs_objects && sb->s_op->free_cached_objects) { + sb->s_op->free_cached_objects(sb, fs_objects); + fs_objects = sb->s_op->nr_cached_objects(sb); + } + total_objects = sb->s_nr_dentry_unused + + sb->s_nr_inodes_unused + fs_objects; } - count = ((sb->s_nr_dentry_unused + sb->s_nr_inodes_unused) / 100) - * sysctl_vfs_cache_pressure; + total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure; drop_super(sb); - return count; + return total_objects; } /** diff --git a/include/linux/fs.h b/include/linux/fs.h index d7f35e90b84a..1393742bba9b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1655,6 +1655,8 @@ struct super_operations { ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); + int (*nr_cached_objects)(struct super_block *); + void (*free_cached_objects)(struct super_block *, int); }; /* -- cgit v1.2.3 From 8ab47664d51a69ea79fe70bb07ca80664f74f76b Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:45 +1000 Subject: vfs: increase shrinker batch size Now that the per-sb shrinker is responsible for shrinking 2 or more caches, increase the batch size to keep econmies of scale for shrinking each cache. Increase the shrinker batch size to 1024 objects. To allow for a large increase in batch size, add a conditional reschedule to prune_icache_sb() so that we don't hold the LRU spin lock for too long. This mirrors the behaviour of the __shrink_dcache_sb(), and allows us to increase the batch size without needing to worry about problems caused by long lock hold times. To ensure that filesystems using the per-sb shrinker callouts don't cause problems, document that the object freeing method must reschedule appropriately inside loops. 
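
To make the rescheduling requirement concrete, the freeing path a filesystem plugs into ->free_cached_objects is expected to look roughly like the sketch below (hypothetical foofs helpers). The cond_resched() per object, or per small group of objects, is what allows the VFS to hand the shrinker a 1024-object batch without risking long scheduler holdoffs.

static void foofs_free_cached_objects(struct super_block *sb, int nr_to_scan)
{
	while (nr_to_scan-- > 0) {
		struct foofs_object *obj;

		obj = foofs_grab_reclaimable(sb->s_fs_info);
		if (!obj)
			break;
		foofs_free_object(obj);

		/* required: never hold the CPU for a whole large batch */
		cond_resched();
	}
}
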
Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- Documentation/filesystems/vfs.txt | 6 ++++++ fs/super.c | 1 + 2 files changed, 7 insertions(+) (limited to 'fs') diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index fd24f34f120f..6bf85b78cfea 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -317,6 +317,12 @@ or bottom half). the VM is trying to reclaim under GFP_NOFS conditions, hence this method does not need to handle that situation itself. + Implementations must include conditional reschedule calls inside any + scanning loop that is done. This allows the VFS to determine + appropriate scan batch sizes without having to worry about whether + implementations will cause holdoff problems due to large scan batch + sizes. + Whoever sets up the inode is responsible for filling in the "i_op" field. This is a pointer to a "struct inode_operations" which describes the methods that can be performed on individual inodes. diff --git a/fs/super.c b/fs/super.c index 5101f0544960..7943f04cb3a9 100644 --- a/fs/super.c +++ b/fs/super.c @@ -182,6 +182,7 @@ static struct super_block *alloc_super(struct file_system_type *type) s->s_shrink.seeks = DEFAULT_SEEKS; s->s_shrink.shrink = prune_super; + s->s_shrink.batch = 1024; } out: return s; -- cgit v1.2.3 From 8daaa83145ef1f0a146680618328dbbd0fa76939 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 8 Jul 2011 14:14:46 +1000 Subject: xfs: make use of new shrinker callout for the inode cache Convert the inode reclaim shrinker to use the new per-sb shrinker operations. This allows much bigger reclaim batches to be used, and allows the XFS inode cache to be shrunk in proportion with the VFS dentry and inode caches. This avoids the problem of the VFS caches being shrunk significantly before the XFS inode cache is shrunk resulting in imbalances in the caches during reclaim. Signed-off-by: Dave Chinner Signed-off-by: Al Viro --- fs/xfs/linux-2.6/xfs_super.c | 26 ++++++++++------ fs/xfs/linux-2.6/xfs_sync.c | 71 +++++++++++++++++--------------------------- fs/xfs/linux-2.6/xfs_sync.h | 5 ++-- 3 files changed, 46 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index a1a881e68a9a..a9c6ccff7b48 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1025,11 +1025,6 @@ xfs_fs_put_super( { struct xfs_mount *mp = XFS_M(sb); - /* - * Unregister the memory shrinker before we tear down the mount - * structure so we don't have memory reclaim racing with us here. 
- */ - xfs_inode_shrinker_unregister(mp); xfs_syncd_stop(mp); /* @@ -1416,8 +1411,6 @@ xfs_fs_fill_super( if (error) goto out_filestream_unmount; - xfs_inode_shrinker_register(mp); - error = xfs_mountfs(mp); if (error) goto out_syncd_stop; @@ -1440,7 +1433,6 @@ xfs_fs_fill_super( return 0; out_syncd_stop: - xfs_inode_shrinker_unregister(mp); xfs_syncd_stop(mp); out_filestream_unmount: xfs_filestream_unmount(mp); @@ -1465,7 +1457,6 @@ xfs_fs_fill_super( } fail_unmount: - xfs_inode_shrinker_unregister(mp); xfs_syncd_stop(mp); /* @@ -1491,6 +1482,21 @@ xfs_fs_mount( return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super); } +static int +xfs_fs_nr_cached_objects( + struct super_block *sb) +{ + return xfs_reclaim_inodes_count(XFS_M(sb)); +} + +static void +xfs_fs_free_cached_objects( + struct super_block *sb, + int nr_to_scan) +{ + xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); +} + static const struct super_operations xfs_super_operations = { .alloc_inode = xfs_fs_alloc_inode, .destroy_inode = xfs_fs_destroy_inode, @@ -1504,6 +1510,8 @@ static const struct super_operations xfs_super_operations = { .statfs = xfs_fs_statfs, .remount_fs = xfs_fs_remount, .show_options = xfs_fs_show_options, + .nr_cached_objects = xfs_fs_nr_cached_objects, + .free_cached_objects = xfs_fs_free_cached_objects, }; static struct file_system_type xfs_fs_type = { diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index 8ecad5ff9f9b..9bd7e895a4e2 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -179,6 +179,8 @@ restart: if (error == EFSCORRUPTED) break; + cond_resched(); + } while (nr_found && !done); if (skipped) { @@ -986,6 +988,8 @@ restart: *nr_to_scan -= XFS_LOOKUP_BATCH; + cond_resched(); + } while (nr_found && !done && *nr_to_scan > 0); if (trylock && !done) @@ -1003,7 +1007,7 @@ restart: * ensure that when we get more reclaimers than AGs we block rather * than spin trying to execute reclaim. */ - if (trylock && skipped && *nr_to_scan > 0) { + if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { trylock = 0; goto restart; } @@ -1021,44 +1025,38 @@ xfs_reclaim_inodes( } /* - * Inode cache shrinker. + * Scan a certain number of inodes for reclaim. * * When called we make sure that there is a background (fast) inode reclaim in - * progress, while we will throttle the speed of reclaim via doiing synchronous + * progress, while we will throttle the speed of reclaim via doing synchronous * reclaim of inodes. That means if we come across dirty inodes, we wait for * them to be cleaned, which we hope will not be very long due to the * background walker having already kicked the IO off on those dirty inodes. 
*/ -static int -xfs_reclaim_inode_shrink( - struct shrinker *shrink, - struct shrink_control *sc) +void +xfs_reclaim_inodes_nr( + struct xfs_mount *mp, + int nr_to_scan) { - struct xfs_mount *mp; - struct xfs_perag *pag; - xfs_agnumber_t ag; - int reclaimable; - int nr_to_scan = sc->nr_to_scan; - gfp_t gfp_mask = sc->gfp_mask; - - mp = container_of(shrink, struct xfs_mount, m_inode_shrink); - if (nr_to_scan) { - /* kick background reclaimer and push the AIL */ - xfs_syncd_queue_reclaim(mp); - xfs_ail_push_all(mp->m_ail); + /* kick background reclaimer and push the AIL */ + xfs_syncd_queue_reclaim(mp); + xfs_ail_push_all(mp->m_ail); - if (!(gfp_mask & __GFP_FS)) - return -1; + xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); +} - xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, - &nr_to_scan); - /* terminate if we don't exhaust the scan */ - if (nr_to_scan > 0) - return -1; - } +/* + * Return the number of reclaimable inodes in the filesystem for + * the shrinker to determine how much to reclaim. + */ +int +xfs_reclaim_inodes_count( + struct xfs_mount *mp) +{ + struct xfs_perag *pag; + xfs_agnumber_t ag = 0; + int reclaimable = 0; - reclaimable = 0; - ag = 0; while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { ag = pag->pag_agno + 1; reclaimable += pag->pag_ici_reclaimable; @@ -1067,18 +1065,3 @@ xfs_reclaim_inode_shrink( return reclaimable; } -void -xfs_inode_shrinker_register( - struct xfs_mount *mp) -{ - mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink; - mp->m_inode_shrink.seeks = DEFAULT_SEEKS; - register_shrinker(&mp->m_inode_shrink); -} - -void -xfs_inode_shrinker_unregister( - struct xfs_mount *mp) -{ - unregister_shrinker(&mp->m_inode_shrink); -} diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index e3a6ad27415f..2e1568597764 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -43,6 +43,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); +int xfs_reclaim_inodes_count(struct xfs_mount *mp); +void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip); @@ -54,7 +56,4 @@ int xfs_inode_ag_iterator(struct xfs_mount *mp, int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags), int flags); -void xfs_inode_shrinker_register(struct xfs_mount *mp); -void xfs_inode_shrinker_unregister(struct xfs_mount *mp); - #endif -- cgit v1.2.3 From 8c5dc70aae29d2571c0f461d69b37e4e6e01ff4c Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Fri, 1 Jul 2011 13:44:51 +0200 Subject: VFS: Fixup kerneldoc for generic_permission() The flags parameter went away in d749519b444db985e40b897f73ce1898b11f997e Signed-off-by: Tobias Klauser Signed-off-by: Al Viro --- fs/namei.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 0d188d95f244..b7fad009bbf6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -213,7 +213,6 @@ other_perms: * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) - * @flags: IPERM_FLAG_ flags. * * Used to check for read/write/execute permissions on a file. 
* We use "fsuid" for this, letting us set arbitrary permissions -- cgit v1.2.3 From 582686915803e34adc8fdcd90bff7ca7f6a42221 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jun 2011 14:29:40 -0400 Subject: fat: remove i_alloc_sem abuse Add a new rw_semaphore to protect bmap against truncate. Previous i_alloc_sem was abused for this, but it's going away in this series. Note that we can't simply use i_mutex, given that the swapon code calls ->bmap under it. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/fat/fat.h | 1 + fs/fat/file.c | 2 ++ fs/fat/inode.c | 6 ++++-- 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/fat/fat.h b/fs/fat/fat.h index 8276cc282dec..a975b4147e91 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -109,6 +109,7 @@ struct msdos_inode_info { int i_attrs; /* unused attribute bits */ loff_t i_pos; /* on-disk position of directory entry or 0 */ struct hlist_node i_fat_hash; /* hash by i_location */ + struct rw_semaphore truncate_lock; /* protect bmap against truncate */ struct inode vfs_inode; }; diff --git a/fs/fat/file.c b/fs/fat/file.c index 7018e1d8902d..a4a3a3c06436 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -429,8 +429,10 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr) } if (attr->ia_valid & ATTR_SIZE) { + down_write(&MSDOS_I(inode)->truncate_lock); truncate_setsize(inode, attr->ia_size); fat_truncate_blocks(inode, attr->ia_size); + up_write(&MSDOS_I(inode)->truncate_lock); } setattr_copy(inode, attr); diff --git a/fs/fat/inode.c b/fs/fat/inode.c index cb8d8391ac0b..3decce46c38f 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -224,9 +224,9 @@ static sector_t _fat_bmap(struct address_space *mapping, sector_t block) sector_t blocknr; /* fat_get_cluster() assumes the requested blocknr isn't truncated. */ - down_read(&mapping->host->i_alloc_sem); + down_read(&MSDOS_I(mapping->host)->truncate_lock); blocknr = generic_block_bmap(mapping, block, fat_get_block); - up_read(&mapping->host->i_alloc_sem); + up_read(&MSDOS_I(mapping->host)->truncate_lock); return blocknr; } @@ -510,6 +510,8 @@ static struct inode *fat_alloc_inode(struct super_block *sb) ei = kmem_cache_alloc(fat_inode_cachep, GFP_NOFS); if (!ei) return NULL; + + init_rwsem(&ei->truncate_lock); return &ei->vfs_inode; } -- cgit v1.2.3 From 9ea7df534ed2a18157434a496a12cf073ca00c52 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 24 Jun 2011 14:29:41 -0400 Subject: ext4: Rewrite ext4_page_mkwrite() to use generic helpers Rewrite ext4_page_mkwrite() to use __block_page_mkwrite() helper. This removes the need of using i_alloc_sem to avoid races with truncate which seems to be the wrong locking order according to lock ordering documented in mm/rmap.c. Also calling ext4_da_write_begin() as used by the old code seems to be problematic because we can decide to flush delay-allocated blocks which will acquire s_umount semaphore - again creating unpleasant lock dependency if not directly a deadlock. Also add a check for frozen filesystem so that we don't busyloop in page fault when the filesystem is frozen. 
Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/ext4/inode.c | 106 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 55 insertions(+), 51 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e3126c051006..bd309764557f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5843,80 +5843,84 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct page *page = vmf->page; loff_t size; unsigned long len; - int ret = -EINVAL; - void *fsdata; + int ret; struct file *file = vma->vm_file; struct inode *inode = file->f_path.dentry->d_inode; struct address_space *mapping = inode->i_mapping; + handle_t *handle; + get_block_t *get_block; + int retries = 0; /* - * Get i_alloc_sem to stop truncates messing with the inode. We cannot - * get i_mutex because we are already holding mmap_sem. + * This check is racy but catches the common case. We rely on + * __block_page_mkwrite() to do a reliable check. */ - down_read(&inode->i_alloc_sem); - size = i_size_read(inode); - if (page->mapping != mapping || size <= page_offset(page) - || !PageUptodate(page)) { - /* page got truncated from under us? */ - goto out_unlock; + vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); + /* Delalloc case is easy... */ + if (test_opt(inode->i_sb, DELALLOC) && + !ext4_should_journal_data(inode) && + !ext4_nonda_switch(inode->i_sb)) { + do { + ret = __block_page_mkwrite(vma, vmf, + ext4_da_get_block_prep); + } while (ret == -ENOSPC && + ext4_should_retry_alloc(inode->i_sb, &retries)); + goto out_ret; } - ret = 0; lock_page(page); - wait_on_page_writeback(page); - if (PageMappedToDisk(page)) { - up_read(&inode->i_alloc_sem); - return VM_FAULT_LOCKED; + size = i_size_read(inode); + /* Page got truncated from under us? */ + if (page->mapping != mapping || page_offset(page) > size) { + unlock_page(page); + ret = VM_FAULT_NOPAGE; + goto out; } if (page->index == size >> PAGE_CACHE_SHIFT) len = size & ~PAGE_CACHE_MASK; else len = PAGE_CACHE_SIZE; - /* - * return if we have all the buffers mapped. This avoid - * the need to call write_begin/write_end which does a - * journal_start/journal_stop which can block and take - * long time + * Return if we have all the buffers mapped. This avoids the need to do + * journal_start/journal_stop which can block and take a long time */ if (page_has_buffers(page)) { if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, ext4_bh_unmapped)) { - up_read(&inode->i_alloc_sem); - return VM_FAULT_LOCKED; + /* Wait so that we don't change page under IO */ + wait_on_page_writeback(page); + ret = VM_FAULT_LOCKED; + goto out; } } unlock_page(page); - /* - * OK, we need to fill the hole... Do write_begin write_end - * to do block allocation/reservation.We are not holding - * inode.i__mutex here. That allow * parallel write_begin, - * write_end call. lock_page prevent this from happening - * on the same page though - */ - ret = mapping->a_ops->write_begin(file, mapping, page_offset(page), - len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata); - if (ret < 0) - goto out_unlock; - ret = mapping->a_ops->write_end(file, mapping, page_offset(page), - len, len, page, fsdata); - if (ret < 0) - goto out_unlock; - ret = 0; - - /* - * write_begin/end might have created a dirty page and someone - * could wander in and start the IO. Make sure that hasn't - * happened. - */ - lock_page(page); - wait_on_page_writeback(page); - up_read(&inode->i_alloc_sem); - return VM_FAULT_LOCKED; -out_unlock: - if (ret) + /* OK, we need to fill the hole... 
*/ + if (ext4_should_dioread_nolock(inode)) + get_block = ext4_get_block_write; + else + get_block = ext4_get_block; +retry_alloc: + handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode)); + if (IS_ERR(handle)) { ret = VM_FAULT_SIGBUS; - up_read(&inode->i_alloc_sem); + goto out; + } + ret = __block_page_mkwrite(vma, vmf, get_block); + if (!ret && ext4_should_journal_data(inode)) { + if (walk_page_buffers(handle, page_buffers(page), 0, + PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { + unlock_page(page); + ret = VM_FAULT_SIGBUS; + goto out; + } + ext4_set_inode_state(inode, EXT4_STATE_JDATA); + } + ext4_journal_stop(handle); + if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) + goto retry_alloc; +out_ret: + ret = block_page_mkwrite_return(ret); +out: return ret; } -- cgit v1.2.3 From f9b5570d7fdedff32a2e78102bfb54cd1b12b289 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jun 2011 14:29:42 -0400 Subject: fs: simplify handling of zero sized reads in __blockdev_direct_IO Reject zero sized reads as soon as we know our I/O length, and don't borther with locks or allocations that might have to be cleaned up otherwise. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/direct-io.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/direct-io.c b/fs/direct-io.c index ac5f164170e3..98ce3ac0d94b 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1200,6 +1200,10 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, } } + /* watch out for a 0 len io from a tricksy fs */ + if (rw == READ && end == offset) + return 0; + dio = kmalloc(sizeof(*dio), GFP_KERNEL); retval = -ENOMEM; if (!dio) @@ -1213,8 +1217,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, dio->flags = flags; if (dio->flags & DIO_LOCKING) { - /* watch out for a 0 len io from a tricksy fs */ - if (rw == READ && end > offset) { + if (rw == READ) { struct address_space *mapping = iocb->ki_filp->f_mapping; -- cgit v1.2.3 From bd5fe6c5eb9c548d7f07fe8f89a150bb6705e8e3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jun 2011 14:29:43 -0400 Subject: fs: kill i_alloc_sem i_alloc_sem is a rather special rw_semaphore. It's the last one that may be released by a non-owner, and it's write side is always mirrored by real exclusion. It's intended use it to wait for all pending direct I/O requests to finish before starting a truncate. Replace it with a hand-grown construct: - exclusion for truncates is already guaranteed by i_mutex, so it can simply fall way - the reader side is replaced by an i_dio_count member in struct inode that counts the number of pending direct I/O requests. Truncate can't proceed as long as it's non-zero - when i_dio_count reaches non-zero we wake up a pending truncate using wake_up_bit on a new bit in i_flags - new references to i_dio_count can't appear while we are waiting for it to read zero because the direct I/O count always needs i_mutex (or an equivalent like XFS's i_iolock) for starting a new operation. This scheme is much simpler, and saves the space of a spinlock_t and a struct list_head in struct inode (typically 160 bits on a non-debug 64-bit system). 
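
The resulting usage pattern on the truncate side is small; a filesystem's size-changing path ends up shaped like the sketch below (foofs_truncate_blocks() is a hypothetical helper), with i_mutex providing the exclusion that keeps new direct I/O from starting while we wait.

static int foofs_setsize(struct inode *inode, loff_t newsize)
{
	/* caller holds i_mutex, so no new direct I/O can be started */
	inode_dio_wait(inode);		/* sleep until i_dio_count hits zero */

	truncate_setsize(inode, newsize);
	foofs_truncate_blocks(inode, newsize);	/* hypothetical block freeing */
	return 0;
}
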
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/attr.c | 5 +---- fs/direct-io.c | 65 +++++++++++++++++++++++++++++++++++++++++------------ fs/inode.c | 3 +-- fs/ntfs/file.c | 3 +-- fs/ntfs/inode.c | 10 ++------- fs/ocfs2/aops.c | 7 +++--- fs/ocfs2/file.c | 15 ++++++------- fs/reiserfs/xattr.c | 3 +-- include/linux/fs.h | 11 +++++++-- mm/filemap.c | 3 --- mm/madvise.c | 2 +- mm/rmap.c | 1 - mm/truncate.c | 3 +-- 13 files changed, 78 insertions(+), 53 deletions(-) (limited to 'fs') diff --git a/fs/attr.c b/fs/attr.c index caf2aa521e2b..f177ac86fa48 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -233,16 +233,13 @@ int notify_change(struct dentry * dentry, struct iattr * attr) return error; if (ia_valid & ATTR_SIZE) - down_write(&dentry->d_inode->i_alloc_sem); + inode_dio_wait(inode); if (inode->i_op->setattr) error = inode->i_op->setattr(dentry, attr); else error = simple_setattr(dentry, attr); - if (ia_valid & ATTR_SIZE) - up_write(&dentry->d_inode->i_alloc_sem); - if (!error) fsnotify_change(dentry, ia_valid); diff --git a/fs/direct-io.c b/fs/direct-io.c index 98ce3ac0d94b..354cbdbc14bd 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -135,6 +135,50 @@ struct dio { struct page *pages[DIO_PAGES]; /* page buffer */ }; +static void __inode_dio_wait(struct inode *inode) +{ + wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP); + DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP); + + do { + prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE); + if (atomic_read(&inode->i_dio_count)) + schedule(); + } while (atomic_read(&inode->i_dio_count)); + finish_wait(wq, &q.wait); +} + +/** + * inode_dio_wait - wait for outstanding DIO requests to finish + * @inode: inode to wait for + * + * Waits for all pending direct I/O requests to finish so that we can + * proceed with a truncate or equivalent operation. + * + * Must be called under a lock that serializes taking new references + * to i_dio_count, usually by inode->i_mutex. + */ +void inode_dio_wait(struct inode *inode) +{ + if (atomic_read(&inode->i_dio_count)) + __inode_dio_wait(inode); +} +EXPORT_SYMBOL_GPL(inode_dio_wait); + +/* + * inode_dio_done - signal finish of a direct I/O requests + * @inode: inode the direct I/O happens on + * + * This is called once we've finished processing a direct I/O request, + * and is used to wake up callers waiting for direct I/O to be quiesced. + */ +void inode_dio_done(struct inode *inode) +{ + if (atomic_dec_and_test(&inode->i_dio_count)) + wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); +} +EXPORT_SYMBOL_GPL(inode_dio_done); + /* * How many pages are in the queue? */ @@ -254,9 +298,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is } if (dio->flags & DIO_LOCKING) - /* lockdep: non-owner release */ - up_read_non_owner(&dio->inode->i_alloc_sem); - + inode_dio_done(dio->inode); return ret; } @@ -980,9 +1022,6 @@ out: return ret; } -/* - * Releases both i_mutex and i_alloc_sem - */ static ssize_t direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, const struct iovec *iov, loff_t offset, unsigned long nr_segs, @@ -1146,15 +1185,14 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, * For writes this function is called under i_mutex and returns with * i_mutex held, for reads, i_mutex is not held on entry, but it is * taken and dropped again before returning. - * For reads and writes i_alloc_sem is taken in shared mode and released - * on I/O completion (which may happen asynchronously after returning to - * the caller). 
+ * The i_dio_count counter keeps track of the number of outstanding + * direct I/O requests, and truncate waits for it to reach zero. + * New references to i_dio_count must only be grabbed with i_mutex + * held. * * - if the flags value does NOT contain DIO_LOCKING we don't use any * internal locking but rather rely on the filesystem to synchronize * direct I/O reads/writes versus each other and truncate. - * For reads and writes both i_mutex and i_alloc_sem are not held on - * entry and are never taken. */ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, @@ -1234,10 +1272,9 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, } /* - * Will be released at I/O completion, possibly in a - * different thread. + * Will be decremented at I/O completion time. */ - down_read_non_owner(&inode->i_alloc_sem); + atomic_inc(&inode->i_dio_count); } /* diff --git a/fs/inode.c b/fs/inode.c index cf81baf1898a..96c77b81167c 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -168,8 +168,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) mutex_init(&inode->i_mutex); lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); - init_rwsem(&inode->i_alloc_sem); - lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key); + atomic_set(&inode->i_dio_count, 0); mapping->a_ops = &empty_aops; mapping->host = inode; diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index f4b1057abdd2..b59f5ac26bef 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -1832,9 +1832,8 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb, * fails again. */ if (unlikely(NInoTruncateFailed(ni))) { - down_write(&vi->i_alloc_sem); + inode_dio_wait(vi); err = ntfs_truncate(vi); - up_write(&vi->i_alloc_sem); if (err || NInoTruncateFailed(ni)) { if (!err) err = -EIO; diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index c05d6dcf77a4..1371487da955 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -2357,12 +2357,7 @@ static const char *es = " Leaving inconsistent metadata. Unmount and run " * * Returns 0 on success or -errno on error. * - * Called with ->i_mutex held. In all but one case ->i_alloc_sem is held for - * writing. The only case in the kernel where ->i_alloc_sem is not held is - * mm/filemap.c::generic_file_buffered_write() where vmtruncate() is called - * with the current i_size as the offset. The analogous place in NTFS is in - * fs/ntfs/file.c::ntfs_file_buffered_write() where we call vmtruncate() again - * without holding ->i_alloc_sem. + * Called with ->i_mutex held. */ int ntfs_truncate(struct inode *vi) { @@ -2887,8 +2882,7 @@ void ntfs_truncate_vfs(struct inode *vi) { * We also abort all changes of user, group, and mode as we do not implement * the NTFS ACLs yet. * - * Called with ->i_mutex held. For the ATTR_SIZE (i.e. ->truncate) case, also - * called with ->i_alloc_sem held for writing. + * Called with ->i_mutex held. */ int ntfs_setattr(struct dentry *dentry, struct iattr *attr) { diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index ac97bca282d2..de1d3953599d 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -551,9 +551,8 @@ bail: /* * ocfs2_dio_end_io is called by the dio core when a dio is finished. We're - * particularly interested in the aio/dio case. Like the core uses - * i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from - * truncation on another. + * particularly interested in the aio/dio case. We use the rw_lock DLM lock + * to protect io on one node from truncation on another. 
*/ static void ocfs2_dio_end_io(struct kiocb *iocb, loff_t offset, @@ -569,7 +568,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); if (ocfs2_iocb_is_sem_locked(iocb)) { - up_read(&inode->i_alloc_sem); + inode_dio_done(inode); ocfs2_iocb_clear_sem_locked(iocb); } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 1406c37a5722..2c3a465514a2 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2236,9 +2236,9 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, ocfs2_iocb_clear_sem_locked(iocb); relock: - /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */ + /* to match setattr's i_mutex -> rw_lock ordering */ if (direct_io) { - down_read(&inode->i_alloc_sem); + atomic_inc(&inode->i_dio_count); have_alloc_sem = 1; /* communicate with ocfs2_dio_end_io */ ocfs2_iocb_set_sem_locked(iocb); @@ -2290,7 +2290,7 @@ relock: */ if (direct_io && !can_do_direct) { ocfs2_rw_unlock(inode, rw_level); - up_read(&inode->i_alloc_sem); + inode_dio_done(inode); have_alloc_sem = 0; rw_level = -1; @@ -2361,8 +2361,7 @@ out_dio: /* * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io * function pointer which is called when o_direct io completes so that - * it can unlock our rw lock. (it's the clustered equivalent of - * i_alloc_sem; protects truncate from racing with pending ios). + * it can unlock our rw lock. * Unfortunately there are error cases which call end_io and others * that don't. so we don't have to unlock the rw_lock if either an * async dio is going to do it in the future or an end_io after an @@ -2379,7 +2378,7 @@ out: out_sems: if (have_alloc_sem) { - up_read(&inode->i_alloc_sem); + inode_dio_done(inode); ocfs2_iocb_clear_sem_locked(iocb); } @@ -2531,8 +2530,8 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, * need locks to protect pending reads from racing with truncate. */ if (filp->f_flags & O_DIRECT) { - down_read(&inode->i_alloc_sem); have_alloc_sem = 1; + atomic_inc(&inode->i_dio_count); ocfs2_iocb_set_sem_locked(iocb); ret = ocfs2_rw_lock(inode, 0); @@ -2575,7 +2574,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, bail: if (have_alloc_sem) { - up_read(&inode->i_alloc_sem); + inode_dio_done(inode); ocfs2_iocb_clear_sem_locked(iocb); } if (rw_level != -1) diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 4ea2ab41fdee..6938d8c68d6e 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -555,11 +555,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, reiserfs_write_unlock(inode->i_sb); mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR); - down_write(&dentry->d_inode->i_alloc_sem); + inode_dio_wait(dentry->d_inode); reiserfs_write_lock(inode->i_sb); err = reiserfs_setattr(dentry, &newattrs); - up_write(&dentry->d_inode->i_alloc_sem); mutex_unlock(&dentry->d_inode->i_mutex); } else update_ctime(inode); diff --git a/include/linux/fs.h b/include/linux/fs.h index 1393742bba9b..2fe920774abf 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -779,7 +779,7 @@ struct inode { struct timespec i_ctime; blkcnt_t i_blocks; unsigned short i_bytes; - struct rw_semaphore i_alloc_sem; + atomic_t i_dio_count; const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ struct file_lock *i_flock; struct address_space *i_mapping; @@ -1705,6 +1705,10 @@ struct super_operations { * set during data writeback, and cleared with a wakeup * on the bit address once it is done. * + * I_REFERENCED Marks the inode as recently references on the LRU list. 
+ * + * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). + * * Q: What is the difference between I_WILL_FREE and I_FREEING? */ #define I_DIRTY_SYNC (1 << 0) @@ -1718,6 +1722,8 @@ struct super_operations { #define __I_SYNC 7 #define I_SYNC (1 << __I_SYNC) #define I_REFERENCED (1 << 8) +#define __I_DIO_WAKEUP 9 +#define I_DIO_WAKEUP (1 << I_DIO_WAKEUP) #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) @@ -1828,7 +1834,6 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; struct lock_class_key i_mutex_dir_key; - struct lock_class_key i_alloc_sem_key; }; extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags, @@ -2404,6 +2409,8 @@ enum { }; void dio_end_io(struct bio *bio, int error); +void inode_dio_wait(struct inode *inode); +void inode_dio_done(struct inode *inode); ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, struct block_device *bdev, const struct iovec *iov, loff_t offset, diff --git a/mm/filemap.c b/mm/filemap.c index a8251a8d3457..f820e600f1ad 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -78,9 +78,6 @@ * ->i_mutex (generic_file_buffered_write) * ->mmap_sem (fault_in_pages_readable->do_page_fault) * - * ->i_mutex - * ->i_alloc_sem (various) - * * inode_wb_list_lock * sb_lock (fs/fs-writeback.c) * ->mapping->tree_lock (__sync_single_inode) diff --git a/mm/madvise.c b/mm/madvise.c index 2221491ed503..74bf193eff04 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -218,7 +218,7 @@ static long madvise_remove(struct vm_area_struct *vma, endoff = (loff_t)(end - vma->vm_start - 1) + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); - /* vmtruncate_range needs to take i_mutex and i_alloc_sem */ + /* vmtruncate_range needs to take i_mutex */ up_read(¤t->mm->mmap_sem); error = vmtruncate_range(mapping->host, offset, endoff); down_read(¤t->mm->mmap_sem); diff --git a/mm/rmap.c b/mm/rmap.c index 23295f65ae43..2540a39eea4a 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -21,7 +21,6 @@ * Lock ordering in mm: * * inode->i_mutex (while writing or truncating, not reading or faulting) - * inode->i_alloc_sem (vmtruncate_range) * mm->mmap_sem * page->flags PG_locked (lock_page) * mapping->i_mmap_mutex diff --git a/mm/truncate.c b/mm/truncate.c index e13f22efaad7..003c6c685fc8 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -622,12 +622,11 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) return -ENOSYS; mutex_lock(&inode->i_mutex); - down_write(&inode->i_alloc_sem); + inode_dio_wait(inode); unmap_mapping_range(mapping, offset, (end - offset), 1); inode->i_op->truncate_range(inode, offset, end); /* unmap again to remove racily COWed private pages */ unmap_mapping_range(mapping, offset, (end - offset), 1); - up_write(&inode->i_alloc_sem); mutex_unlock(&inode->i_mutex); return 0; -- cgit v1.2.3 From 562c72aa57c36b178eacc3500a0215651eca9429 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jun 2011 14:29:45 -0400 Subject: fs: move inode_dio_wait calls into ->setattr Let filesystems handle waiting for direct I/O requests themselves instead of doing it beforehand. This means filesystem-specific locks to prevent new dio referenes from appearing can be held. This is important to allow generalizing i_dio_count to non-DIO_LOCKING filesystems. 
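
The per-filesystem change is mechanical: each ->setattr that can shrink the file gains a call to inode_dio_wait() before it starts the size change, at a point where the filesystem's own locks against new direct I/O are already held. For a simple vmtruncate()-based filesystem the result looks roughly like this sketch (hypothetical foofs_setattr):

static int foofs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		/* wait for in-flight direct I/O before changing i_size */
		inode_dio_wait(inode);

		error = vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
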
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/attr.c | 3 --- fs/ext2/inode.c | 2 ++ fs/ext3/inode.c | 3 +++ fs/ext4/inode.c | 2 ++ fs/fat/file.c | 2 ++ fs/gfs2/bmap.c | 2 ++ fs/hfs/inode.c | 2 ++ fs/hfsplus/inode.c | 2 ++ fs/jfs/file.c | 2 ++ fs/nilfs2/inode.c | 2 ++ fs/ocfs2/file.c | 2 ++ fs/reiserfs/inode.c | 3 +++ 12 files changed, 24 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/attr.c b/fs/attr.c index f177ac86fa48..538e27959d3f 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -232,9 +232,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr) if (error) return error; - if (ia_valid & ATTR_SIZE) - inode_dio_wait(inode); - if (inode->i_op->setattr) error = inode->i_op->setattr(dentry, attr); else diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 788e09a07f7e..06e7c767ab35 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1184,6 +1184,8 @@ static int ext2_setsize(struct inode *inode, loff_t newsize) if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; + inode_dio_wait(inode); + if (mapping_is_xip(inode->i_mapping)) error = xip_truncate_page(inode->i_mapping, newsize); else if (test_opt(inode->i_sb, NOBH)) diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 3451d23c3bae..99c28b246b89 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -3216,6 +3216,9 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) ext3_journal_stop(handle); } + if (attr->ia_valid & ATTR_SIZE) + inode_dio_wait(inode); + if (S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) { handle_t *handle; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index bd309764557f..9ec0a2ba2502 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5351,6 +5351,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) } if (attr->ia_valid & ATTR_SIZE) { + inode_dio_wait(inode); + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); diff --git a/fs/fat/file.c b/fs/fat/file.c index a4a3a3c06436..e1587c54d3c1 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -397,6 +397,8 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr) * sequence. 
*/ if (attr->ia_valid & ATTR_SIZE) { + inode_dio_wait(inode); + if (attr->ia_size > inode->i_size) { error = fat_cont_expand(inode, attr->ia_size); if (error || attr->ia_valid == ATTR_SIZE) diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index e65493a8ac00..2cd0e56b8893 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1224,6 +1224,8 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) if (ret) return ret; + inode_dio_wait(inode); + oldsize = inode->i_size; if (newsize >= oldsize) return do_grow(inode, newsize); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index fff16c968e67..48d567cc203e 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -615,6 +615,8 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr) if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { + inode_dio_wait(inode); + error = vmtruncate(inode, attr->ia_size); if (error) return error; diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index b248a6cfcad9..b0a0a4b621eb 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -296,6 +296,8 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != i_size_read(inode)) { + inode_dio_wait(inode); + error = vmtruncate(inode, attr->ia_size); if (error) return error; diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 2f3f531f3606..9f32315acef1 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -110,6 +110,8 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr) if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { + inode_dio_wait(inode); + rc = vmtruncate(inode, iattr->ia_size); if (rc) return rc; diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index be8664c6a825..13f113154a9f 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -778,6 +778,8 @@ int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size != i_size_read(inode)) { + inode_dio_wait(inode); + err = vmtruncate(inode, iattr->ia_size); if (unlikely(err)) goto out_err; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 2c3a465514a2..736283ca4a4c 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1142,6 +1142,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) if (status) goto bail_unlock; + inode_dio_wait(inode); + if (i_size_read(inode) > attr->ia_size) { if (ocfs2_should_order_data(inode)) { status = ocfs2_begin_ordered_truncate(inode, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 4fd5bb33dbb5..dcf543d8caf1 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3114,6 +3114,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) error = -EFBIG; goto out; } + + inode_dio_wait(inode); + /* fill in hole pointers in the expanding truncate case. */ if (attr->ia_size > inode->i_size) { error = generic_cont_expand_simple(inode, attr->ia_size); -- cgit v1.2.3 From df2d6f26586f12a24f3ae5df4e236dc5c08d6eb4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jun 2011 14:29:46 -0400 Subject: fs: always maintain i_dio_count Maintain i_dio_count for all filesystems, not just those using DIO_LOCKING. This these filesystems to also protect truncate against direct I/O requests by using common code. Right now the only non-DIO_LOCKING filesystem that appears to do so is XFS, which uses an opencoded variant of the i_dio_count scheme. Behaviour doesn't change for filesystems never calling inode_dio_wait. 
For ext4 behaviour changes when using the dioread_nolock option, which previously was missing any protection between truncate and direct I/O reads. For ocfs2 the handcrafted i_dio_count manipulations are replaced with the common code, which is now enabled. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/direct-io.c | 25 +++++++++++++------------ fs/ocfs2/aops.c | 4 +--- fs/ocfs2/file.c | 12 +++--------- 3 files changed, 17 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/direct-io.c b/fs/direct-io.c index 354cbdbc14bd..0a073c7125a6 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -297,8 +297,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is aio_complete(dio->iocb, ret, 0); } - if (dio->flags & DIO_LOCKING) - inode_dio_done(dio->inode); + inode_dio_done(dio->inode); return ret; } @@ -1185,14 +1184,16 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, * For writes this function is called under i_mutex and returns with * i_mutex held, for reads, i_mutex is not held on entry, but it is * taken and dropped again before returning. - * The i_dio_count counter keeps track of the number of outstanding - * direct I/O requests, and truncate waits for it to reach zero. - * New references to i_dio_count must only be grabbed with i_mutex - * held. - * * - if the flags value does NOT contain DIO_LOCKING we don't use any * internal locking but rather rely on the filesystem to synchronize * direct I/O reads/writes versus each other and truncate. + * + * To help with locking against truncate we incremented the i_dio_count + * counter before starting direct I/O, and decrement it once we are done. + * Truncate can wait for it to reach zero to provide exclusion. It is + * expected that filesystem provide exclusion between new direct I/O + * and truncates. For DIO_LOCKING filesystems this is done by i_mutex, + * but other filesystems need to take care of this on their own. */ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, @@ -1270,13 +1271,13 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, goto out; } } - - /* - * Will be decremented at I/O completion time. - */ - atomic_inc(&inode->i_dio_count); } + /* + * Will be decremented at I/O completion time. + */ + atomic_inc(&inode->i_dio_count); + /* * For file extending writes updating i_size before data * writeouts complete can expose uninitialized blocks.
So diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index de1d3953599d..524d6167fb63 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -567,10 +567,8 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, /* this io's submitter should not have unlocked this before we could */ BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); - if (ocfs2_iocb_is_sem_locked(iocb)) { - inode_dio_done(inode); + if (ocfs2_iocb_is_sem_locked(iocb)) ocfs2_iocb_clear_sem_locked(iocb); - } ocfs2_iocb_clear_rw_locked(iocb); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 736283ca4a4c..22d604601957 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2240,7 +2240,6 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, relock: /* to match setattr's i_mutex -> rw_lock ordering */ if (direct_io) { - atomic_inc(&inode->i_dio_count); have_alloc_sem = 1; /* communicate with ocfs2_dio_end_io */ ocfs2_iocb_set_sem_locked(iocb); @@ -2292,7 +2291,6 @@ relock: */ if (direct_io && !can_do_direct) { ocfs2_rw_unlock(inode, rw_level); - inode_dio_done(inode); have_alloc_sem = 0; rw_level = -1; @@ -2379,10 +2377,8 @@ out: ocfs2_rw_unlock(inode, rw_level); out_sems: - if (have_alloc_sem) { - inode_dio_done(inode); + if (have_alloc_sem) ocfs2_iocb_clear_sem_locked(iocb); - } mutex_unlock(&inode->i_mutex); @@ -2533,7 +2529,6 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, */ if (filp->f_flags & O_DIRECT) { have_alloc_sem = 1; - atomic_inc(&inode->i_dio_count); ocfs2_iocb_set_sem_locked(iocb); ret = ocfs2_rw_lock(inode, 0); @@ -2575,10 +2570,9 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, } bail: - if (have_alloc_sem) { - inode_dio_done(inode); + if (have_alloc_sem) ocfs2_iocb_clear_sem_locked(iocb); - } + if (rw_level != -1) ocfs2_rw_unlock(inode, rw_level); -- cgit v1.2.3 From aacfc19c626ebd3daa675652457d71019a1f583f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jun 2011 14:29:47 -0400 Subject: fs: simplify the blockdev_direct_IO prototype Simple filesystems always pass inode->i_sb->s_bdev as the block device argument, and never need an end_io handler. Let's simplify things for them and for my grepping activity by dropping these arguments. The only thing not falling into that scheme is ext4, which passes an end_io handler without needing special flags (yet), but given how messy the direct I/O code there is, using __blockdev_direct_IO in one instead of two out of three cases isn't going to make a large difference anyway.
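As a rough sketch (not part of this patch), a simple filesystem's ->direct_IO ends up looking like this after the change; example_direct_IO() and example_get_block() are placeholders, while the shortened helper and its argument order are the ones introduced below:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Placeholder mapping callback with the usual get_block_t signature. */
static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
				 const struct iovec *iov, loff_t offset,
				 unsigned long nr_segs)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/*
	 * No bdev and no end_io argument any more: the wrapper picks up
	 * inode->i_sb->s_bdev itself and passes a NULL completion handler.
	 */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  example_get_block);
}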
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/ext2/inode.c | 4 ++-- fs/ext3/inode.c | 5 ++--- fs/ext4/inode.c | 12 ++++++------ fs/fat/inode.c | 4 ++-- fs/hfs/inode.c | 4 ++-- fs/hfsplus/inode.c | 4 ++-- fs/jfs/inode.c | 4 ++-- fs/nilfs2/inode.c | 4 ++-- fs/reiserfs/inode.c | 5 ++--- include/linux/fs.h | 9 ++++----- 10 files changed, 26 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 06e7c767ab35..a8a58f63f07c 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -843,8 +843,8 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, struct inode *inode = mapping->host; ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, - iov, offset, nr_segs, ext2_get_block, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + ext2_get_block); if (ret < 0 && (rw & WRITE)) ext2_write_failed(mapping, offset + iov_length(iov, nr_segs)); return ret; diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 99c28b246b89..2978a2a17a59 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1816,9 +1816,8 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, } retry: - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, - ext3_get_block, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + ext3_get_block); /* * In case of error extending write may have instantiated a few * blocks outside i_size. Trim these off again. diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9ec0a2ba2502..1f35573a34e1 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3501,10 +3501,8 @@ retry: offset, nr_segs, ext4_get_block, NULL, NULL, 0); else { - ret = blockdev_direct_IO(rw, iocb, inode, - inode->i_sb->s_bdev, iov, - offset, nr_segs, - ext4_get_block, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, + offset, nr_segs, ext4_get_block); if (unlikely((rw & WRITE) && ret < 0)) { loff_t isize = i_size_read(inode); @@ -3748,11 +3746,13 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, EXT4_I(inode)->cur_aio_dio = iocb->private; } - ret = blockdev_direct_IO(rw, iocb, inode, + ret = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, offset, nr_segs, ext4_get_block_write, - ext4_end_io_dio); + ext4_end_io_dio, + NULL, + DIO_LOCKING | DIO_SKIP_HOLES); if (iocb->private) EXT4_I(inode)->cur_aio_dio = NULL; /* diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 3decce46c38f..5942fec22c65 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -211,8 +211,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb, * FAT need to use the DIO_LOCKING for avoiding the race * condition of fat_get_block() and ->truncate(). 
*/ - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, - iov, offset, nr_segs, fat_get_block, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + fat_get_block); if (ret < 0 && (rw & WRITE)) fat_write_failed(mapping, offset + iov_length(iov, nr_segs)); diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 48d567cc203e..5e7c3f309617 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -123,8 +123,8 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, hfs_get_block, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + hfs_get_block); /* * In case of error extending write may have instantiated a few diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index b0a0a4b621eb..5b1cb98741cc 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -119,8 +119,8 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host; ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, hfsplus_get_block, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + hfsplus_get_block); /* * In case of error extending write may have instantiated a few diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 109655904bbc..77b69b27f825 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -329,8 +329,8 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb, struct inode *inode = file->f_mapping->host; ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, jfs_get_block, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + jfs_get_block); /* * In case of error extending write may have instantiated a few diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 13f113154a9f..666628b395f1 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -259,8 +259,8 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, return 0; /* Needs synchronization with the cleaner */ - size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, nilfs_get_block, NULL); + size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + nilfs_get_block); /* * In case of error extending write may have instantiated a few diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index dcf543d8caf1..2922b90ceac1 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3068,9 +3068,8 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb, struct inode *inode = file->f_mapping->host; ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, - offset, nr_segs, - reiserfs_get_blocks_direct_io, NULL); + ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs, + reiserfs_get_blocks_direct_io); /* * In case of error extending write may have instantiated a few diff --git a/include/linux/fs.h b/include/linux/fs.h index 2fe920774abf..824453be9fee 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2418,12 +2418,11 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, dio_submit_t submit_io, int flags); static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, - struct inode *inode, struct block_device *bdev, const struct iovec *iov, - loff_t offset, unsigned long nr_segs, get_block_t 
get_block, - dio_iodone_t end_io) + struct inode *inode, const struct iovec *iov, loff_t offset, + unsigned long nr_segs, get_block_t get_block) { - return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, - nr_segs, get_block, end_io, NULL, + return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, + offset, nr_segs, get_block, NULL, NULL, DIO_LOCKING | DIO_SKIP_HOLES); } #endif -- cgit v1.2.3 From 72c5052ddc3956d847f21c2b8d55c93664a51b2c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jun 2011 14:29:48 -0400 Subject: fs: move inode_dio_done to the end_io handler For filesystems that delay their end_io processing we should keep our i_dio_count until the the processing is done. Enable this by moving the inode_dio_done call to the end_io handler if one exist. Note that the actual move to the workqueue for ext4 and XFS is not done in this patch yet, but left to the filesystem maintainers. At least for XFS it's not needed yet either as XFS has an internal equivalent to i_dio_count. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/direct-io.c | 7 ++++--- fs/ext4/inode.c | 5 +++++ fs/ocfs2/aops.c | 1 + fs/xfs/linux-2.6/xfs_aops.c | 3 +++ 4 files changed, 13 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/direct-io.c b/fs/direct-io.c index 0a073c7125a6..01d2d9ef609c 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -293,11 +293,12 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is if (dio->end_io && dio->result) { dio->end_io(dio->iocb, offset, transferred, dio->map_bh.b_private, ret, is_async); - } else if (is_async) { - aio_complete(dio->iocb, ret, 0); + } else { + if (is_async) + aio_complete(dio->iocb, ret, 0); + inode_dio_done(dio->inode); } - inode_dio_done(dio->inode); return ret; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 1f35573a34e1..678cde834f19 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3573,6 +3573,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, ssize_t size, void *private, int ret, bool is_async) { + struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; ext4_io_end_t *io_end = iocb->private; struct workqueue_struct *wq; unsigned long flags; @@ -3594,6 +3595,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, out: if (is_async) aio_complete(iocb, ret, 0); + inode_dio_done(inode); return; } @@ -3614,6 +3616,9 @@ out: /* queue the work to convert unwritten extents to written */ queue_work(wq, &io_end->work); iocb->private = NULL; + + /* XXX: probably should move into the real I/O completion handler */ + inode_dio_done(inode); } static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 524d6167fb63..c1efe939c774 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -577,6 +577,7 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, if (is_async) aio_complete(iocb, ret, 0); + inode_dio_done(inode); } /* diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 79ce38be15a1..b3b418f519f3 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -1339,6 +1339,9 @@ xfs_end_io_direct_write( } else { xfs_finish_ioend_sync(ioend); } + + /* XXX: probably should move into the real I/O completion handler */ + inode_dio_done(ioend->io_inode); } STATIC ssize_t -- cgit v1.2.3 From f15146380d28b746df3c8b81b392812eb982382a Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Tue, 12 Jul 2011 20:48:39 +0200 Subject: fs: seq_file - add 
event counter to simplify poll() support Moving the event counter into the dynamically allocated 'struc seq_file' allows poll() support without the need to allocate its own tracking structure. All current users are switched over to use the new counter. Requested-by: Andrew Morton akpm@linux-foundation.org Acked-by: NeilBrown Tested-by: Lucas De Marchi lucas.demarchi@profusion.mobi Signed-off-by: Kay Sievers Signed-off-by: Al Viro --- drivers/md/md.c | 26 ++++++++------------------ fs/namespace.c | 4 ++-- fs/proc/base.c | 2 +- include/linux/mnt_namespace.h | 1 - include/linux/seq_file.h | 1 + mm/swapfile.c | 29 ++++++++--------------------- 6 files changed, 20 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/drivers/md/md.c b/drivers/md/md.c index 91e31e260b4a..dfc9425db70b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6394,16 +6394,11 @@ static void md_seq_stop(struct seq_file *seq, void *v) mddev_put(mddev); } -struct mdstat_info { - int event; -}; - static int md_seq_show(struct seq_file *seq, void *v) { mddev_t *mddev = v; sector_t sectors; mdk_rdev_t *rdev; - struct mdstat_info *mi = seq->private; struct bitmap *bitmap; if (v == (void*)1) { @@ -6415,7 +6410,7 @@ static int md_seq_show(struct seq_file *seq, void *v) spin_unlock(&pers_lock); seq_printf(seq, "\n"); - mi->event = atomic_read(&md_event_count); + seq->poll_event = atomic_read(&md_event_count); return 0; } if (v == (void*)2) { @@ -6527,26 +6522,21 @@ static const struct seq_operations md_seq_ops = { static int md_seq_open(struct inode *inode, struct file *file) { + struct seq_file *seq; int error; - struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); - if (mi == NULL) - return -ENOMEM; error = seq_open(file, &md_seq_ops); if (error) - kfree(mi); - else { - struct seq_file *p = file->private_data; - p->private = mi; - mi->event = atomic_read(&md_event_count); - } + return error; + + seq = file->private_data; + seq->poll_event = atomic_read(&md_event_count); return error; } static unsigned int mdstat_poll(struct file *filp, poll_table *wait) { - struct seq_file *m = filp->private_data; - struct mdstat_info *mi = m->private; + struct seq_file *seq = filp->private_data; int mask; poll_wait(filp, &md_event_waiters, wait); @@ -6554,7 +6544,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait) /* always allow read */ mask = POLLIN | POLLRDNORM; - if (mi->event != atomic_read(&md_event_count)) + if (seq->poll_event != atomic_read(&md_event_count)) mask |= POLLERR | POLLPRI; return mask; } diff --git a/fs/namespace.c b/fs/namespace.c index fe59bd145d21..cda50fe9250a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -934,8 +934,8 @@ int mnt_had_events(struct proc_mounts *p) int res = 0; br_read_lock(vfsmount_lock); - if (p->event != ns->event) { - p->event = ns->event; + if (p->m.poll_event != ns->event) { + p->m.poll_event = ns->event; res = 1; } br_read_unlock(vfsmount_lock); diff --git a/fs/proc/base.c b/fs/proc/base.c index be1ff932033b..3dc5e2a5cc38 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -673,7 +673,7 @@ static int mounts_open_common(struct inode *inode, struct file *file, p->m.private = p; p->ns = ns; p->root = root; - p->event = ns->event; + p->m.poll_event = ns->event; return 0; diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 0b89efc6f215..29304855652d 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -18,7 +18,6 @@ struct proc_mounts { struct seq_file m; /* must be the first element */ struct 
mnt_namespace *ns; struct path root; - int event; }; struct fs_struct; diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 03c0232b4169..be720cd2038d 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -23,6 +23,7 @@ struct seq_file { u64 version; struct mutex lock; const struct seq_operations *op; + int poll_event; void *private; }; diff --git a/mm/swapfile.c b/mm/swapfile.c index ff8dc1a18cb4..1b8c33907242 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1681,19 +1681,14 @@ out: } #ifdef CONFIG_PROC_FS -struct proc_swaps { - struct seq_file seq; - int event; -}; - static unsigned swaps_poll(struct file *file, poll_table *wait) { - struct proc_swaps *s = file->private_data; + struct seq_file *seq = file->private_data; poll_wait(file, &proc_poll_wait, wait); - if (s->event != atomic_read(&proc_poll_event)) { - s->event = atomic_read(&proc_poll_event); + if (seq->poll_event != atomic_read(&proc_poll_event)) { + seq->poll_event = atomic_read(&proc_poll_event); return POLLIN | POLLRDNORM | POLLERR | POLLPRI; } @@ -1783,24 +1778,16 @@ static const struct seq_operations swaps_op = { static int swaps_open(struct inode *inode, struct file *file) { - struct proc_swaps *s; + struct seq_file *seq; int ret; - s = kmalloc(sizeof(struct proc_swaps), GFP_KERNEL); - if (!s) - return -ENOMEM; - - file->private_data = s; - ret = seq_open(file, &swaps_op); - if (ret) { - kfree(s); + if (ret) return ret; - } - s->seq.private = s; - s->event = atomic_read(&proc_poll_event); - return ret; + seq = file->private_data; + seq->poll_event = atomic_read(&proc_poll_event); + return 0; } static const struct file_operations proc_swaps_operations = { -- cgit v1.2.3 From ee60498f3ef94d7ed58dcd38077e27e839e7cafc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 16 Jul 2011 17:06:30 -0400 Subject: coda_venus_readdir(): use offsetof() Signed-off-by: Al Viro --- fs/coda/dir.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/coda/dir.c b/fs/coda/dir.c index cd5532398c15..9d52897c1b6d 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -449,8 +449,7 @@ static int coda_venus_readdir(struct file *coda_file, void *buf, struct file *host_file; struct dentry *de; struct venus_dirent *vdir; - unsigned long vdir_size = - (unsigned long)(&((struct venus_dirent *)0)->d_name); + unsigned long vdir_size = offsetof(struct venus_dirent, d_name); unsigned int type; struct qstr name; ino_t ino; -- cgit v1.2.3 From 2def9e4ec75e6771def66a07960dd516e119ab4c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 16 Jul 2011 17:43:09 -0400 Subject: minix_getattr(): don't bother with ->d_parent we can find superblock easier, TYVM... 
Signed-off-by: Al Viro --- fs/minix/inode.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/minix/inode.c b/fs/minix/inode.c index adcdc0a4e182..e7d23e25bf1d 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -596,8 +596,7 @@ static int minix_write_inode(struct inode *inode, struct writeback_control *wbc) int minix_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { - struct inode *dir = dentry->d_parent->d_inode; - struct super_block *sb = dir->i_sb; + struct super_block *sb = dentry->d_sb; generic_fillattr(dentry->d_inode, stat); if (INODE_VERSION(dentry->d_inode) == MINIX_V1) stat->blocks = (BLOCK_SIZE / 512) * V1_minix_blocks(stat->size, sb); -- cgit v1.2.3 From b85fd6bdc99da917f5ae0f90f0c2d86ef9a766aa Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 11:19:44 -0400 Subject: don't open-code parent_ino() in assorted ->readdir() Signed-off-by: Al Viro --- fs/ceph/dir.c | 2 +- fs/cifs/readdir.c | 2 +- fs/coda/dir.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index b6d436f3c1ae..e8477dc51b45 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -252,7 +252,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) off = 1; } if (filp->f_pos == 1) { - ino_t ino = filp->f_dentry->d_parent->d_inode->i_ino; + ino_t ino = parent_ino(filp->f_dentry); dout("readdir off 1 -> '..'\n"); if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1), ceph_translate_ino(inode->i_sb, ino), diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 6751e745bbc6..965a3af186a1 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -796,7 +796,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) file->f_pos++; case 1: if (filldir(direntry, "..", 2, file->f_pos, - file->f_path.dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) { + parent_ino(file->f_path.dentry), DT_DIR) < 0) { cERROR(1, "Filldir for parent dir failed"); rc = -ENOMEM; break; diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 9d52897c1b6d..0239433f50cb 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -473,7 +473,7 @@ static int coda_venus_readdir(struct file *coda_file, void *buf, coda_file->f_pos++; } if (coda_file->f_pos == 1) { - ret = filldir(buf, "..", 2, 1, de->d_parent->d_inode->i_ino, DT_DIR); + ret = filldir(buf, "..", 2, 1, parent_ino(de), DT_DIR); if (ret < 0) goto out; result++; -- cgit v1.2.3 From 00eacd66cd8ab5fff9df49aa3f261ad43d495434 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 16 Jul 2011 16:46:50 -0400 Subject: ext3: make ext3 mount default to barrier=1 This patch turns on barriers by default for ext3. mount -o barrier=0 will turn them off. Based on a patch from Chris Mason in the SuSE tree. 
Signed-off-by: Chris Mason Signed-off-by: Christoph Hellwig Acked-by: Eric Sandeen Acked-by: Jan Kara Acked-by: Jeff Mahoney Acked-by: Ted Ts'o Signed-off-by: Al Viro --- fs/ext3/super.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index aad153ef6b78..b57ea2f91269 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1718,6 +1718,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) sbi->s_resuid = le16_to_cpu(es->s_def_resuid); sbi->s_resgid = le16_to_cpu(es->s_def_resgid); + /* enable barriers by default */ + set_opt(sbi->s_mount_opt, BARRIER); set_opt(sbi->s_mount_opt, RESERVATION); if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum, -- cgit v1.2.3 From b4d5b10fb2e3a4327838c07d8ebd9e350fcc133d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 16 Jul 2011 16:47:00 -0400 Subject: reiserfs: make reiserfs default to barrier=flush Change the default reiserfs mount option to barrier=flush. Based on a patch from Jeff Mahoney in the SuSE tree. Signed-off-by: Jeff Mahoney Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/reiserfs/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index aa91089162cb..14363b96b6af 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -1643,6 +1643,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent) /* Set default values for options: non-aggressive tails, RO on errors */ REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); + REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH); /* no preallocation minimum, be smart in reiserfs_file_write instead */ REISERFS_SB(s)->s_alloc_options.preallocmin = 0; -- cgit v1.2.3 From 982d816581eeeacfe5b2b7c6d47d13a157616eff Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 18 Jul 2011 13:21:35 -0400 Subject: fs: add SEEK_HOLE and SEEK_DATA flags This just gets us ready to support the SEEK_HOLE and SEEK_DATA flags. Turns out using fiemap in things like cp cause more problems than it solves, so lets try and give userspace an interface that doesn't suck. We need to match solaris here, and the definitions are *o* If /whence/ is SEEK_HOLE, the offset of the start of the next hole greater than or equal to the supplied offset is returned. The definition of a hole is provided near the end of the DESCRIPTION. *o* If /whence/ is SEEK_DATA, the file pointer is set to the start of the next non-hole file region greater than or equal to the supplied offset. So in the generic case the entire file is data and there is a virtual hole at the end. That means we will just return i_size for SEEK_HOLE and will return the same offset for SEEK_DATA. This is how Solaris does it so we have to do it the same way. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- Documentation/filesystems/porting | 10 +++++++++ fs/read_write.c | 44 ++++++++++++++++++++++++++++++++++++--- include/linux/fs.h | 4 +++- 3 files changed, 54 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 0eeb3954dea3..6b96773e27cb 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -411,3 +411,13 @@ to some pointer to returning that pointer. On errors return ERR_PTR(...). argument; instead of passing IPERM_FLAG_RCU we add MAY_NOT_BLOCK into mask. 
generic_permission() has also lost the check_acl argument; if you want non-NULL to be used for that inode, put it into ->i_op->check_acl. + +-- +[mandatory] + If you implement your own ->llseek() you must handle SEEK_HOLE and +SEEK_DATA. You can hanle this by returning -EINVAL, but it would be nicer to +support it in some way. The generic handler assumes that the entire file is +data and there is a virtual hole at the end of the file. So if the provided +offset is less than i_size and SEEK_DATA is specified, return the same offset. +If the above is true for the offset and you are given SEEK_HOLE, return the end +of the file. If the offset is i_size or greater return -ENXIO in either case. diff --git a/fs/read_write.c b/fs/read_write.c index 5520f8ad5504..5907b49e4d7e 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -64,6 +64,23 @@ generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin) return file->f_pos; offset += file->f_pos; break; + case SEEK_DATA: + /* + * In the generic case the entire file is data, so as long as + * offset isn't at the end of the file then the offset is data. + */ + if (offset >= inode->i_size) + return -ENXIO; + break; + case SEEK_HOLE: + /* + * There is a virtual hole at the end of the file, so as long as + * offset isn't i_size or larger, return i_size. + */ + if (offset >= inode->i_size) + return -ENXIO; + offset = inode->i_size; + break; } if (offset < 0 && !unsigned_offsets(file)) @@ -128,12 +145,13 @@ EXPORT_SYMBOL(no_llseek); loff_t default_llseek(struct file *file, loff_t offset, int origin) { + struct inode *inode = file->f_path.dentry->d_inode; loff_t retval; - mutex_lock(&file->f_dentry->d_inode->i_mutex); + mutex_lock(&inode->i_mutex); switch (origin) { case SEEK_END: - offset += i_size_read(file->f_path.dentry->d_inode); + offset += i_size_read(inode); break; case SEEK_CUR: if (offset == 0) { @@ -141,6 +159,26 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin) goto out; } offset += file->f_pos; + break; + case SEEK_DATA: + /* + * In the generic case the entire file is data, so as + * long as offset isn't at the end of the file then the + * offset is data. + */ + if (offset >= inode->i_size) + return -ENXIO; + break; + case SEEK_HOLE: + /* + * There is a virtual hole at the end of the file, so + * as long as offset isn't i_size or larger, return + * i_size. 
+ */ + if (offset >= inode->i_size) + return -ENXIO; + offset = inode->i_size; + break; } retval = -EINVAL; if (offset >= 0 || unsigned_offsets(file)) { @@ -151,7 +189,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin) retval = offset; } out: - mutex_unlock(&file->f_dentry->d_inode->i_mutex); + mutex_unlock(&inode->i_mutex); return retval; } EXPORT_SYMBOL(default_llseek); diff --git a/include/linux/fs.h b/include/linux/fs.h index 824453be9fee..4a61f98823a6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -32,7 +32,9 @@ #define SEEK_SET 0 /* seek relative to beginning of file */ #define SEEK_CUR 1 /* seek relative to current file position */ #define SEEK_END 2 /* seek relative to end of file */ -#define SEEK_MAX SEEK_END +#define SEEK_DATA 3 /* seek to the next data */ +#define SEEK_HOLE 4 /* seek to the next hole */ +#define SEEK_MAX SEEK_HOLE struct fstrim_range { __u64 start; -- cgit v1.2.3 From b26751575a9aa55fd6dbf3febde3ff06dfadc44f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 18 Jul 2011 13:21:36 -0400 Subject: Btrfs: implement our own ->llseek In order to handle SEEK_HOLE/SEEK_DATA we need to implement our own llseek. Basically for the normal SEEK_*'s we will just defer to the generic helper, and for SEEK_HOLE/SEEK_DATA we will use our fiemap helper to figure out the nearest hole or data. Currently this helper doesn't check for delalloc bytes for prealloc space, so for now treat prealloc as data until that is fixed. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/btrfs/ctree.h | 3 ++ fs/btrfs/file.c | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 150 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 9552afc24ef7..f1ff62bff1b3 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2510,6 +2510,9 @@ int btrfs_csum_truncate(struct btrfs_trans_handle *trans, int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit); /* inode.c */ +struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, + size_t pg_offset, u64 start, u64 len, + int create); /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ #if defined(ClearPageFsMisc) && !defined(ClearPageChecked) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index fa4ef18b66b1..bd4d061c6e4d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1664,8 +1664,154 @@ out: return ret; } +static int find_desired_extent(struct inode *inode, loff_t *offset, int origin) +{ + struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_map *em; + struct extent_state *cached_state = NULL; + u64 lockstart = *offset; + u64 lockend = i_size_read(inode); + u64 start = *offset; + u64 orig_start = *offset; + u64 len = i_size_read(inode); + u64 last_end = 0; + int ret = 0; + + lockend = max_t(u64, root->sectorsize, lockend); + if (lockend <= lockstart) + lockend = lockstart + root->sectorsize; + + len = lockend - lockstart + 1; + + len = max_t(u64, len, root->sectorsize); + if (inode->i_size == 0) + return -ENXIO; + + lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0, + &cached_state, GFP_NOFS); + + /* + * Delalloc is such a pain. If we have a hole and we have pending + * delalloc for a portion of the hole we will get back a hole that + * exists for the entire range since it hasn't been actually written + * yet. 
So to take care of this case we need to look for an extent just + * before the position we want in case there is outstanding delalloc + * going on here. + */ + if (origin == SEEK_HOLE && start != 0) { + if (start <= root->sectorsize) + em = btrfs_get_extent_fiemap(inode, NULL, 0, 0, + root->sectorsize, 0); + else + em = btrfs_get_extent_fiemap(inode, NULL, 0, + start - root->sectorsize, + root->sectorsize, 0); + if (IS_ERR(em)) { + ret = -ENXIO; + goto out; + } + last_end = em->start + em->len; + if (em->block_start == EXTENT_MAP_DELALLOC) + last_end = min_t(u64, last_end, inode->i_size); + free_extent_map(em); + } + + while (1) { + em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); + if (IS_ERR(em)) { + ret = -ENXIO; + break; + } + + if (em->block_start == EXTENT_MAP_HOLE) { + if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { + if (last_end <= orig_start) { + free_extent_map(em); + ret = -ENXIO; + break; + } + } + + if (origin == SEEK_HOLE) { + *offset = start; + free_extent_map(em); + break; + } + } else { + if (origin == SEEK_DATA) { + if (em->block_start == EXTENT_MAP_DELALLOC) { + if (start >= inode->i_size) { + free_extent_map(em); + ret = -ENXIO; + break; + } + } + + *offset = start; + free_extent_map(em); + break; + } + } + + start = em->start + em->len; + last_end = em->start + em->len; + + if (em->block_start == EXTENT_MAP_DELALLOC) + last_end = min_t(u64, last_end, inode->i_size); + + if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) { + free_extent_map(em); + ret = -ENXIO; + break; + } + free_extent_map(em); + cond_resched(); + } + if (!ret) + *offset = min(*offset, inode->i_size); +out: + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + &cached_state, GFP_NOFS); + return ret; +} + +static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin) +{ + struct inode *inode = file->f_mapping->host; + int ret; + + mutex_lock(&inode->i_mutex); + switch (origin) { + case SEEK_END: + case SEEK_CUR: + offset = generic_file_llseek_unlocked(file, offset, origin); + goto out; + case SEEK_DATA: + case SEEK_HOLE: + ret = find_desired_extent(inode, &offset, origin); + if (ret) { + mutex_unlock(&inode->i_mutex); + return ret; + } + } + + if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) + return -EINVAL; + if (offset > inode->i_sb->s_maxbytes) + return -EINVAL; + + /* Special lock needed here? */ + if (offset != file->f_pos) { + file->f_pos = offset; + file->f_version = 0; + } +out: + mutex_unlock(&inode->i_mutex); + return offset; +} + const struct file_operations btrfs_file_operations = { - .llseek = generic_file_llseek, + .llseek = btrfs_file_llseek, .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, -- cgit v1.2.3 From c334b1138bd44bea578eab7971c59bd9212a1093 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 18 Jul 2011 13:21:37 -0400 Subject: Ext4: handle SEEK_HOLE/SEEK_DATA generically Since Ext4 has its own lseek we need to make sure it handles SEEK_HOLE/SEEK_DATA. For now just do the same thing that is done in the generic case, somebody else can come along and make it do fancy things later. 
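For context (not part of this patch), a user-space consumer of the flags introduced earlier in this series (SEEK_DATA = 3, SEEK_HOLE = 4 in include/linux/fs.h) might walk a file's data segments like the sketch below; the program is illustrative only and guards against a libc that does not define the flags yet:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#ifndef SEEK_DATA		/* values added to include/linux/fs.h above */
#define SEEK_DATA 3
#define SEEK_HOLE 4
#endif

int main(int argc, char **argv)
{
	off_t data = 0, hole;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;

	/*
	 * Alternate between the start of the next data region and the hole
	 * that ends it; lseek() fails with ENXIO once the offset reaches or
	 * passes i_size.
	 */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data %lld..%lld\n", (long long)data, (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}

On a filesystem that only has the generic fallback this prints a single segment covering the whole file, matching the Solaris-compatible behaviour described above; on filesystems with real hole tracking it walks the individual extents.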
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/ext4/file.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'fs') diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 2c0972322009..ce766f974b1d 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -236,6 +236,27 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int origin) } offset += file->f_pos; break; + case SEEK_DATA: + /* + * In the generic case the entire file is data, so as long as + * offset isn't at the end of the file then the offset is data. + */ + if (offset >= inode->i_size) { + mutex_unlock(&inode->i_mutex); + return -ENXIO; + } + break; + case SEEK_HOLE: + /* + * There is a virtual hole at the end of the file, so as long as + * offset isn't i_size or larger, return i_size. + */ + if (offset >= inode->i_size) { + mutex_unlock(&inode->i_mutex); + return -ENXIO; + } + offset = inode->i_size; + break; } if (offset < 0 || offset > maxbytes) { -- cgit v1.2.3 From 06222e491e663dac939f04b125c9dc52126a75c4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 18 Jul 2011 13:21:38 -0400 Subject: fs: handle SEEK_HOLE/SEEK_DATA properly in all fs's that define their own llseek This converts everybody to handle SEEK_HOLE/SEEK_DATA properly. In some cases we just return -EINVAL, in others we do the normal generic thing, and in others we're simply making sure that the properly due-dilligence is done. For example in NFS/CIFS we need to make sure the file size is update properly for the SEEK_HOLE and SEEK_DATA case, but since it calls the generic llseek stuff itself that is all we have to do. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/block_dev.c | 11 ++++++++--- fs/ceph/dir.c | 8 +++++++- fs/ceph/file.c | 20 ++++++++++++++++++-- fs/cifs/cifsfs.c | 7 +++++-- fs/fuse/file.c | 21 +++++++++++++++++++-- fs/hpfs/dir.c | 4 ++++ fs/nfs/file.c | 7 +++++-- 7 files changed, 66 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 610e8e0b04b8..966617a422d9 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -355,20 +355,25 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin) mutex_lock(&bd_inode->i_mutex); size = i_size_read(bd_inode); + retval = -EINVAL; switch (origin) { - case 2: + case SEEK_END: offset += size; break; - case 1: + case SEEK_CUR: offset += file->f_pos; + case SEEK_SET: + break; + default: + goto out; } - retval = -EINVAL; if (offset >= 0 && offset <= size) { if (offset != file->f_pos) { file->f_pos = offset; } retval = offset; } +out: mutex_unlock(&bd_inode->i_mutex); return retval; } diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index e8477dc51b45..0972b457a03f 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -446,14 +446,19 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin) loff_t retval; mutex_lock(&inode->i_mutex); + retval = -EINVAL; switch (origin) { case SEEK_END: offset += inode->i_size + 2; /* FIXME */ break; case SEEK_CUR: offset += file->f_pos; + case SEEK_SET: + break; + default: + goto out; } - retval = -EINVAL; + if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) { if (offset != file->f_pos) { file->f_pos = offset; @@ -477,6 +482,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin) if (offset > old_offset) fi->dir_release_count--; } +out: mutex_unlock(&inode->i_mutex); return retval; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 0a292459c895..0d0eae05598f 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -768,13 +768,16 @@ 
static loff_t ceph_llseek(struct file *file, loff_t offset, int origin) mutex_lock(&inode->i_mutex); __ceph_do_pending_vmtruncate(inode); - switch (origin) { - case SEEK_END: + if (origin != SEEK_CUR || origin != SEEK_SET) { ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); if (ret < 0) { offset = ret; goto out; } + } + + switch (origin) { + case SEEK_END: offset += inode->i_size; break; case SEEK_CUR: @@ -790,6 +793,19 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin) } offset += file->f_pos; break; + case SEEK_DATA: + if (offset >= inode->i_size) { + ret = -ENXIO; + goto out; + } + break; + case SEEK_HOLE: + if (offset >= inode->i_size) { + ret = -ENXIO; + goto out; + } + offset = inode->i_size; + break; } if (offset < 0 || offset > inode->i_sb->s_maxbytes) { diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index cbbb55e781d3..865517470967 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -704,8 +704,11 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, static loff_t cifs_llseek(struct file *file, loff_t offset, int origin) { - /* origin == SEEK_END => we must revalidate the cached file length */ - if (origin == SEEK_END) { + /* + * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate + * the cached file length + */ + if (origin != SEEK_SET || origin != SEEK_CUR) { int rc; struct inode *inode = file->f_path.dentry->d_inode; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 82a66466a24c..73b89df20851 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1600,15 +1600,32 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin) struct inode *inode = file->f_path.dentry->d_inode; mutex_lock(&inode->i_mutex); - switch (origin) { - case SEEK_END: + if (origin != SEEK_CUR || origin != SEEK_SET) { retval = fuse_update_attributes(inode, NULL, file, NULL); if (retval) goto exit; + } + + switch (origin) { + case SEEK_END: offset += i_size_read(inode); break; case SEEK_CUR: offset += file->f_pos; + break; + case SEEK_DATA: + if (offset >= i_size_read(inode)) { + retval = -ENXIO; + goto exit; + } + break; + case SEEK_HOLE: + if (offset >= i_size_read(inode)) { + retval = -ENXIO; + goto exit; + } + offset = i_size_read(inode); + break; } retval = -EINVAL; if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) { diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index f46ae025bfb5..96a8ed91cedd 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -29,6 +29,10 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence) struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct super_block *s = i->i_sb; + /* Somebody else will have to figure out what to do here */ + if (whence == SEEK_DATA || whence == SEEK_HOLE) + return -EINVAL; + hpfs_lock(s); /*printk("dir lseek\n");*/ diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 2f093ed16980..2c1705b6acd7 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -187,8 +187,11 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin) filp->f_path.dentry->d_name.name, offset, origin); - /* origin == SEEK_END => we must revalidate the cached file length */ - if (origin == SEEK_END) { + /* + * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate + * the cached file length + */ + if (origin != SEEK_SET || origin != SEEK_CUR) { struct inode *inode = filp->f_mapping->host; int retval = nfs_revalidate_file_size(inode, filp); -- cgit v1.2.3 From 02c24a82187d5a628c68edfe71ae60dc135cd178 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Sat, 16 Jul 
2011 20:44:56 -0400 Subject: fs: push i_mutex and filemap_write_and_wait down into ->fsync() handlers Btrfs needs to be able to control how filemap_write_and_wait_range() is called in fsync to make it less of a painful operation, so push down taking i_mutex and the calling of filemap_write_and_wait() down into the ->fsync() handlers. Some file systems can drop taking the i_mutex altogether it seems, like ext3 and ocfs2. For correctness sake I just pushed everything down in all cases to make sure that we keep the current behavior the same for everybody, and then each individual fs maintainer can make up their mind about what to do from there. Thanks, Acked-by: Jan Kara Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- Documentation/filesystems/Locking | 6 ++--- Documentation/filesystems/porting | 7 ++++++ Documentation/filesystems/vfs.txt | 2 +- arch/powerpc/platforms/cell/spufs/file.c | 11 +++++++-- drivers/char/ps3flash.c | 9 ++++++-- drivers/mtd/ubi/cdev.c | 10 ++++++--- drivers/staging/pohmelfs/inode.c | 11 ++++++--- drivers/usb/gadget/printer.c | 5 ++++- drivers/video/fb_defio.c | 11 +++++++-- fs/9p/v9fs_vfs.h | 3 ++- fs/9p/vfs_file.c | 22 ++++++++++++++++-- fs/affs/affs.h | 2 +- fs/affs/file.c | 8 ++++++- fs/afs/internal.h | 2 +- fs/afs/write.c | 18 +++++++++++---- fs/bad_inode.c | 3 ++- fs/block_dev.c | 6 +---- fs/btrfs/ctree.h | 2 +- fs/btrfs/file.c | 21 +++++++++++++----- fs/ceph/caps.c | 6 +++-- fs/ceph/dir.c | 10 ++++++++- fs/ceph/super.h | 3 ++- fs/cifs/cifsfs.h | 4 ++-- fs/cifs/file.c | 18 +++++++++++++-- fs/coda/coda_int.h | 2 +- fs/coda/file.c | 8 ++++++- fs/ecryptfs/file.c | 7 +++--- fs/exofs/file.c | 10 ++++++++- fs/ext2/ext2.h | 3 ++- fs/ext2/file.c | 4 ++-- fs/ext3/fsync.c | 18 +++++++++++++-- fs/ext4/ext4.h | 2 +- fs/ext4/fsync.c | 38 +++++++++++++++++++++++++++++--- fs/fat/fat.h | 3 ++- fs/fat/file.c | 4 ++-- fs/fuse/dir.c | 5 +++-- fs/fuse/file.c | 24 +++++++++++++++----- fs/fuse/fuse_i.h | 3 ++- fs/gfs2/file.c | 17 +++++++++++--- fs/hfs/inode.c | 9 +++++++- fs/hfsplus/hfsplus_fs.h | 3 ++- fs/hfsplus/inode.c | 10 ++++++++- fs/hostfs/hostfs_kern.c | 15 +++++++++++-- fs/hpfs/file.c | 7 +++++- fs/hpfs/hpfs_fn.h | 2 +- fs/hppfs/hppfs.c | 5 +++-- fs/jffs2/file.c | 9 +++++++- fs/jffs2/os-linux.h | 2 +- fs/jfs/file.c | 9 +++++++- fs/jfs/jfs_inode.h | 2 +- fs/libfs.c | 16 ++++++++++---- fs/logfs/file.c | 11 ++++++++- fs/logfs/logfs.h | 2 +- fs/ncpfs/file.c | 4 ++-- fs/nfs/dir.c | 8 +++++-- fs/nfs/file.c | 11 ++++++--- fs/nilfs2/file.c | 12 ++++++++-- fs/nilfs2/nilfs.h | 2 +- fs/ntfs/dir.c | 10 ++++++++- fs/ntfs/file.c | 10 ++++++++- fs/ocfs2/file.c | 14 +++++++++++- fs/reiserfs/dir.c | 13 +++++++++-- fs/reiserfs/file.c | 9 +++++++- fs/sync.c | 25 +++------------------ fs/ubifs/file.c | 21 +++++++++--------- fs/ubifs/ubifs.h | 2 +- fs/xfs/linux-2.6/xfs_file.c | 17 +++++++------- include/linux/ext3_fs.h | 2 +- include/linux/fb.h | 3 ++- include/linux/fs.h | 9 ++++---- ipc/shm.c | 4 ++-- 71 files changed, 462 insertions(+), 164 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 9b6ed7c9f34f..ca7e25292542 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -412,7 +412,7 @@ prototypes: int (*open) (struct inode *, struct file *); int (*flush) (struct file *); int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, int datasync); + int (*fsync) (struct file *, loff_t start, loff_t end, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); 
int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); @@ -438,9 +438,7 @@ prototypes: locking rules: All may block except for ->setlease. - No VFS locks held on entry except for ->fsync and ->setlease. - -->fsync() has i_mutex on inode. + No VFS locks held on entry except for ->setlease. ->setlease has the file_list_lock held and must not sleep. diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 6b96773e27cb..7f8861d341ea 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -421,3 +421,10 @@ data and there is a virtual hole at the end of the file. So if the provided offset is less than i_size and SEEK_DATA is specified, return the same offset. If the above is true for the offset and you are given SEEK_HOLE, return the end of the file. If the offset is i_size or greater return -ENXIO in either case. + +[mandatory] + If you have your own ->fsync() you must make sure to call +filemap_write_and_wait_range() so that all dirty pages are synced out properly. +You must also keep in mind that ->fsync() is not called with i_mutex held +anymore, so if you require i_mutex locking you must make sure to take it and +release it yourself. diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 6bf85b78cfea..eff6617c9a0f 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -777,7 +777,7 @@ struct file_operations { int (*open) (struct inode *, struct file *); int (*flush) (struct file *); int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, int datasync); + int (*fsync) (struct file *, loff_t, loff_t, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 3c7c3f82d842..fb59c46e9e9e 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -1850,9 +1850,16 @@ out: return ret; } -static int spufs_mfc_fsync(struct file *file, int datasync) +static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - return spufs_mfc_flush(file, NULL); + struct inode *inode = file->f_path.dentry->d_inode; + int err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (!err) { + mutex_lock(&inode->i_mutex); + err = spufs_mfc_flush(file, NULL); + mutex_unlock(&inode->i_mutex); + } + return err; } static int spufs_mfc_fasync(int fd, struct file *file, int on) diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c index 5a06787e5be3..d0c57c2e2909 100644 --- a/drivers/char/ps3flash.c +++ b/drivers/char/ps3flash.c @@ -309,9 +309,14 @@ static int ps3flash_flush(struct file *file, fl_owner_t id) return ps3flash_writeback(ps3flash_dev); } -static int ps3flash_fsync(struct file *file, int datasync) +static int ps3flash_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - return ps3flash_writeback(ps3flash_dev); + struct inode *inode = file->f_path.dentry->d_inode; + int err; + mutex_lock(&inode->i_mutex); + err = ps3flash_writeback(ps3flash_dev); + mutex_unlock(&inode->i_mutex); + return err; } static irqreturn_t ps3flash_interrupt(int irq, void *data) diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 191f3bb3c41a..3320a50ba4f0 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ 
-189,12 +189,16 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) return new_offset; } -static int vol_cdev_fsync(struct file *file, int datasync) +static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct ubi_volume_desc *desc = file->private_data; struct ubi_device *ubi = desc->vol->ubi; - - return ubi_sync(ubi->ubi_num); + struct inode *inode = file->f_path.dentry->d_inode; + int err; + mutex_lock(&inode->i_mutex); + err = ubi_sync(ubi->ubi_num); + mutex_unlock(&inode->i_mutex); + return err; } diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c index c0f0ac7c1cdb..f3c6060c96b8 100644 --- a/drivers/staging/pohmelfs/inode.c +++ b/drivers/staging/pohmelfs/inode.c @@ -887,11 +887,16 @@ static struct inode *pohmelfs_alloc_inode(struct super_block *sb) /* * We want fsync() to work on POHMELFS. */ -static int pohmelfs_fsync(struct file *file, int datasync) +static int pohmelfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; - - return sync_inode_metadata(inode, 1); + int err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (!err) { + mutex_lock(&inode->i_mutex); + err = sync_inode_metadata(inode, 1); + mutex_unlock(&inode->i_mutex); + } + return err; } ssize_t pohmelfs_write(struct file *file, const char __user *buf, diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 271ef94668e7..978e6a101bf2 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -795,12 +795,14 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) } static int -printer_fsync(struct file *fd, int datasync) +printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync) { struct printer_dev *dev = fd->private_data; + struct inode *inode = fd->f_path.dentry->d_inode; unsigned long flags; int tx_list_empty; + mutex_lock(&inode->i_mutex); spin_lock_irqsave(&dev->lock, flags); tx_list_empty = (likely(list_empty(&dev->tx_reqs))); spin_unlock_irqrestore(&dev->lock, flags); @@ -810,6 +812,7 @@ printer_fsync(struct file *fd, int datasync) wait_event_interruptible(dev->tx_flush_wait, (likely(list_empty(&dev->tx_reqs_active)))); } + mutex_unlock(&inode->i_mutex); return 0; } diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 804000183c5e..32814e8800e0 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -66,19 +66,26 @@ static int fb_deferred_io_fault(struct vm_area_struct *vma, return 0; } -int fb_deferred_io_fsync(struct file *file, int datasync) +int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct fb_info *info = file->private_data; + struct inode *inode = file->f_path.dentry->d_inode; + int err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; /* Skip if deferred io is compiled-in but disabled on this fbdev */ if (!info->fbdefio) return 0; + mutex_lock(&inode->i_mutex); /* Kill off the delayed work */ cancel_delayed_work_sync(&info->deferred_work); /* Run it immediately */ - return schedule_delayed_work(&info->deferred_work, 0); + err = schedule_delayed_work(&info->deferred_work, 0); + mutex_unlock(&inode->i_mutex); + return err; } EXPORT_SYMBOL_GPL(fb_deferred_io_fsync); diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index 4014160903a9..46ce357ca1ab 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -70,7 +70,8 @@ ssize_t v9fs_file_readn(struct 
file *, char *, char __user *, u32, u64); ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64); void v9fs_blank_wstat(struct p9_wstat *wstat); int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *); -int v9fs_file_fsync_dotl(struct file *filp, int datasync); +int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, + int datasync); ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *, const char __user *, size_t, loff_t *, int); int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index ffed55817f0c..3c173fcc2c5a 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -519,32 +519,50 @@ out: } -static int v9fs_file_fsync(struct file *filp, int datasync) +static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) { struct p9_fid *fid; + struct inode *inode = filp->f_mapping->host; struct p9_wstat wstat; int retval; + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (retval) + return retval; + + mutex_lock(&inode->i_mutex); P9_DPRINTK(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); fid = filp->private_data; v9fs_blank_wstat(&wstat); retval = p9_client_wstat(fid, &wstat); + mutex_unlock(&inode->i_mutex); + return retval; } -int v9fs_file_fsync_dotl(struct file *filp, int datasync) +int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, + int datasync) { struct p9_fid *fid; + struct inode *inode = filp->f_mapping->host; int retval; + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (retval) + return retval; + + mutex_lock(&inode->i_mutex); P9_DPRINTK(P9_DEBUG_VFS, "v9fs_file_fsync_dotl: filp %p datasync %x\n", filp, datasync); fid = filp->private_data; retval = p9_client_fsync(fid, datasync); + mutex_unlock(&inode->i_mutex); + return retval; } diff --git a/fs/affs/affs.h b/fs/affs/affs.h index 0e95f73a7023..c2b9c79eb64e 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -182,7 +182,7 @@ extern int affs_add_entry(struct inode *dir, struct inode *inode, struct dent void affs_free_prealloc(struct inode *inode); extern void affs_truncate(struct inode *); -int affs_file_fsync(struct file *, int); +int affs_file_fsync(struct file *, loff_t, loff_t, int); /* dir.c */ diff --git a/fs/affs/file.c b/fs/affs/file.c index acf321b70fcd..2f4c935cb327 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -923,14 +923,20 @@ affs_truncate(struct inode *inode) affs_free_prealloc(inode); } -int affs_file_fsync(struct file *filp, int datasync) +int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; int ret, err; + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + + mutex_lock(&inode->i_mutex); ret = write_inode_now(inode, 0); err = sync_blockdev(inode->i_sb->s_bdev); if (!ret) ret = err; + mutex_unlock(&inode->i_mutex); return ret; } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index f396d337b817..d2b0888126d4 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -750,7 +750,7 @@ extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); extern ssize_t afs_file_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); extern int afs_writeback_all(struct afs_vnode *); -extern int afs_fsync(struct file *, int); +extern int afs_fsync(struct file *, loff_t, loff_t, int); /*****************************************************************************/ diff --git 
a/fs/afs/write.c b/fs/afs/write.c index b806285ff853..9aa52d93c73c 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -681,9 +681,10 @@ int afs_writeback_all(struct afs_vnode *vnode) * - the return status from this call provides a reliable indication of * whether any write errors occurred for this process. */ -int afs_fsync(struct file *file, int datasync) +int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct dentry *dentry = file->f_path.dentry; + struct inode *inode = file->f_mapping->host; struct afs_writeback *wb, *xwb; struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode); int ret; @@ -692,12 +693,19 @@ int afs_fsync(struct file *file, int datasync) vnode->fid.vid, vnode->fid.vnode, dentry->d_name.name, datasync); + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + /* use a writeback record as a marker in the queue - when this reaches * the front of the queue, all the outstanding writes are either * completed or rejected */ wb = kzalloc(sizeof(*wb), GFP_KERNEL); - if (!wb) - return -ENOMEM; + if (!wb) { + ret = -ENOMEM; + goto out; + } wb->vnode = vnode; wb->first = 0; wb->last = -1; @@ -720,7 +728,7 @@ int afs_fsync(struct file *file, int datasync) if (ret < 0) { afs_put_writeback(wb); _leave(" = %d [wb]", ret); - return ret; + goto out; } /* wait for the preceding writes to actually complete */ @@ -729,6 +737,8 @@ int afs_fsync(struct file *file, int datasync) vnode->writebacks.next == &wb->link); afs_put_writeback(wb); _leave(" = %d", ret); +out: + mutex_unlock(&inode->i_mutex); return ret; } diff --git a/fs/bad_inode.c b/fs/bad_inode.c index f024d8aaddef..9205cf25f1c6 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -87,7 +87,8 @@ static int bad_file_release(struct inode *inode, struct file *filp) return -EIO; } -static int bad_file_fsync(struct file *file, int datasync) +static int bad_file_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { return -EIO; } diff --git a/fs/block_dev.c b/fs/block_dev.c index 966617a422d9..9fb0b15331d3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -378,7 +378,7 @@ out: return retval; } -int blkdev_fsync(struct file *filp, int datasync) +int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *bd_inode = filp->f_mapping->host; struct block_device *bdev = I_BDEV(bd_inode); @@ -389,14 +389,10 @@ int blkdev_fsync(struct file *filp, int datasync) * i_mutex and doing so causes performance issues with concurrent * O_SYNC writers to a block device. 
*/ - mutex_unlock(&bd_inode->i_mutex); - error = blkdev_issue_flush(bdev, GFP_KERNEL, NULL); if (error == -EOPNOTSUPP) error = 0; - mutex_lock(&bd_inode->i_mutex); - return error; } EXPORT_SYMBOL(blkdev_fsync); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index f1ff62bff1b3..82be74efbb26 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2605,7 +2605,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, struct inode *inode); int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); -int btrfs_sync_file(struct file *file, int datasync); +int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, int skip_pinned); extern const struct file_operations btrfs_file_operations; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index bd4d061c6e4d..59cbdb120ad0 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1452,7 +1452,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp) * important optimization for directories because holding the mutex prevents * new operations on the dir while we write to disk. */ -int btrfs_sync_file(struct file *file, int datasync) +int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; @@ -1462,9 +1462,13 @@ int btrfs_sync_file(struct file *file, int datasync) trace_btrfs_sync_file(file, datasync); + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + /* we wait first, since the writeback may change the inode */ root->log_batch++; - /* the VFS called filemap_fdatawrite for us */ btrfs_wait_ordered_range(inode, 0, (u64)-1); root->log_batch++; @@ -1472,8 +1476,10 @@ int btrfs_sync_file(struct file *file, int datasync) * check the transaction that last modified this inode * and see if its already been committed */ - if (!BTRFS_I(inode)->last_trans) + if (!BTRFS_I(inode)->last_trans) { + mutex_unlock(&inode->i_mutex); goto out; + } /* * if the last transaction that changed this file was before @@ -1484,6 +1490,7 @@ int btrfs_sync_file(struct file *file, int datasync) if (BTRFS_I(inode)->last_trans <= root->fs_info->last_trans_committed) { BTRFS_I(inode)->last_trans = 0; + mutex_unlock(&inode->i_mutex); goto out; } @@ -1496,12 +1503,15 @@ int btrfs_sync_file(struct file *file, int datasync) trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); + mutex_unlock(&inode->i_mutex); goto out; } ret = btrfs_log_dentry_safe(trans, root, dentry); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&inode->i_mutex); goto out; + } /* we've logged all the items and now have a consistent * version of the file in the log. It is possible that @@ -1513,7 +1523,7 @@ int btrfs_sync_file(struct file *file, int datasync) * file again, but that will end up using the synchronization * inside btrfs_sync_log to keep things safe. */ - mutex_unlock(&dentry->d_inode->i_mutex); + mutex_unlock(&inode->i_mutex); if (ret != BTRFS_NO_LOG_SYNC) { if (ret > 0) { @@ -1528,7 +1538,6 @@ int btrfs_sync_file(struct file *file, int datasync) } else { ret = btrfs_end_transaction(trans, root); } - mutex_lock(&dentry->d_inode->i_mutex); out: return ret > 0 ? 
-EIO : ret; } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f605753c8fe9..8d74ad7ba556 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1811,7 +1811,7 @@ out: spin_unlock(&ci->i_unsafe_lock); } -int ceph_fsync(struct file *file, int datasync) +int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct ceph_inode_info *ci = ceph_inode(inode); @@ -1822,9 +1822,10 @@ int ceph_fsync(struct file *file, int datasync) dout("fsync %p%s\n", inode, datasync ? " datasync" : ""); sync_write_wait(inode); - ret = filemap_write_and_wait(inode->i_mapping); + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret < 0) return ret; + mutex_lock(&inode->i_mutex); dirty = try_flush_caps(inode, NULL, &flush_tid); dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); @@ -1841,6 +1842,7 @@ int ceph_fsync(struct file *file, int datasync) } dout("fsync %p%s done\n", inode, datasync ? " datasync" : ""); + mutex_unlock(&inode->i_mutex); return ret; } diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0972b457a03f..1065ac779840 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1118,7 +1118,8 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, * an fsync() on a dir will wait for any uncommitted directory * operations to commit. */ -static int ceph_dir_fsync(struct file *file, int datasync) +static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { struct inode *inode = file->f_path.dentry->d_inode; struct ceph_inode_info *ci = ceph_inode(inode); @@ -1128,6 +1129,11 @@ static int ceph_dir_fsync(struct file *file, int datasync) int ret = 0; dout("dir_fsync %p\n", inode); + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + spin_lock(&ci->i_unsafe_lock); if (list_empty(head)) goto out; @@ -1161,6 +1167,8 @@ static int ceph_dir_fsync(struct file *file, int datasync) } while (req->r_tid < last_tid); out: spin_unlock(&ci->i_unsafe_lock); + mutex_unlock(&inode->i_mutex); + return ret; } diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 56c41ef47cad..30446b144e3d 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -728,7 +728,8 @@ extern void ceph_put_cap(struct ceph_mds_client *mdsc, extern void ceph_queue_caps_release(struct inode *inode); extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); -extern int ceph_fsync(struct file *file, int datasync); +extern int ceph_fsync(struct file *file, loff_t start, loff_t end, + int datasync); extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session); extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 036ca83e5f46..fbd050c8d52a 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -91,8 +91,8 @@ extern ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov, extern ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); extern int cifs_lock(struct file *, int, struct file_lock *); -extern int cifs_fsync(struct file *, int); -extern int cifs_strict_fsync(struct file *, int); +extern int cifs_fsync(struct file *, loff_t, loff_t, int); +extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int); extern int cifs_flush(struct file *, fl_owner_t id); extern int cifs_file_mmap(struct file * , struct vm_area_struct *); extern int 
cifs_file_strict_mmap(struct file * , struct vm_area_struct *); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index bb71471a4d9d..cef584451113 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1401,7 +1401,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, return rc; } -int cifs_strict_fsync(struct file *file, int datasync) +int cifs_strict_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { int xid; int rc = 0; @@ -1410,6 +1411,11 @@ int cifs_strict_fsync(struct file *file, int datasync) struct inode *inode = file->f_path.dentry->d_inode; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + rc = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (rc) + return rc; + mutex_lock(&inode->i_mutex); + xid = GetXid(); cFYI(1, "Sync file - name: %s datasync: 0x%x", @@ -1428,16 +1434,23 @@ int cifs_strict_fsync(struct file *file, int datasync) rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); FreeXid(xid); + mutex_unlock(&inode->i_mutex); return rc; } -int cifs_fsync(struct file *file, int datasync) +int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { int xid; int rc = 0; struct cifs_tcon *tcon; struct cifsFileInfo *smbfile = file->private_data; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); + struct inode *inode = file->f_mapping->host; + + rc = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (rc) + return rc; + mutex_lock(&inode->i_mutex); xid = GetXid(); @@ -1449,6 +1462,7 @@ int cifs_fsync(struct file *file, int datasync) rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); FreeXid(xid); + mutex_unlock(&inode->i_mutex); return rc; } diff --git a/fs/coda/coda_int.h b/fs/coda/coda_int.h index 6b443ff43a19..b7143cf783ac 100644 --- a/fs/coda/coda_int.h +++ b/fs/coda/coda_int.h @@ -11,7 +11,7 @@ extern int coda_fake_statfs; void coda_destroy_inodecache(void); int coda_init_inodecache(void); -int coda_fsync(struct file *coda_file, int datasync); +int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync); void coda_sysctl_init(void); void coda_sysctl_clean(void); diff --git a/fs/coda/file.c b/fs/coda/file.c index 0433057be330..8edd404e6419 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -199,7 +199,7 @@ int coda_release(struct inode *coda_inode, struct file *coda_file) return 0; } -int coda_fsync(struct file *coda_file, int datasync) +int coda_fsync(struct file *coda_file, loff_t start, loff_t end, int datasync) { struct file *host_file; struct inode *coda_inode = coda_file->f_path.dentry->d_inode; @@ -210,6 +210,11 @@ int coda_fsync(struct file *coda_file, int datasync) S_ISLNK(coda_inode->i_mode))) return -EINVAL; + err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end); + if (err) + return err; + mutex_lock(&coda_inode->i_mutex); + cfi = CODA_FTOC(coda_file); BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); host_file = cfi->cfi_container; @@ -217,6 +222,7 @@ int coda_fsync(struct file *coda_file, int datasync) err = vfs_fsync(host_file, datasync); if (!err && !datasync) err = venus_fsync(coda_inode->i_sb, coda_i2f(coda_inode)); + mutex_unlock(&coda_inode->i_mutex); return err; } diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 4ec9eb00a241..c6ac98cf9baa 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -270,14 +270,15 @@ static int ecryptfs_release(struct inode *inode, struct file *file) } static int -ecryptfs_fsync(struct file *file, int datasync) +ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { 
int rc = 0; - rc = generic_file_fsync(file, datasync); + rc = generic_file_fsync(file, start, end, datasync); if (rc) goto out; - rc = vfs_fsync(ecryptfs_file_to_lower(file), datasync); + rc = vfs_fsync_range(ecryptfs_file_to_lower(file), start, end, + datasync); out: return rc; } diff --git a/fs/exofs/file.c b/fs/exofs/file.c index 45ca323d8363..491c6c078e7f 100644 --- a/fs/exofs/file.c +++ b/fs/exofs/file.c @@ -42,11 +42,19 @@ static int exofs_release_file(struct inode *inode, struct file *filp) * Note, in exofs all metadata is written as part of inode, regardless. * The writeout is synchronous */ -static int exofs_file_fsync(struct file *filp, int datasync) +static int exofs_file_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) { + struct inode *inode = filp->f_mapping->host; int ret; + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + + mutex_lock(&inode->i_mutex); ret = sync_inode_metadata(filp->f_mapping->host, 1); + mutex_unlock(&inode->i_mutex); return ret; } diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 645be9e7ee47..af9fc89b1b2d 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -150,7 +150,8 @@ extern void ext2_write_super (struct super_block *); extern const struct file_operations ext2_dir_operations; /* file.c */ -extern int ext2_fsync(struct file *file, int datasync); +extern int ext2_fsync(struct file *file, loff_t start, loff_t end, + int datasync); extern const struct inode_operations ext2_file_inode_operations; extern const struct file_operations ext2_file_operations; extern const struct file_operations ext2_xip_file_operations; diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 49eec9456c5b..82e06321de35 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -40,13 +40,13 @@ static int ext2_release_file (struct inode * inode, struct file * filp) return 0; } -int ext2_fsync(struct file *file, int datasync) +int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync) { int ret; struct super_block *sb = file->f_mapping->host->i_sb; struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; - ret = generic_file_fsync(file, datasync); + ret = generic_file_fsync(file, start, end, datasync); if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { /* We don't really know where the IO error happened... */ ext2_error(sb, __func__, diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c index 09b13bb34c94..0bcf63adb80a 100644 --- a/fs/ext3/fsync.c +++ b/fs/ext3/fsync.c @@ -43,7 +43,7 @@ * inode to disk. */ -int ext3_sync_file(struct file *file, int datasync) +int ext3_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct ext3_inode_info *ei = EXT3_I(inode); @@ -54,6 +54,17 @@ int ext3_sync_file(struct file *file, int datasync) if (inode->i_sb->s_flags & MS_RDONLY) return 0; + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + + /* + * Taking the mutex here just to keep consistent with how fsync was + * called previously, however it looks like we don't need to take + * i_mutex at all. + */ + mutex_lock(&inode->i_mutex); + J_ASSERT(ext3_journal_current_handle() == NULL); /* @@ -70,8 +81,10 @@ int ext3_sync_file(struct file *file, int datasync) * (they were dirtied by commit). But that's OK - the blocks are * safe in-journal, which is all fsync() needs to ensure. 
*/ - if (ext3_should_journal_data(inode)) + if (ext3_should_journal_data(inode)) { + mutex_unlock(&inode->i_mutex); return ext3_force_commit(inode->i_sb); + } if (datasync) commit_tid = atomic_read(&ei->i_datasync_tid); @@ -91,5 +104,6 @@ int ext3_sync_file(struct file *file, int datasync) */ if (needs_barrier) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + mutex_unlock(&inode->i_mutex); return ret; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 1921392cd708..fa44df879711 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1758,7 +1758,7 @@ extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, extern void ext4_htree_free_dir_info(struct dir_private_info *p); /* fsync.c */ -extern int ext4_sync_file(struct file *, int); +extern int ext4_sync_file(struct file *, loff_t, loff_t, int); extern int ext4_flush_completed_IO(struct inode *); /* hash.c */ diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index ce66d2fe826c..da3bed3e0c29 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -151,6 +151,32 @@ static int ext4_sync_parent(struct inode *inode) return ret; } +/** + * __sync_file - generic_file_fsync without the locking and filemap_write + * @inode: inode to sync + * @datasync: only sync essential metadata if true + * + * This is just generic_file_fsync without the locking. This is needed for + * nojournal mode to make sure this inodes data/metadata makes it to disk + * properly. The i_mutex should be held already. + */ +static int __sync_inode(struct inode *inode, int datasync) +{ + int err; + int ret; + + ret = sync_mapping_buffers(inode->i_mapping); + if (!(inode->i_state & I_DIRTY)) + return ret; + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) + return ret; + + err = sync_inode_metadata(inode, 1); + if (ret == 0) + ret = err; + return ret; +} + /* * akpm: A new design for ext4_sync_file(). 
* @@ -165,7 +191,7 @@ static int ext4_sync_parent(struct inode *inode) * i_mutex lock is held when entering and exiting this function */ -int ext4_sync_file(struct file *file, int datasync) +int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct ext4_inode_info *ei = EXT4_I(inode); @@ -178,15 +204,20 @@ int ext4_sync_file(struct file *file, int datasync) trace_ext4_sync_file_enter(file, datasync); + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + if (inode->i_sb->s_flags & MS_RDONLY) - return 0; + goto out; ret = ext4_flush_completed_IO(inode); if (ret < 0) goto out; if (!journal) { - ret = generic_file_fsync(file, datasync); + ret = __sync_inode(inode, datasync); if (!ret && !list_empty(&inode->i_dentry)) ret = ext4_sync_parent(inode); goto out; @@ -220,6 +251,7 @@ int ext4_sync_file(struct file *file, int datasync) if (needs_barrier) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); out: + mutex_unlock(&inode->i_mutex); trace_ext4_sync_file_exit(inode, ret); return ret; } diff --git a/fs/fat/fat.h b/fs/fat/fat.h index a975b4147e91..a5d3853822e0 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -310,7 +310,8 @@ extern int fat_setattr(struct dentry * dentry, struct iattr * attr); extern void fat_truncate_blocks(struct inode *inode, loff_t offset); extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); -extern int fat_file_fsync(struct file *file, int datasync); +extern int fat_file_fsync(struct file *file, loff_t start, loff_t end, + int datasync); /* fat/inode.c */ extern void fat_attach(struct inode *inode, loff_t i_pos); diff --git a/fs/fat/file.c b/fs/fat/file.c index e1587c54d3c1..c118acf16e43 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -149,12 +149,12 @@ static int fat_file_release(struct inode *inode, struct file *filp) return 0; } -int fat_file_fsync(struct file *filp, int datasync) +int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; int res, err; - res = generic_file_fsync(filp, datasync); + res = generic_file_fsync(filp, start, end, datasync); err = sync_mapping_buffers(MSDOS_SB(inode->i_sb)->fat_inode->i_mapping); return res ? 
res : err; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 02063dde2728..9f63e493a9b6 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1176,9 +1176,10 @@ static int fuse_dir_release(struct inode *inode, struct file *file) return 0; } -static int fuse_dir_fsync(struct file *file, int datasync) +static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { - return fuse_fsync_common(file, datasync, 1); + return fuse_fsync_common(file, start, end, datasync, 1); } static bool update_mtime(unsigned ivalid) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 73b89df20851..7bb685cdd00c 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -400,7 +400,8 @@ static void fuse_sync_writes(struct inode *inode) fuse_release_nowrite(inode); } -int fuse_fsync_common(struct file *file, int datasync, int isdir) +int fuse_fsync_common(struct file *file, loff_t start, loff_t end, + int datasync, int isdir) { struct inode *inode = file->f_mapping->host; struct fuse_conn *fc = get_fuse_conn(inode); @@ -412,9 +413,15 @@ int fuse_fsync_common(struct file *file, int datasync, int isdir) if (is_bad_inode(inode)) return -EIO; + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) return 0; + mutex_lock(&inode->i_mutex); + /* * Start writeback against all dirty pages of the inode, then * wait for all outstanding writes, before sending the FSYNC @@ -422,13 +429,15 @@ int fuse_fsync_common(struct file *file, int datasync, int isdir) */ err = write_inode_now(inode, 0); if (err) - return err; + goto out; fuse_sync_writes(inode); req = fuse_get_req(fc); - if (IS_ERR(req)) - return PTR_ERR(req); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out; + } memset(&inarg, 0, sizeof(inarg)); inarg.fh = ff->fh; @@ -448,12 +457,15 @@ int fuse_fsync_common(struct file *file, int datasync, int isdir) fc->no_fsync = 1; err = 0; } +out: + mutex_unlock(&inode->i_mutex); return err; } -static int fuse_fsync(struct file *file, int datasync) +static int fuse_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { - return fuse_fsync_common(file, datasync, 0); + return fuse_fsync_common(file, start, end, datasync, 0); } void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index b788becada76..c6aa2d4b8517 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -589,7 +589,8 @@ void fuse_release_common(struct file *file, int opcode); /** * Send FSYNC or FSYNCDIR request */ -int fuse_fsync_common(struct file *file, int datasync, int isdir); +int fuse_fsync_common(struct file *file, loff_t start, loff_t end, + int datasync, int isdir); /** * Notify poll wakeup diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 89c39e53760d..f82cb5e1cb6b 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -544,7 +544,9 @@ static int gfs2_close(struct inode *inode, struct file *file) /** * gfs2_fsync - sync the dirty data for a file (across the cluster) - * @file: the file that points to the dentry (we ignore this) + * @file: the file that points to the dentry + * @start: the start position in the file to sync + * @end: the end position in the file to sync * @datasync: set if we can ignore timestamp changes * * The VFS will flush data for us. 
We only need to worry @@ -553,23 +555,32 @@ static int gfs2_close(struct inode *inode, struct file *file) * Returns: errno */ -static int gfs2_fsync(struct file *file, int datasync) +static int gfs2_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { struct inode *inode = file->f_mapping->host; int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC); struct gfs2_inode *ip = GFS2_I(inode); int ret; + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + if (datasync) sync_state &= ~I_DIRTY_SYNC; if (sync_state) { ret = sync_inode_metadata(inode, 1); - if (ret) + if (ret) { + mutex_unlock(&inode->i_mutex); return ret; + } gfs2_ail_flush(ip->i_gl); } + mutex_unlock(&inode->i_mutex); return 0; } diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 5e7c3f309617..96a1b625fc74 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -627,12 +627,18 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr) return 0; } -static int hfs_file_fsync(struct file *filp, int datasync) +static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) { struct inode *inode = filp->f_mapping->host; struct super_block * sb; int ret, err; + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + /* sync the inode to buffers */ ret = write_inode_now(inode, 0); @@ -649,6 +655,7 @@ static int hfs_file_fsync(struct file *filp, int datasync) err = sync_blockdev(sb->s_bdev); if (!ret) ret = err; + mutex_unlock(&inode->i_mutex); return ret; } diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index d6857523336d..38184e360932 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -392,7 +392,8 @@ int hfsplus_cat_read_inode(struct inode *, struct hfs_find_data *); int hfsplus_cat_write_inode(struct inode *); struct inode *hfsplus_new_inode(struct super_block *, int); void hfsplus_delete_inode(struct inode *); -int hfsplus_file_fsync(struct file *file, int datasync); +int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, + int datasync); /* ioctl.c */ long hfsplus_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 5b1cb98741cc..30486e01d003 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -308,13 +308,19 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr) return 0; } -int hfsplus_file_fsync(struct file *file, int datasync) +int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { struct inode *inode = file->f_mapping->host; struct hfsplus_inode_info *hip = HFSPLUS_I(inode); struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); int error = 0, error2; + error = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (error) + return error; + mutex_lock(&inode->i_mutex); + /* * Sync inode metadata into the catalog and extent trees. 
*/ @@ -342,6 +348,8 @@ int hfsplus_file_fsync(struct file *file, int datasync) if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags)) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + mutex_unlock(&inode->i_mutex); + return error; } diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 6e449c599b9d..0d22afdd4611 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -362,9 +362,20 @@ retry: return 0; } -int hostfs_fsync(struct file *file, int datasync) +int hostfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - return fsync_file(HOSTFS_I(file->f_mapping->host)->fd, datasync); + struct inode *inode = file->f_mapping->host; + int ret; + + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + + mutex_lock(&inode->i_mutex); + ret = fsync_file(HOSTFS_I(inode)->fd, datasync); + mutex_unlock(&inode->i_mutex); + + return ret; } static const struct file_operations hostfs_file_fops = { diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c index 89c500ee5213..89d2a5803ae3 100644 --- a/fs/hpfs/file.c +++ b/fs/hpfs/file.c @@ -18,9 +18,14 @@ static int hpfs_file_release(struct inode *inode, struct file *file) return 0; } -int hpfs_file_fsync(struct file *file, int datasync) +int hpfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; + int ret; + + ret = filemap_write_and_wait_range(file->f_mapping, start, end); + if (ret) + return ret; return sync_blockdev(inode->i_sb->s_bdev); } diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index dd552f862c8f..331b5e234ef3 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -258,7 +258,7 @@ void hpfs_set_ea(struct inode *, struct fnode *, const char *, /* file.c */ -int hpfs_file_fsync(struct file *, int); +int hpfs_file_fsync(struct file *, loff_t, loff_t, int); extern const struct file_operations hpfs_file_ops; extern const struct inode_operations hpfs_file_iops; extern const struct address_space_operations hpfs_aops; diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 85c098a499f3..8635be5ffd97 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -573,9 +573,10 @@ static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir) return err; } -static int hppfs_fsync(struct file *file, int datasync) +static int hppfs_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { - return 0; + return filemap_write_and_wait_range(file->f_mapping, start, end); } static const struct file_operations hppfs_dir_fops = { diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 1c0a08d711aa..3989f7e09f7f 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -27,13 +27,20 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page **pagep, void **fsdata); static int jffs2_readpage (struct file *filp, struct page *pg); -int jffs2_fsync(struct file *filp, int datasync) +int jffs2_fsync(struct file *filp, loff_t start, loff_t end, int datasync) { struct inode *inode = filp->f_mapping->host; struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + int ret; + + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); /* Trigger GC to flush any pending writes for this inode */ jffs2_flush_wbuf_gc(c, inode->i_ino); + mutex_unlock(&inode->i_mutex); return 0; } diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index 65c6c43ca482..9c252835e8e5 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h @@ 
-158,7 +158,7 @@ extern const struct inode_operations jffs2_dir_inode_operations; extern const struct file_operations jffs2_file_operations; extern const struct inode_operations jffs2_file_inode_operations; extern const struct address_space_operations jffs2_file_address_operations; -int jffs2_fsync(struct file *, int); +int jffs2_fsync(struct file *, loff_t, loff_t, int); int jffs2_do_readpage_unlock (struct inode *inode, struct page *pg); /* ioctl.c */ diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 9f32315acef1..7527855b5cc6 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -28,19 +28,26 @@ #include "jfs_acl.h" #include "jfs_debug.h" -int jfs_fsync(struct file *file, int datasync) +int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; int rc = 0; + rc = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (rc) + return rc; + + mutex_lock(&inode->i_mutex); if (!(inode->i_state & I_DIRTY) || (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { /* Make sure committed changes hit the disk */ jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1); + mutex_unlock(&inode->i_mutex); return rc; } rc |= jfs_commit_inode(inode, 1); + mutex_unlock(&inode->i_mutex); return rc ? -EIO : 0; } diff --git a/fs/jfs/jfs_inode.h b/fs/jfs/jfs_inode.h index ec2fb8b945fc..9271cfe4a149 100644 --- a/fs/jfs/jfs_inode.h +++ b/fs/jfs/jfs_inode.h @@ -21,7 +21,7 @@ struct fid; extern struct inode *ialloc(struct inode *, umode_t); -extern int jfs_fsync(struct file *, int); +extern int jfs_fsync(struct file *, loff_t, loff_t, int); extern long jfs_ioctl(struct file *, unsigned int, unsigned long); extern long jfs_compat_ioctl(struct file *, unsigned int, unsigned long); extern struct inode *jfs_iget(struct super_block *, unsigned long); diff --git a/fs/libfs.c b/fs/libfs.c index bd50b11f92da..8f2271a5df53 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -905,21 +905,29 @@ EXPORT_SYMBOL_GPL(generic_fh_to_parent); * filesystems which track all non-inode metadata in the buffers list * hanging off the address_space structure. */ -int generic_file_fsync(struct file *file, int datasync) +int generic_file_fsync(struct file *file, loff_t start, loff_t end, + int datasync) { struct inode *inode = file->f_mapping->host; int err; int ret; + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + + mutex_lock(&inode->i_mutex); ret = sync_mapping_buffers(inode->i_mapping); if (!(inode->i_state & I_DIRTY)) - return ret; + goto out; if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) - return ret; + goto out; err = sync_inode_metadata(inode, 1); if (ret == 0) ret = err; +out: + mutex_unlock(&inode->i_mutex); return ret; } EXPORT_SYMBOL(generic_file_fsync); @@ -956,7 +964,7 @@ EXPORT_SYMBOL(generic_check_addressable); /* * No-op implementation of ->fsync for in-memory filesystems. 
*/ -int noop_fsync(struct file *file, int datasync) +int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync) { return 0; } diff --git a/fs/logfs/file.c b/fs/logfs/file.c index c2ad7028def4..b548c87a86f1 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c @@ -219,11 +219,20 @@ long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } } -int logfs_fsync(struct file *file, int datasync) +int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct super_block *sb = file->f_mapping->host->i_sb; + struct inode *inode = file->f_mapping->host; + int ret; + + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); logfs_write_anchor(sb); + mutex_unlock(&inode->i_mutex); + return 0; } diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h index 57afd4a6fabb..f22d108bfa5d 100644 --- a/fs/logfs/logfs.h +++ b/fs/logfs/logfs.h @@ -506,7 +506,7 @@ extern const struct file_operations logfs_reg_fops; extern const struct address_space_operations logfs_reg_aops; int logfs_readpage(struct file *file, struct page *page); long logfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -int logfs_fsync(struct file *file, int datasync); +int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync); /* gc.c */ u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec); diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index 0ed65e0c3dfe..64a326418aa2 100644 --- a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -20,9 +20,9 @@ #include "ncp_fs.h" -static int ncp_fsync(struct file *file, int datasync) +static int ncp_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - return 0; + return filemap_write_and_wait_range(file->f_mapping, start, end); } /* diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8a45e6d1f6a4..57f578e2560a 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -56,7 +56,7 @@ static int nfs_link(struct dentry *, struct inode *, struct dentry *); static int nfs_mknod(struct inode *, struct dentry *, int, dev_t); static int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); -static int nfs_fsync_dir(struct file *, int); +static int nfs_fsync_dir(struct file *, loff_t, loff_t, int); static loff_t nfs_llseek_dir(struct file *, loff_t, int); static void nfs_readdir_clear_array(struct page*); @@ -945,15 +945,19 @@ out: * All directory operations under NFS are synchronous, so fsync() * is a dummy operation. 
*/ -static int nfs_fsync_dir(struct file *filp, int datasync) +static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end, + int datasync) { struct dentry *dentry = filp->f_path.dentry; + struct inode *inode = dentry->d_inode; dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n", dentry->d_parent->d_name.name, dentry->d_name.name, datasync); + mutex_lock(&inode->i_mutex); nfs_inc_stats(dentry->d_inode, NFSIOS_VFSFSYNC); + mutex_unlock(&inode->i_mutex); return 0; } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 2c1705b6acd7..28b8c3f3cda3 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -55,7 +55,7 @@ static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe, static ssize_t nfs_file_write(struct kiocb *, const struct iovec *iov, unsigned long nr_segs, loff_t pos); static int nfs_file_flush(struct file *, fl_owner_t id); -static int nfs_file_fsync(struct file *, int datasync); +static int nfs_file_fsync(struct file *, loff_t, loff_t, int datasync); static int nfs_check_flags(int flags); static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl); static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl); @@ -308,7 +308,7 @@ nfs_file_mmap(struct file * file, struct vm_area_struct * vma) * fall back to doing a synchronous write. */ static int -nfs_file_fsync(struct file *file, int datasync) +nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct dentry *dentry = file->f_path.dentry; struct nfs_open_context *ctx = nfs_file_open_context(file); @@ -316,11 +316,15 @@ nfs_file_fsync(struct file *file, int datasync) int have_error, status; int ret = 0; - dprintk("NFS: fsync file(%s/%s) datasync %d\n", dentry->d_parent->d_name.name, dentry->d_name.name, datasync); + ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret) + return ret; + mutex_lock(&inode->i_mutex); + nfs_inc_stats(inode, NFSIOS_VFSFSYNC); have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); status = nfs_commit_inode(inode, FLUSH_SYNC); @@ -332,6 +336,7 @@ nfs_file_fsync(struct file *file, int datasync) if (!ret && !datasync) /* application has asked for meta-data sync */ ret = pnfs_layoutcommit_inode(inode, true); + mutex_unlock(&inode->i_mutex); return ret; } diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index d7eeca62febd..26601529dc17 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -27,7 +27,7 @@ #include "nilfs.h" #include "segment.h" -int nilfs_sync_file(struct file *file, int datasync) +int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { /* * Called from fsync() system call @@ -40,8 +40,15 @@ int nilfs_sync_file(struct file *file, int datasync) struct inode *inode = file->f_mapping->host; int err; - if (!nilfs_inode_dirty(inode)) + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + mutex_lock(&inode->i_mutex); + + if (!nilfs_inode_dirty(inode)) { + mutex_unlock(&inode->i_mutex); return 0; + } if (datasync) err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0, @@ -49,6 +56,7 @@ int nilfs_sync_file(struct file *file, int datasync) else err = nilfs_construct_segment(inode->i_sb); + mutex_unlock(&inode->i_mutex); return err; } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 6fb7511c0328..255d5e1c03b7 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -235,7 +235,7 @@ extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *, struct page *, struct inode *); /* file.c */ -extern int nilfs_sync_file(struct file *, 
int); +extern int nilfs_sync_file(struct file *, loff_t, loff_t, int); /* ioctl.c */ long nilfs_ioctl(struct file *, unsigned int, unsigned long); diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 0f48e7c5d9e1..99e36107ff60 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -1527,13 +1527,20 @@ static int ntfs_dir_open(struct inode *vi, struct file *filp) * this problem for now. We do write the $BITMAP attribute if it is present * which is the important one for a directory so things are not too bad. */ -static int ntfs_dir_fsync(struct file *filp, int datasync) +static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) { struct inode *bmp_vi, *vi = filp->f_mapping->host; int err, ret; ntfs_attr na; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); + + err = filemap_write_and_wait_range(vi->i_mapping, start, end); + if (err) + return err; + mutex_lock(&vi->i_mutex); + BUG_ON(!S_ISDIR(vi->i_mode)); /* If the bitmap attribute inode is in memory sync it, too. */ na.mft_no = vi->i_ino; @@ -1555,6 +1562,7 @@ static int ntfs_dir_fsync(struct file *filp, int datasync) else ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error " "%u.", datasync ? "data" : "", vi->i_ino, -ret); + mutex_unlock(&vi->i_mutex); return ret; } diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index b59f5ac26bef..c587e2d27183 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -2152,12 +2152,19 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, * with this inode but since we have no simple way of getting to them we ignore * this problem for now. */ -static int ntfs_file_fsync(struct file *filp, int datasync) +static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) { struct inode *vi = filp->f_mapping->host; int err, ret = 0; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); + + err = filemap_write_and_wait_range(vi->i_mapping, start, end); + if (err) + return err; + mutex_lock(&vi->i_mutex); + BUG_ON(S_ISDIR(vi->i_mode)); if (!datasync || !NInoNonResident(NTFS_I(vi))) ret = __ntfs_write_inode(vi, 1); @@ -2175,6 +2182,7 @@ static int ntfs_file_fsync(struct file *filp, int datasync) else ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx. Error " "%u.", datasync ? "data" : "", vi->i_ino, -ret); + mutex_unlock(&vi->i_mutex); return ret; } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 22d604601957..0fc2bd34039d 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -171,7 +171,8 @@ static int ocfs2_dir_release(struct inode *inode, struct file *file) return 0; } -static int ocfs2_sync_file(struct file *file, int datasync) +static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end, + int datasync) { int err = 0; journal_t *journal; @@ -184,6 +185,16 @@ static int ocfs2_sync_file(struct file *file, int datasync) file->f_path.dentry->d_name.name, (unsigned long long)datasync); + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + + /* + * Probably don't need the i_mutex at all in here, just putting it here + * to be consistent with how fsync used to be called, someone more + * familiar with the fs could possibly remove it. + */ + mutex_lock(&inode->i_mutex); if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) { /* * We still have to flush drive's caches to get data to the @@ -200,6 +211,7 @@ static int ocfs2_sync_file(struct file *file, int datasync) bail: if (err) mlog_errno(err); + mutex_unlock(&inode->i_mutex); return (err < 0) ? 
-EIO : 0; } diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 198dabf1b2bb..133e9355dc6f 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -14,7 +14,8 @@ extern const struct reiserfs_key MIN_KEY; static int reiserfs_readdir(struct file *, void *, filldir_t); -static int reiserfs_dir_fsync(struct file *filp, int datasync); +static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, + int datasync); const struct file_operations reiserfs_dir_operations = { .llseek = generic_file_llseek, @@ -27,13 +28,21 @@ const struct file_operations reiserfs_dir_operations = { #endif }; -static int reiserfs_dir_fsync(struct file *filp, int datasync) +static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) { struct inode *inode = filp->f_mapping->host; int err; + + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + + mutex_lock(&inode->i_mutex); reiserfs_write_lock(inode->i_sb); err = reiserfs_commit_for_inode(inode); reiserfs_write_unlock(inode->i_sb); + mutex_unlock(&inode->i_mutex); if (err < 0) return err; return 0; diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index bbf31003d308..c7156dc39ce7 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -140,12 +140,18 @@ static void reiserfs_vfs_truncate_file(struct inode *inode) * be removed... */ -static int reiserfs_sync_file(struct file *filp, int datasync) +static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end, + int datasync) { struct inode *inode = filp->f_mapping->host; int err; int barrier_done; + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + + mutex_lock(&inode->i_mutex); BUG_ON(!S_ISREG(inode->i_mode)); err = sync_mapping_buffers(inode->i_mapping); reiserfs_write_lock(inode->i_sb); @@ -153,6 +159,7 @@ static int reiserfs_sync_file(struct file *filp, int datasync) reiserfs_write_unlock(inode->i_sb); if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb)) blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); + mutex_unlock(&inode->i_mutex); if (barrier_done < 0) return barrier_done; return (err < 0) ? -EIO : 0; diff --git a/fs/sync.c b/fs/sync.c index c38ec163da6c..c98a7477edfd 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -165,28 +165,9 @@ SYSCALL_DEFINE1(syncfs, int, fd) */ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { - struct address_space *mapping = file->f_mapping; - int err, ret; - - if (!file->f_op || !file->f_op->fsync) { - ret = -EINVAL; - goto out; - } - - ret = filemap_write_and_wait_range(mapping, start, end); - - /* - * We need to protect against concurrent writers, which could cause - * livelocks in fsync_buffers_list(). 
- */ - mutex_lock(&mapping->host->i_mutex); - err = file->f_op->fsync(file, datasync); - if (!ret) - ret = err; - mutex_unlock(&mapping->host->i_mutex); - -out: - return ret; + if (!file->f_op || !file->f_op->fsync) + return -EINVAL; + return file->f_op->fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range); diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 5e7fccfc4b29..89ef9a2f7837 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1304,7 +1304,7 @@ static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd) return NULL; } -int ubifs_fsync(struct file *file, int datasync) +int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; @@ -1319,14 +1319,16 @@ int ubifs_fsync(struct file *file, int datasync) */ return 0; - /* - * VFS has already synchronized dirty pages for this inode. Synchronize - * the inode unless this is a 'datasync()' call. - */ + err = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (err) + return err; + mutex_lock(&inode->i_mutex); + + /* Synchronize the inode unless this is a 'datasync()' call. */ if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) - return err; + goto out; } /* @@ -1334,10 +1336,9 @@ int ubifs_fsync(struct file *file, int datasync) * them. */ err = ubifs_sync_wbufs_by_inode(c, inode); - if (err) - return err; - - return 0; +out: + mutex_unlock(&inode->i_mutex); + return err; } /** diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index f79983d6f860..4cd648501fa4 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1720,7 +1720,7 @@ const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c); int ubifs_calc_dark(const struct ubifs_info *c, int spc); /* file.c */ -int ubifs_fsync(struct file *file, int datasync); +int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync); int ubifs_setattr(struct dentry *dentry, struct iattr *attr); /* dir.c */ diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 7f782af286bf..fbbf657df0cd 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -127,6 +127,8 @@ xfs_iozero( STATIC int xfs_file_fsync( struct file *file, + loff_t start, + loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; @@ -138,6 +140,10 @@ xfs_file_fsync( trace_xfs_file_fsync(ip); + error = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (error) + return error; + if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); @@ -875,18 +881,11 @@ xfs_file_aio_write( /* Handle various SYNC-type writes */ if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { loff_t end = pos + ret - 1; - int error, error2; xfs_rw_iunlock(ip, iolock); - error = filemap_write_and_wait_range(mapping, pos, end); + ret = -xfs_file_fsync(file, pos, end, + (file->f_flags & __O_SYNC) ? 0 : 1); xfs_rw_ilock(ip, iolock); - - error2 = -xfs_file_fsync(file, - (file->f_flags & __O_SYNC) ? 
0 : 1); - if (error) - ret = error; - else if (error2) - ret = error2; } out_unlock: diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 5e06acf95d0f..0c473fd79acb 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h @@ -877,7 +877,7 @@ extern int ext3_htree_store_dirent(struct file *dir_file, __u32 hash, extern void ext3_htree_free_dir_info(struct dir_private_info *p); /* fsync.c */ -extern int ext3_sync_file(struct file *, int); +extern int ext3_sync_file(struct file *, loff_t, loff_t, int); /* hash.c */ extern int ext3fs_dirhash(const char *name, int len, struct diff --git a/include/linux/fb.h b/include/linux/fb.h index 6a8274877171..1d6836c498dd 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -1043,7 +1043,8 @@ extern void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file); extern void fb_deferred_io_cleanup(struct fb_info *info); -extern int fb_deferred_io_fsync(struct file *file, int datasync); +extern int fb_deferred_io_fsync(struct file *file, loff_t start, + loff_t end, int datasync); static inline bool fb_be_math(struct fb_info *info) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 4a61f98823a6..9cd2075c4a39 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1572,7 +1572,7 @@ struct file_operations { int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); - int (*fsync) (struct file *, int datasync); + int (*fsync) (struct file *, loff_t, loff_t, int datasync); int (*aio_fsync) (struct kiocb *, int datasync); int (*fasync) (int, struct file *, int); int (*lock) (struct file *, int, struct file_lock *); @@ -2360,7 +2360,8 @@ extern int generic_segment_checks(const struct iovec *iov, /* fs/block_dev.c */ extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); -extern int blkdev_fsync(struct file *filp, int datasync); +extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, + int datasync); /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, @@ -2490,7 +2491,7 @@ extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); -extern int noop_fsync(struct file *, int); +extern int noop_fsync(struct file *, loff_t, loff_t, int); extern int simple_empty(struct dentry *); extern int simple_readpage(struct file *file, struct page *page); extern int simple_write_begin(struct file *file, struct address_space *mapping, @@ -2515,7 +2516,7 @@ extern ssize_t simple_read_from_buffer(void __user *to, size_t count, extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, const void __user *from, size_t count); -extern int generic_file_fsync(struct file *, int); +extern int generic_file_fsync(struct file *, loff_t, loff_t, int); extern int generic_check_addressable(unsigned, u64); diff --git a/ipc/shm.c b/ipc/shm.c index ab3385a21b27..27884adb1a90 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -277,13 +277,13 @@ static int shm_release(struct inode *ino, struct file *file) return 0; } -static int shm_fsync(struct file *file, int datasync) +static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct shm_file_data *sfd = shm_file_data(file); if 
(!sfd->file->f_op->fsync) return -EINVAL; - return sfd->file->f_op->fsync(sfd->file, datasync); + return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } static unsigned long shm_get_unmapped_area(struct file *file, -- cgit v1.2.3 From 2fbe8c8ad176db69dfc682b4abb721c8fc19c3fd Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 16 Jul 2011 21:38:06 -0400 Subject: get rid of useless dget_parent() in fs/btrfs/ioctl.c both callers there have dentry->d_parent stabilized by the fact that their caller had obtained dentry from lookup_one_len() and had not dropped ->i_mutex on parent since then. Signed-off-by: Al Viro --- fs/btrfs/ioctl.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a3c4751e07db..622543309eb2 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -323,7 +323,7 @@ static noinline int create_subvol(struct btrfs_root *root, struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; struct btrfs_root *new_root; - struct dentry *parent = dget_parent(dentry); + struct dentry *parent = dentry->d_parent; struct inode *dir; int ret; int err; @@ -332,10 +332,8 @@ static noinline int create_subvol(struct btrfs_root *root, u64 index = 0; ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); - if (ret) { - dput(parent); + if (ret) return ret; - } dir = parent->d_inode; @@ -346,10 +344,8 @@ static noinline int create_subvol(struct btrfs_root *root, * 2 - dir items */ trans = btrfs_start_transaction(root, 6); - if (IS_ERR(trans)) { - dput(parent); + if (IS_ERR(trans)) return PTR_ERR(trans); - } leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, objectid, NULL, 0, 0, 0); @@ -439,7 +435,6 @@ static noinline int create_subvol(struct btrfs_root *root, d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); fail: - dput(parent); if (async_transid) { *async_transid = trans->transid; err = btrfs_commit_transaction_async(trans, root, 1); @@ -456,7 +451,6 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, bool readonly) { struct inode *inode; - struct dentry *parent; struct btrfs_pending_snapshot *pending_snapshot; struct btrfs_trans_handle *trans; int ret; @@ -504,9 +498,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, if (ret) goto fail; - parent = dget_parent(dentry); - inode = btrfs_lookup_dentry(parent->d_inode, dentry); - dput(parent); + inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto fail; -- cgit v1.2.3 From 10d9f309d88ca7f47133d57e99b72810f119f75b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 16 Jul 2011 23:09:10 -0400 Subject: get rid of useless dget_parent() in btrfs rename() and link() ->d_parent is locked and stable there... 
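[Editor's note: the sketch below is illustrative only and is not part of the patch that follows; the filesystem name is hypothetical. It restates the point of the message: the VFS calls ->link() and ->rename() with the parent directory's i_mutex held, so dentry->d_parent cannot change for the duration of the call and the dget_parent()/dput() pair buys nothing.]

static int examplefs_link(struct dentry *old_dentry, struct inode *dir,
			  struct dentry *dentry)
{
	/*
	 * dir->i_mutex is held by the VFS across this call, so
	 * dentry->d_parent is stable and no extra reference is needed.
	 */
	struct dentry *parent = dentry->d_parent;

	/*
	 * The heavier, now-unnecessary form would have been:
	 *	struct dentry *parent = dget_parent(dentry);
	 *	...
	 *	dput(parent);
	 */
	(void)parent;
	(void)old_dentry;
	return 0;
}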
Signed-off-by: Al Viro --- fs/btrfs/inode.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bcb20a9a3b93..2548a04a0230 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4766,11 +4766,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (err) { drop_inode = 1; } else { - struct dentry *parent = dget_parent(dentry); + struct dentry *parent = dentry->d_parent; err = btrfs_update_inode(trans, root, inode); BUG_ON(err); btrfs_log_new_name(trans, inode, NULL, parent); - dput(parent); } nr = trans->blocks_used; @@ -7062,9 +7061,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, BUG_ON(ret); if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { - struct dentry *parent = dget_parent(new_dentry); + struct dentry *parent = new_dentry->d_parent; btrfs_log_new_name(trans, old_inode, old_dir, parent); - dput(parent); btrfs_end_log_trans(root); } out_fail: -- cgit v1.2.3 From 79ac5a46c5c1c17476fbf84b4d4600d6d565defd Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 10:17:46 -0400 Subject: jfs_lookup(): don't bother with . or .. they'll never be passed to ->lookup() Signed-off-by: Al Viro --- fs/jfs/namei.c | 39 +++++++++++++++------------------------ 1 file changed, 15 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 247331551992..03787ef6a118 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1456,34 +1456,25 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc ino_t inum; struct inode *ip; struct component_name key; - const char *name = dentry->d_name.name; - int len = dentry->d_name.len; int rc; - jfs_info("jfs_lookup: name = %s", name); - - if ((name[0] == '.') && (len == 1)) - inum = dip->i_ino; - else if (strcmp(name, "..") == 0) - inum = PARENT(dip); - else { - if ((rc = get_UCSname(&key, dentry))) - return ERR_PTR(rc); - rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP); - free_UCSname(&key); - if (rc == -ENOENT) { - d_add(dentry, NULL); - return NULL; - } else if (rc) { - jfs_err("jfs_lookup: dtSearch returned %d", rc); - return ERR_PTR(rc); - } + jfs_info("jfs_lookup: name = %s", dentry->d_name.name); + + if ((rc = get_UCSname(&key, dentry))) + return ERR_PTR(rc); + rc = dtSearch(dip, &key, &inum, &btstack, JFS_LOOKUP); + free_UCSname(&key); + if (rc == -ENOENT) { + ip = NULL; + } else if (rc) { + jfs_err("jfs_lookup: dtSearch returned %d", rc); + ip = ERR_PTR(rc); + } else { + ip = jfs_iget(dip->i_sb, inum); + if (IS_ERR(ip)) + jfs_err("jfs_lookup: iget failed on inum %d", (uint)inum); } - ip = jfs_iget(dip->i_sb, inum); - if (IS_ERR(ip)) - jfs_err("jfs_lookup: iget failed on inum %d", (uint) inum); - return d_splice_alias(ip, dentry); } -- cgit v1.2.3 From 6c673ab393bc18e8bff729cd04cf384d15e72a04 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 10:22:27 -0400 Subject: simplify gfs2_lookup() d_splice_alias() will DTRT when given NULL or ERR_PTR Signed-off-by: Al Viro --- fs/gfs2/inode.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index b1090d66a6fd..0fb51a96eff0 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -792,13 +792,8 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry, static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { - struct inode *inode = NULL; - - inode = gfs2_lookupi(dir, 
&dentry->d_name, 0); - if (inode && IS_ERR(inode)) - return ERR_CAST(inode); - - if (inode) { + struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0); + if (inode && !IS_ERR(inode)) { struct gfs2_glock *gl = GFS2_I(inode)->i_gl; struct gfs2_holder gh; int error; @@ -808,11 +803,8 @@ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(error); } gfs2_glock_dq_uninit(&gh); - return d_splice_alias(inode, dentry); } - d_add(dentry, inode); - - return NULL; + return d_splice_alias(inode, dentry); } /** -- cgit v1.2.3 From 4513d899c418ff69052420e29e354e4c64b3ef76 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 17 Jul 2011 10:52:14 -0400 Subject: switch d_add_ci() to d_splice_alias() in "found negative" case as well Signed-off-by: Al Viro --- fs/dcache.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 2762804a140d..d1d6b3349ec7 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1652,26 +1652,12 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, * Negative dentry: instantiate it unless the inode is a directory and * already has a dentry. */ - spin_lock(&inode->i_lock); - if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) { - __d_instantiate(found, inode); - spin_unlock(&inode->i_lock); - security_d_instantiate(found, inode); - return found; + new = d_splice_alias(inode, found); + if (new) { + dput(found); + found = new; } - - /* - * In case a directory already has a (disconnected) entry grab a - * reference to it, move it in place and use it. - */ - new = list_entry(inode->i_dentry.next, struct dentry, d_alias); - __dget(new); - spin_unlock(&inode->i_lock); - security_d_instantiate(found, inode); - d_move(new, found); - iput(inode); - dput(found); - return new; + return found; err_out: iput(inode); -- cgit v1.2.3 From e4b9f0058145a3bed4199eacee62a9969c163401 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 18 Jul 2011 13:50:00 +0100 Subject: AFS: Fix silly characters in a comment Fix silly characters in a comment in AFS code (some weird characters replaced the word 'flag' some point way back). Reported-by: viro@ZenIV.linux.org.uk Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/afs/afs_vl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h index 8bbefe009ed4..800f607ffaf5 100644 --- a/fs/afs/afs_vl.h +++ b/fs/afs/afs_vl.h @@ -49,7 +49,7 @@ enum AFSVL_Errors { AFSVL_BADVOLOPER = 363542, /* Bad volume operation code */ AFSVL_BADRELLOCKTYPE = 363543, /* Bad release lock type */ AFSVL_RERELEASE = 363544, /* Status report: last release was aborted */ - AFSVL_BADSERVERFLAG = 363545, /* Invalid replication site server °ag */ + AFSVL_BADSERVERFLAG = 363545, /* Invalid replication site server flag */ AFSVL_PERM = 363546, /* No permission access */ AFSVL_NOMEM = 363547, /* malloc/realloc failed to alloc enough memory */ }; -- cgit v1.2.3 From 86c98e8cdb21ff4628f4d48559ab6e006380fa4b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 18 Jul 2011 23:39:07 -0400 Subject: Remove dead code in dget_parent() ->d_parent is never NULL... 
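The invariant in play here: ->d_parent is set when a dentry is allocated, and a root dentry points to itself, so the pointer is never NULL. A minimal sketch of that invariant (illustrative only, not part of the patch; it is the same test the kernel's IS_ROOT() macro expresses):

#include <linux/dcache.h>

/* A dentry with no distinct parent is its own parent, never NULL. */
static int example_is_root(const struct dentry *dentry)
{
        return dentry->d_parent == dentry;      /* i.e. IS_ROOT(dentry) */
}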
Signed-off-by: Al Viro --- fs/dcache.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index d1d6b3349ec7..3c34ac0e9a1b 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -549,10 +549,6 @@ repeat: */ rcu_read_lock(); ret = dentry->d_parent; - if (!ret) { - rcu_read_unlock(); - goto out; - } spin_lock(&ret->d_lock); if (unlikely(ret != dentry->d_parent)) { spin_unlock(&ret->d_lock); @@ -563,7 +559,6 @@ repeat: BUG_ON(!ret->d_count); ret->d_count++; spin_unlock(&ret->d_lock); -out: return ret; } EXPORT_SYMBOL(dget_parent); -- cgit v1.2.3 From 2cebaa58b7de775386732bbd6cd11c3f5b73faf0 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Wed, 20 Jul 2011 18:24:09 +0400 Subject: CIFS: Fix wrong length in cifs_iovec_read Signed-off-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index bb71471a4d9d..a9b4a24f2a16 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1737,7 +1737,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, io_parms.pid = pid; io_parms.tcon = pTcon; io_parms.offset = *poffset; - io_parms.length = len; + io_parms.length = cur_len; rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &read_data, &buf_type); pSMBr = (struct smb_com_read_rsp *)read_data; -- cgit v1.2.3 From b307d4655a71749ac3f91c6dbe33d28cc026ceeb Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Thu, 21 Jul 2011 15:02:43 +0100 Subject: FS-Cache: Fix __fscache_uncache_all_inode_pages()'s outer loop The compiler, at least for ix86 and m68k, validly warns that the comparison: next <= (loff_t)-1 is always true (and it's always true also for x86-64 and probably all other arches - as long as pgoff_t isn't wider than loff_t). The intention appears to be to avoid wrapping of "next", so rather than eliminating the pointless comparison, fix the loop to indeed get exited when "next" would otherwise wrap. 
On m68k the following warning is observed: fs/fscache/page.c: In function '__fscache_uncache_all_inode_pages': fs/fscache/page.c:979: warning: comparison is always false due to limited range of data type Reported-by: Geert Uytterhoeven Reported-by: Jan Beulich Signed-off-by: Jan Beulich Signed-off-by: David Howells Cc: Suresh Jayaraman Cc: Geert Uytterhoeven Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- fs/fscache/page.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 2f343b4d7a7d..3f7a59bfa7ad 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -976,16 +976,12 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, pagevec_init(&pvec, 0); next = 0; - while (next <= (loff_t)-1 && - pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE) - ) { + do { + if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) + break; for (i = 0; i < pagevec_count(&pvec); i++) { struct page *page = pvec.pages[i]; - pgoff_t page_index = page->index; - - ASSERTCMP(page_index, >=, next); - next = page_index + 1; - + next = page->index; if (PageFsCache(page)) { __fscache_wait_on_page_write(cookie, page); __fscache_uncache_page(cookie, page); @@ -993,7 +989,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie, } pagevec_release(&pvec); cond_resched(); - } + } while (++next); _leave(""); } -- cgit v1.2.3 From b91da88fed84843313a1b6fd1b1c834a24bbcf9e Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 21 Jul 2011 11:01:42 -0700 Subject: vfs: drop conditional inode prefetch in __do_lookup_rcu It seems to hurt performance in real life. Yes, the inode will be used later, but the conditional doesn't seem to predict all that well (negative dentries are not uncommon) and it looks like the cost of prefetching is simply higher than depending on the cache doing the right thing. As usual. Signed-off-by: Linus Torvalds --- fs/dcache.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 6e4ea6d87774..fbdcbca40725 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1813,8 +1813,6 @@ seqretry: tname = dentry->d_name.name; i = dentry->d_inode; prefetch(tname); - if (i) - prefetch(i); /* * This seqcount check is required to ensure name and * len are loaded atomically, so as not to walk off the -- cgit v1.2.3 From eac1b5e57d7abc836e78fd3fbcf77dbeed01edc9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 21 Jul 2011 20:00:43 +0200 Subject: ptrace: do_wait(traced_leader_killed_by_mt_exec) can block forever Test-case: void *tfunc(void *arg) { execvp("true", NULL); return NULL; } int main(void) { int pid; if (fork()) { pthread_t t; kill(getpid(), SIGSTOP); pthread_create(&t, NULL, tfunc, NULL); for (;;) pause(); } pid = getppid(); assert(ptrace(PTRACE_ATTACH, pid, 0,0) == 0); while (wait(NULL) > 0) ptrace(PTRACE_CONT, pid, 0,0); return 0; } It is racy, exit_notify() does __wake_up_parent() too. But in the likely case it triggers the problem: de_thread() does release_task() and the old leader goes away without the notification, the tracer sleeps in do_wait() without children/tracees. Change de_thread() to do __wake_up_parent(traced_leader->parent). Since it is already EXIT_DEAD we can do this without ptrace_unlink(), EXIT_DEAD threads do not exist from do_wait's pov. 
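For reference, the test case above as a self-contained program; only the includes and a build hint were added, the body is as given in the commit message:

/* Build with: gcc -pthread test-de_thread.c (file name is arbitrary). */
#include <assert.h>
#include <pthread.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

void *tfunc(void *arg)
{
        execvp("true", NULL);   /* argv left NULL, as in the original */
        return NULL;
}

int main(void)
{
        int pid;

        if (fork()) {
                pthread_t t;

                kill(getpid(), SIGSTOP);
                pthread_create(&t, NULL, tfunc, NULL);
                for (;;)
                        pause();
        }

        pid = getppid();
        assert(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
        while (wait(NULL) > 0)
                ptrace(PTRACE_CONT, pid, 0, 0);
        return 0;
}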
Signed-off-by: Oleg Nesterov Acked-by: Tejun Heo --- fs/exec.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index b08367abf30e..d219541db06c 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -967,6 +967,14 @@ static int de_thread(struct task_struct *tsk) BUG_ON(leader->exit_state != EXIT_ZOMBIE); leader->exit_state = EXIT_DEAD; + + /* + * We are going to release_task()->ptrace_unlink() silently, + * the tracer can sleep in do_wait(). EXIT_DEAD guarantees + * the tracer wont't block again waiting for this thread. + */ + if (unlikely(leader->ptrace)) + __wake_up_parent(leader, leader->parent); write_unlock_irq(&tasklist_lock); release_task(leader); -- cgit v1.2.3 From aac4e4198eff7f9551d586c55342403d49249d95 Mon Sep 17 00:00:00 2001 From: Naohiro Aota Date: Tue, 12 Jul 2011 02:54:13 +0900 Subject: hfsplus: Add additional range check to handle on-disk corruptions 'recoff' is read from disk and used for an argument to memcpy, so if the value read from disk is larger than the page size, it result to "general protection fault". This patch add additional range check for the value, so that disk fuzz won't cause such fault. Signed-off-by: Naohiro Aota Signed-off-by: Christoph Hellwig --- fs/hfsplus/brec.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c index 2312de34bd42..2a734cfccc92 100644 --- a/fs/hfsplus/brec.c +++ b/fs/hfsplus/brec.c @@ -43,6 +43,10 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec) node->tree->node_size - (rec + 1) * 2); if (!recoff) return 0; + if (recoff > node->tree->node_size - 2) { + printk(KERN_ERR "hfs: recoff %d too large\n", recoff); + return 0; + } retval = hfs_bnode_read_u16(node, recoff) + 2; if (retval > node->tree->max_key_len + 2) { -- cgit v1.2.3 From 6596528e391ad978a6a120142cba97a1d7324cb6 Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Mon, 18 Jul 2011 08:06:23 -0700 Subject: hfsplus: ensure bio requests are not smaller than the hardware sectors Currently all bio requests are 512 bytes, which may fail for media whose physical sector size is larger than this. Ensure these requests are not smaller than the block device logical block size. BugLink: http://bugs.launchpad.net/bugs/734883 Signed-off-by: Seth Forshee Signed-off-by: Christoph Hellwig --- fs/hfsplus/hfsplus_fs.h | 16 ++++++++-- fs/hfsplus/part_tbl.c | 32 ++++++++++--------- fs/hfsplus/super.c | 12 +++---- fs/hfsplus/wrapper.c | 83 +++++++++++++++++++++++++++++++++++++------------ 4 files changed, 101 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 0bebf7488feb..81dfd1e495e3 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -13,6 +13,7 @@ #include #include #include +#include #include "hfsplus_raw.h" #define DBG_BNODE_REFS 0x00000001 @@ -110,7 +111,9 @@ struct hfsplus_vh; struct hfs_btree; struct hfsplus_sb_info { + void *s_vhdr_buf; struct hfsplus_vh *s_vhdr; + void *s_backup_vhdr_buf; struct hfsplus_vh *s_backup_vhdr; struct hfs_btree *ext_tree; struct hfs_btree *cat_tree; @@ -258,6 +261,15 @@ struct hfsplus_readdir_data { struct hfsplus_cat_key key; }; +/* + * Find minimum acceptible I/O size for an hfsplus sb. 
+ */ +static inline unsigned short hfsplus_min_io_size(struct super_block *sb) +{ + return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev), + HFSPLUS_SECTOR_SIZE); +} + #define hfs_btree_open hfsplus_btree_open #define hfs_btree_close hfsplus_btree_close #define hfs_btree_write hfsplus_btree_write @@ -436,8 +448,8 @@ int hfsplus_compare_dentry(const struct dentry *parent, /* wrapper.c */ int hfsplus_read_wrapper(struct super_block *); int hfs_part_find(struct super_block *, sector_t *, sector_t *); -int hfsplus_submit_bio(struct block_device *bdev, sector_t sector, - void *data, int rw); +int hfsplus_submit_bio(struct super_block *sb, sector_t sector, + void *buf, void **data, int rw); /* time macros */ #define __hfsp_mt2ut(t) (be32_to_cpu(t) - 2082844800U) diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c index 40ad88c12c64..eb355d81e279 100644 --- a/fs/hfsplus/part_tbl.c +++ b/fs/hfsplus/part_tbl.c @@ -88,11 +88,12 @@ static int hfs_parse_old_pmap(struct super_block *sb, struct old_pmap *pm, return -ENOENT; } -static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm, - sector_t *part_start, sector_t *part_size) +static int hfs_parse_new_pmap(struct super_block *sb, void *buf, + struct new_pmap *pm, sector_t *part_start, sector_t *part_size) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); int size = be32_to_cpu(pm->pmMapBlkCnt); + int buf_size = hfsplus_min_io_size(sb); int res; int i = 0; @@ -107,11 +108,14 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm, if (++i >= size) return -ENOENT; - res = hfsplus_submit_bio(sb->s_bdev, - *part_start + HFS_PMAP_BLK + i, - pm, READ); - if (res) - return res; + pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE); + if ((u8 *)pm - (u8 *)buf >= buf_size) { + res = hfsplus_submit_bio(sb, + *part_start + HFS_PMAP_BLK + i, + buf, (void **)&pm, READ); + if (res) + return res; + } } while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC)); return -ENOENT; @@ -124,15 +128,15 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm, int hfs_part_find(struct super_block *sb, sector_t *part_start, sector_t *part_size) { - void *data; + void *buf, *data; int res; - data = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL); - if (!data) + buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL); + if (!buf) return -ENOMEM; - res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK, - data, READ); + res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK, + buf, &data, READ); if (res) goto out; @@ -141,13 +145,13 @@ int hfs_part_find(struct super_block *sb, res = hfs_parse_old_pmap(sb, data, part_start, part_size); break; case HFS_NEW_PMAP_MAGIC: - res = hfs_parse_new_pmap(sb, data, part_start, part_size); + res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size); break; default: res = -ENOENT; break; } out: - kfree(data); + kfree(buf); return res; } diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 84f56e1f6dac..c106ca22e812 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -203,17 +203,17 @@ int hfsplus_sync_fs(struct super_block *sb, int wait) write_backup = 1; } - error2 = hfsplus_submit_bio(sb->s_bdev, + error2 = hfsplus_submit_bio(sb, sbi->part_start + HFSPLUS_VOLHEAD_SECTOR, - sbi->s_vhdr, WRITE_SYNC); + sbi->s_vhdr_buf, NULL, WRITE_SYNC); if (!error) error = error2; if (!write_backup) goto out; - error2 = hfsplus_submit_bio(sb->s_bdev, + error2 = hfsplus_submit_bio(sb, sbi->part_start + sbi->sect_count - 2, - sbi->s_backup_vhdr, WRITE_SYNC); + 
sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC); if (!error) error2 = error; out: @@ -257,8 +257,8 @@ static void hfsplus_put_super(struct super_block *sb) hfs_btree_close(sbi->ext_tree); iput(sbi->alloc_file); iput(sbi->hidden_dir); - kfree(sbi->s_vhdr); - kfree(sbi->s_backup_vhdr); + kfree(sbi->s_vhdr_buf); + kfree(sbi->s_backup_vhdr_buf); unload_nls(sbi->nls); kfree(sb->s_fs_info); sb->s_fs_info = NULL; diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 2f933e83f5c5..10e515a0d452 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -31,25 +31,67 @@ static void hfsplus_end_io_sync(struct bio *bio, int err) complete(bio->bi_private); } -int hfsplus_submit_bio(struct block_device *bdev, sector_t sector, - void *data, int rw) +/* + * hfsplus_submit_bio - Perfrom block I/O + * @sb: super block of volume for I/O + * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes + * @buf: buffer for I/O + * @data: output pointer for location of requested data + * @rw: direction of I/O + * + * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than + * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads + * @data will return a pointer to the start of the requested sector, + * which may not be the same location as @buf. + * + * If @sector is not aligned to the bdev logical block size it will + * be rounded down. For writes this means that @buf should contain data + * that starts at the rounded-down address. As long as the data was + * read using hfsplus_submit_bio() and the same buffer is used things + * will work correctly. + */ +int hfsplus_submit_bio(struct super_block *sb, sector_t sector, + void *buf, void **data, int rw) { DECLARE_COMPLETION_ONSTACK(wait); struct bio *bio; int ret = 0; + unsigned int io_size; + loff_t start; + int offset; + + /* + * Align sector to hardware sector size and find offset. We + * assume that io_size is a power of two, which _should_ + * be true. + */ + io_size = hfsplus_min_io_size(sb); + start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT; + offset = start & (io_size - 1); + sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); bio = bio_alloc(GFP_NOIO, 1); bio->bi_sector = sector; - bio->bi_bdev = bdev; + bio->bi_bdev = sb->s_bdev; bio->bi_end_io = hfsplus_end_io_sync; bio->bi_private = &wait; - /* - * We always submit one sector at a time, so bio_add_page must not fail. - */ - if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE, - offset_in_page(data)) != HFSPLUS_SECTOR_SIZE) - BUG(); + if (!(rw & WRITE) && data) + *data = (u8 *)buf + offset; + + while (io_size > 0) { + unsigned int page_offset = offset_in_page(buf); + unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset, + io_size); + + ret = bio_add_page(bio, virt_to_page(buf), len, page_offset); + if (ret != len) { + ret = -EIO; + goto out; + } + io_size -= len; + buf = (u8 *)buf + len; + } submit_bio(rw, bio); wait_for_completion(&wait); @@ -57,8 +99,9 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector, if (!bio_flagged(bio, BIO_UPTODATE)) ret = -EIO; +out: bio_put(bio); - return ret; + return ret < 0 ? 
ret : 0; } static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd) @@ -143,17 +186,17 @@ int hfsplus_read_wrapper(struct super_block *sb) goto out; error = -ENOMEM; - sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL); - if (!sbi->s_vhdr) + sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL); + if (!sbi->s_vhdr_buf) goto out; - sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL); - if (!sbi->s_backup_vhdr) + sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL); + if (!sbi->s_backup_vhdr_buf) goto out_free_vhdr; reread: - error = hfsplus_submit_bio(sb->s_bdev, - part_start + HFSPLUS_VOLHEAD_SECTOR, - sbi->s_vhdr, READ); + error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR, + sbi->s_vhdr_buf, (void **)&sbi->s_vhdr, + READ); if (error) goto out_free_backup_vhdr; @@ -183,9 +226,9 @@ reread: goto reread; } - error = hfsplus_submit_bio(sb->s_bdev, - part_start + part_size - 2, - sbi->s_backup_vhdr, READ); + error = hfsplus_submit_bio(sb, part_start + part_size - 2, + sbi->s_backup_vhdr_buf, + (void **)&sbi->s_backup_vhdr, READ); if (error) goto out_free_backup_vhdr; -- cgit v1.2.3 From 841590ce16c19a3ce38028adfc8b1955482ee00c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 21 Jul 2011 15:49:09 -0400 Subject: fix IN_DELETE_SELF on overwriting rename() on ramfs et.al. On ramfs and other simple_rename() users IN_DELETE_SELF is not generated for victim of overwriting rename() if it's is a directory. Works on most of the local filesystems and really trivial to fix... Signed-off-by: Al Viro --- fs/libfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/libfs.c b/fs/libfs.c index 8f2271a5df53..c18e9a1235b6 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -328,8 +328,10 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dentry->d_inode) { simple_unlink(new_dir, new_dentry); - if (they_are_dirs) + if (they_are_dirs) { + drop_nlink(new_dentry->d_inode); drop_nlink(old_dir); + } } else if (they_are_dirs) { drop_nlink(old_dir); inc_nlink(new_dir); -- cgit v1.2.3 From 22ba747f660c0acd14761628c24aa972d18058a0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 21 Jul 2011 15:57:47 -0400 Subject: jffs2: fix IN_DELETE_SELF on overwriting rename() killing a directory We don't generate IN_DELETE_SELF on victim of overwriting rename() if it happens to be a directory. Trivially fixed by doing to ->i_nlink what we do ->pino_nlink a couple of lines later in jffs2_rename(). Signed-off-by: Al Viro --- fs/jffs2/dir.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 8f40ce4f1777..5f243cd63afc 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -820,7 +820,10 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, if (victim_f) { /* There was a victim. Kill it off nicely */ - drop_nlink(new_dentry->d_inode); + if (S_ISDIR(new_dentry->d_inode->i_mode)) + clear_nlink(new_dentry->d_inode); + else + drop_nlink(new_dentry->d_inode); /* Don't oops if the victim was a dirent pointing to an inode which didn't exist. */ if (victim_f->inocache) { -- cgit v1.2.3 From d769b3c2ab7184ddd42056595b627cc871caa90e Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 21 Jul 2011 22:22:25 +0200 Subject: isofs: Remove global fs lock sbi->s_mutex isn't needed for isofs at all so we can just remove it. Generally, since isofs is always mounted read-only, filesystem structure cannot change under us. 
So buffer_head contents stays constant after it's filled in. That leaves us with possible changes of global data structures. Superblock changes only during filesystem mount (even remount does not change it), inodes are only filled in during reading from disk. So there are no changes of these structures to bother about. Arguments why sbi->s_mutex can be removed at each place: isofs_readdir: Accesses sb, inode, filp, local variables => s_mutex not needed isofs_lookup: Protected by directory's i_mutex. Accesses sb, inode, dentry, local variables => s_mutex not needed rock_ridge_symlink_readpage: Protected by page lock. Accesses sb, inode, local variables => s_mutex not needed. Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/isofs/dir.c | 3 --- fs/isofs/inode.c | 1 - fs/isofs/isofs.h | 1 - fs/isofs/namei.c | 4 ---- fs/isofs/rock.c | 3 --- 5 files changed, 12 deletions(-) (limited to 'fs') diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index 0542b6eedf80..f20437c068a0 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -254,19 +254,16 @@ static int isofs_readdir(struct file *filp, char *tmpname; struct iso_directory_record *tmpde; struct inode *inode = filp->f_path.dentry->d_inode; - struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb); tmpname = (char *)__get_free_page(GFP_KERNEL); if (tmpname == NULL) return -ENOMEM; - mutex_lock(&sbi->s_mutex); tmpde = (struct iso_directory_record *) (tmpname+1024); result = do_isofs_readdir(inode, filp, dirent, filldir, tmpname, tmpde); free_page((unsigned long) tmpname); - mutex_unlock(&sbi->s_mutex); return result; } diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index b3cc8586984e..a5d03672d04e 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -863,7 +863,6 @@ root_found: sbi->s_utf8 = opt.utf8; sbi->s_nocompress = opt.nocompress; sbi->s_overriderockperm = opt.overriderockperm; - mutex_init(&sbi->s_mutex); /* * It would be incredibly stupid to allow people to mark every file * on the disk as suid, so we merely allow them to set the default diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index 2882dc089f87..7d33de84f52a 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h @@ -55,7 +55,6 @@ struct isofs_sb_info { gid_t s_gid; uid_t s_uid; struct nls_table *s_nls_iocharset; /* Native language support table */ - struct mutex s_mutex; /* replaces BKL, please remove if possible */ }; #define ISOFS_INVALID_MODE ((mode_t) -1) diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 68fa503d877e..1e2946f2a69e 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -168,7 +168,6 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam int found; unsigned long uninitialized_var(block); unsigned long uninitialized_var(offset); - struct isofs_sb_info *sbi = ISOFS_SB(dir->i_sb); struct inode *inode; struct page *page; @@ -176,7 +175,6 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam if (!page) return ERR_PTR(-ENOMEM); - mutex_lock(&sbi->s_mutex); found = isofs_find_entry(dir, dentry, &block, &offset, page_address(page), @@ -185,7 +183,5 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam inode = found ? 
isofs_iget(dir->i_sb, block, offset) : NULL; - mutex_unlock(&sbi->s_mutex); - return d_splice_alias(inode, dentry); } diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c index f9cd04db6eab..1fbc7de88f50 100644 --- a/fs/isofs/rock.c +++ b/fs/isofs/rock.c @@ -678,7 +678,6 @@ static int rock_ridge_symlink_readpage(struct file *file, struct page *page) init_rock_state(&rs, inode); block = ei->i_iget5_block; - mutex_lock(&sbi->s_mutex); bh = sb_bread(inode->i_sb, block); if (!bh) goto out_noread; @@ -748,7 +747,6 @@ repeat: goto fail; brelse(bh); *rpnt = '\0'; - mutex_unlock(&sbi->s_mutex); SetPageUptodate(page); kunmap(page); unlock_page(page); @@ -765,7 +763,6 @@ out_bad_span: printk("symlink spans iso9660 blocks\n"); fail: brelse(bh); - mutex_unlock(&sbi->s_mutex); error: SetPageError(page); kunmap(page); -- cgit v1.2.3 From 5a9a43646cf709312d71eca71cef90ad802f28f9 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Sun, 17 Jul 2011 15:35:12 +0400 Subject: vfs: use ERR_CAST for err-ptr tossing in lookup_instantiate_filp Replace unclear (struct dentry *) to (struct file *) typecast with ERR_CAST() macro. Signed-off-by: Konstantin Khlebnikov Signed-off-by: Al Viro --- fs/open.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/open.c b/fs/open.c index b52cf013ffa1..739b751aa73e 100644 --- a/fs/open.c +++ b/fs/open.c @@ -793,7 +793,7 @@ out: return nd->intent.open.file; out_err: release_open_intent(nd); - nd->intent.open.file = (struct file *)dentry; + nd->intent.open.file = ERR_CAST(dentry); goto out; } EXPORT_SYMBOL_GPL(lookup_instantiate_filp); -- cgit v1.2.3 From 2053d67c5451d92a3ddf58d944e1d6b0efc9c419 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 6 Jun 2011 18:01:47 +0000 Subject: fs/9p: remove rename work around in 9p Now that VFS does the right thing remove the work around. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 7f9976a866e9..a5a95372cd68 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -889,11 +889,6 @@ clunk_newdir: clear_nlink(new_inode); else drop_nlink(new_inode); - /* - * Work around vfs rename rehash bug with - * FS_RENAME_DOES_D_MOVE - */ - v9fs_invalidate_inode_attr(new_inode); } if (S_ISDIR(old_inode->i_mode)) { if (!new_inode) -- cgit v1.2.3 From fd2421f54423f307ecd31bdebdca6bc317e0c492 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 11 Jul 2011 16:40:59 +0000 Subject: fs/9p: When doing inode lookup compare qid details and inode mode bits. This make sure we don't use wrong inode from the inode hash. The inode number of the file deleted is reused by the next file system object created and if we only use inode number for inode hash lookup we could end up with wrong struct inode. Also compare inode generation number. Not all Linux file system provide st_gen in userspace. 
So it could be 0; Signed-off-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/cache.c | 20 +++++++++----------- fs/9p/cache.h | 9 --------- fs/9p/v9fs.c | 2 +- fs/9p/v9fs.h | 2 +- fs/9p/vfs_inode.c | 36 +++++++++++++++++++++++++++++++++--- fs/9p/vfs_inode_dotl.c | 41 +++++++++++++++++++++++++++++++++++++---- 6 files changed, 81 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/9p/cache.c b/fs/9p/cache.c index 5b335c5086a1..945aa5f02f9b 100644 --- a/fs/9p/cache.c +++ b/fs/9p/cache.c @@ -108,11 +108,10 @@ static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data, void *buffer, uint16_t bufmax) { const struct v9fs_inode *v9inode = cookie_netfs_data; - memcpy(buffer, &v9inode->fscache_key->path, - sizeof(v9inode->fscache_key->path)); + memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path)); P9_DPRINTK(P9_DEBUG_FSC, "inode %p get key %llu", &v9inode->vfs_inode, - v9inode->fscache_key->path); - return sizeof(v9inode->fscache_key->path); + v9inode->qid.path); + return sizeof(v9inode->qid.path); } static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data, @@ -129,11 +128,10 @@ static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data, void *buffer, uint16_t buflen) { const struct v9fs_inode *v9inode = cookie_netfs_data; - memcpy(buffer, &v9inode->fscache_key->version, - sizeof(v9inode->fscache_key->version)); + memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version)); P9_DPRINTK(P9_DEBUG_FSC, "inode %p get aux %u", &v9inode->vfs_inode, - v9inode->fscache_key->version); - return sizeof(v9inode->fscache_key->version); + v9inode->qid.version); + return sizeof(v9inode->qid.version); } static enum @@ -143,11 +141,11 @@ fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data, { const struct v9fs_inode *v9inode = cookie_netfs_data; - if (buflen != sizeof(v9inode->fscache_key->version)) + if (buflen != sizeof(v9inode->qid.version)) return FSCACHE_CHECKAUX_OBSOLETE; - if (memcmp(buffer, &v9inode->fscache_key->version, - sizeof(v9inode->fscache_key->version))) + if (memcmp(buffer, &v9inode->qid.version, + sizeof(v9inode->qid.version))) return FSCACHE_CHECKAUX_OBSOLETE; return FSCACHE_CHECKAUX_OKAY; diff --git a/fs/9p/cache.h b/fs/9p/cache.h index 049507a5b01c..40cc54ced5d9 100644 --- a/fs/9p/cache.h +++ b/fs/9p/cache.h @@ -93,15 +93,6 @@ static inline void v9fs_uncache_page(struct inode *inode, struct page *page) BUG_ON(PageFsCache(page)); } -static inline void v9fs_fscache_set_key(struct inode *inode, - struct p9_qid *qid) -{ - struct v9fs_inode *v9inode = V9FS_I(inode); - spin_lock(&v9inode->fscache_lock); - v9inode->fscache_key = qid; - spin_unlock(&v9inode->fscache_lock); -} - static inline void v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page) { diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index c82b017f51f3..8b7c6be2450b 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -487,8 +487,8 @@ static void v9fs_inode_init_once(void *foo) struct v9fs_inode *v9inode = (struct v9fs_inode *)foo; #ifdef CONFIG_9P_FSCACHE v9inode->fscache = NULL; - v9inode->fscache_key = NULL; #endif + memset(&v9inode->qid, 0, sizeof(v9inode->qid)); inode_init_once(&v9inode->vfs_inode); } diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index e5ebedfc5ed8..5d7392ead24b 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -125,8 +125,8 @@ struct v9fs_inode { #ifdef CONFIG_9P_FSCACHE spinlock_t fscache_lock; struct fscache_cookie *fscache; - struct p9_qid *fscache_key; #endif + struct p9_qid qid; unsigned int cache_validity; struct 
p9_fid *writeback_fid; struct mutex v_mutex; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index a5a95372cd68..3d8a18ee78e6 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -216,7 +216,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb) return NULL; #ifdef CONFIG_9P_FSCACHE v9inode->fscache = NULL; - v9inode->fscache_key = NULL; spin_lock_init(&v9inode->fscache_lock); #endif v9inode->writeback_fid = NULL; @@ -433,6 +432,37 @@ void v9fs_evict_inode(struct inode *inode) } } +static int v9fs_test_inode(struct inode *inode, void *data) +{ + int umode; + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_wstat *st = (struct p9_wstat *)data; + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); + + umode = p9mode2unixmode(v9ses, st->mode); + /* don't match inode of different type */ + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) + return 0; + + /* compare qid details */ + if (memcmp(&v9inode->qid.version, + &st->qid.version, sizeof(v9inode->qid.version))) + return 0; + + if (v9inode->qid.type != st->qid.type) + return 0; + return 1; +} + +static int v9fs_set_inode(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_wstat *st = (struct p9_wstat *)data; + + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); + return 0; +} + static struct inode *v9fs_qid_iget(struct super_block *sb, struct p9_qid *qid, struct p9_wstat *st) @@ -443,7 +473,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb, struct v9fs_session_info *v9ses = sb->s_fs_info; i_ino = v9fs_qid2ino(qid); - inode = iget_locked(sb, i_ino); + inode = iget5_locked(sb, i_ino, v9fs_test_inode, v9fs_set_inode, st); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) @@ -453,6 +483,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb, * FIXME!! we may need support for stale inodes * later. 
*/ + inode->i_ino = i_ino; umode = p9mode2unixmode(v9ses, st->mode); retval = v9fs_init_inode(v9ses, inode, umode); if (retval) @@ -460,7 +491,6 @@ static struct inode *v9fs_qid_iget(struct super_block *sb, v9fs_stat2inode(st, inode, sb); #ifdef CONFIG_9P_FSCACHE - v9fs_fscache_set_key(inode, &st->qid); v9fs_cache_inode_get_cookie(inode); #endif unlock_new_inode(inode); diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 32bbbe5aa689..dc9e7de38202 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -86,6 +86,38 @@ static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode) return dentry; } +static int v9fs_test_inode_dotl(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; + + /* don't match inode of different type */ + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT)) + return 0; + + if (inode->i_generation != st->st_gen) + return 0; + + /* compare qid details */ + if (memcmp(&v9inode->qid.version, + &st->qid.version, sizeof(v9inode->qid.version))) + return 0; + + if (v9inode->qid.type != st->qid.type) + return 0; + return 1; +} + +static int v9fs_set_inode_dotl(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; + + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); + inode->i_generation = st->st_gen; + return 0; +} + static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, struct p9_qid *qid, struct p9_fid *fid, @@ -97,7 +129,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, struct v9fs_session_info *v9ses = sb->s_fs_info; i_ino = v9fs_qid2ino(qid); - inode = iget_locked(sb, i_ino); + inode = iget5_locked(sb, i_ino, v9fs_test_inode_dotl, + v9fs_set_inode_dotl, st); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) @@ -107,13 +140,13 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, * FIXME!! we may need support for stale inodes * later. */ + inode->i_ino = i_ino; retval = v9fs_init_inode(v9ses, inode, st->st_mode); if (retval) goto error; v9fs_stat2inode_dotl(st, inode); #ifdef CONFIG_9P_FSCACHE - v9fs_fscache_set_key(inode, &st->qid); v9fs_cache_inode_get_cookie(inode); #endif retval = v9fs_get_acl(inode, fid); @@ -136,7 +169,7 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct p9_stat_dotl *st; struct inode *inode = NULL; - st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC | P9_STATS_GEN); if (IS_ERR(st)) return ERR_CAST(st); @@ -547,7 +580,7 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) inode->i_blocks = stat->st_blocks; } if (stat->st_result_mask & P9_STATS_GEN) - inode->i_generation = stat->st_gen; + inode->i_generation = stat->st_gen; /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION * because the inode structure does not have fields for them. -- cgit v1.2.3 From a2dd43bb0d7b9ce28f8a39254c25840c0730498e Mon Sep 17 00:00:00 2001 From: Prem Karat Date: Fri, 6 May 2011 18:24:18 +0530 Subject: fs/9p: Fix invalid mount options/args Without this fix, if any invalid mount options/args are passed while mouting the 9p fs, no error (-EINVAL) is returned and default arg value is assigned. This fix returns -EINVAL when an invalid arguement is found while parsing mount options. 
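Reduced to a userspace sketch (EX_CACHE_* are stand-in constants, not the kernel's), the stricter rule in the parser below is simply that an unrecognized value yields -EINVAL rather than a silent default:

#include <errno.h>
#include <string.h>

enum { EX_CACHE_NONE, EX_CACHE_LOOSE, EX_CACHE_FSCACHE };       /* stand-ins */

/* Return a cache mode for 's', or -EINVAL for anything unrecognized. */
static int parse_cache_mode(const char *s)
{
        if (!strcmp(s, "loose"))
                return EX_CACHE_LOOSE;
        if (!strcmp(s, "fscache"))
                return EX_CACHE_FSCACHE;
        if (!strcmp(s, "none"))
                return EX_CACHE_NONE;
        return -EINVAL;         /* reject unknown values instead of defaulting */
}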
Signed-off-by: Prem Karat Signed-off-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/v9fs.c | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 8b7c6be2450b..ef9661886112 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -78,6 +78,25 @@ static const match_table_t tokens = { {Opt_err, NULL} }; +/* Interpret mount options for cache mode */ +static int get_cache_mode(char *s) +{ + int version = -EINVAL; + + if (!strcmp(s, "loose")) { + version = CACHE_LOOSE; + P9_DPRINTK(P9_DEBUG_9P, "Cache mode: loose\n"); + } else if (!strcmp(s, "fscache")) { + version = CACHE_FSCACHE; + P9_DPRINTK(P9_DEBUG_9P, "Cache mode: fscache\n"); + } else if (!strcmp(s, "none")) { + version = CACHE_NONE; + P9_DPRINTK(P9_DEBUG_9P, "Cache mode: none\n"); + } else + printk(KERN_INFO "9p: Unknown Cache mode %s.\n", s); + return version; +} + /** * v9fs_parse_options - parse mount options into session structure * @v9ses: existing v9fs session information @@ -97,7 +116,7 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) /* setup defaults */ v9ses->afid = ~0; v9ses->debug = 0; - v9ses->cache = 0; + v9ses->cache = CACHE_NONE; #ifdef CONFIG_9P_FSCACHE v9ses->cachetag = NULL; #endif @@ -171,13 +190,13 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) "problem allocating copy of cache arg\n"); goto free_and_return; } + ret = get_cache_mode(s); + if (ret == -EINVAL) { + kfree(s); + goto free_and_return; + } - if (strcmp(s, "loose") == 0) - v9ses->cache = CACHE_LOOSE; - else if (strcmp(s, "fscache") == 0) - v9ses->cache = CACHE_FSCACHE; - else - v9ses->cache = CACHE_NONE; + v9ses->cache = ret; kfree(s); break; @@ -200,9 +219,15 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) } else { v9ses->flags |= V9FS_ACCESS_SINGLE; v9ses->uid = simple_strtoul(s, &e, 10); - if (*e != '\0') - v9ses->uid = ~0; + if (*e != '\0') { + ret = -EINVAL; + printk(KERN_INFO "9p: Unknown access " + "argument %s.\n", s); + kfree(s); + goto free_and_return; + } } + kfree(s); break; -- cgit v1.2.3 From ed80fcfac2565fa866d93ba14f0e75de17a8223e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Wed, 6 Jul 2011 16:32:31 +0530 Subject: fs/9p: Always ask new inode in create This make sure we don't end up reusing the unlinked inode object. The ideal way is to use inode i_generation. But i_generation is not available in userspace always. 
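The mechanism, visible in the diff below, is an iget5_locked() test callback that never reports a match, so the create path always gets a freshly allocated in-core inode. A rough sketch (never_match() and get_fresh_inode() are illustrative names, not the patch's):

#include <linux/fs.h>

/* A test callback that never matches: iget5_locked() will then always
 * allocate a new in-core inode instead of handing back a cached one
 * whose inode number has been reused by a newly created file. */
static int never_match(struct inode *inode, void *data)
{
        return 0;
}

/* set_cb/data are whatever the filesystem normally passes (sketch). */
static struct inode *get_fresh_inode(struct super_block *sb, unsigned long ino,
                                     int (*set_cb)(struct inode *, void *),
                                     void *data)
{
        return iget5_locked(sb, ino, never_match, set_cb, data);
}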
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/v9fs.h | 27 +++++++++++++++++++++++---- fs/9p/vfs_inode.c | 22 +++++++++++++++++----- fs/9p/vfs_inode_dotl.c | 30 +++++++++++++++++++++--------- 3 files changed, 61 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index 5d7392ead24b..e78956cbd702 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -153,13 +153,13 @@ extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p); extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, - struct super_block *sb); + struct super_block *sb, int new); extern const struct inode_operations v9fs_dir_inode_operations_dotl; extern const struct inode_operations v9fs_file_inode_operations_dotl; extern const struct inode_operations v9fs_symlink_inode_operations_dotl; extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, - struct super_block *sb); + struct super_block *sb, int new); /* other default globals */ #define V9FS_PORT 564 @@ -201,8 +201,27 @@ v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb) { if (v9fs_proto_dotl(v9ses)) - return v9fs_inode_from_fid_dotl(v9ses, fid, sb); + return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 0); else - return v9fs_inode_from_fid(v9ses, fid, sb); + return v9fs_inode_from_fid(v9ses, fid, sb, 0); } + +/** + * v9fs_get_new_inode_from_fid - Helper routine to populate an inode by + * issuing a attribute request + * @v9ses: session information + * @fid: fid to issue attribute request for + * @sb: superblock on which to create inode + * + */ +static inline struct inode * +v9fs_get_new_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + if (v9fs_proto_dotl(v9ses)) + return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 1); + else + return v9fs_inode_from_fid(v9ses, fid, sb, 1); +} + #endif diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 3d8a18ee78e6..3bbf705634b2 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -454,6 +454,11 @@ static int v9fs_test_inode(struct inode *inode, void *data) return 1; } +static int v9fs_test_new_inode(struct inode *inode, void *data) +{ + return 0; +} + static int v9fs_set_inode(struct inode *inode, void *data) { struct v9fs_inode *v9inode = V9FS_I(inode); @@ -465,15 +470,22 @@ static int v9fs_set_inode(struct inode *inode, void *data) static struct inode *v9fs_qid_iget(struct super_block *sb, struct p9_qid *qid, - struct p9_wstat *st) + struct p9_wstat *st, + int new) { int retval, umode; unsigned long i_ino; struct inode *inode; struct v9fs_session_info *v9ses = sb->s_fs_info; + int (*test)(struct inode *, void *); + + if (new) + test = v9fs_test_new_inode; + else + test = v9fs_test_inode; i_ino = v9fs_qid2ino(qid); - inode = iget5_locked(sb, i_ino, v9fs_test_inode, v9fs_set_inode, st); + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode, st); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) @@ -504,7 +516,7 @@ error: struct inode * v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, - struct super_block *sb) + struct super_block *sb, int new) { struct p9_wstat *st; struct inode *inode = NULL; @@ -513,7 +525,7 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, if (IS_ERR(st)) return ERR_CAST(st); - inode = v9fs_qid_iget(sb, &st->qid, st); + inode = v9fs_qid_iget(sb, &st->qid, st, new); p9stat_free(st); 
kfree(st); return inode; @@ -615,7 +627,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, } /* instantiate inode and assign the unopened fid to the dentry */ - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index dc9e7de38202..276f4a69ecd4 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -108,6 +108,12 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data) return 1; } +/* Always get a new inode */ +static int v9fs_test_new_inode_dotl(struct inode *inode, void *data) +{ + return 0; +} + static int v9fs_set_inode_dotl(struct inode *inode, void *data) { struct v9fs_inode *v9inode = V9FS_I(inode); @@ -121,16 +127,22 @@ static int v9fs_set_inode_dotl(struct inode *inode, void *data) static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, struct p9_qid *qid, struct p9_fid *fid, - struct p9_stat_dotl *st) + struct p9_stat_dotl *st, + int new) { int retval; unsigned long i_ino; struct inode *inode; struct v9fs_session_info *v9ses = sb->s_fs_info; + int (*test)(struct inode *, void *); + + if (new) + test = v9fs_test_new_inode_dotl; + else + test = v9fs_test_inode_dotl; i_ino = v9fs_qid2ino(qid); - inode = iget5_locked(sb, i_ino, v9fs_test_inode_dotl, - v9fs_set_inode_dotl, st); + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) @@ -164,7 +176,7 @@ error: struct inode * v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, - struct super_block *sb) + struct super_block *sb, int new) { struct p9_stat_dotl *st; struct inode *inode = NULL; @@ -173,7 +185,7 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, if (IS_ERR(st)) return ERR_CAST(st); - inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st); + inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new); kfree(st); return inode; } @@ -263,7 +275,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, fid = NULL; goto error; } - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); @@ -383,7 +395,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, goto error; } - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", @@ -636,7 +648,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, } /* instantiate inode and assign the unopened fid to dentry */ - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", @@ -789,7 +801,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, goto error; } - inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); if (IS_ERR(inode)) { err = PTR_ERR(inode); P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", -- cgit v1.2.3 From 9e8fb38e7d7a00e5f63bbb331f0ea4c02286d5e6 Mon Sep 17 00:00:00 2001 
From: "Aneesh Kumar K.V" Date: Tue, 28 Jun 2011 15:41:16 +0530 Subject: fs/9p: add 9P2000.L renameat operation renameat - change name of file or directory size[4] Trenameat tag[2] olddirfid[4] oldname[s] newdirfid[4] newname[s] size[4] Rrenameat tag[2] older Trename have the below request format size[4] Trename tag[2] fid[4] newdirfid[4] name[s] The rename message is used to change the name of a file, possibly moving it to a new directory. The rename opreation is actually a directory opertation and should ideally have olddirfid, if not we cannot represent the fid on server with anything other than name. We will have to derive the old directory name from fid in the Trename request. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 9 ++++++--- include/net/9p/9p.h | 2 ++ include/net/9p/client.h | 5 ++++- net/9p/client.c | 33 ++++++++++++++++++++++++++++++++- 4 files changed, 44 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 3bbf705634b2..bce66f56c62c 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -904,9 +904,12 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, down_write(&v9ses->rename_sem); if (v9fs_proto_dotl(v9ses)) { - retval = p9_client_rename(oldfid, newdirfid, - (char *) new_dentry->d_name.name); - if (retval != -ENOSYS) + retval = p9_client_renameat(olddirfid, old_dentry->d_name.name, + newdirfid, new_dentry->d_name.name); + if (retval == -EOPNOTSUPP) + retval = p9_client_rename(oldfid, newdirfid, + new_dentry->d_name.name); + if (retval != -EOPNOTSUPP) goto clunk_newdir; } if (old_dentry->d_parent != new_dentry->d_parent) { diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 160193e6dddd..61156207c98c 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -181,6 +181,8 @@ enum p9_msg_t { P9_RLINK, P9_TMKDIR = 72, P9_RMKDIR, + P9_TRENAMEAT = 74, + P9_RRENAMEAT, P9_TVERSION = 100, P9_RVERSION, P9_TAUTH = 102, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index f7a8d036f803..62ceddf9994a 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -211,7 +211,10 @@ struct p9_dirent { }; int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); -int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name); +int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, + const char *name); +int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, + struct p9_fid *newdirfid, const char *new_name); struct p9_client *p9_client_create(const char *dev_name, char *options); void p9_client_destroy(struct p9_client *clnt); void p9_client_disconnect(struct p9_client *clnt); diff --git a/net/9p/client.c b/net/9p/client.c index 431eaef697c7..c4b77f383582 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1666,7 +1666,8 @@ error: } EXPORT_SYMBOL(p9_client_statfs); -int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name) +int p9_client_rename(struct p9_fid *fid, + struct p9_fid *newdirfid, const char *name) { int err; struct p9_req_t *req; @@ -1693,6 +1694,36 @@ error: } EXPORT_SYMBOL(p9_client_rename); +int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, + struct p9_fid *newdirfid, const char *new_name) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = olddirfid->clnt; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s" + " newdirfid %d new name %s\n", olddirfid->fid, old_name, + 
newdirfid->fid, new_name); + + req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid, + old_name, newdirfid->fid, new_name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n", + newdirfid->fid, new_name); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_renameat); + /* * An xattrwalk without @attr_name gives the fid for the lisxattr namespace */ -- cgit v1.2.3 From 48e370ff93769ee6e592ddef3bb38686b8147c67 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 28 Jun 2011 15:41:18 +0530 Subject: fs/9p: add 9P2000.L unlinkat operation unlinkat - Remove a directory entry size[4] Tunlinkat tag[2] dirfid[4] name[s] flag[4] size[4] Runlinkat tag[2] older Tremove have the below request format size[4] Tremove tag[2] fid[4] The remove message is used to remove a directory entry either file or directory The remove opreation is actually a directory opertation and should ideally have dirfid, if not we cannot represent the fid on server with anything other than name. We will have to derive the directory name from fid in the Tremove request. NOTE: The operation doesn't clunk the unlink fid. Signed-off-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 48 ++++++++++++++++++++++++++++++------------------ include/net/9p/9p.h | 2 ++ include/net/9p/client.h | 1 + net/9p/client.c | 23 +++++++++++++++++++++++ 4 files changed, 56 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index bce66f56c62c..8bb5507e822f 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -534,38 +534,50 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, /** * v9fs_remove - helper function to remove files and directories * @dir: directory inode that is being deleted - * @file: dentry that is being deleted + * @dentry: dentry that is being deleted * @rmdir: removing a directory * */ -static int v9fs_remove(struct inode *dir, struct dentry *file, int rmdir) +static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags) { - int retval; - struct p9_fid *v9fid; - struct inode *file_inode; - - P9_DPRINTK(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %d\n", dir, file, - rmdir); + struct inode *inode; + int retval = -EOPNOTSUPP; + struct p9_fid *v9fid, *dfid; + struct v9fs_session_info *v9ses; - file_inode = file->d_inode; - v9fid = v9fs_fid_clone(file); - if (IS_ERR(v9fid)) - return PTR_ERR(v9fid); + P9_DPRINTK(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n", + dir, dentry, flags); - retval = p9_client_remove(v9fid); + v9ses = v9fs_inode2v9ses(dir); + inode = dentry->d_inode; + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + retval = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", retval); + return retval; + } + if (v9fs_proto_dotl(v9ses)) + retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags); + if (retval == -EOPNOTSUPP) { + /* Try the one based on path */ + v9fid = v9fs_fid_clone(dentry); + if (IS_ERR(v9fid)) + return PTR_ERR(v9fid); + retval = p9_client_remove(v9fid); + } if (!retval) { /* * directories on unlink should have zero * link count */ - if (rmdir) { - clear_nlink(file_inode); + if (flags & AT_REMOVEDIR) { + clear_nlink(inode); drop_nlink(dir); } else - drop_nlink(file_inode); + drop_nlink(inode); - v9fs_invalidate_inode_attr(file_inode); + v9fs_invalidate_inode_attr(inode); v9fs_invalidate_inode_attr(dir); } return retval; @@ -856,7 +868,7 
@@ int v9fs_vfs_unlink(struct inode *i, struct dentry *d) int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) { - return v9fs_remove(i, d, 1); + return v9fs_remove(i, d, AT_REMOVEDIR); } /** diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 61156207c98c..342dcf13d039 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -183,6 +183,8 @@ enum p9_msg_t { P9_RMKDIR, P9_TRENAMEAT = 74, P9_RRENAMEAT, + P9_TUNLINKAT = 76, + P9_RUNLINKAT, P9_TVERSION = 100, P9_RVERSION, P9_TAUTH = 102, diff --git a/include/net/9p/client.h b/include/net/9p/client.h index 62ceddf9994a..55ce72ce9861 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -234,6 +234,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, int p9_client_clunk(struct p9_fid *fid); int p9_client_fsync(struct p9_fid *fid, int datasync); int p9_client_remove(struct p9_fid *fid); +int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags); int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, u32 count); int p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, diff --git a/net/9p/client.c b/net/9p/client.c index c4b77f383582..a953baa3624e 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1303,6 +1303,29 @@ error: } EXPORT_SYMBOL(p9_client_remove); +int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags) +{ + int err = 0; + struct p9_req_t *req; + struct p9_client *clnt; + + P9_DPRINTK(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n", + dfid->fid, name, flags); + + clnt = dfid->clnt; + req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + P9_DPRINTK(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_unlinkat); + int p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, u32 count) -- cgit v1.2.3 From d2eece376648d2f7ba0a7d78f3c4d0421e608ac2 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:21:54 -0700 Subject: ocfs2/cluster: Abort heartbeat start on hard-ro devices Currently if the heartbeat device is hard-ro, the o2hb thread keeps chugging along and dumping errors along the way. The user needs to manually stop the heartbeat. The patch addresses this shortcoming by adding a limit to the number of times the hb thread will iterate in an unsteady state. If the hb thread does not ready steady state in that many interation, the start is aborted. Signed-off-by: Sunil Mushran --- fs/ocfs2/cluster/heartbeat.c | 185 +++++++++++++++++++++++++++---------------- 1 file changed, 116 insertions(+), 69 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9a3e6bbff27b..4728a746281d 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -216,6 +216,7 @@ struct o2hb_region { struct list_head hr_all_item; unsigned hr_unclean_stop:1, + hr_aborted_start:1, hr_item_pinned:1, hr_item_dropped:1; @@ -254,6 +255,10 @@ struct o2hb_region { * a more complete api that doesn't lead to this sort of fragility. 
*/ atomic_t hr_steady_iterations; + /* terminate o2hb thread if it does not reach steady state + * (hr_steady_iterations == 0) within hr_unsteady_iterations */ + atomic_t hr_unsteady_iterations; + char hr_dev_name[BDEVNAME_SIZE]; unsigned int hr_timeout_ms; @@ -324,6 +329,10 @@ static void o2hb_write_timeout(struct work_struct *work) static void o2hb_arm_write_timeout(struct o2hb_region *reg) { + /* Arm writeout only after thread reaches steady state */ + if (atomic_read(®->hr_steady_iterations) != 0) + return; + mlog(ML_HEARTBEAT, "Queue write timeout for %u ms\n", O2HB_MAX_WRITE_TIMEOUT_MS); @@ -537,9 +546,14 @@ static int o2hb_verify_crc(struct o2hb_region *reg, return read == computed; } -/* We want to make sure that nobody is heartbeating on top of us -- - * this will help detect an invalid configuration. */ -static void o2hb_check_last_timestamp(struct o2hb_region *reg) +/* + * Compare the slot data with what we wrote in the last iteration. + * If the match fails, print an appropriate error message. This is to + * detect errors like... another node hearting on the same slot, + * flaky device that is losing writes, etc. + * Returns 1 if check succeeds, 0 otherwise. + */ +static int o2hb_check_own_slot(struct o2hb_region *reg) { struct o2hb_disk_slot *slot; struct o2hb_disk_heartbeat_block *hb_block; @@ -548,13 +562,13 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg) slot = ®->hr_slots[o2nm_this_node()]; /* Don't check on our 1st timestamp */ if (!slot->ds_last_time) - return; + return 0; hb_block = slot->ds_raw_block; if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time && le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation && hb_block->hb_node == slot->ds_node_num) - return; + return 1; #define ERRSTR1 "Another node is heartbeating on device" #define ERRSTR2 "Heartbeat generation mismatch on device" @@ -574,6 +588,8 @@ static void o2hb_check_last_timestamp(struct o2hb_region *reg) (unsigned long long)slot->ds_last_time, hb_block->hb_node, (unsigned long long)le64_to_cpu(hb_block->hb_generation), (unsigned long long)le64_to_cpu(hb_block->hb_seq)); + + return 0; } static inline void o2hb_prepare_block(struct o2hb_region *reg, @@ -719,17 +735,24 @@ static void o2hb_shutdown_slot(struct o2hb_disk_slot *slot) o2nm_node_put(node); } -static void o2hb_set_quorum_device(struct o2hb_region *reg, - struct o2hb_disk_slot *slot) +static void o2hb_set_quorum_device(struct o2hb_region *reg) { - assert_spin_locked(&o2hb_live_lock); - if (!o2hb_global_heartbeat_active()) return; - if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) + /* Prevent race with o2hb_heartbeat_group_drop_item() */ + if (kthread_should_stop()) + return; + + /* Tag region as quorum only after thread reaches steady state */ + if (atomic_read(®->hr_steady_iterations) != 0) return; + spin_lock(&o2hb_live_lock); + + if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) + goto unlock; + /* * A region can be added to the quorum only when it sees all * live nodes heartbeat on it. 
In other words, the region has been @@ -737,13 +760,10 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, */ if (memcmp(reg->hr_live_node_bitmap, o2hb_live_node_bitmap, sizeof(o2hb_live_node_bitmap))) - return; - - if (slot->ds_changed_samples < O2HB_LIVE_THRESHOLD) - return; + goto unlock; - printk(KERN_NOTICE "o2hb: Region %s is now a quorum device\n", - config_item_name(®->hr_item)); + printk(KERN_NOTICE "o2hb: Region %s (%s) is now a quorum device\n", + config_item_name(®->hr_item), reg->hr_dev_name); set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); @@ -754,6 +774,8 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, if (o2hb_pop_count(&o2hb_quorum_region_bitmap, O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) o2hb_region_unpin(NULL); +unlock: + spin_unlock(&o2hb_live_lock); } static int o2hb_check_slot(struct o2hb_region *reg, @@ -925,8 +947,6 @@ fire_callbacks: slot->ds_equal_samples = 0; } out: - o2hb_set_quorum_device(reg, slot); - spin_unlock(&o2hb_live_lock); o2hb_run_event_list(&event); @@ -957,7 +977,8 @@ static int o2hb_highest_node(unsigned long *nodes, static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) { - int i, ret, highest_node, change = 0; + int i, ret, highest_node; + int membership_change = 0, own_slot_ok = 0; unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)]; unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; struct o2hb_bio_wait_ctxt write_wc; @@ -966,7 +987,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) sizeof(configured_nodes)); if (ret) { mlog_errno(ret); - return ret; + goto bail; } /* @@ -982,8 +1003,9 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES); if (highest_node >= O2NM_MAX_NODES) { - mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n"); - return -EINVAL; + mlog(ML_NOTICE, "o2hb: No configured nodes found!\n"); + ret = -EINVAL; + goto bail; } /* No sense in reading the slots of nodes that don't exist @@ -993,29 +1015,27 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) ret = o2hb_read_slots(reg, highest_node + 1); if (ret < 0) { mlog_errno(ret); - return ret; + goto bail; } /* With an up to date view of the slots, we can check that no * other node has been improperly configured to heartbeat in * our slot. */ - o2hb_check_last_timestamp(reg); + own_slot_ok = o2hb_check_own_slot(reg); /* fill in the proper info for our next heartbeat */ o2hb_prepare_block(reg, reg->hr_generation); - /* And fire off the write. Note that we don't wait on this I/O - * until later. 
*/ ret = o2hb_issue_node_write(reg, &write_wc); if (ret < 0) { mlog_errno(ret); - return ret; + goto bail; } i = -1; while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { - change |= o2hb_check_slot(reg, ®->hr_slots[i]); + membership_change |= o2hb_check_slot(reg, ®->hr_slots[i]); } /* @@ -1030,18 +1050,39 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg) * disk */ mlog(ML_ERROR, "Write error %d on device \"%s\"\n", write_wc.wc_error, reg->hr_dev_name); - return write_wc.wc_error; + ret = write_wc.wc_error; + goto bail; } - o2hb_arm_write_timeout(reg); + /* Skip disarming the timeout if own slot has stale/bad data */ + if (own_slot_ok) { + o2hb_set_quorum_device(reg); + o2hb_arm_write_timeout(reg); + } +bail: /* let the person who launched us know when things are steady */ - if (!change && (atomic_read(®->hr_steady_iterations) != 0)) { - if (atomic_dec_and_test(®->hr_steady_iterations)) + if (atomic_read(®->hr_steady_iterations) != 0) { + if (!ret && own_slot_ok && !membership_change) { + if (atomic_dec_and_test(®->hr_steady_iterations)) + wake_up(&o2hb_steady_queue); + } + } + + if (atomic_read(®->hr_steady_iterations) != 0) { + if (atomic_dec_and_test(®->hr_unsteady_iterations)) { + printk(KERN_NOTICE "o2hb: Unable to stabilize " + "heartbeart on region %s (%s)\n", + config_item_name(®->hr_item), + reg->hr_dev_name); + atomic_set(®->hr_steady_iterations, 0); + reg->hr_aborted_start = 1; wake_up(&o2hb_steady_queue); + ret = -EIO; + } } - return 0; + return ret; } /* Subtract b from a, storing the result in a. a *must* have a larger @@ -1095,7 +1136,8 @@ static int o2hb_thread(void *data) /* Pin node */ o2nm_depend_this_node(); - while (!kthread_should_stop() && !reg->hr_unclean_stop) { + while (!kthread_should_stop() && + !reg->hr_unclean_stop && !reg->hr_aborted_start) { /* We track the time spent inside * o2hb_do_disk_heartbeat so that we avoid more than * hr_timeout_ms between disk writes. On busy systems @@ -1103,10 +1145,7 @@ static int o2hb_thread(void *data) * likely to time itself out. */ do_gettimeofday(&before_hb); - i = 0; - do { - ret = o2hb_do_disk_heartbeat(reg); - } while (ret && ++i < 2); + ret = o2hb_do_disk_heartbeat(reg); do_gettimeofday(&after_hb); elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb); @@ -1117,7 +1156,8 @@ static int o2hb_thread(void *data) after_hb.tv_sec, (unsigned long) after_hb.tv_usec, elapsed_msec); - if (elapsed_msec < reg->hr_timeout_ms) { + if (!kthread_should_stop() && + elapsed_msec < reg->hr_timeout_ms) { /* the kthread api has blocked signals for us so no * need to record the return value. */ msleep_interruptible(reg->hr_timeout_ms - elapsed_msec); @@ -1134,20 +1174,20 @@ static int o2hb_thread(void *data) * to timeout on this region when we could just as easily * write a clear generation - thus indicating to them that * this node has left this region. - * - * XXX: Should we skip this on unclean_stop? 
*/ - o2hb_prepare_block(reg, 0); - ret = o2hb_issue_node_write(reg, &write_wc); - if (ret == 0) { - o2hb_wait_on_io(reg, &write_wc); - } else { - mlog_errno(ret); + */ + if (!reg->hr_unclean_stop && !reg->hr_aborted_start) { + o2hb_prepare_block(reg, 0); + ret = o2hb_issue_node_write(reg, &write_wc); + if (ret == 0) + o2hb_wait_on_io(reg, &write_wc); + else + mlog_errno(ret); } /* Unpin node */ o2nm_undepend_this_node(); - mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); + mlog(ML_HEARTBEAT|ML_KTHREAD, "o2hb thread exiting\n"); return 0; } @@ -1426,6 +1466,8 @@ static void o2hb_region_release(struct config_item *item) struct page *page; struct o2hb_region *reg = to_o2hb_region(item); + mlog(ML_HEARTBEAT, "hb region release (%s)\n", reg->hr_dev_name); + if (reg->hr_tmp_block) kfree(reg->hr_tmp_block); @@ -1792,7 +1834,10 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, live_threshold <<= 1; spin_unlock(&o2hb_live_lock); } - atomic_set(®->hr_steady_iterations, live_threshold + 1); + ++live_threshold; + atomic_set(®->hr_steady_iterations, live_threshold); + /* unsteady_iterations is double the steady_iterations */ + atomic_set(®->hr_unsteady_iterations, (live_threshold << 1)); hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s", reg->hr_item.ci_name); @@ -1809,14 +1854,12 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, ret = wait_event_interruptible(o2hb_steady_queue, atomic_read(®->hr_steady_iterations) == 0); if (ret) { - /* We got interrupted (hello ptrace!). Clean up */ - spin_lock(&o2hb_live_lock); - hb_task = reg->hr_task; - reg->hr_task = NULL; - spin_unlock(&o2hb_live_lock); + atomic_set(®->hr_steady_iterations, 0); + reg->hr_aborted_start = 1; + } - if (hb_task) - kthread_stop(hb_task); + if (reg->hr_aborted_start) { + ret = -EIO; goto out; } @@ -1833,8 +1876,8 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, ret = -EIO; if (hb_task && o2hb_global_heartbeat_active()) - printk(KERN_NOTICE "o2hb: Heartbeat started on region %s\n", - config_item_name(®->hr_item)); + printk(KERN_NOTICE "o2hb: Heartbeat started on region %s (%s)\n", + config_item_name(®->hr_item), reg->hr_dev_name); out: if (filp) @@ -2092,13 +2135,6 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, /* stop the thread when the user removes the region dir */ spin_lock(&o2hb_live_lock); - if (o2hb_global_heartbeat_active()) { - clear_bit(reg->hr_region_num, o2hb_region_bitmap); - clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); - if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) - quorum_region = 1; - clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); - } hb_task = reg->hr_task; reg->hr_task = NULL; reg->hr_item_dropped = 1; @@ -2107,19 +2143,30 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, if (hb_task) kthread_stop(hb_task); + if (o2hb_global_heartbeat_active()) { + spin_lock(&o2hb_live_lock); + clear_bit(reg->hr_region_num, o2hb_region_bitmap); + clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); + if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) + quorum_region = 1; + clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); + spin_unlock(&o2hb_live_lock); + printk(KERN_NOTICE "o2hb: Heartbeat %s on region %s (%s)\n", + ((atomic_read(®->hr_steady_iterations) == 0) ? + "stopped" : "start aborted"), config_item_name(item), + reg->hr_dev_name); + } + /* * If we're racing a dev_write(), we need to wake them. 
They will * check reg->hr_task */ if (atomic_read(®->hr_steady_iterations) != 0) { + reg->hr_aborted_start = 1; atomic_set(®->hr_steady_iterations, 0); wake_up(&o2hb_steady_queue); } - if (o2hb_global_heartbeat_active()) - printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n", - config_item_name(®->hr_item)); - config_item_put(item); if (!o2hb_global_heartbeat_active() || !quorum_region) -- cgit v1.2.3 From 1dfecf810e0eacb35987905082f23e5c2cd26e91 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:22:54 -0700 Subject: ocfs2/cluster: Clean up messages in o2net o2net messages needed a facelift. Signed-off-by: Sunil Mushran --- fs/ocfs2/cluster/tcp.c | 119 ++++++++++++++++++++++--------------------------- 1 file changed, 53 insertions(+), 66 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index db5ee4b4f47a..6e97895ecf24 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -545,7 +545,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, } if (was_valid && !valid) { - printk(KERN_NOTICE "o2net: no longer connected to " + printk(KERN_NOTICE "o2net: No longer connected to " SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc)); o2net_complete_nodes_nsw(nn); } @@ -555,7 +555,7 @@ static void o2net_set_nn_state(struct o2net_node *nn, cancel_delayed_work(&nn->nn_connect_expired); printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n", o2nm_this_node() > sc->sc_node->nd_num ? - "connected to" : "accepted connection from", + "Connected to" : "Accepted connection from", SC_NODEF_ARGS(sc)); } @@ -643,7 +643,7 @@ static void o2net_state_change(struct sock *sk) o2net_sc_queue_work(sc, &sc->sc_connect_work); break; default: - printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT + printk(KERN_INFO "o2net: Connection to " SC_NODEF_FMT " shutdown, state %d\n", SC_NODEF_ARGS(sc), sk->sk_state); o2net_sc_queue_work(sc, &sc->sc_shutdown_work); @@ -1284,11 +1284,11 @@ static int o2net_check_handshake(struct o2net_sock_container *sc) struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); if (hand->protocol_version != cpu_to_be64(O2NET_PROTOCOL_VERSION)) { - mlog(ML_NOTICE, SC_NODEF_FMT " advertised net protocol " - "version %llu but %llu is required, disconnecting\n", - SC_NODEF_ARGS(sc), - (unsigned long long)be64_to_cpu(hand->protocol_version), - O2NET_PROTOCOL_VERSION); + printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " Advertised net " + "protocol version %llu but %llu is required. " + "Disconnecting.\n", SC_NODEF_ARGS(sc), + (unsigned long long)be64_to_cpu(hand->protocol_version), + O2NET_PROTOCOL_VERSION); /* don't bother reconnecting if its the wrong version. */ o2net_ensure_shutdown(nn, sc, -ENOTCONN); @@ -1302,33 +1302,33 @@ static int o2net_check_handshake(struct o2net_sock_container *sc) */ if (be32_to_cpu(hand->o2net_idle_timeout_ms) != o2net_idle_timeout()) { - mlog(ML_NOTICE, SC_NODEF_FMT " uses a network idle timeout of " - "%u ms, but we use %u ms locally. disconnecting\n", - SC_NODEF_ARGS(sc), - be32_to_cpu(hand->o2net_idle_timeout_ms), - o2net_idle_timeout()); + printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a network " + "idle timeout of %u ms, but we use %u ms locally. " + "Disconnecting.\n", SC_NODEF_ARGS(sc), + be32_to_cpu(hand->o2net_idle_timeout_ms), + o2net_idle_timeout()); o2net_ensure_shutdown(nn, sc, -ENOTCONN); return -1; } if (be32_to_cpu(hand->o2net_keepalive_delay_ms) != o2net_keepalive_delay()) { - mlog(ML_NOTICE, SC_NODEF_FMT " uses a keepalive delay of " - "%u ms, but we use %u ms locally. 
disconnecting\n", - SC_NODEF_ARGS(sc), - be32_to_cpu(hand->o2net_keepalive_delay_ms), - o2net_keepalive_delay()); + printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a keepalive " + "delay of %u ms, but we use %u ms locally. " + "Disconnecting.\n", SC_NODEF_ARGS(sc), + be32_to_cpu(hand->o2net_keepalive_delay_ms), + o2net_keepalive_delay()); o2net_ensure_shutdown(nn, sc, -ENOTCONN); return -1; } if (be32_to_cpu(hand->o2hb_heartbeat_timeout_ms) != O2HB_MAX_WRITE_TIMEOUT_MS) { - mlog(ML_NOTICE, SC_NODEF_FMT " uses a heartbeat timeout of " - "%u ms, but we use %u ms locally. disconnecting\n", - SC_NODEF_ARGS(sc), - be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), - O2HB_MAX_WRITE_TIMEOUT_MS); + printk(KERN_NOTICE "o2net: " SC_NODEF_FMT " uses a heartbeat " + "timeout of %u ms, but we use %u ms locally. " + "Disconnecting.\n", SC_NODEF_ARGS(sc), + be32_to_cpu(hand->o2hb_heartbeat_timeout_ms), + O2HB_MAX_WRITE_TIMEOUT_MS); o2net_ensure_shutdown(nn, sc, -ENOTCONN); return -1; } @@ -1539,28 +1539,16 @@ static void o2net_idle_timer(unsigned long data) { struct o2net_sock_container *sc = (struct o2net_sock_container *)data; struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); - #ifdef CONFIG_DEBUG_FS - ktime_t now = ktime_get(); + unsigned long msecs = ktime_to_ms(ktime_get()) - + ktime_to_ms(sc->sc_tv_timer); +#else + unsigned long msecs = o2net_idle_timeout(); #endif - printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " - "seconds, shutting it down.\n", SC_NODEF_ARGS(sc), - o2net_idle_timeout() / 1000, - o2net_idle_timeout() % 1000); - -#ifdef CONFIG_DEBUG_FS - mlog(ML_NOTICE, "Here are some times that might help debug the " - "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, " - "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n", - (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now), - (long long)ktime_to_us(sc->sc_tv_data_ready), - (long long)ktime_to_us(sc->sc_tv_advance_start), - (long long)ktime_to_us(sc->sc_tv_advance_stop), - sc->sc_msg_key, sc->sc_msg_type, - (long long)ktime_to_us(sc->sc_tv_func_start), - (long long)ktime_to_us(sc->sc_tv_func_stop)); -#endif + printk(KERN_NOTICE "o2net: Connection to " SC_NODEF_FMT " has been " + "idle for %lu.%lu secs, shutting it down.\n", SC_NODEF_ARGS(sc), + msecs / 1000, msecs % 1000); /* * Initialize the nn_timeout so that the next connection attempt @@ -1693,8 +1681,8 @@ static void o2net_start_connect(struct work_struct *work) out: if (ret) { - mlog(ML_NOTICE, "connect attempt to " SC_NODEF_FMT " failed " - "with errno %d\n", SC_NODEF_ARGS(sc), ret); + printk(KERN_NOTICE "o2net: Connect attempt to " SC_NODEF_FMT + " failed with errno %d\n", SC_NODEF_ARGS(sc), ret); /* 0 err so that another will be queued and attempted * from set_nn_state */ if (sc) @@ -1717,8 +1705,8 @@ static void o2net_connect_expired(struct work_struct *work) spin_lock(&nn->nn_lock); if (!nn->nn_sc_valid) { - mlog(ML_ERROR, "no connection established with node %u after " - "%u.%u seconds, giving up and returning errors.\n", + printk(KERN_NOTICE "o2net: No connection established with " + "node %u after %u.%u seconds, giving up.\n", o2net_num_from_nn(nn), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); @@ -1861,21 +1849,21 @@ static int o2net_accept_one(struct socket *sock) node = o2nm_get_node_by_ip(sin.sin_addr.s_addr); if (node == NULL) { - mlog(ML_NOTICE, "attempt to connect from unknown node at %pI4:%d\n", - &sin.sin_addr.s_addr, ntohs(sin.sin_port)); + printk(KERN_NOTICE "o2net: Attempt to 
connect from unknown " + "node at %pI4:%d\n", &sin.sin_addr.s_addr, + ntohs(sin.sin_port)); ret = -EINVAL; goto out; } if (o2nm_this_node() >= node->nd_num) { local_node = o2nm_get_node_by_num(o2nm_this_node()); - mlog(ML_NOTICE, "unexpected connect attempt seen at node '%s' (" - "%u, %pI4:%d) from node '%s' (%u, %pI4:%d)\n", - local_node->nd_name, local_node->nd_num, - &(local_node->nd_ipv4_address), - ntohs(local_node->nd_ipv4_port), - node->nd_name, node->nd_num, &sin.sin_addr.s_addr, - ntohs(sin.sin_port)); + printk(KERN_NOTICE "o2net: Unexpected connect attempt seen " + "at node '%s' (%u, %pI4:%d) from node '%s' (%u, " + "%pI4:%d)\n", local_node->nd_name, local_node->nd_num, + &(local_node->nd_ipv4_address), + ntohs(local_node->nd_ipv4_port), node->nd_name, + node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port)); ret = -EINVAL; goto out; } @@ -1900,10 +1888,10 @@ static int o2net_accept_one(struct socket *sock) ret = 0; spin_unlock(&nn->nn_lock); if (ret) { - mlog(ML_NOTICE, "attempt to connect from node '%s' at " - "%pI4:%d but it already has an open connection\n", - node->nd_name, &sin.sin_addr.s_addr, - ntohs(sin.sin_port)); + printk(KERN_NOTICE "o2net: Attempt to connect from node '%s' " + "at %pI4:%d but it already has an open connection\n", + node->nd_name, &sin.sin_addr.s_addr, + ntohs(sin.sin_port)); goto out; } @@ -1983,7 +1971,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock); if (ret < 0) { - mlog(ML_ERROR, "unable to create socket, ret=%d\n", ret); + printk(KERN_ERR "o2net: Error %d while creating socket\n", ret); goto out; } @@ -2000,16 +1988,15 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port) sock->sk->sk_reuse = 1; ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); if (ret < 0) { - mlog(ML_ERROR, "unable to bind socket at %pI4:%u, " - "ret=%d\n", &addr, ntohs(port), ret); + printk(KERN_ERR "o2net: Error %d while binding socket at " + "%pI4:%u\n", ret, &addr, ntohs(port)); goto out; } ret = sock->ops->listen(sock, 64); - if (ret < 0) { - mlog(ML_ERROR, "unable to listen on %pI4:%u, ret=%d\n", - &addr, ntohs(port), ret); - } + if (ret < 0) + printk(KERN_ERR "o2net: Error %d while listening on %pI4:%u\n", + ret, &addr, ntohs(port)); out: if (ret) { -- cgit v1.2.3 From 8decab3c8dadcdf4f54ffb30df6e6f67b398b6e0 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:23:54 -0700 Subject: ocfs2/dlm: Clean up messages in o2dlm o2dlm messages needed a facelift. 
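To make the effect of the reworked messages concrete (the node numbers and domain name below are made up for illustration and do not come from the patch): since __dlm_print_nodes() now emits only the node list and a count, a join event ends up in the log as one continued line along the lines of

  o2dlm: Node 3 joins domain B1A2C3D4E5F6 ( 0 1 2 3 ) 4 nodes

and the leave-domain and join-domain paths print the same "( ... ) N nodes" suffix after their own "o2dlm: ..." prefix.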
Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmdomain.c | 18 +++++++++------- fs/ocfs2/dlm/dlmlock.c | 22 ++++++++----------- fs/ocfs2/dlm/dlmmaster.c | 46 ++++++++++++++++++---------------------- fs/ocfs2/dlm/dlmrecovery.c | 53 +++++++++++++++++++++++++++------------------- fs/ocfs2/dlm/dlmthread.c | 2 -- 5 files changed, 71 insertions(+), 70 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 6ed6b95dcf93..ce225fb0cea7 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -539,17 +539,17 @@ again: static void __dlm_print_nodes(struct dlm_ctxt *dlm) { - int node = -1; + int node = -1, num = 0; assert_spin_locked(&dlm->spinlock); - printk(KERN_NOTICE "o2dlm: Nodes in domain %s: ", dlm->name); - + printk("( "); while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1)) < O2NM_MAX_NODES) { printk("%d ", node); + ++num; } - printk("\n"); + printk(") %u nodes\n", num); } static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, @@ -566,11 +566,10 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, node = exit_msg->node_idx; - printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s\n", node, dlm->name); - spin_lock(&dlm->spinlock); clear_bit(node, dlm->domain_map); clear_bit(node, dlm->exit_domain_map); + printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name); __dlm_print_nodes(dlm); /* notify anything attached to the heartbeat events */ @@ -755,6 +754,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm) dlm_mark_domain_leaving(dlm); dlm_leave_domain(dlm); + printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name); dlm_force_free_mles(dlm); dlm_complete_dlm_shutdown(dlm); } @@ -970,7 +970,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, clear_bit(assert->node_idx, dlm->exit_domain_map); __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); - printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n", + printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ", assert->node_idx, dlm->name); __dlm_print_nodes(dlm); @@ -1701,8 +1701,10 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm) bail: spin_lock(&dlm->spinlock); __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); - if (!status) + if (!status) { + printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name); __dlm_print_nodes(dlm); + } spin_unlock(&dlm->spinlock); if (ctxt) { diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 8d39e0fd66f7..c7f3e22bda1e 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -319,27 +319,23 @@ static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm, tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create, sizeof(create), res->owner, &status); if (tmpret >= 0) { - // successfully sent and received - ret = status; // this is already a dlm_status + ret = status; if (ret == DLM_REJECTED) { - mlog(ML_ERROR, "%s:%.*s: BUG. this is a stale lockres " - "no longer owned by %u. that node is coming back " - "up currently.\n", dlm->name, create.namelen, + mlog(ML_ERROR, "%s: res %.*s, Stale lockres no longer " + "owned by node %u. 
That node is coming back up " + "currently.\n", dlm->name, create.namelen, create.name, res->owner); dlm_print_one_lock_resource(res); BUG(); } } else { - mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " - "node %u\n", tmpret, DLM_CREATE_LOCK_MSG, dlm->key, - res->owner); - if (dlm_is_host_down(tmpret)) { + mlog(ML_ERROR, "%s: res %.*s, Error %d send CREATE LOCK to " + "node %u\n", dlm->name, create.namelen, create.name, + tmpret, res->owner); + if (dlm_is_host_down(tmpret)) ret = DLM_RECOVERING; - mlog(0, "node %u died so returning DLM_RECOVERING " - "from lock message!\n", res->owner); - } else { + else ret = dlm_err_to_dlm_status(tmpret); - } } return ret; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 11eefb8c12e9..9f3b093b629a 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -829,8 +829,8 @@ lookup: * but they might own this lockres. wait on them. */ bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); if (bit < O2NM_MAX_NODES) { - mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " - "recover before lock mastery can begin\n", + mlog(0, "%s: res %.*s, At least one node (%d) " + "to recover before lock mastery can begin\n", dlm->name, namelen, (char *)lockid, bit); wait_on_recovery = 1; } @@ -864,8 +864,8 @@ redo_request: * dlm spinlock would be detectable be a change on the mle, * so we only need to clear out the recovery map once. */ if (dlm_is_recovery_lock(lockid, namelen)) { - mlog(ML_NOTICE, "%s: recovery map is not empty, but " - "must master $RECOVERY lock now\n", dlm->name); + mlog(0, "%s: Recovery map is not empty, but must " + "master $RECOVERY lock now\n", dlm->name); if (!dlm_pre_master_reco_lockres(dlm, res)) wait_on_recovery = 0; else { @@ -883,8 +883,8 @@ redo_request: spin_lock(&dlm->spinlock); bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0); if (bit < O2NM_MAX_NODES) { - mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to " - "recover before lock mastery can begin\n", + mlog(0, "%s: res %.*s, At least one node (%d) " + "to recover before lock mastery can begin\n", dlm->name, namelen, (char *)lockid, bit); wait_on_recovery = 1; } else @@ -913,8 +913,8 @@ redo_request: * yet, keep going until it does. this is how the * master will know that asserts are needed back to * the lower nodes. 
*/ - mlog(0, "%s:%.*s: requests only up to %u but master " - "is %u, keep going\n", dlm->name, namelen, + mlog(0, "%s: res %.*s, Requests only up to %u but " + "master is %u, keep going\n", dlm->name, namelen, lockid, nodenum, mle->master); } } @@ -924,13 +924,12 @@ wait: ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked); if (ret < 0) { wait_on_recovery = 1; - mlog(0, "%s:%.*s: node map changed, redo the " - "master request now, blocked=%d\n", - dlm->name, res->lockname.len, + mlog(0, "%s: res %.*s, Node map changed, redo the master " + "request now, blocked=%d\n", dlm->name, res->lockname.len, res->lockname.name, blocked); if (++tries > 20) { - mlog(ML_ERROR, "%s:%.*s: spinning on " - "dlm_wait_for_lock_mastery, blocked=%d\n", + mlog(ML_ERROR, "%s: res %.*s, Spinning on " + "dlm_wait_for_lock_mastery, blocked = %d\n", dlm->name, res->lockname.len, res->lockname.name, blocked); dlm_print_one_lock_resource(res); @@ -940,7 +939,8 @@ wait: goto redo_request; } - mlog(0, "lockres mastered by %u\n", res->owner); + mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len, + res->lockname.name, res->owner); /* make sure we never continue without this */ BUG_ON(res->owner == O2NM_MAX_NODES); @@ -2187,8 +2187,6 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) namelen = res->lockname.len; BUG_ON(namelen > O2NM_MAX_NAME_LEN); - mlog(0, "%s:%.*s: sending deref to %d\n", - dlm->name, namelen, lockname, res->owner); memset(&deref, 0, sizeof(deref)); deref.node_idx = dlm->node_num; deref.namelen = namelen; @@ -2197,14 +2195,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, &deref, sizeof(deref), res->owner, &r); if (ret < 0) - mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " - "node %u\n", ret, DLM_DEREF_LOCKRES_MSG, dlm->key, - res->owner); + mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", + dlm->name, namelen, lockname, ret, res->owner); else if (r < 0) { /* BAD. other node says I did not have a ref. 
*/ - mlog(ML_ERROR,"while dropping ref on %s:%.*s " - "(master=%u) got %d.\n", dlm->name, namelen, - lockname, res->owner, r); + mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", + dlm->name, namelen, lockname, res->owner, r); dlm_print_one_lock_resource(res); BUG(); } @@ -2916,9 +2912,9 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, &migrate, sizeof(migrate), nodenum, &status); if (ret < 0) { - mlog(ML_ERROR, "Error %d when sending message %u (key " - "0x%x) to node %u\n", ret, DLM_MIGRATE_REQUEST_MSG, - dlm->key, nodenum); + mlog(ML_ERROR, "%s: res %.*s, Error %d send " + "MIGRATE_REQUEST to node %u\n", dlm->name, + migrate.namelen, migrate.name, ret, nodenum); if (!dlm_is_host_down(ret)) { mlog(ML_ERROR, "unhandled error=%d!\n", ret); BUG(); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 7efab6d28a21..a3c312c43b90 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -430,6 +430,8 @@ static void dlm_begin_recovery(struct dlm_ctxt *dlm) { spin_lock(&dlm->spinlock); BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE); + printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n", + dlm->name, dlm->reco.dead_node); dlm->reco.state |= DLM_RECO_STATE_ACTIVE; spin_unlock(&dlm->spinlock); } @@ -440,9 +442,18 @@ static void dlm_end_recovery(struct dlm_ctxt *dlm) BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE)); dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE; spin_unlock(&dlm->spinlock); + printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name); wake_up(&dlm->reco.event); } +static void dlm_print_recovery_master(struct dlm_ctxt *dlm) +{ + printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the " + "dead node %u in domain %s\n", dlm->reco.new_master, + (dlm->node_num == dlm->reco.new_master ? 
"me" : "he"), + dlm->reco.dead_node, dlm->name); +} + static int dlm_do_recovery(struct dlm_ctxt *dlm) { int status = 0; @@ -505,9 +516,8 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) } mlog(0, "another node will master this recovery session.\n"); } - mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n", - dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, - dlm->node_num, dlm->reco.dead_node); + + dlm_print_recovery_master(dlm); /* it is safe to start everything back up here * because all of the dead node's lock resources @@ -518,15 +528,13 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm) return 0; master_here: - mlog(ML_NOTICE, "(%d) Node %u is the Recovery Master for the Dead Node " - "%u for Domain %s\n", task_pid_nr(dlm->dlm_reco_thread_task), - dlm->node_num, dlm->reco.dead_node, dlm->name); + dlm_print_recovery_master(dlm); status = dlm_remaster_locks(dlm, dlm->reco.dead_node); if (status < 0) { /* we should never hit this anymore */ - mlog(ML_ERROR, "error %d remastering locks for node %u, " - "retrying.\n", status, dlm->reco.dead_node); + mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, " + "retrying.\n", dlm->name, status, dlm->reco.dead_node); /* yield a bit to allow any final network messages * to get handled on remaining nodes */ msleep(100); @@ -567,7 +575,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT); ndata->state = DLM_RECO_NODE_DATA_REQUESTING; - mlog(0, "requesting lock info from node %u\n", + mlog(0, "%s: Requesting lock info from node %u\n", dlm->name, ndata->node_num); if (ndata->node_num == dlm->node_num) { @@ -640,7 +648,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) spin_unlock(&dlm_reco_state_lock); } - mlog(0, "done requesting all lock info\n"); + mlog(0, "%s: Done requesting all lock info\n", dlm->name); /* nodes should be sending reco data now * just need to wait */ @@ -802,10 +810,9 @@ static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, /* negative status is handled by caller */ if (ret < 0) - mlog(ML_ERROR, "Error %d when sending message %u (key " - "0x%x) to node %u\n", ret, DLM_LOCK_REQUEST_MSG, - dlm->key, request_from); - + mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u " + "to recover dead node %u\n", dlm->name, ret, + request_from, dead_node); // return from here, then // sleep until all received or error return ret; @@ -956,9 +963,9 @@ static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to) ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg, sizeof(done_msg), send_to, &tmpret); if (ret < 0) { - mlog(ML_ERROR, "Error %d when sending message %u (key " - "0x%x) to node %u\n", ret, DLM_RECO_DATA_DONE_MSG, - dlm->key, send_to); + mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u " + "to recover dead node %u\n", dlm->name, ret, send_to, + dead_node); if (!dlm_is_host_down(ret)) { BUG(); } @@ -1127,9 +1134,11 @@ static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, if (ret < 0) { /* XXX: negative status is not handled. * this will end up killing this node. */ - mlog(ML_ERROR, "Error %d when sending message %u (key " - "0x%x) to node %u\n", ret, DLM_MIG_LOCKRES_MSG, - dlm->key, send_to); + mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to " + "node %u (%s)\n", dlm->name, mres->lockname_len, + mres->lockname, ret, send_to, + (orig_flags & DLM_MRES_MIGRATION ? 
+ "migration" : "recovery")); } else { /* might get an -ENOMEM back here */ ret = status; @@ -2324,9 +2333,9 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) dlm_revalidate_lvb(dlm, res, dead_node); if (res->owner == dead_node) { if (res->state & DLM_LOCK_RES_DROPPING_REF) { - mlog(ML_NOTICE, "Ignore %.*s for " + mlog(ML_NOTICE, "%s: res %.*s, Skip " "recovery as it is being freed\n", - res->lockname.len, + dlm->name, res->lockname.len, res->lockname.name); } else dlm_move_lockres_to_recovery_list(dlm, diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 1d6d1d22c471..46faac20f16c 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -185,8 +185,6 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, /* clear our bit from the master's refmap, ignore errors */ ret = dlm_drop_lockres_ref(dlm, res); if (ret < 0) { - mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name, - res->lockname.len, res->lockname.name, ret); if (!dlm_is_host_down(ret)) BUG(); } -- cgit v1.2.3 From 394eb3d38a3ecc549cc34a3040103a9164be516b Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:24:54 -0700 Subject: ocfs2: Clean up messages in stack_o2cb.c o2cb messages needed a facelift. Signed-off-by: Sunil Mushran --- fs/ocfs2/stack_o2cb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 19965b00c43c..be935d89a4e8 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -263,8 +263,8 @@ static void o2dlm_eviction_cb(int node_num, void *data) { struct ocfs2_cluster_connection *conn = data; - mlog(ML_NOTICE, "o2dlm has evicted node %d from group %.*s\n", - node_num, conn->cc_namelen, conn->cc_name); + printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n", + node_num, conn->cc_namelen, conn->cc_name); conn->cc_recovery_handler(node_num, conn->cc_recovery_data); } -- cgit v1.2.3 From 0afbba13226fcdbd4327f6b13a42f6efbb8c9caf Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:25:54 -0700 Subject: ocfs2/dlm: Cleanup up dlm_finish_local_lockres_recovery() dlm_finish_local_lockres_recovery() needed a facelift. 
Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmrecovery.c | 57 ++++++++++++++++++++-------------------------- 1 file changed, 25 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index a3c312c43b90..41e74f0e2d35 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -2093,6 +2093,9 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) { if (res->owner == dead_node) { + mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", + dlm->name, res->lockname.len, res->lockname.name, + res->owner, new_master); list_del_init(&res->recovering); spin_lock(&res->spinlock); /* new_master has our reference from @@ -2114,40 +2117,30 @@ static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, for (i = 0; i < DLM_HASH_BUCKETS; i++) { bucket = dlm_lockres_hash(dlm, i); hlist_for_each_entry(res, hash_iter, bucket, hash_node) { - if (res->state & DLM_LOCK_RES_RECOVERING) { - if (res->owner == dead_node) { - mlog(0, "(this=%u) res %.*s owner=%u " - "was not on recovering list, but " - "clearing state anyway\n", - dlm->node_num, res->lockname.len, - res->lockname.name, new_master); - } else if (res->owner == dlm->node_num) { - mlog(0, "(this=%u) res %.*s owner=%u " - "was not on recovering list, " - "owner is THIS node, clearing\n", - dlm->node_num, res->lockname.len, - res->lockname.name, new_master); - } else - continue; + if (!(res->state & DLM_LOCK_RES_RECOVERING)) + continue; - if (!list_empty(&res->recovering)) { - mlog(0, "%s:%.*s: lockres was " - "marked RECOVERING, owner=%u\n", - dlm->name, res->lockname.len, - res->lockname.name, res->owner); - list_del_init(&res->recovering); - dlm_lockres_put(res); - } - spin_lock(&res->spinlock); - /* new_master has our reference from - * the lock state sent during recovery */ - dlm_change_lockres_owner(dlm, res, new_master); - res->state &= ~DLM_LOCK_RES_RECOVERING; - if (__dlm_lockres_has_locks(res)) - __dlm_dirty_lockres(dlm, res); - spin_unlock(&res->spinlock); - wake_up(&res->wq); + if (res->owner != dead_node && + res->owner != dlm->node_num) + continue; + + if (!list_empty(&res->recovering)) { + list_del_init(&res->recovering); + dlm_lockres_put(res); } + + /* new_master has our reference from + * the lock state sent during recovery */ + mlog(0, "%s: res %.*s, Changing owner from %u to %u\n", + dlm->name, res->lockname.len, res->lockname.name, + res->owner, new_master); + spin_lock(&res->spinlock); + dlm_change_lockres_owner(dlm, res, new_master); + res->state &= ~DLM_LOCK_RES_RECOVERING; + if (__dlm_lockres_has_locks(res)) + __dlm_dirty_lockres(dlm, res); + spin_unlock(&res->spinlock); + wake_up(&res->wq); } } } -- cgit v1.2.3 From 8d400b81cc83b171ff872587723a37eb7fae9abe Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:26:54 -0700 Subject: ocfs2/dlm: Clean up refmap helpers Patch cleans up helpers that set/clear refmap bits and grab/drop inflight lock ref counts. 
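For illustration only (this sketch is not part of the patch; the wrapper function and its arguments are invented, and only the helper names and signatures come from the diff that follows), the cleaned-up helpers are called with the dlm context passed explicitly and res->spinlock held:

static void example_update_refmap(struct dlm_ctxt *dlm,
                                  struct dlm_lock_resource *res,
                                  int node, int has_ref)
{
        spin_lock(&res->spinlock);      /* both helpers assert this lock */
        if (has_ref)
                /* remote 'node' holds a reference on this resource */
                dlm_lockres_set_refmap_bit(dlm, res, node);
        else
                /* remote 'node' has dropped its reference */
                dlm_lockres_clear_refmap_bit(dlm, res, node);
        spin_unlock(&res->spinlock);
}

The old __FILE__/__LINE__ macro plumbing is no longer needed because the helpers record the call site themselves via %ps() and __builtin_return_address(0).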
Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmcommon.h | 49 +++++--------------------- fs/ocfs2/dlm/dlmmaster.c | 88 ++++++++++++++++++++++++++++------------------ fs/ocfs2/dlm/dlmrecovery.c | 8 ++--- 3 files changed, 66 insertions(+), 79 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index d602abb51b61..4a5d5804b130 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -902,46 +902,15 @@ struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm, const char *name, unsigned int namelen); -#define dlm_lockres_set_refmap_bit(bit,res) \ - __dlm_lockres_set_refmap_bit(bit,res,__FILE__,__LINE__) -#define dlm_lockres_clear_refmap_bit(bit,res) \ - __dlm_lockres_clear_refmap_bit(bit,res,__FILE__,__LINE__) - -static inline void __dlm_lockres_set_refmap_bit(int bit, - struct dlm_lock_resource *res, - const char *file, - int line) -{ - //printk("%s:%d:%.*s: setting bit %d\n", file, line, - // res->lockname.len, res->lockname.name, bit); - set_bit(bit, res->refmap); -} - -static inline void __dlm_lockres_clear_refmap_bit(int bit, - struct dlm_lock_resource *res, - const char *file, - int line) -{ - //printk("%s:%d:%.*s: clearing bit %d\n", file, line, - // res->lockname.len, res->lockname.name, bit); - clear_bit(bit, res->refmap); -} - -void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res, - const char *file, - int line); -void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res, - int new_lockres, - const char *file, - int line); -#define dlm_lockres_drop_inflight_ref(d,r) \ - __dlm_lockres_drop_inflight_ref(d,r,__FILE__,__LINE__) -#define dlm_lockres_grab_inflight_ref(d,r) \ - __dlm_lockres_grab_inflight_ref(d,r,0,__FILE__,__LINE__) -#define dlm_lockres_grab_inflight_ref_new(d,r) \ - __dlm_lockres_grab_inflight_ref(d,r,1,__FILE__,__LINE__) +void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, int bit); +void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, int bit); + +void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); +void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res); void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 9f3b093b629a..11e446ffb67a 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -631,39 +631,58 @@ error: return NULL; } -void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res, - int new_lockres, - const char *file, - int line) +void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, int bit) { - if (!new_lockres) - assert_spin_locked(&res->spinlock); + assert_spin_locked(&res->spinlock); + + mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len, + res->lockname.name, bit, __builtin_return_address(0)); + + set_bit(bit, res->refmap); +} + +void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res, int bit) +{ + assert_spin_locked(&res->spinlock); + + mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len, + res->lockname.name, bit, __builtin_return_address(0)); + + clear_bit(bit, res->refmap); +} + + +void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) +{ + 
assert_spin_locked(&res->spinlock); if (!test_bit(dlm->node_num, res->refmap)) { BUG_ON(res->inflight_locks != 0); - dlm_lockres_set_refmap_bit(dlm->node_num, res); + dlm_lockres_set_refmap_bit(dlm, res, dlm->node_num); } res->inflight_locks++; - mlog(0, "%s:%.*s: inflight++: now %u\n", - dlm->name, res->lockname.len, res->lockname.name, - res->inflight_locks); + mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, + res->lockname.len, res->lockname.name, res->inflight_locks, + __builtin_return_address(0)); } -void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res, - const char *file, - int line) +void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, + struct dlm_lock_resource *res) { assert_spin_locked(&res->spinlock); BUG_ON(res->inflight_locks == 0); + res->inflight_locks--; - mlog(0, "%s:%.*s: inflight--: now %u\n", - dlm->name, res->lockname.len, res->lockname.name, - res->inflight_locks); + mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, + res->lockname.len, res->lockname.name, res->inflight_locks, + __builtin_return_address(0)); + if (res->inflight_locks == 0) - dlm_lockres_clear_refmap_bit(dlm->node_num, res); + dlm_lockres_clear_refmap_bit(dlm, res, dlm->node_num); wake_up(&res->wq); } @@ -843,8 +862,10 @@ lookup: /* finally add the lockres to its hash bucket */ __dlm_insert_lockres(dlm, res); - /* since this lockres is new it doesn't not require the spinlock */ - dlm_lockres_grab_inflight_ref_new(dlm, res); + + spin_lock(&res->spinlock); + dlm_lockres_grab_inflight_ref(dlm, res); + spin_unlock(&res->spinlock); /* if this node does not become the master make sure to drop * this inflight reference below */ @@ -1426,9 +1447,7 @@ way_up_top: } if (res->owner == dlm->node_num) { - mlog(0, "%s:%.*s: setting bit %u in refmap\n", - dlm->name, namelen, name, request->node_idx); - dlm_lockres_set_refmap_bit(request->node_idx, res); + dlm_lockres_set_refmap_bit(dlm, res, request->node_idx); spin_unlock(&res->spinlock); response = DLM_MASTER_RESP_YES; if (mle) @@ -1493,10 +1512,8 @@ way_up_top: * go back and clean the mles on any * other nodes */ dispatch_assert = 1; - dlm_lockres_set_refmap_bit(request->node_idx, res); - mlog(0, "%s:%.*s: setting bit %u in refmap\n", - dlm->name, namelen, name, - request->node_idx); + dlm_lockres_set_refmap_bit(dlm, res, + request->node_idx); } else response = DLM_MASTER_RESP_NO; } else { @@ -1702,7 +1719,7 @@ again: "lockres, set the bit in the refmap\n", namelen, lockname, to); spin_lock(&res->spinlock); - dlm_lockres_set_refmap_bit(to, res); + dlm_lockres_set_refmap_bit(dlm, res, to); spin_unlock(&res->spinlock); } } @@ -2256,7 +2273,7 @@ int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, else { BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); if (test_bit(node, res->refmap)) { - dlm_lockres_clear_refmap_bit(node, res); + dlm_lockres_clear_refmap_bit(dlm, res, node); cleared = 1; } } @@ -2316,7 +2333,7 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); if (test_bit(node, res->refmap)) { __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); - dlm_lockres_clear_refmap_bit(node, res); + dlm_lockres_clear_refmap_bit(dlm, res, node); cleared = 1; } spin_unlock(&res->spinlock); @@ -2798,7 +2815,8 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, BUG_ON(!list_empty(&lock->bast_list)); BUG_ON(lock->ast_pending); BUG_ON(lock->bast_pending); - 
dlm_lockres_clear_refmap_bit(lock->ml.node, res); + dlm_lockres_clear_refmap_bit(dlm, res, + lock->ml.node); list_del_init(&lock->list); dlm_lock_put(lock); /* In a normal unlock, we would have added a @@ -2819,7 +2837,7 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: node %u had a ref to this " "migrating lockres, clearing\n", dlm->name, res->lockname.len, res->lockname.name, bit); - dlm_lockres_clear_refmap_bit(bit, res); + dlm_lockres_clear_refmap_bit(dlm, res, bit); } bit++; } @@ -2933,7 +2951,7 @@ static int dlm_do_migrate_request(struct dlm_ctxt *dlm, dlm->name, res->lockname.len, res->lockname.name, nodenum); spin_lock(&res->spinlock); - dlm_lockres_set_refmap_bit(nodenum, res); + dlm_lockres_set_refmap_bit(dlm, res, nodenum); spin_unlock(&res->spinlock); } } @@ -3267,7 +3285,7 @@ int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, * mastery reference here since old_master will briefly have * a reference after the migration completes */ spin_lock(&res->spinlock); - dlm_lockres_set_refmap_bit(old_master, res); + dlm_lockres_set_refmap_bit(dlm, res, old_master); spin_unlock(&res->spinlock); mlog(0, "now time to do a migrate request to other nodes\n"); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 41e74f0e2d35..f1a3aa84ad89 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1776,7 +1776,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm, dlm->name, mres->lockname_len, mres->lockname, from); spin_lock(&res->spinlock); - dlm_lockres_set_refmap_bit(from, res); + dlm_lockres_set_refmap_bit(dlm, res, from); spin_unlock(&res->spinlock); added++; break; @@ -1974,7 +1974,7 @@ skip_lvb: mlog(0, "%s:%.*s: added lock for node %u, " "setting refmap bit\n", dlm->name, res->lockname.len, res->lockname.name, ml->node); - dlm_lockres_set_refmap_bit(ml->node, res); + dlm_lockres_set_refmap_bit(dlm, res, ml->node); added++; } spin_unlock(&res->spinlock); @@ -2254,12 +2254,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm, res->lockname.len, res->lockname.name, freed, dead_node); __dlm_print_one_lock_resource(res); } - dlm_lockres_clear_refmap_bit(dead_node, res); + dlm_lockres_clear_refmap_bit(dlm, res, dead_node); } else if (test_bit(dead_node, res->refmap)) { mlog(0, "%s:%.*s: dead node %u had a ref, but had " "no locks and had not purged before dying\n", dlm->name, res->lockname.len, res->lockname.name, dead_node); - dlm_lockres_clear_refmap_bit(dead_node, res); + dlm_lockres_clear_refmap_bit(dlm, res, dead_node); } /* do not kick thread yet */ -- cgit v1.2.3 From e9f0b6a6233105c188064328c3f15e21942d0241 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:27:54 -0700 Subject: ocfs2/dlm: Trace insert/remove of resource to/from hash Add mlog to trace adding and removing the resource from/to the hash table. 
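A minimal sketch of the locking and reference pattern that these trace messages document (the caller below is hypothetical and not part of the patch; the double-underscore naming convention in this code means the caller already holds dlm->spinlock, and the get/put behaviour is taken from the helper bodies in the diff that follows):

        /* the hash table owns one reference on the resource, taken on
         * insert and dropped again on unhash */
        spin_lock(&dlm->spinlock);
        __dlm_insert_lockres(dlm, res);  /* dlm_lockres_get() + add to bucket */
        /* ... resource is visible to lookups while it stays hashed ... */
        __dlm_unhash_lockres(dlm, res);  /* hlist_del_init() + dlm_lockres_put() */
        spin_unlock(&dlm->spinlock);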
Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmcommon.h | 5 ++--- fs/ocfs2/dlm/dlmdomain.c | 19 ++++++++++++------- fs/ocfs2/dlm/dlmthread.c | 2 +- 3 files changed, 15 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 4a5d5804b130..b1748ac74751 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -877,9 +877,8 @@ static inline void dlm_lockres_get(struct dlm_lock_resource *res) kref_get(&res->refs); } void dlm_lockres_put(struct dlm_lock_resource *res); -void __dlm_unhash_lockres(struct dlm_lock_resource *res); -void __dlm_insert_lockres(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res); +void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); +void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res); struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, const char *name, unsigned int len, diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index ce225fb0cea7..200d9888ffee 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -157,16 +157,18 @@ static int dlm_protocol_compare(struct dlm_protocol_version *existing, static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm); -void __dlm_unhash_lockres(struct dlm_lock_resource *lockres) +void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { - if (!hlist_unhashed(&lockres->hash_node)) { - hlist_del_init(&lockres->hash_node); - dlm_lockres_put(lockres); - } + if (hlist_unhashed(&res->hash_node)) + return; + + mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len, + res->lockname.name); + hlist_del_init(&res->hash_node); + dlm_lockres_put(res); } -void __dlm_insert_lockres(struct dlm_ctxt *dlm, - struct dlm_lock_resource *res) +void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { struct hlist_head *bucket; struct qstr *q; @@ -180,6 +182,9 @@ void __dlm_insert_lockres(struct dlm_ctxt *dlm, dlm_lockres_get(res); hlist_add_head(&res->hash_node, bucket); + + mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len, + res->lockname.name); } struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm, diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 46faac20f16c..4eff65e7e59e 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -207,7 +207,7 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, BUG(); } - __dlm_unhash_lockres(res); + __dlm_unhash_lockres(dlm, res); /* lockres is not in the hash now. drop the flag and wake up * any processes waiting in dlm_get_lock_resource. */ -- cgit v1.2.3 From ed8625c6fb93d750ed022db571a8a7b7a6724b3b Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:28:54 -0700 Subject: ocfs2/dlm: Cleanup dlm_wait_for_node_death() and dlm_wait_for_node_recovery() dlm_wait_for_node_death() and dlm_wait_for_node_recovery() needed a facelift. 
Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmcommon.h | 4 ++-- fs/ocfs2/dlm/dlmrecovery.c | 46 ++++++++++++++++++++++------------------------ 2 files changed, 24 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index b1748ac74751..a5952ceecba5 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -859,8 +859,8 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm); void dlm_wait_for_recovery(struct dlm_ctxt *dlm); void dlm_kick_recovery_thread(struct dlm_ctxt *dlm); int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node); -int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); -int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); +void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout); +void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout); void dlm_put(struct dlm_ctxt *dlm); struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index f1a3aa84ad89..01ebfd0bdad7 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -362,40 +362,38 @@ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) } -int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) +void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) { - if (timeout) { - mlog(ML_NOTICE, "%s: waiting %dms for notification of " - "death of node %u\n", dlm->name, timeout, node); + if (dlm_is_node_dead(dlm, node)) + return; + + printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in " + "domain %s\n", node, dlm->name); + + if (timeout) wait_event_timeout(dlm->dlm_reco_thread_wq, - dlm_is_node_dead(dlm, node), - msecs_to_jiffies(timeout)); - } else { - mlog(ML_NOTICE, "%s: waiting indefinitely for notification " - "of death of node %u\n", dlm->name, node); + dlm_is_node_dead(dlm, node), + msecs_to_jiffies(timeout)); + else wait_event(dlm->dlm_reco_thread_wq, dlm_is_node_dead(dlm, node)); - } - /* for now, return 0 */ - return 0; } -int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) +void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) { - if (timeout) { - mlog(0, "%s: waiting %dms for notification of " - "recovery of node %u\n", dlm->name, timeout, node); + if (dlm_is_node_recovered(dlm, node)) + return; + + printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in " + "domain %s\n", node, dlm->name); + + if (timeout) wait_event_timeout(dlm->dlm_reco_thread_wq, - dlm_is_node_recovered(dlm, node), - msecs_to_jiffies(timeout)); - } else { - mlog(0, "%s: waiting indefinitely for notification " - "of recovery of node %u\n", dlm->name, node); + dlm_is_node_recovered(dlm, node), + msecs_to_jiffies(timeout)); + else wait_event(dlm->dlm_reco_thread_wq, dlm_is_node_recovered(dlm, node)); - } - /* for now, return 0 */ - return 0; } /* callers of the top-level api calls (dlmlock/dlmunlock) should -- cgit v1.2.3 From ff0a522e7db79625aa27a433467eb94c5e255718 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:29:54 -0700 Subject: ocfs2/dlm: Take inflight reference count for remotely mastered resources too The inflight reference count, in the lock resource, is taken to pin the resource in memory. We take it when a new resource is created and release it after a lock is attached to it. We do this to prevent the resource from getting purged prematurely. 
Earlier this reference count was being taken for locally mastered resources only. This patch extends the same functionality for remotely mastered ones. We are doing this because the same premature purging could occur for remotely mastered resources if the remote node were to die before completion of the create lock. Fix for Oracle bug#12405575. Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmlock.c | 12 ++++++++---- fs/ocfs2/dlm/dlmmaster.c | 47 +++++++++++++++++------------------------------ fs/ocfs2/dlm/dlmthread.c | 12 +++++++----- 3 files changed, 32 insertions(+), 39 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index c7f3e22bda1e..3ef2c1adfb8f 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm, kick_thread = 1; } } - /* reduce the inflight count, this may result in the lockres - * being purged below during calc_usage */ - if (lock->ml.node == dlm->node_num) - dlm_lockres_drop_inflight_ref(dlm, res); spin_unlock(&res->spinlock); wake_up(&res->wq); @@ -737,6 +733,14 @@ retry_lock: } } + /* Inflight taken in dlm_get_lock_resource() is dropped here */ + spin_lock(&res->spinlock); + dlm_lockres_drop_inflight_ref(dlm, res); + spin_unlock(&res->spinlock); + + dlm_lockres_calc_usage(dlm, res); + dlm_kick_thread(dlm, res); + if (status != DLM_NORMAL) { lock->lksb->flags &= ~DLM_LKSB_GET_LVB; if (status != DLM_NOTQUEUED) diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 11e446ffb67a..005261c333b0 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -659,11 +659,8 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, { assert_spin_locked(&res->spinlock); - if (!test_bit(dlm->node_num, res->refmap)) { - BUG_ON(res->inflight_locks != 0); - dlm_lockres_set_refmap_bit(dlm, res, dlm->node_num); - } res->inflight_locks++; + mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, res->lockname.len, res->lockname.name, res->inflight_locks, __builtin_return_address(0)); @@ -677,12 +674,11 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm, BUG_ON(res->inflight_locks == 0); res->inflight_locks--; + mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, res->lockname.len, res->lockname.name, res->inflight_locks, __builtin_return_address(0)); - if (res->inflight_locks == 0) - dlm_lockres_clear_refmap_bit(dlm, res, dlm->node_num); wake_up(&res->wq); } @@ -716,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm, unsigned int hash; int tries = 0; int bit, wait_on_recovery = 0; - int drop_inflight_if_nonlocal = 0; BUG_ON(!lockid); @@ -728,36 +723,33 @@ lookup: spin_lock(&dlm->spinlock); tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); if (tmpres) { - int dropping_ref = 0; - spin_unlock(&dlm->spinlock); - spin_lock(&tmpres->spinlock); - /* We wait for the other thread that is mastering the resource */ + /* Wait on the thread that is mastering the resource */ if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { __dlm_wait_on_lockres(tmpres); BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); + spin_unlock(&tmpres->spinlock); + dlm_lockres_put(tmpres); + tmpres = NULL; + goto lookup; } - if (tmpres->owner == dlm->node_num) { - BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); - dlm_lockres_grab_inflight_ref(dlm, tmpres); - } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) - dropping_ref = 1; - spin_unlock(&tmpres->spinlock); - - /* wait until done 
messaging the master, drop our ref to allow - * the lockres to be purged, start over. */ - if (dropping_ref) { - spin_lock(&tmpres->spinlock); - __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF); + /* Wait on the resource purge to complete before continuing */ + if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) { + BUG_ON(tmpres->owner == dlm->node_num); + __dlm_wait_on_lockres_flags(tmpres, + DLM_LOCK_RES_DROPPING_REF); spin_unlock(&tmpres->spinlock); dlm_lockres_put(tmpres); tmpres = NULL; goto lookup; } - mlog(0, "found in hash!\n"); + /* Grab inflight ref to pin the resource */ + dlm_lockres_grab_inflight_ref(dlm, tmpres); + + spin_unlock(&tmpres->spinlock); if (res) dlm_lockres_put(res); res = tmpres; @@ -863,14 +855,11 @@ lookup: /* finally add the lockres to its hash bucket */ __dlm_insert_lockres(dlm, res); + /* Grab inflight ref to pin the resource */ spin_lock(&res->spinlock); dlm_lockres_grab_inflight_ref(dlm, res); spin_unlock(&res->spinlock); - /* if this node does not become the master make sure to drop - * this inflight reference below */ - drop_inflight_if_nonlocal = 1; - /* get an extra ref on the mle in case this is a BLOCK * if so, the creator of the BLOCK may try to put the last * ref at this time in the assert master handler, so we @@ -973,8 +962,6 @@ wait: wake_waiters: spin_lock(&res->spinlock); - if (res->owner != dlm->node_num && drop_inflight_if_nonlocal) - dlm_lockres_drop_inflight_ref(dlm, res); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; spin_unlock(&res->spinlock); wake_up(&res->wq); diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 4eff65e7e59e..e73c833fc2a1 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res) { int bit; + assert_spin_locked(&res->spinlock); + if (__dlm_lockres_has_locks(res)) return 0; + /* Locks are in the process of being created */ + if (res->inflight_locks) + return 0; + if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) return 0; if (res->state & DLM_LOCK_RES_RECOVERING) return 0; + /* Another node has this resource with this node as the master */ bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); if (bit < O2NM_MAX_NODES) return 0; - /* - * since the bit for dlm->node_num is not set, inflight_locks better - * be zero - */ - BUG_ON(res->inflight_locks != 0); return 1; } -- cgit v1.2.3 From a2c0cc1579176bd0808ef7deea456767dfa80217 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:30:54 -0700 Subject: ocfs2/dlm: dlmlock_remote() needs to account for remastery In dlmlock_remote(), we wait for the resource to stop being active before setting the inprogress flag. Active includes recovery, migration, etc. The problem here is that if the resource was being recovered or migrated, the new owner could very well be that node itself (and thus not a remote node). This problem was observed in Oracle bug#12583620. 
The error messages observed were as follows: dlm_send_remote_lock_request:337 ERROR: Error -40 (ELOOP) when sending message 503 (key 0xd6d8c7) to node 2 dlmlock_remote:271 ERROR: dlm status = DLM_BADARGS dlmlock:751 ERROR: dlm status = DLM_BADARGS Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmlock.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 3ef2c1adfb8f..f32fcba04923 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -227,10 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm, lock->ml.type, res->lockname.len, res->lockname.name, flags); + /* + * Wait if resource is getting recovered, remastered, etc. + * If the resource was remastered and new owner is self, then exit. + */ spin_lock(&res->spinlock); - - /* will exit this call with spinlock held */ __dlm_wait_on_lockres(res); + if (res->owner == dlm->node_num) { + spin_unlock(&res->spinlock); + return DLM_RECOVERING; + } res->state |= DLM_LOCK_RES_IN_PROGRESS; /* add lock to local (secondary) queue */ @@ -710,18 +716,10 @@ retry_lock: if (status == DLM_RECOVERING || status == DLM_MIGRATING || status == DLM_FORWARD) { - mlog(0, "retrying lock with migration/" - "recovery/in progress\n"); msleep(100); - /* no waiting for dlm_reco_thread */ if (recovery) { if (status != DLM_RECOVERING) goto retry_lock; - - mlog(0, "%s: got RECOVERING " - "for $RECOVERY lock, master " - "was %u\n", dlm->name, - res->owner); /* wait to see the node go down, then * drop down and allow the lockres to * get cleaned up. need to remaster. */ -- cgit v1.2.3 From bb570a5d9e74f71d32751823052db4a97d6a5e7c Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:31:54 -0700 Subject: ocfs2/cluster: Fix output in file elapsed_time_in_ms The o2hb debugfs file, elapsed_time_in_ms, should return values only after the timer is armed atleast once. Signed-off-by: Sunil Mushran --- fs/ocfs2/cluster/heartbeat.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 4728a746281d..a4e855e3690e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1198,6 +1198,7 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) struct o2hb_debug_buf *db = inode->i_private; struct o2hb_region *reg; unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long lts; char *buf = NULL; int i = -1; int out = 0; @@ -1234,9 +1235,11 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) case O2HB_DB_TYPE_REGION_ELAPSED_TIME: reg = (struct o2hb_region *)db->db_data; - out += snprintf(buf + out, PAGE_SIZE - out, "%u\n", - jiffies_to_msecs(jiffies - - reg->hr_last_timeout_start)); + lts = reg->hr_last_timeout_start; + /* If 0, it has never been set before */ + if (lts) + lts = jiffies_to_msecs(jiffies - lts); + out += snprintf(buf + out, PAGE_SIZE - out, "%lu\n", lts); goto done; case O2HB_DB_TYPE_REGION_PINNED: -- cgit v1.2.3 From 3ba169ccec1c5ad0f678e04fd29b990197fdfe79 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:32:54 -0700 Subject: ocfs2/cluster: Add new function o2net_fill_node_map() Patch adds function o2net_fill_node_map() to return the bitmap of nodes that it is connected to. This bitmap is also accessible by the user via the debugfs file, /sys/kernel/debug/o2net/connected_nodes. 
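As a usage sketch for the new call (example_print_connected_nodes() is a hypothetical caller added here for illustration; o2net_fill_node_map(), O2NM_MAX_NODES and the bitmap helpers are the ones used in the diff below):

#include <linux/bitops.h>
#include <linux/printk.h>
#include "cluster/tcp.h"
#include "cluster/nodemanager.h"

static void example_print_connected_nodes(void)
{
	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int node = -1;

	/* Fill 'map' with the nodes this node currently has a socket to. */
	o2net_fill_node_map(map, sizeof(map));

	/* Walk the set bits; the first lookup starts at bit 0. */
	while ((node = find_next_bit(map, O2NM_MAX_NODES, node + 1)) <
	       O2NM_MAX_NODES)
		printk(KERN_INFO "o2net: connected to node %d\n", node);
}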
Signed-off-by: Sunil Mushran --- fs/ocfs2/cluster/netdebug.c | 102 ++++++++++++++++++++++++++++++-------------- fs/ocfs2/cluster/tcp.c | 19 +++++++++ fs/ocfs2/cluster/tcp.h | 2 + 3 files changed, 90 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 3a5835904b3d..dc45deb19e68 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -47,6 +47,7 @@ #define SC_DEBUG_NAME "sock_containers" #define NST_DEBUG_NAME "send_tracking" #define STATS_DEBUG_NAME "stats" +#define NODES_DEBUG_NAME "connected_nodes" #define SHOW_SOCK_CONTAINERS 0 #define SHOW_SOCK_STATS 1 @@ -55,6 +56,7 @@ static struct dentry *o2net_dentry; static struct dentry *sc_dentry; static struct dentry *nst_dentry; static struct dentry *stats_dentry; +static struct dentry *nodes_dentry; static DEFINE_SPINLOCK(o2net_debug_lock); @@ -491,53 +493,87 @@ static const struct file_operations sc_seq_fops = { .release = sc_fop_release, }; -int o2net_debugfs_init(void) +static int o2net_fill_bitmap(char *buf, int len) { - o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); - if (!o2net_dentry) { - mlog_errno(-ENOMEM); - goto bail; - } + unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; + int i = -1, out = 0; - nst_dentry = debugfs_create_file(NST_DEBUG_NAME, S_IFREG|S_IRUSR, - o2net_dentry, NULL, - &nst_seq_fops); - if (!nst_dentry) { - mlog_errno(-ENOMEM); - goto bail; - } + o2net_fill_node_map(map, sizeof(map)); - sc_dentry = debugfs_create_file(SC_DEBUG_NAME, S_IFREG|S_IRUSR, - o2net_dentry, NULL, - &sc_seq_fops); - if (!sc_dentry) { - mlog_errno(-ENOMEM); - goto bail; - } + while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) + out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i); + out += snprintf(buf + out, PAGE_SIZE - out, "\n"); - stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR, - o2net_dentry, NULL, - &stats_seq_fops); - if (!stats_dentry) { - mlog_errno(-ENOMEM); - goto bail; - } + return out; +} + +static int nodes_fop_open(struct inode *inode, struct file *file) +{ + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE)); + + file->private_data = buf; return 0; -bail: - debugfs_remove(stats_dentry); - debugfs_remove(sc_dentry); - debugfs_remove(nst_dentry); - debugfs_remove(o2net_dentry); - return -ENOMEM; } +static int o2net_debug_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static ssize_t o2net_debug_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, + i_size_read(file->f_mapping->host)); +} + +static const struct file_operations nodes_fops = { + .open = nodes_fop_open, + .release = o2net_debug_release, + .read = o2net_debug_read, + .llseek = generic_file_llseek, +}; + void o2net_debugfs_exit(void) { + debugfs_remove(nodes_dentry); debugfs_remove(stats_dentry); debugfs_remove(sc_dentry); debugfs_remove(nst_dentry); debugfs_remove(o2net_dentry); } +int o2net_debugfs_init(void) +{ + mode_t mode = S_IFREG|S_IRUSR; + + o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL); + if (o2net_dentry) + nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode, + o2net_dentry, NULL, &nst_seq_fops); + if (nst_dentry) + sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode, + o2net_dentry, NULL, &sc_seq_fops); + if (sc_dentry) + stats_dentry = 
debugfs_create_file(STATS_DEBUG_NAME, mode, + o2net_dentry, NULL, &stats_seq_fops); + if (stats_dentry) + nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode, + o2net_dentry, NULL, &nodes_fops); + if (nodes_dentry) + return 0; + + o2net_debugfs_exit(); + mlog_errno(-ENOMEM); + return -ENOMEM; +} + #endif /* CONFIG_DEBUG_FS */ diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 6e97895ecf24..ae13d5ca7908 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -1034,6 +1034,25 @@ static int o2net_tx_can_proceed(struct o2net_node *nn, return ret; } +/* Get a map of all nodes to which this node is currently connected to */ +void o2net_fill_node_map(unsigned long *map, unsigned bytes) +{ + struct o2net_sock_container *sc; + int node, ret; + + BUG_ON(bytes < (BITS_TO_LONGS(O2NM_MAX_NODES) * sizeof(unsigned long))); + + memset(map, 0, bytes); + for (node = 0; node < O2NM_MAX_NODES; ++node) { + o2net_tx_can_proceed(o2net_nn_from_num(node), &sc, &ret); + if (!ret) { + set_bit(node, map); + sc_put(sc); + } + } +} +EXPORT_SYMBOL_GPL(o2net_fill_node_map); + int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, size_t caller_veclen, u8 target_node, int *status) { diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h index fd6179eb26d4..5bada2a69b50 100644 --- a/fs/ocfs2/cluster/tcp.h +++ b/fs/ocfs2/cluster/tcp.h @@ -106,6 +106,8 @@ int o2net_register_handler(u32 msg_type, u32 key, u32 max_len, struct list_head *unreg_list); void o2net_unregister_handler_list(struct list_head *list); +void o2net_fill_node_map(unsigned long *map, unsigned bytes); + struct o2nm_node; int o2net_register_hb_callbacks(void); void o2net_unregister_hb_callbacks(void); -- cgit v1.2.3 From 6b27f62fc750d85bc6fc3718b3b38ec60edc2d74 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:33:54 -0700 Subject: ocfs2/cluster: Cluster up now includes network connections too The cluster up check only checks to see if the node is heartbeating or not. If yes it continues assuming that the node is connected to all the nodes. But if that is not the case, the cluster join aborts with a stack of errors that are not easy to comprehend. This patch adds the network connect check upfront and prints the nodes that the node is not yet connected to, before aborting. 
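The heart of that check is a comparison of two node bitmaps, one filled from the heartbeat layer and one from the network layer. A condensed, single-shot sketch is below (example_all_heartbeating_nodes_connected() is a hypothetical name; the full version, including the 60-second stabilize loop, is o2cb_cluster_check() in the diff that follows):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>
#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

static int example_all_heartbeating_nodes_connected(void)
{
	unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int i = -1;

	o2hb_fill_node_map(hbmap, sizeof(hbmap));	/* heartbeating nodes */
	o2net_fill_node_map(netmap, sizeof(netmap));	/* nodes we have sockets to */
	set_bit(o2nm_this_node(), netmap);		/* this node counts as connected */

	if (!memcmp(hbmap, netmap, sizeof(hbmap)))
		return 0;

	/* Report heartbeating nodes that do not have an o2net connection yet. */
	while ((i = find_next_bit(hbmap, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
		if (!test_bit(i, netmap))
			printk(KERN_ERR "o2cb: not connected to node %d\n", i);

	return -ENOTCONN;
}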
Signed-off-by: Sunil Mushran --- fs/ocfs2/dlm/dlmdomain.c | 7 ----- fs/ocfs2/stack_o2cb.c | 67 +++++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 61 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 200d9888ffee..92f2ead0fab6 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -2138,13 +2138,6 @@ struct dlm_ctxt * dlm_register_domain(const char *domain, goto leave; } - if (!o2hb_check_local_node_heartbeating()) { - mlog(ML_ERROR, "the local node has not been configured, or is " - "not heartbeating\n"); - ret = -EPROTO; - goto leave; - } - mlog(0, "register called for domain \"%s\"\n", domain); retry: diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index be935d89a4e8..94368017edb3 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -28,6 +28,7 @@ #include "cluster/masklog.h" #include "cluster/nodemanager.h" #include "cluster/heartbeat.h" +#include "cluster/tcp.h" #include "stackglue.h" @@ -255,6 +256,61 @@ static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb) dlm_print_one_lock(lksb->lksb_o2dlm.lockid); } +/* + * Check if this node is heartbeating and is connected to all other + * heartbeating nodes. + */ +static int o2cb_cluster_check(void) +{ + u8 node_num; + int i; + unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; + unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; + + node_num = o2nm_this_node(); + if (node_num == O2NM_MAX_NODES) { + printk(KERN_ERR "o2cb: This node has not been configured.\n"); + return -EINVAL; + } + + /* + * o2dlm expects o2net sockets to be created. If not, then + * dlm_join_domain() fails with a stack of errors which are both cryptic + * and incomplete. The idea here is to detect upfront whether we have + * managed to connect to all nodes or not. If not, then list the nodes + * to allow the user to check the configuration (incorrect IP, firewall, + * etc.) Yes, this is racy. But its not the end of the world. + */ +#define O2CB_MAP_STABILIZE_COUNT 60 + for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) { + o2hb_fill_node_map(hbmap, sizeof(hbmap)); + if (!test_bit(node_num, hbmap)) { + printk(KERN_ERR "o2cb: %s heartbeat has not been " + "started.\n", (o2hb_global_heartbeat_active() ? + "Global" : "Local")); + return -EINVAL; + } + o2net_fill_node_map(netmap, sizeof(netmap)); + /* Force set the current node to allow easy compare */ + set_bit(node_num, netmap); + if (!memcmp(hbmap, netmap, sizeof(hbmap))) + return 0; + if (i < O2CB_MAP_STABILIZE_COUNT) + msleep(1000); + } + + printk(KERN_ERR "o2cb: This node could not connect to nodes:"); + i = -1; + while ((i = find_next_bit(hbmap, O2NM_MAX_NODES, + i + 1)) < O2NM_MAX_NODES) { + if (!test_bit(i, netmap)) + printk(" %u", i); + } + printk(".\n"); + + return -ENOTCONN; +} + /* * Called from the dlm when it's about to evict a node. This is how the * classic stack signals node death. @@ -280,12 +336,11 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) BUG_ON(conn == NULL); BUG_ON(conn->cc_proto == NULL); - /* for now we only have one cluster/node, make sure we see it - * in the heartbeat universe */ - if (!o2hb_check_local_node_heartbeating()) { - if (o2hb_global_heartbeat_active()) - mlog(ML_ERROR, "Global heartbeat not started\n"); - rc = -EINVAL; + /* Ensure cluster stack is up and all nodes are connected */ + rc = o2cb_cluster_check(); + if (rc) { + printk(KERN_ERR "o2cb: Cluster check failed. 
Fix errors " + "before retrying.\n"); goto out; } -- cgit v1.2.3 From 619c200de144b44f5e405305241bcd7edbb8c6cf Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:34:54 -0700 Subject: ocfs2: Clean up messages in the fs Convert useful messages from ML_NOTICE to KERN_NOTICE to improve readability. Signed-off-by: Sunil Mushran --- fs/ocfs2/journal.c | 9 ++++++--- fs/ocfs2/quota_local.c | 13 +++++++++---- fs/ocfs2/slot_map.c | 4 ++-- fs/ocfs2/super.c | 10 +++++----- 4 files changed, 22 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 295d56454e8b..8eaaa7854409 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1544,9 +1544,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, /* we need to run complete recovery for offline orphan slots */ ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); - mlog(ML_NOTICE, "Recovering node %d from slot %d on device (%u,%u)\n", - node_num, slot_num, - MAJOR(osb->sb->s_dev), MINOR(osb->sb->s_dev)); + printk(KERN_NOTICE "ocfs2: Begin replay journal (node %d, slot %d) on "\ + "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), + MINOR(osb->sb->s_dev)); OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters); @@ -1601,6 +1601,9 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb, jbd2_journal_destroy(journal); + printk(KERN_NOTICE "ocfs2: End replay journal (node %d, slot %d) on "\ + "device (%u,%u)\n", node_num, slot_num, MAJOR(osb->sb->s_dev), + MINOR(osb->sb->s_dev)); done: /* drop the lock on this nodes journal */ if (got_lock) diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index dc8007fc9247..942fd65bdad3 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -404,7 +404,9 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery( int status = 0; struct ocfs2_quota_recovery *rec; - mlog(ML_NOTICE, "Beginning quota recovery in slot %u\n", slot_num); + printk(KERN_NOTICE "ocfs2: Beginning quota recovery on device (%s) for " + "slot %u\n", osb->dev_str, slot_num); + rec = ocfs2_alloc_quota_recovery(); if (!rec) return ERR_PTR(-ENOMEM); @@ -596,7 +598,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, struct inode *lqinode; unsigned int flags; - mlog(ML_NOTICE, "Finishing quota recovery in slot %u\n", slot_num); + printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for " + "slot %u\n", osb->dev_str, slot_num); + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); for (type = 0; type < MAXQUOTAS; type++) { if (list_empty(&(rec->r_list[type]))) @@ -612,8 +616,9 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, /* Someone else is holding the lock? Then he must be * doing the recovery. Just skip the file... 
*/ if (status == -EAGAIN) { - mlog(ML_NOTICE, "skipping quota recovery for slot %d " - "because quota file is locked.\n", slot_num); + printk(KERN_NOTICE "ocfs2: Skipping quota recovery on " + "device (%s) for slot %d because quota file is " + "locked.\n", osb->dev_str, slot_num); status = 0; goto out_put; } else if (status < 0) { diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 26fc0014d509..1424c151cccc 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -493,8 +493,8 @@ int ocfs2_find_slot(struct ocfs2_super *osb) goto bail; } } else - mlog(ML_NOTICE, "slot %d is already allocated to this node!\n", - slot); + printk(KERN_INFO "ocfs2: Slot %d on device (%s) was already " + "allocated to this node!\n", slot, osb->dev_str); ocfs2_set_slot(si, slot, osb->node_num); osb->slot_num = slot; diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 56f61027236b..3e7850380d2b 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1107,9 +1107,9 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) ocfs2_set_ro_flag(osb, 1); - printk(KERN_NOTICE "Readonly device detected. No cluster " - "services will be utilized for this mount. Recovery " - "will be skipped.\n"); + printk(KERN_NOTICE "ocfs2: Readonly device (%s) detected. " + "Cluster services will not be used for this mount. " + "Recovery will be skipped.\n", osb->dev_str); } if (!ocfs2_is_hard_readonly(osb)) { @@ -2462,8 +2462,8 @@ static int ocfs2_check_volume(struct ocfs2_super *osb) goto finally; } } else { - mlog(ML_NOTICE, "File system was not unmounted cleanly, " - "recovering volume.\n"); + printk(KERN_NOTICE "ocfs2: File system on device (%s) was not " + "unmounted cleanly, recovering it.\n", osb->dev_str); } local = ocfs2_mount_local(osb); -- cgit v1.2.3 From a035bff6b82aca89c1223e2c614adc2d17ec8aa2 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Sun, 24 Jul 2011 10:35:54 -0700 Subject: ocfs2: Add comment about orphan scanning Add a comment that explains the reason as to why orphan scan scans all the slots. Signed-off-by: Sunil Mushran --- fs/ocfs2/journal.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 8eaaa7854409..0a42ae96dca7 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1811,6 +1811,20 @@ static inline unsigned long ocfs2_orphan_scan_timeout(void) * every slot, queuing a recovery of the slot on the ocfs2_wq thread. This * is done to catch any orphans that are left over in orphan directories. * + * It scans all slots, even ones that are in use. It does so to handle the + * case described below: + * + * Node 1 has an inode it was using. The dentry went away due to memory + * pressure. Node 1 closes the inode, but it's on the free list. The node + * has the open lock. + * Node 2 unlinks the inode. It grabs the dentry lock to notify others, + * but node 1 has no dentry and doesn't get the message. It trylocks the + * open lock, sees that another node has a PR, and does nothing. + * Later node 2 runs its orphan dir. It igets the inode, trylocks the + * open lock, sees the PR still, and does nothing. + * Basically, we have to trigger an orphan iput on node 1. The only way + * for this to happen is if node 1 runs node 2's orphan dir. + * * ocfs2_queue_orphan_scan gets called every ORPHAN_SCAN_SCHEDULE_TIMEOUT * seconds. It gets an EX lock on os_lockres and checks sequence number * stored in LVB. 
If the sequence number has changed, it means some other -- cgit v1.2.3 From 5cffff9e29866a3de98c2c25135b3199491f93b0 Mon Sep 17 00:00:00 2001 From: Wengang Wang Date: Sun, 24 Jul 2011 10:36:54 -0700 Subject: ocfs2: Fix ocfs2_page_mkwrite() This patch address two shortcomings in ocfs2_page_mkwrite(): 1. Makes the function return better VM_FAULT_* errors. 2. It handles a error that is triggered when a page is dropped from the mapping due to memory pressure. This patch locks the page to prevent that. [Patch was cleaned up by Sunil Mushran.] Signed-off-by: Wengang Wang Signed-off-by: Sunil Mushran --- fs/ocfs2/aops.c | 51 +++++++++++++++++++++++++++++++++++++++++++-------- fs/ocfs2/mmap.c | 53 ++++++++++++++++++++++++----------------------------- 2 files changed, 67 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c1efe939c774..ff98c169b631 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -862,6 +862,12 @@ struct ocfs2_write_ctxt { struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; struct page *w_target_page; + /* + * w_target_locked is used for page_mkwrite path indicating no unlocking + * against w_target_page in ocfs2_write_end_nolock. + */ + unsigned int w_target_locked:1; + /* * ocfs2_write_end() uses this to know what the real range to * write in the target should be. @@ -895,6 +901,24 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) { + int i; + + /* + * w_target_locked is only set to true in the page_mkwrite() case. + * The intent is to allow us to lock the target page from write_begin() + * to write_end(). The caller must hold a ref on w_target_page. + */ + if (wc->w_target_locked) { + BUG_ON(!wc->w_target_page); + for (i = 0; i < wc->w_num_pages; i++) { + if (wc->w_target_page == wc->w_pages[i]) { + wc->w_pages[i] = NULL; + break; + } + } + mark_page_accessed(wc->w_target_page); + page_cache_release(wc->w_target_page); + } ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); brelse(wc->w_di_bh); @@ -1132,20 +1156,17 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, */ lock_page(mmap_page); + /* Exit and let the caller retry */ if (mmap_page->mapping != mapping) { + WARN_ON(mmap_page->mapping); unlock_page(mmap_page); - /* - * Sanity check - the locking in - * ocfs2_pagemkwrite() should ensure - * that this code doesn't trigger. - */ - ret = -EINVAL; - mlog_errno(ret); + ret = -EAGAIN; goto out; } page_cache_get(mmap_page); wc->w_pages[i] = mmap_page; + wc->w_target_locked = true; } else { wc->w_pages[i] = find_or_create_page(mapping, index, GFP_NOFS); @@ -1160,6 +1181,8 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, wc->w_target_page = wc->w_pages[i]; } out: + if (ret) + wc->w_target_locked = false; return ret; } @@ -1817,11 +1840,23 @@ try_again: */ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, cluster_of_pages, mmap_page); - if (ret) { + if (ret && ret != -EAGAIN) { mlog_errno(ret); goto out_quota; } + /* + * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock + * the target page. In this case, we exit with no error and no target + * page. This will trigger the caller, page_mkwrite(), to re-try + * the operation. 
+ */ + if (ret == -EAGAIN) { + BUG_ON(wc->w_target_page); + ret = 0; + goto out_quota; + } + ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, len); if (ret) { diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 3e9393ca39eb..9cd41083e991 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -61,7 +61,7 @@ static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, struct page *page) { - int ret; + int ret = VM_FAULT_NOPAGE; struct inode *inode = file->f_path.dentry->d_inode; struct address_space *mapping = inode->i_mapping; loff_t pos = page_offset(page); @@ -71,32 +71,25 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, void *fsdata; loff_t size = i_size_read(inode); - /* - * Another node might have truncated while we were waiting on - * cluster locks. - * We don't check size == 0 before the shift. This is borrowed - * from do_generic_file_read. - */ last_index = (size - 1) >> PAGE_CACHE_SHIFT; - if (unlikely(!size || page->index > last_index)) { - ret = -EINVAL; - goto out; - } /* - * The i_size check above doesn't catch the case where nodes - * truncated and then re-extended the file. We'll re-check the - * page mapping after taking the page lock inside of - * ocfs2_write_begin_nolock(). + * There are cases that lead to the page no longer bebongs to the + * mapping. + * 1) pagecache truncates locally due to memory pressure. + * 2) pagecache truncates when another is taking EX lock against + * inode lock. see ocfs2_data_convert_worker. + * + * The i_size check doesn't catch the case where nodes truncated and + * then re-extended the file. We'll re-check the page mapping after + * taking the page lock inside of ocfs2_write_begin_nolock(). + * + * Let VM retry with these cases. */ - if (!PageUptodate(page) || page->mapping != inode->i_mapping) { - /* - * the page has been umapped in ocfs2_data_downconvert_worker. - * So return 0 here and let VFS retry. - */ - ret = 0; + if ((page->mapping != inode->i_mapping) || + (!PageUptodate(page)) || + (page_offset(page) >= size)) goto out; - } /* * Call ocfs2_write_begin() and ocfs2_write_end() to take @@ -116,17 +109,21 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, if (ret) { if (ret != -ENOSPC) mlog_errno(ret); + if (ret == -ENOMEM) + ret = VM_FAULT_OOM; + else + ret = VM_FAULT_SIGBUS; goto out; } - ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, - fsdata); - if (ret < 0) { - mlog_errno(ret); + if (!locked_page) { + ret = VM_FAULT_NOPAGE; goto out; } + ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, + fsdata); BUG_ON(ret != len); - ret = 0; + ret = VM_FAULT_LOCKED; out: return ret; } @@ -168,8 +165,6 @@ static int ocfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) out: ocfs2_unblock_signals(&oldset); - if (ret) - ret = VM_FAULT_SIGBUS; return ret; } -- cgit v1.2.3 From 93862d5e1ab875664c6cc95254fc365028a48bb1 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Mon, 25 Jul 2011 14:58:15 -0700 Subject: ocfs2: Implement llseek() ocfs2 implements its own llseek() to provide the SEEK_HOLE/SEEK_DATA functionality. SEEK_HOLE sets the file pointer to the start of either a hole or an unwritten (preallocated) extent, that is greater than or equal to the supplied offset. SEEK_DATA sets the file pointer to the start of an allocated extent (not unwritten) that is greater than or equal to the supplied offset. 
If the supplied offset is on a desired region, then the file pointer is set to it. Offsets greater than or equal to the file size return -ENXIO. Unwritten (preallocated) extents are considered holes because the file system treats reads to such regions in the same way as it does to holes. Signed-off-by: Sunil Mushran --- fs/ocfs2/extent_map.c | 96 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ocfs2/extent_map.h | 2 ++ fs/ocfs2/file.c | 55 +++++++++++++++++++++++++++-- 3 files changed, 151 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 23457b491e8c..2f5b92ef0e53 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -832,6 +832,102 @@ out: return ret; } +int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin) +{ + struct inode *inode = file->f_mapping->host; + int ret; + unsigned int is_last = 0, is_data = 0; + u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; + u32 cpos, cend, clen, hole_size; + u64 extoff, extlen; + struct buffer_head *di_bh = NULL; + struct ocfs2_extent_rec rec; + + BUG_ON(origin != SEEK_DATA && origin != SEEK_HOLE); + + ret = ocfs2_inode_lock(inode, &di_bh, 0); + if (ret) { + mlog_errno(ret); + goto out; + } + + down_read(&OCFS2_I(inode)->ip_alloc_sem); + + if (*offset >= inode->i_size) { + ret = -ENXIO; + goto out_unlock; + } + + if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + if (origin == SEEK_HOLE) + *offset = inode->i_size; + goto out_unlock; + } + + clen = 0; + cpos = *offset >> cs_bits; + cend = ocfs2_clusters_for_bytes(inode->i_sb, inode->i_size); + + while (cpos < cend && !is_last) { + ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size, + &rec, &is_last); + if (ret) { + mlog_errno(ret); + goto out_unlock; + } + + extoff = cpos; + extoff <<= cs_bits; + + if (rec.e_blkno == 0ULL) { + clen = hole_size; + is_data = 0; + } else { + clen = le16_to_cpu(rec.e_leaf_clusters) - + (cpos - le32_to_cpu(rec.e_cpos)); + is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 
0 : 1; + } + + if ((!is_data && origin == SEEK_HOLE) || + (is_data && origin == SEEK_DATA)) { + if (extoff > *offset) + *offset = extoff; + goto out_unlock; + } + + if (!is_last) + cpos += clen; + } + + if (origin == SEEK_HOLE) { + extoff = cpos; + extoff <<= cs_bits; + extlen = clen; + extlen <<= cs_bits; + + if ((extoff + extlen) > inode->i_size) + extlen = inode->i_size - extoff; + extoff += extlen; + if (extoff > *offset) + *offset = extoff; + goto out_unlock; + } + + ret = -ENXIO; + +out_unlock: + + brelse(di_bh); + + up_read(&OCFS2_I(inode)->ip_alloc_sem); + + ocfs2_inode_unlock(inode, 0); +out: + if (ret && ret != -ENXIO) + ret = -ENXIO; + return ret; +} + int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr, struct buffer_head *bhs[], int flags, int (*validate)(struct super_block *sb, diff --git a/fs/ocfs2/extent_map.h b/fs/ocfs2/extent_map.h index e79d41c2c909..67ea57d2fd59 100644 --- a/fs/ocfs2/extent_map.h +++ b/fs/ocfs2/extent_map.h @@ -53,6 +53,8 @@ int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno, int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 map_start, u64 map_len); +int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int origin); + int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster, u32 *p_cluster, u32 *num_clusters, struct ocfs2_extent_list *el, diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 0fc2bd34039d..c0f015e11c28 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2591,6 +2591,57 @@ bail: return ret; } +/* Refer generic_file_llseek_unlocked() */ +static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int origin) +{ + struct inode *inode = file->f_mapping->host; + int ret = 0; + + mutex_lock(&inode->i_mutex); + + switch (origin) { + case SEEK_SET: + break; + case SEEK_END: + offset += inode->i_size; + break; + case SEEK_CUR: + if (offset == 0) { + offset = file->f_pos; + goto out; + } + offset += file->f_pos; + break; + case SEEK_DATA: + case SEEK_HOLE: + ret = ocfs2_seek_data_hole_offset(file, &offset, origin); + if (ret) + goto out; + break; + default: + ret = -EINVAL; + goto out; + } + + if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) + ret = -EINVAL; + if (!ret && offset > inode->i_sb->s_maxbytes) + ret = -EINVAL; + if (ret) + goto out; + + if (offset != file->f_pos) { + file->f_pos = offset; + file->f_version = 0; + } + +out: + mutex_unlock(&inode->i_mutex); + if (ret) + return ret; + return offset; +} + const struct inode_operations ocfs2_file_iops = { .setattr = ocfs2_setattr, .getattr = ocfs2_getattr, @@ -2615,7 +2666,7 @@ const struct inode_operations ocfs2_special_file_iops = { * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks! */ const struct file_operations ocfs2_fops = { - .llseek = generic_file_llseek, + .llseek = ocfs2_file_llseek, .read = do_sync_read, .write = do_sync_write, .mmap = ocfs2_mmap, @@ -2663,7 +2714,7 @@ const struct file_operations ocfs2_dops = { * the cluster. */ const struct file_operations ocfs2_fops_no_plocks = { - .llseek = generic_file_llseek, + .llseek = ocfs2_file_llseek, .read = do_sync_read, .write = do_sync_write, .mmap = ocfs2_mmap, -- cgit v1.2.3
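As a usage note on the llseek patch above: the SEEK_HOLE/SEEK_DATA behaviour it implements can be exercised from user space with plain lseek(2). A minimal sketch follows, assuming a libc and kernel that expose SEEK_HOLE/SEEK_DATA; "testfile" is only a placeholder for a file on the ocfs2 mount:

#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd;
	off_t data, hole;

	fd = open("testfile", O_RDONLY);	/* placeholder path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* First allocated (written) region at or after offset 0. */
	data = lseek(fd, 0, SEEK_DATA);
	if (data < 0 && errno == ENXIO)
		printf("no data at or after offset 0\n");
	else
		printf("first data at %lld\n", (long long)data);

	/* First hole (or unwritten extent) at or after that offset. */
	hole = lseek(fd, data < 0 ? 0 : data, SEEK_HOLE);
	if (hole >= 0)
		printf("next hole at %lld\n", (long long)hole);

	close(fd);
	return 0;
}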