From d3e46fea1b1e8ba97a8c9dd8f54b97d086cd25aa Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:04:19 +0200 Subject: btrfs: sink blocksize parameter to readahead_tree_block All callers pass nodesize. Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 8 +++----- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/disk-io.h | 2 +- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/relocation.c | 3 +-- 5 files changed, 8 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 14a72ed14ef7..50eca331812c 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2282,7 +2282,7 @@ static void reada_for_search(struct btrfs_root *root, if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { gen = btrfs_node_ptr_generation(node, nr); - readahead_tree_block(root, search, blocksize); + readahead_tree_block(root, search); nread += blocksize; } nscan++; @@ -2301,7 +2301,6 @@ static noinline void reada_for_balance(struct btrfs_root *root, u64 gen; u64 block1 = 0; u64 block2 = 0; - int blocksize; parent = path->nodes[level + 1]; if (!parent) @@ -2309,7 +2308,6 @@ static noinline void reada_for_balance(struct btrfs_root *root, nritems = btrfs_header_nritems(parent); slot = path->slots[level + 1]; - blocksize = root->nodesize; if (slot > 0) { block1 = btrfs_node_blockptr(parent, slot - 1); @@ -2334,9 +2332,9 @@ static noinline void reada_for_balance(struct btrfs_root *root, } if (block1) - readahead_tree_block(root, block1, blocksize); + readahead_tree_block(root, block1); if (block2) - readahead_tree_block(root, block2, blocksize); + readahead_tree_block(root, block2); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 30965120772b..be9d7c612489 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1073,12 +1073,12 @@ static const struct address_space_operations btree_aops = { .set_page_dirty = btree_set_page_dirty, }; -void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) +void readahead_tree_block(struct btrfs_root *root, u64 bytenr) { struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; - buf = btrfs_find_create_tree_block(root, bytenr, blocksize); + buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); if (!buf) return; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 414651821fb3..9cf4359ace05 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -46,7 +46,7 @@ struct btrfs_fs_devices; struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u64 parent_transid); -void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); +void readahead_tree_block(struct btrfs_root *root, u64 bytenr); int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, int mirror_num, struct extent_buffer **eb); struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 222d6aea4a8a..c025751c20d7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7485,7 +7485,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans, continue; } reada: - readahead_tree_block(root, bytenr, blocksize); + readahead_tree_block(root, bytenr); nread++; } wc->reada_slot = slot; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 74257d6436ad..cb5d4462ebb4 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c 
@@ -2965,8 +2965,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, while (rb_node) { block = rb_entry(rb_node, struct tree_block, rb_node); if (!block->key_ready) - readahead_tree_block(rc->extent_root, block->bytenr, - block->key.objectid); + readahead_tree_block(rc->extent_root, block->bytenr); rb_node = rb_next(rb_node); } -- cgit v1.2.3 From b6ae40ec76dc7589d9f77bd281515be3ad58c533 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:18:25 +0200 Subject: btrfs: remove blocksize from reada_extent Replace with global nodesize instead. Signed-off-by: David Sterba --- fs/btrfs/reada.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index b63ae20618fb..5c3fde6571bb 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -66,7 +66,6 @@ struct reada_extctl { struct reada_extent { u64 logical; struct btrfs_key top; - u32 blocksize; int err; struct list_head extctl; int refcnt; @@ -349,7 +348,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, blocksize = root->nodesize; re->logical = logical; - re->blocksize = blocksize; re->top = *top; INIT_LIST_HEAD(&re->extctl); spin_lock_init(&re->lock); @@ -660,7 +658,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, int mirror_num = 0; struct extent_buffer *eb = NULL; u64 logical; - u32 blocksize; int ret; int i; int need_kick = 0; @@ -694,7 +691,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, spin_unlock(&fs_info->reada_lock); return 0; } - dev->reada_next = re->logical + re->blocksize; + dev->reada_next = re->logical + fs_info->tree_root->nodesize; re->refcnt++; spin_unlock(&fs_info->reada_lock); @@ -709,7 +706,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, } } logical = re->logical; - blocksize = re->blocksize; spin_lock(&re->lock); if (re->scheduled_for == NULL) { @@ -724,8 +720,8 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, return 0; atomic_inc(&dev->reada_in_flight); - ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize, - mirror_num, &eb); + ret = reada_tree_block_flagged(fs_info->extent_root, logical, + fs_info->tree_root->nodesize, mirror_num, &eb); if (ret) __readahead_hook(fs_info->extent_root, NULL, logical, ret); else if (eb) @@ -851,7 +847,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) break; printk(KERN_DEBUG " re: logical %llu size %u empty %d for %lld", - re->logical, re->blocksize, + re->logical, fs_info->tree_root->nodesize, list_empty(&re->extctl), re->scheduled_for ? re->scheduled_for->devid : -1); @@ -886,7 +882,8 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) } printk(KERN_DEBUG "re: logical %llu size %u list empty %d for %lld", - re->logical, re->blocksize, list_empty(&re->extctl), + re->logical, fs_info->tree_root->nodesize, + list_empty(&re->extctl), re->scheduled_for ? 
re->scheduled_for->devid : -1); for (i = 0; i < re->nzones; ++i) { printk(KERN_CONT " zone %llu-%llu devs", -- cgit v1.2.3 From c0dcaa4d7b66b788b315c418bda3cca75c5938dc Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:24:21 +0200 Subject: btrfs: sink blocksize parameter to reada_tree_block_flagged Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 4 ++-- fs/btrfs/disk-io.h | 2 +- fs/btrfs/reada.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index be9d7c612489..8123b03b1f9d 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1086,7 +1086,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr) free_extent_buffer(buf); } -int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, +int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb) { struct extent_buffer *buf = NULL; @@ -1094,7 +1094,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; int ret; - buf = btrfs_find_create_tree_block(root, bytenr, blocksize); + buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); if (!buf) return 0; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 9cf4359ace05..4d4ecdd9f4a2 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -47,7 +47,7 @@ struct btrfs_fs_devices; struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u64 parent_transid); void readahead_tree_block(struct btrfs_root *root, u64 bytenr); -int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, +int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb); struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 5c3fde6571bb..4d3d4e5287c5 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -721,7 +721,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, atomic_inc(&dev->reada_in_flight); ret = reada_tree_block_flagged(fs_info->extent_root, logical, - fs_info->tree_root->nodesize, mirror_num, &eb); + mirror_num, &eb); if (ret) __readahead_hook(fs_info->extent_root, NULL, logical, ret); else if (eb) -- cgit v1.2.3 From fe864576de7fb940b5bd1f8ab8908a08a3416ca0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:28:42 +0200 Subject: btrfs: sink blocksize parameter to btrfs_init_new_buffer Signed-off-by: David Sterba --- fs/btrfs/extent-tree.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c025751c20d7..50ebc74db508 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7215,11 +7215,11 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, static struct extent_buffer * btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u64 bytenr, u32 blocksize, int level) + u64 bytenr, int level) { struct extent_buffer *buf; - buf = btrfs_find_create_tree_block(root, bytenr, blocksize); + buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); if (!buf) return ERR_PTR(-ENOMEM); btrfs_set_header_generation(buf, trans->transid); @@ -7338,7 +7338,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, if 
(btrfs_test_is_dummy_root(root)) { buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, - blocksize, level); + level); if (!IS_ERR(buf)) root->alloc_bytenr += blocksize; return buf; @@ -7355,8 +7355,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, return ERR_PTR(ret); } - buf = btrfs_init_new_buffer(trans, root, ins.objectid, - blocksize, level); + buf = btrfs_init_new_buffer(trans, root, ins.objectid, level); BUG_ON(IS_ERR(buf)); /* -ENOMEM */ if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { -- cgit v1.2.3 From a83fffb75d09cd3d44167b7fb9c1ab9e2269445f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:39:54 +0200 Subject: btrfs: sink blocksize parameter to btrfs_find_create_tree_block Finally it's clear that the requested blocksize is always equal to nodesize, with one exception, the superblock. Superblock has fixed size regardless of the metadata block size, but uses the same helpers to initialize sys array/chunk tree and to work with the chunk items. So it pretends to be an extent_buffer for a moment, btrfs_read_sys_array is full of special cases, we're adding one more. Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 12 ++++++------ fs/btrfs/disk-io.h | 2 +- fs/btrfs/extent-tree.c | 4 ++-- fs/btrfs/tree-log.c | 2 +- fs/btrfs/volumes.c | 9 +++++++-- 5 files changed, 17 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8123b03b1f9d..548cb540e516 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1078,7 +1078,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr) struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, @@ -1094,7 +1094,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; int ret; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return 0; @@ -1125,12 +1125,12 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, } struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, - u64 bytenr, u32 blocksize) + u64 bytenr) { if (btrfs_test_is_dummy_root(root)) return alloc_test_extent_buffer(root->fs_info, bytenr, - blocksize); - return alloc_extent_buffer(root->fs_info, bytenr, blocksize); + root->nodesize); + return alloc_extent_buffer(root->fs_info, bytenr, root->nodesize); } @@ -1152,7 +1152,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, struct extent_buffer *buf = NULL; int ret; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return NULL; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 4d4ecdd9f4a2..27d44c0fd236 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -50,7 +50,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr); int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb); struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, - u64 bytenr, u32 blocksize); + u64 bytenr); void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf); int 
open_ctree(struct super_block *sb, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 50ebc74db508..8ff31f81d870 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7219,7 +7219,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, { struct extent_buffer *buf; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return ERR_PTR(-ENOMEM); btrfs_set_header_generation(buf, trans->transid); @@ -7825,7 +7825,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans, next = btrfs_find_tree_block(root, bytenr); if (!next) { - next = btrfs_find_create_tree_block(root, bytenr, blocksize); + next = btrfs_find_create_tree_block(root, bytenr); if (!next) return -ENOMEM; btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a02da16f2be..4a42edc224a8 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2164,7 +2164,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, parent = path->nodes[*level]; root_owner = btrfs_header_owner(parent); - next = btrfs_find_create_tree_block(root, bytenr, blocksize); + next = btrfs_find_create_tree_block(root, bytenr); if (!next) return -ENOMEM; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0144790e296e..f0af9cd0814c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6247,8 +6247,13 @@ int btrfs_read_sys_array(struct btrfs_root *root) u32 cur; struct btrfs_key key; - sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET, - BTRFS_SUPER_INFO_SIZE); + ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); + /* + * This will create extent buffer of nodesize, superblock size is + * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will + * overallocate but we can keep it as-is, only the first page is used. 
+ */ + sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET); if (!sb) return -ENOMEM; btrfs_set_buffer_uptodate(sb); -- cgit v1.2.3 From 7476dfdaade5b373db4679555706546bd5b4bd6c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 03:34:59 +0200 Subject: btrfs: sink blocksize parameter to tree_block_processed Signed-off-by: David Sterba --- fs/btrfs/relocation.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index cb5d4462ebb4..d83085381bcc 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2855,9 +2855,10 @@ static void update_processed_blocks(struct reloc_control *rc, } } -static int tree_block_processed(u64 bytenr, u32 blocksize, - struct reloc_control *rc) +static int tree_block_processed(u64 bytenr, struct reloc_control *rc) { + u32 blocksize = rc->extent_root->nodesize; + if (test_range_bit(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) return 1; @@ -3352,7 +3353,7 @@ static int __add_tree_block(struct reloc_control *rc, bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info, SKINNY_METADATA); - if (tree_block_processed(bytenr, blocksize, rc)) + if (tree_block_processed(bytenr, rc)) return 0; if (tree_search(blocks, bytenr)) @@ -3610,7 +3611,7 @@ static int find_data_references(struct reloc_control *rc, if (added) goto next; - if (!tree_block_processed(leaf->start, leaf->len, rc)) { + if (!tree_block_processed(leaf->start, rc)) { block = kmalloc(sizeof(*block), GFP_NOFS); if (!block) { err = -ENOMEM; -- cgit v1.2.3 From 23d79d81b13431756fee428acde49c9b770da132 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:55:29 +0200 Subject: btrfs: use GFP_NOFS in __alloc_extent_buffer directly Same mask from all callers. 
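With the mask folded into the helper, the allocation site becomes:

	static struct extent_buffer *
	__alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
			      unsigned long len)
	{
		struct extent_buffer *eb;

		eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS);
		...

and the clone, dummy and regular allocation paths simply drop the trailing
GFP_NOFS argument from their calls.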
Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 4ebabd237153..619592d86c2a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4598,11 +4598,11 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) static struct extent_buffer * __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, - unsigned long len, gfp_t mask) + unsigned long len) { struct extent_buffer *eb = NULL; - eb = kmem_cache_zalloc(extent_buffer_cache, mask); + eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS); if (eb == NULL) return NULL; eb->start = start; @@ -4643,7 +4643,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) struct extent_buffer *new; unsigned long num_pages = num_extent_pages(src->start, src->len); - new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS); + new = __alloc_extent_buffer(NULL, src->start, src->len); if (new == NULL) return NULL; @@ -4672,7 +4672,7 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) unsigned long num_pages = num_extent_pages(0, len); unsigned long i; - eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS); + eb = __alloc_extent_buffer(NULL, start, len); if (!eb) return NULL; @@ -4824,7 +4824,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, if (eb) return eb; - eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS); + eb = __alloc_extent_buffer(fs_info, start, len); if (!eb) return NULL; -- cgit v1.2.3 From 3f556f7853ec4845a7c219d026cbcdf4cfa8cea7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 03:20:26 +0200 Subject: btrfs: unify extent buffer allocation api Make the extent buffer allocation interface consistent. Cloned eb will set a valid fs_info. For dummy eb, we can drop the length parameter and set it from fs_info. The built-in sanity checks may pass a NULL fs_info that's queried for nodesize, but we know it's 4096. 
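The dummy-buffer helper now derives the length from fs_info instead of taking
it from the caller, with an explicit fallback for the sanity tests:

	struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
							u64 start)
	{
		unsigned long len;

		if (!fs_info) {
			/* sanity tests pass a NULL fs_info and use a 4k nodesize */
			len = 4096;
		} else {
			len = fs_info->tree_root->nodesize;
		}
		...

Callers that have a real fs_info, such as tree_mod_log_rewind() and
get_old_root(), now pass it along, e.g. alloc_dummy_extent_buffer(fs_info,
eb->start).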
Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 5 ++--- fs/btrfs/extent_io.c | 23 ++++++++++++++++++----- fs/btrfs/extent_io.h | 3 ++- fs/btrfs/tests/extent-buffer-tests.c | 2 +- fs/btrfs/tests/inode-tests.c | 4 ++-- fs/btrfs/tests/qgroup-tests.c | 21 ++++++++++----------- 6 files changed, 35 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 50eca331812c..276d4187cbf0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1363,8 +1363,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { BUG_ON(tm->slot != 0); - eb_rewin = alloc_dummy_extent_buffer(eb->start, - fs_info->tree_root->nodesize); + eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); if (!eb_rewin) { btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); @@ -1444,7 +1443,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) } else if (old_root) { btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); - eb = alloc_dummy_extent_buffer(logical, root->nodesize); + eb = alloc_dummy_extent_buffer(root->fs_info, logical); } else { btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); eb = btrfs_clone_extent_buffer(eb_root); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 619592d86c2a..dc424e32545a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4643,7 +4643,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) struct extent_buffer *new; unsigned long num_pages = num_extent_pages(src->start, src->len); - new = __alloc_extent_buffer(NULL, src->start, src->len); + new = __alloc_extent_buffer(src->fs_info, src->start, src->len); if (new == NULL) return NULL; @@ -4666,13 +4666,26 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) return new; } -struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) +struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, + u64 start) { struct extent_buffer *eb; - unsigned long num_pages = num_extent_pages(0, len); + unsigned long len; + unsigned long num_pages; unsigned long i; - eb = __alloc_extent_buffer(NULL, start, len); + if (!fs_info) { + /* + * Called only from tests that don't always have a fs_info + * available, but we know that nodesize is 4096 + */ + len = 4096; + } else { + len = fs_info->tree_root->nodesize; + } + num_pages = num_extent_pages(0, len); + + eb = __alloc_extent_buffer(fs_info, start, len); if (!eb) return NULL; @@ -4770,7 +4783,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, eb = find_extent_buffer(fs_info, start); if (eb) return eb; - eb = alloc_dummy_extent_buffer(start, len); + eb = alloc_dummy_extent_buffer(fs_info, start); if (!eb) return NULL; eb->fs_info = fs_info; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index ece9ce87edff..e6553e3d35c8 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -263,7 +263,8 @@ void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, unsigned long len); -struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len); +struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, + u64 start); struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); diff --git 
a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c index cc286ce97d1e..f51963a8f929 100644 --- a/fs/btrfs/tests/extent-buffer-tests.c +++ b/fs/btrfs/tests/extent-buffer-tests.c @@ -53,7 +53,7 @@ static int test_btrfs_split_item(void) return -ENOMEM; } - path->nodes[0] = eb = alloc_dummy_extent_buffer(0, 4096); + path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096); if (!eb) { test_msg("Could not allocate dummy buffer\n"); ret = -ENOMEM; diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 3ae0f5b8bb80..a116b55ce788 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -255,7 +255,7 @@ static noinline int test_btrfs_get_extent(void) goto out; } - root->node = alloc_dummy_extent_buffer(0, 4096); + root->node = alloc_dummy_extent_buffer(NULL, 4096); if (!root->node) { test_msg("Couldn't allocate dummy buffer\n"); goto out; @@ -843,7 +843,7 @@ static int test_hole_first(void) goto out; } - root->node = alloc_dummy_extent_buffer(0, 4096); + root->node = alloc_dummy_extent_buffer(NULL, 4096); if (!root->node) { test_msg("Couldn't allocate dummy buffer\n"); goto out; diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index ec3dcb202357..7336b1c09cd8 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -404,6 +404,16 @@ int btrfs_test_qgroups(void) ret = -ENOMEM; goto out; } + /* We are using this root as our extent root */ + root->fs_info->extent_root = root; + + /* + * Some of the paths we test assume we have a filled out fs_info, so we + * just need to add the root in there so we don't panic. + */ + root->fs_info->tree_root = root; + root->fs_info->quota_root = root; + root->fs_info->quota_enabled = 1; /* * Can't use bytenr 0, some things freak out @@ -448,17 +458,6 @@ int btrfs_test_qgroups(void) goto out; } - /* We are using this root as our extent root */ - root->fs_info->extent_root = root; - - /* - * Some of the paths we test assume we have a filled out fs_info, so we - * just need to addt he root in there so we don't panic. - */ - root->fs_info->tree_root = root; - root->fs_info->quota_root = root; - root->fs_info->quota_enabled = 1; - test_msg("Running qgroup tests\n"); ret = test_no_shared_qgroup(root); if (ret) -- cgit v1.2.3 From ce3e69847e3ec79a38421bfd3d6f554d5e481231 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 03:00:04 +0200 Subject: btrfs: sink parameter len to alloc_extent_buffer Because we're using globally known nodesize. Do the same for the sanity test function variant. 
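As with the dummy variant, the length is taken from the globally known
nodesize inside the helper:

	struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
						  u64 start)
	{
		unsigned long len = fs_info->tree_root->nodesize;
		...

so btrfs_find_create_tree_block() reduces to plain
alloc_extent_buffer(root->fs_info, bytenr) and
alloc_test_extent_buffer(root->fs_info, bytenr) calls.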
Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 5 ++--- fs/btrfs/extent_io.c | 5 +++-- fs/btrfs/extent_io.h | 4 ++-- fs/btrfs/tests/qgroup-tests.c | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 548cb540e516..9c204533fd22 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1128,9 +1128,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, u64 bytenr) { if (btrfs_test_is_dummy_root(root)) - return alloc_test_extent_buffer(root->fs_info, bytenr, - root->nodesize); - return alloc_extent_buffer(root->fs_info, bytenr, root->nodesize); + return alloc_test_extent_buffer(root->fs_info, bytenr); + return alloc_extent_buffer(root->fs_info, bytenr); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index dc424e32545a..c4ca90ab687e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4775,7 +4775,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len) + u64 start) { struct extent_buffer *eb, *exists = NULL; int ret; @@ -4821,8 +4821,9 @@ free_eb: #endif struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len) + u64 start) { + unsigned long len = fs_info->tree_root->nodesize; unsigned long num_pages = num_extent_pages(start, len); unsigned long i; unsigned long index = start >> PAGE_CACHE_SHIFT; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e6553e3d35c8..71268e508b7a 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -262,7 +262,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len); + u64 start); struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); @@ -378,5 +378,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode, u64 *end, u64 max_bytes); #endif struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len); + u64 start); #endif diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 7336b1c09cd8..73f299ebdabb 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -419,7 +419,7 @@ int btrfs_test_qgroups(void) * Can't use bytenr 0, some things freak out * *cough*backref walking code*cough* */ - root->node = alloc_test_extent_buffer(root->fs_info, 4096, 4096); + root->node = alloc_test_extent_buffer(root->fs_info, 4096); if (!root->node) { test_msg("Couldn't allocate dummy buffer\n"); ret = -ENOMEM; -- cgit v1.2.3 From 49c21e1cacd74a8c83407c70ad860c994e606e25 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:42 +0100 Subject: ovl: check whiteout while reading directory Don't make a separate pass for checking whiteouts, since we can do it while reading the upper directory. This will make it easier to handle multiple layers. 
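Whiteouts are character devices, so an entry that the upper directory reports
with d_type == DT_CHR can be checked on the spot while the cache entry is
inserted:

	if (d_type == DT_CHR) {
		/* CAP_DAC_OVERRIDE is raised around the lookup, as before */
		dentry = lookup_one_len(name, rdd->dir, len);
		if (!IS_ERR(dentry)) {
			p->is_whiteout = ovl_is_whiteout(dentry);
			dput(dentry);
		}
	}

This makes the separate ovl_dir_mark_whiteouts() pass over the cached list
unnecessary.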
Signed-off-by: Miklos Szeredi --- fs/overlayfs/readdir.c | 77 ++++++++++++++++++-------------------------------- 1 file changed, 28 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index ab1e3dcbed95..3efa44acf98b 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -40,6 +40,7 @@ struct ovl_readdir_data { struct rb_root root; struct list_head *list; struct list_head middle; + struct dentry *dir; int count; int err; }; @@ -126,6 +127,32 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, if (p == NULL) return -ENOMEM; + if (d_type == DT_CHR) { + struct dentry *dentry; + const struct cred *old_cred; + struct cred *override_cred; + + override_cred = prepare_creds(); + if (!override_cred) { + kfree(p); + return -ENOMEM; + } + + /* + * CAP_DAC_OVERRIDE for lookup + */ + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + old_cred = override_creds(override_cred); + + dentry = lookup_one_len(name, rdd->dir, len); + if (!IS_ERR(dentry)) { + p->is_whiteout = ovl_is_whiteout(dentry); + dput(dentry); + } + revert_creds(old_cred); + put_cred(override_cred); + } + list_add_tail(&p->l_node, rdd->list); rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, &rdd->root); @@ -231,49 +258,6 @@ static void ovl_dir_reset(struct file *file) od->is_real = false; } -static int ovl_dir_mark_whiteouts(struct dentry *dir, - struct ovl_readdir_data *rdd) -{ - struct ovl_cache_entry *p; - struct dentry *dentry; - const struct cred *old_cred; - struct cred *override_cred; - - override_cred = prepare_creds(); - if (!override_cred) { - ovl_cache_free(rdd->list); - return -ENOMEM; - } - - /* - * CAP_DAC_OVERRIDE for lookup - */ - cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); - old_cred = override_creds(override_cred); - - mutex_lock(&dir->d_inode->i_mutex); - list_for_each_entry(p, rdd->list, l_node) { - if (p->is_cursor) - continue; - - if (p->type != DT_CHR) - continue; - - dentry = lookup_one_len(p->name, dir, p->len); - if (IS_ERR(dentry)) - continue; - - p->is_whiteout = ovl_is_whiteout(dentry); - dput(dentry); - } - mutex_unlock(&dir->d_inode->i_mutex); - - revert_creds(old_cred); - put_cred(override_cred); - - return 0; -} - static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) { int err; @@ -290,15 +274,10 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) ovl_path_upper(dentry, &upperpath); if (upperpath.dentry) { + rdd.dir = upperpath.dentry; err = ovl_dir_read(&upperpath, &rdd); if (err) goto out; - - if (lowerpath.dentry) { - err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd); - if (err) - goto out; - } } if (lowerpath.dentry) { /* -- cgit v1.2.3 From 1afaba1ecb5299cdd0f69b5bad98b0185fe71e79 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:42 +0100 Subject: ovl: make path-type a bitmap OVL_PATH_PURE_UPPER -> __OVL_PATH_UPPER | __OVL_PATH_PURE OVL_PATH_UPPER -> __OVL_PATH_UPPER OVL_PATH_MERGE -> __OVL_PATH_UPPER | __OVL_PATH_MERGE OVL_PATH_LOWER -> 0 Multiple R/O layers will allow __OVL_PATH_MERGE without __OVL_PATH_UPPER. 
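The type is now a combination of independent flags plus accessor macros, so
callers test for properties instead of comparing against a single enumerated
value:

	enum ovl_path_type {
		__OVL_PATH_PURE  = (1 << 0),
		__OVL_PATH_UPPER = (1 << 1),
		__OVL_PATH_MERGE = (1 << 2),
	};

	#define OVL_TYPE_UPPER(type)		((type) & __OVL_PATH_UPPER)
	#define OVL_TYPE_MERGE(type)		((type) & __OVL_PATH_MERGE)
	#define OVL_TYPE_PURE_UPPER(type)	((type) & __OVL_PATH_PURE)
	#define OVL_TYPE_MERGE_OR_LOWER(type) \
		(OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))

Checks like "type == OVL_PATH_LOWER" become "!OVL_TYPE_UPPER(type)" and
"type == OVL_PATH_MERGE" becomes "OVL_TYPE_MERGE(type)".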
Signed-off-by: Miklos Szeredi --- fs/overlayfs/copy_up.c | 4 ++-- fs/overlayfs/dir.c | 22 +++++++++++----------- fs/overlayfs/inode.c | 9 ++++++--- fs/overlayfs/overlayfs.h | 13 +++++++++---- fs/overlayfs/readdir.c | 10 +++++----- fs/overlayfs/super.c | 19 ++++++++----------- 6 files changed, 41 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index ea10a8719107..a5bfd60f4f6f 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -385,7 +385,7 @@ int ovl_copy_up(struct dentry *dentry) struct kstat stat; enum ovl_path_type type = ovl_path_type(dentry); - if (type != OVL_PATH_LOWER) + if (OVL_TYPE_UPPER(type)) break; next = dget(dentry); @@ -394,7 +394,7 @@ int ovl_copy_up(struct dentry *dentry) parent = dget_parent(next); type = ovl_path_type(parent); - if (type != OVL_PATH_LOWER) + if (OVL_TYPE_UPPER(type)) break; dput(next); diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 8ffc4b980f1b..ab50bd111feb 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -152,7 +152,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, * correct link count. nlink=1 seems to pacify 'find' and * other utilities. */ - if (type == OVL_PATH_MERGE) + if (OVL_TYPE_MERGE(type)) stat->nlink = 1; return 0; @@ -630,7 +630,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) goto out_drop_write; type = ovl_path_type(dentry); - if (type == OVL_PATH_PURE_UPPER) { + if (OVL_TYPE_PURE_UPPER(type)) { err = ovl_remove_upper(dentry, is_dir); } else { const struct cred *old_cred; @@ -712,7 +712,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, /* Don't copy up directory trees */ old_type = ovl_path_type(old); err = -EXDEV; - if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir) + if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir) goto out; if (new->d_inode) { @@ -725,25 +725,25 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, new_type = ovl_path_type(new); err = -EXDEV; - if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) + if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) goto out; err = 0; - if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) { + if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_lower(old)->d_inode == ovl_dentry_lower(new)->d_inode) goto out; } - if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) { + if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_upper(old)->d_inode == ovl_dentry_upper(new)->d_inode) goto out; } } else { if (ovl_dentry_is_opaque(new)) - new_type = OVL_PATH_UPPER; + new_type = __OVL_PATH_UPPER; else - new_type = OVL_PATH_PURE_UPPER; + new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE; } err = ovl_want_write(old); @@ -763,8 +763,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, goto out_drop_write; } - old_opaque = old_type != OVL_PATH_PURE_UPPER; - new_opaque = new_type != OVL_PATH_PURE_UPPER; + old_opaque = !OVL_TYPE_PURE_UPPER(old_type); + new_opaque = !OVL_TYPE_PURE_UPPER(new_type); if (old_opaque || new_opaque) { err = -ENOMEM; @@ -787,7 +787,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, old_cred = override_creds(override_cred); } - if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) { + if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) { opaquedir = ovl_check_empty_and_clear(new); 
err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 07d74b24913b..48492f1240ad 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -238,7 +238,10 @@ out: static bool ovl_need_xattr_filter(struct dentry *dentry, enum ovl_path_type type) { - return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode); + if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER) + return S_ISDIR(dentry->d_inode->i_mode); + else + return false; } ssize_t ovl_getxattr(struct dentry *dentry, const char *name, @@ -299,7 +302,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name) if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) goto out_drop_write; - if (type == OVL_PATH_LOWER) { + if (!OVL_TYPE_UPPER(type)) { err = vfs_getxattr(realpath.dentry, name, NULL, 0); if (err < 0) goto out_drop_write; @@ -321,7 +324,7 @@ out: static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, struct dentry *realdentry) { - if (type != OVL_PATH_LOWER) + if (OVL_TYPE_UPPER(type)) return false; if (special_file(realdentry->d_inode->i_mode)) diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 814bed33dd07..d39eaa8f5e47 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -12,12 +12,17 @@ struct ovl_entry; enum ovl_path_type { - OVL_PATH_PURE_UPPER, - OVL_PATH_UPPER, - OVL_PATH_MERGE, - OVL_PATH_LOWER, + __OVL_PATH_PURE = (1 << 0), + __OVL_PATH_UPPER = (1 << 1), + __OVL_PATH_MERGE = (1 << 2), }; +#define OVL_TYPE_UPPER(type) ((type) & __OVL_PATH_UPPER) +#define OVL_TYPE_MERGE(type) ((type) & __OVL_PATH_MERGE) +#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE) +#define OVL_TYPE_MERGE_OR_LOWER(type) \ + (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type)) + extern const char *ovl_opaque_xattr; static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 3efa44acf98b..481e44873b65 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -253,8 +253,8 @@ static void ovl_dir_reset(struct file *file) ovl_cache_put(od, dentry); od->cache = NULL; } - WARN_ON(!od->is_real && type != OVL_PATH_MERGE); - if (od->is_real && type == OVL_PATH_MERGE) + WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type)); + if (od->is_real && OVL_TYPE_MERGE(type)) od->is_real = false; } @@ -429,7 +429,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, /* * Need to check if we started out being a lower dir, but got copied up */ - if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) { + if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { struct inode *inode = file_inode(file); realfile = lockless_dereference(od->upperfile); @@ -495,8 +495,8 @@ static int ovl_dir_open(struct inode *inode, struct file *file) } INIT_LIST_HEAD(&od->cursor.l_node); od->realfile = realfile; - od->is_real = (type != OVL_PATH_MERGE); - od->is_upper = (type != OVL_PATH_LOWER); + od->is_real = !OVL_TYPE_MERGE(type); + od->is_upper = OVL_TYPE_UPPER(type); od->cursor.is_cursor = true; file->private_data = od; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index f16d318b71f8..821719cc8537 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -64,22 +64,19 @@ const char *ovl_opaque_xattr = "trusted.overlay.opaque"; enum ovl_path_type ovl_path_type(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; + enum ovl_path_type type = 0; if (oe->__upperdentry) { + type = 
__OVL_PATH_UPPER; + if (oe->lowerdentry) { if (S_ISDIR(dentry->d_inode->i_mode)) - return OVL_PATH_MERGE; - else - return OVL_PATH_UPPER; - } else { - if (oe->opaque) - return OVL_PATH_UPPER; - else - return OVL_PATH_PURE_UPPER; + type |= __OVL_PATH_MERGE; + } else if (!oe->opaque) { + type |= __OVL_PATH_PURE; } - } else { - return OVL_PATH_LOWER; } + return type; } static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) @@ -101,7 +98,7 @@ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) enum ovl_path_type type = ovl_path_type(dentry); - if (type == OVL_PATH_LOWER) + if (!OVL_TYPE_UPPER(type)) ovl_path_lower(dentry, path); else ovl_path_upper(dentry, path); -- cgit v1.2.3 From 263b4a0fee43f1239c4d6f3c3a62fb5a20d84f2e Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:43 +0100 Subject: ovl: dont replace opaque dir When removing an empty opaque directory, then it makes no sense to replace it with an exact replica of itself before removal. Signed-off-by: Miklos Szeredi --- fs/overlayfs/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index ab50bd111feb..dcae3ac5aa76 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -506,7 +506,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) struct dentry *opaquedir = NULL; int err; - if (is_dir) { + if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { opaquedir = ovl_check_empty_and_clear(dentry); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) -- cgit v1.2.3 From dd662667e6d3e55b42798a6e6e7f37dddc639460 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:43 +0100 Subject: ovl: add mutli-layer infrastructure Add multiple lower layers to 'struct ovl_fs' and 'struct ovl_entry'. ovl_entry will have an array of paths, instead of just the dentry. This allows a compact array containing just the layers which exist at current point in the tree (which is expected to be a small number for the majority of dentries). The number of layers is not limited by this infrastructure. Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 98 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 821719cc8537..460d866b97a2 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -35,7 +35,8 @@ struct ovl_config { /* private information held for overlayfs's superblock */ struct ovl_fs { struct vfsmount *upper_mnt; - struct vfsmount *lower_mnt; + unsigned numlower; + struct vfsmount **lower_mnt; struct dentry *workdir; long lower_namelen; /* pathnames of lower and upper dirs, for show_options */ @@ -47,7 +48,6 @@ struct ovl_dir_cache; /* private information held for every overlayfs dentry */ struct ovl_entry { struct dentry *__upperdentry; - struct dentry *lowerdentry; struct ovl_dir_cache *cache; union { struct { @@ -56,10 +56,16 @@ struct ovl_entry { }; struct rcu_head rcu; }; + unsigned numlower; + struct path lowerstack[]; }; const char *ovl_opaque_xattr = "trusted.overlay.opaque"; +static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe) +{ + return oe->numlower ? 
oe->lowerstack[0].dentry : NULL; +} enum ovl_path_type ovl_path_type(struct dentry *dentry) { @@ -69,7 +75,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry) if (oe->__upperdentry) { type = __OVL_PATH_UPPER; - if (oe->lowerdentry) { + if (oe->numlower) { if (S_ISDIR(dentry->d_inode->i_mode)) type |= __OVL_PATH_MERGE; } else if (!oe->opaque) { @@ -117,7 +123,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; - return oe->lowerdentry; + return __ovl_dentry_lower(oe); } struct dentry *ovl_dentry_real(struct dentry *dentry) @@ -127,7 +133,7 @@ struct dentry *ovl_dentry_real(struct dentry *dentry) realdentry = ovl_upperdentry_dereference(oe); if (!realdentry) - realdentry = oe->lowerdentry; + realdentry = __ovl_dentry_lower(oe); return realdentry; } @@ -140,7 +146,7 @@ struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper) if (realdentry) { *is_upper = true; } else { - realdentry = oe->lowerdentry; + realdentry = __ovl_dentry_lower(oe); *is_upper = false; } return realdentry; @@ -162,11 +168,9 @@ void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache) void ovl_path_lower(struct dentry *dentry, struct path *path) { - struct ovl_fs *ofs = dentry->d_sb->s_fs_info; struct ovl_entry *oe = dentry->d_fsdata; - path->mnt = ofs->lower_mnt; - path->dentry = oe->lowerdentry; + *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL }; } int ovl_want_write(struct dentry *dentry) @@ -258,8 +262,11 @@ static void ovl_dentry_release(struct dentry *dentry) struct ovl_entry *oe = dentry->d_fsdata; if (oe) { + unsigned int i; + dput(oe->__upperdentry); - dput(oe->lowerdentry); + for (i = 0; i < oe->numlower; i++) + dput(oe->lowerstack[i].dentry); kfree_rcu(oe, rcu); } } @@ -268,9 +275,15 @@ static const struct dentry_operations ovl_dentry_operations = { .d_release = ovl_dentry_release, }; -static struct ovl_entry *ovl_alloc_entry(void) +static struct ovl_entry *ovl_alloc_entry(unsigned int numlower) { - return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); + size_t size = offsetof(struct ovl_entry, lowerstack[numlower]); + struct ovl_entry *oe = kzalloc(size, GFP_KERNEL); + + if (oe) + oe->numlower = numlower; + + return oe; } static inline struct dentry *ovl_lookup_real(struct dentry *dir, @@ -297,19 +310,19 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, { struct ovl_entry *oe; struct dentry *upperdir; - struct dentry *lowerdir; + struct path lowerdir; struct dentry *upperdentry = NULL; struct dentry *lowerdentry = NULL; struct inode *inode = NULL; int err; err = -ENOMEM; - oe = ovl_alloc_entry(); + oe = ovl_alloc_entry(1); if (!oe) goto out; upperdir = ovl_dentry_upper(dentry->d_parent); - lowerdir = ovl_dentry_lower(dentry->d_parent); + ovl_path_lower(dentry->d_parent, &lowerdir); if (upperdir) { upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); @@ -317,7 +330,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, if (IS_ERR(upperdentry)) goto out_put_dir; - if (lowerdir && upperdentry) { + if (lowerdir.dentry && upperdentry) { if (ovl_is_whiteout(upperdentry)) { dput(upperdentry); upperdentry = NULL; @@ -327,8 +340,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, } } } - if (lowerdir && !oe->opaque) { - lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); + if (lowerdir.dentry && !oe->opaque) { + lowerdentry = ovl_lookup_real(lowerdir.dentry, &dentry->d_name); err = PTR_ERR(lowerdentry); if (IS_ERR(lowerdentry)) goto 
out_dput_upper; @@ -355,8 +368,12 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, } oe->__upperdentry = upperdentry; - oe->lowerdentry = lowerdentry; - + if (lowerdentry) { + oe->lowerstack[0].dentry = lowerdentry; + oe->lowerstack[0].mnt = lowerdir.mnt; + } else { + oe->numlower = 0; + } dentry->d_fsdata = oe; d_add(dentry, inode); @@ -380,10 +397,12 @@ struct file *ovl_path_open(struct path *path, int flags) static void ovl_put_super(struct super_block *sb) { struct ovl_fs *ufs = sb->s_fs_info; + unsigned i; dput(ufs->workdir); mntput(ufs->upper_mnt); - mntput(ufs->lower_mnt); + for (i = 0; i < ufs->numlower; i++) + mntput(ufs->lower_mnt[i]); kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); @@ -641,6 +660,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) struct ovl_entry *oe; struct ovl_fs *ufs; struct kstatfs statfs; + struct vfsmount *mnt; + unsigned int i; int err; err = -ENOMEM; @@ -661,7 +682,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } err = -ENOMEM; - oe = ovl_alloc_entry(); + oe = ovl_alloc_entry(1); if (oe == NULL) goto out_free_config; @@ -727,12 +748,24 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_put_workpath; } - ufs->lower_mnt = clone_private_mount(&lowerpath); - err = PTR_ERR(ufs->lower_mnt); - if (IS_ERR(ufs->lower_mnt)) { - pr_err("overlayfs: failed to clone lowerpath\n"); + ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); + if (ufs->lower_mnt == NULL) goto out_put_upper_mnt; + + mnt = clone_private_mount(&lowerpath); + err = PTR_ERR(mnt); + if (IS_ERR(mnt)) { + pr_err("overlayfs: failed to clone lowerpath\n"); + goto out_put_lower_mnt; } + /* + * Make lower_mnt R/O. That way fchmod/fchown on lower file + * will fail instead of modifying lower fs. + */ + mnt->mnt_flags |= MNT_READONLY; + + ufs->lower_mnt[0] = mnt; + ufs->numlower = 1; ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); err = PTR_ERR(ufs->workdir); @@ -742,12 +775,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_put_lower_mnt; } - /* - * Make lower_mnt R/O. That way fchmod/fchown on lower file - * will fail instead of modifying lower fs. - */ - ufs->lower_mnt->mnt_flags |= MNT_READONLY; - /* If the upper fs is r/o, we mark overlayfs r/o too */ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) sb->s_flags |= MS_RDONLY; @@ -768,7 +795,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) path_put(&workpath); oe->__upperdentry = upperpath.dentry; - oe->lowerdentry = lowerpath.dentry; + oe->lowerstack[0].dentry = lowerpath.dentry; + oe->lowerstack[0].mnt = ufs->lower_mnt[0]; root_dentry->d_fsdata = oe; @@ -782,7 +810,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) out_put_workdir: dput(ufs->workdir); out_put_lower_mnt: - mntput(ufs->lower_mnt); + for (i = 0; i < ufs->numlower; i++) + mntput(ufs->lower_mnt[i]); + kfree(ufs->lower_mnt); out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_workpath: -- cgit v1.2.3 From 5ef88da56a77bfb3b9631f5e5775f3bff86b6219 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:43 +0100 Subject: ovl: helper to iterate layers Add helper to iterate through all the layers, starting from the upper layer (if exists) and continuing down through the lower layers. 
Signed-off-by: Miklos Szeredi --- fs/overlayfs/overlayfs.h | 1 + fs/overlayfs/super.c | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'fs') diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index d39eaa8f5e47..d176b679f526 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -135,6 +135,7 @@ void ovl_dentry_version_inc(struct dentry *dentry); void ovl_path_upper(struct dentry *dentry, struct path *path); void ovl_path_lower(struct dentry *dentry, struct path *path); enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); +int ovl_path_next(int idx, struct dentry *dentry, struct path *path); struct dentry *ovl_dentry_upper(struct dentry *dentry); struct dentry *ovl_dentry_lower(struct dentry *dentry); struct dentry *ovl_dentry_real(struct dentry *dentry); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 460d866b97a2..07e4c576e93e 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -305,6 +305,27 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir, return dentry; } +/* + * Returns next layer in stack starting from top. + * Returns -1 if this is the last layer. + */ +int ovl_path_next(int idx, struct dentry *dentry, struct path *path) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + BUG_ON(idx < 0); + if (idx == 0) { + ovl_path_upper(dentry, path); + if (path->dentry) + return oe->numlower ? 1 : -1; + idx++; + } + BUG_ON(idx > oe->numlower); + *path = oe->lowerstack[idx - 1]; + + return (idx < oe->numlower) ? idx + 1 : -1; +} + struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { -- cgit v1.2.3 From 9d7459d834c28f55c82f1737f638a6c90e0c0e0f Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:44 +0100 Subject: ovl: multi-layer readdir If multiple lower layers exist, merge them as well in readdir according to the same rules as merging upper with lower. I.e. take whiteouts and opaque directories into account on all but the lowers layer. 
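For instance, with a whiteout sitting in a middle layer:

	upper:   a
	lower1:  c   <whiteout for b>
	lower2:  b   d

the merged listing is "a c d": the whiteout in lower1 hides "b" from lower2
exactly as an upper-layer whiteout would, while "d" still shows through from
the lowest layer, which is the only layer exempt from the whiteout and opaque
checks.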
Signed-off-by: Miklos Szeredi --- fs/overlayfs/readdir.c | 43 +++++++++++++++++++++---------------------- fs/overlayfs/super.c | 3 +++ 2 files changed, 24 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 481e44873b65..dfef6ca53dfe 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -261,35 +261,34 @@ static void ovl_dir_reset(struct file *file) static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) { int err; - struct path lowerpath; - struct path upperpath; + struct path realpath; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_merge, .list = list, .root = RB_ROOT, .is_merge = false, }; - - ovl_path_lower(dentry, &lowerpath); - ovl_path_upper(dentry, &upperpath); - - if (upperpath.dentry) { - rdd.dir = upperpath.dentry; - err = ovl_dir_read(&upperpath, &rdd); - if (err) - goto out; - } - if (lowerpath.dentry) { - /* - * Insert lowerpath entries before upperpath ones, this allows - * offsets to be reasonably constant - */ - list_add(&rdd.middle, rdd.list); - rdd.is_merge = true; - err = ovl_dir_read(&lowerpath, &rdd); - list_del(&rdd.middle); + int idx, next; + + for (idx = 0; idx != -1; idx = next) { + next = ovl_path_next(idx, dentry, &realpath); + + if (next != -1) { + rdd.dir = realpath.dentry; + err = ovl_dir_read(&realpath, &rdd); + if (err) + break; + } else { + /* + * Insert lowest layer entries before upper ones, this + * allows offsets to be reasonably constant + */ + list_add(&rdd.middle, rdd.list); + rdd.is_merge = true; + err = ovl_dir_read(&realpath, &rdd); + list_del(&rdd.middle); + } } -out: return err; } diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 07e4c576e93e..c245043aa1b9 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -81,6 +81,9 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry) } else if (!oe->opaque) { type |= __OVL_PATH_PURE; } + } else { + if (oe->numlower > 1) + type |= __OVL_PATH_MERGE; } return type; } -- cgit v1.2.3 From 3d3c6b89399a1b5e8a59ffbb8cb2a7797a9ef154 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:44 +0100 Subject: ovl: multi-layer lookup Look up dentry in all relevant layers. 
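The lookup walks the parent's lower stack from top to bottom and collects one
struct path per layer until a layer terminates the merge. In outline
(reference counting, error handling and the directory/non-directory mismatch
case omitted):

	for (i = 0; !upperopaque && i < poe->numlower; i++) {
		bool opaque = false;

		this = ovl_lookup_real(poe->lowerstack[i].dentry, &dentry->d_name);
		if (!this)
			continue;		/* not present in this layer */
		if (i < poe->numlower - 1) {	/* not the lowest layer */
			if (ovl_is_whiteout(this))
				break;		/* hides all layers below */
			if (ovl_is_opaquedir(this))
				opaque = true;
		}
		stack[ctr].dentry = this;
		stack[ctr].mnt = poe->lowerstack[i].mnt;
		ctr++;
		if (opaque || !S_ISDIR(this->d_inode->i_mode))
			break;			/* nothing below can contribute */
	}

The resulting ctr paths are copied into the new ovl_entry's lowerstack[].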
Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 143 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 94 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index c245043aa1b9..f72b82fdc1e6 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -333,82 +333,127 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct ovl_entry *oe; - struct dentry *upperdir; - struct path lowerdir; - struct dentry *upperdentry = NULL; - struct dentry *lowerdentry = NULL; + struct ovl_entry *poe = dentry->d_parent->d_fsdata; + struct path *stack = NULL; + struct dentry *upperdir, *upperdentry = NULL; + unsigned int ctr = 0; struct inode *inode = NULL; + bool upperopaque = false; + struct dentry *this, *prev = NULL; + unsigned int i; int err; - err = -ENOMEM; - oe = ovl_alloc_entry(1); - if (!oe) - goto out; - - upperdir = ovl_dentry_upper(dentry->d_parent); - ovl_path_lower(dentry->d_parent, &lowerdir); - + upperdir = ovl_upperdentry_dereference(poe); if (upperdir) { - upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); - err = PTR_ERR(upperdentry); - if (IS_ERR(upperdentry)) - goto out_put_dir; - - if (lowerdir.dentry && upperdentry) { - if (ovl_is_whiteout(upperdentry)) { - dput(upperdentry); - upperdentry = NULL; - oe->opaque = true; - } else if (ovl_is_opaquedir(upperdentry)) { - oe->opaque = true; + this = ovl_lookup_real(upperdir, &dentry->d_name); + err = PTR_ERR(this); + if (IS_ERR(this)) + goto out; + + /* + * If this is not the lowermost layer, check whiteout and opaque + * directory. + */ + if (poe->numlower && this) { + if (ovl_is_whiteout(this)) { + dput(this); + this = NULL; + upperopaque = true; + } else if (ovl_is_opaquedir(this)) { + upperopaque = true; } } + upperdentry = prev = this; } - if (lowerdir.dentry && !oe->opaque) { - lowerdentry = ovl_lookup_real(lowerdir.dentry, &dentry->d_name); - err = PTR_ERR(lowerdentry); - if (IS_ERR(lowerdentry)) - goto out_dput_upper; + + if (!upperopaque && poe->numlower) { + err = -ENOMEM; + stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL); + if (!stack) + goto out_put_upper; } - if (lowerdentry && upperdentry && - (!S_ISDIR(upperdentry->d_inode->i_mode) || - !S_ISDIR(lowerdentry->d_inode->i_mode))) { - dput(lowerdentry); - lowerdentry = NULL; - oe->opaque = true; + for (i = 0; !upperopaque && i < poe->numlower; i++) { + bool opaque = false; + struct path lowerpath = poe->lowerstack[i]; + + opaque = false; + this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name); + err = PTR_ERR(this); + if (IS_ERR(this)) + goto out_put; + if (!this) + continue; + + /* + * If this is not the lowermost layer, check whiteout and opaque + * directory. + */ + if (i < poe->numlower - 1) { + if (ovl_is_whiteout(this)) { + dput(this); + break; + } else if (ovl_is_opaquedir(this)) { + opaque = true; + } + } + /* + * If this is a non-directory then stop here. + * + * FIXME: check for opaqueness maybe better done in remove code. 
+ */ + if (!S_ISDIR(this->d_inode->i_mode)) { + opaque = true; + } else if (prev && (!S_ISDIR(prev->d_inode->i_mode) || + !S_ISDIR(this->d_inode->i_mode))) { + if (prev == upperdentry) + upperopaque = true; + dput(this); + break; + } + stack[ctr].dentry = this; + stack[ctr].mnt = lowerpath.mnt; + ctr++; + prev = this; + if (opaque) + break; } - if (lowerdentry || upperdentry) { + oe = ovl_alloc_entry(ctr); + err = -ENOMEM; + if (!oe) + goto out_put; + + if (upperdentry || ctr) { struct dentry *realdentry; - realdentry = upperdentry ? upperdentry : lowerdentry; + realdentry = upperdentry ? upperdentry : stack[0].dentry; + err = -ENOMEM; inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, oe); if (!inode) - goto out_dput; + goto out_free_oe; ovl_copyattr(realdentry->d_inode, inode); } + oe->opaque = upperopaque; oe->__upperdentry = upperdentry; - if (lowerdentry) { - oe->lowerstack[0].dentry = lowerdentry; - oe->lowerstack[0].mnt = lowerdir.mnt; - } else { - oe->numlower = 0; - } + memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr); + kfree(stack); dentry->d_fsdata = oe; d_add(dentry, inode); return NULL; -out_dput: - dput(lowerdentry); -out_dput_upper: - dput(upperdentry); -out_put_dir: +out_free_oe: kfree(oe); +out_put: + for (i = 0; i < ctr; i++) + dput(stack[i].dentry); + kfree(stack); +out_put_upper: + dput(upperdentry); out: return ERR_PTR(err); } -- cgit v1.2.3 From 3e01cee3b980f96463cb6f378ab05303a99903d9 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:45 +0100 Subject: ovl: check whiteout on lowest layer as well Not checking whiteouts on lowest layer was an optimization (there's nothing to white out there), but it could result in inconsitent behavior when a layer previously used as upper/middle is later used as lowest. 
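A concrete case: a tree that once served as an upper or middle layer may still
contain whiteout device nodes. If that tree is later reused as the lowest
layer, those leftovers would previously have shown up in a merged directory
listing as ordinary character devices instead of being treated as whiteouts.
With the check applied unconditionally, ovl_dir_read() now records the
directory for every layer it reads:

	rdd->dir = realpath->dentry;	/* whiteout check runs for each layer */

and ovl_lookup() drops the "i < poe->numlower - 1" guard around its whiteout
test, keeping it only for the opaque-directory check.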
Signed-off-by: Miklos Szeredi --- fs/overlayfs/readdir.c | 79 +++++++++++++++++++++++++------------------------- fs/overlayfs/super.c | 27 +++++++---------- 2 files changed, 50 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index dfef6ca53dfe..9df848f2e622 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -80,23 +80,50 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, return NULL; } -static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len, +static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir, + const char *name, int len, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; size_t size = offsetof(struct ovl_cache_entry, name[len + 1]); p = kmalloc(size, GFP_KERNEL); - if (p) { - memcpy(p->name, name, len); - p->name[len] = '\0'; - p->len = len; - p->type = d_type; - p->ino = ino; - p->is_whiteout = false; - p->is_cursor = false; - } + if (!p) + return NULL; + + memcpy(p->name, name, len); + p->name[len] = '\0'; + p->len = len; + p->type = d_type; + p->ino = ino; + p->is_whiteout = false; + p->is_cursor = false; + + if (d_type == DT_CHR) { + struct dentry *dentry; + const struct cred *old_cred; + struct cred *override_cred; + + override_cred = prepare_creds(); + if (!override_cred) { + kfree(p); + return NULL; + } + /* + * CAP_DAC_OVERRIDE for lookup + */ + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + old_cred = override_creds(override_cred); + + dentry = lookup_one_len(name, dir, len); + if (!IS_ERR(dentry)) { + p->is_whiteout = ovl_is_whiteout(dentry); + dput(dentry); + } + revert_creds(old_cred); + put_cred(override_cred); + } return p; } @@ -123,36 +150,10 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, return 0; } - p = ovl_cache_entry_new(name, len, ino, d_type); + p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type); if (p == NULL) return -ENOMEM; - if (d_type == DT_CHR) { - struct dentry *dentry; - const struct cred *old_cred; - struct cred *override_cred; - - override_cred = prepare_creds(); - if (!override_cred) { - kfree(p); - return -ENOMEM; - } - - /* - * CAP_DAC_OVERRIDE for lookup - */ - cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); - old_cred = override_creds(override_cred); - - dentry = lookup_one_len(name, rdd->dir, len); - if (!IS_ERR(dentry)) { - p->is_whiteout = ovl_is_whiteout(dentry); - dput(dentry); - } - revert_creds(old_cred); - put_cred(override_cred); - } - list_add_tail(&p->l_node, rdd->list); rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, &rdd->root); @@ -170,7 +171,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd, if (p) { list_move_tail(&p->l_node, &rdd->middle); } else { - p = ovl_cache_entry_new(name, namelen, ino, d_type); + p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type); if (p == NULL) rdd->err = -ENOMEM; else @@ -229,6 +230,7 @@ static inline int ovl_dir_read(struct path *realpath, if (IS_ERR(realfile)) return PTR_ERR(realfile); + rdd->dir = realpath->dentry; rdd->ctx.pos = 0; do { rdd->count = 0; @@ -274,7 +276,6 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) next = ovl_path_next(idx, dentry, &realpath); if (next != -1) { - rdd.dir = realpath.dentry; err = ovl_dir_read(&realpath, &rdd); if (err) break; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index f72b82fdc1e6..5dbc6789fd5f 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -350,16 +350,12 @@ 
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, if (IS_ERR(this)) goto out; - /* - * If this is not the lowermost layer, check whiteout and opaque - * directory. - */ - if (poe->numlower && this) { + if (this) { if (ovl_is_whiteout(this)) { dput(this); this = NULL; upperopaque = true; - } else if (ovl_is_opaquedir(this)) { + } else if (poe->numlower && ovl_is_opaquedir(this)) { upperopaque = true; } } @@ -384,19 +380,16 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, goto out_put; if (!this) continue; - + if (ovl_is_whiteout(this)) { + dput(this); + break; + } /* - * If this is not the lowermost layer, check whiteout and opaque - * directory. + * Only makes sense to check opaque dir if this is not the + * lowermost layer. */ - if (i < poe->numlower - 1) { - if (ovl_is_whiteout(this)) { - dput(this); - break; - } else if (ovl_is_opaquedir(this)) { - opaque = true; - } - } + if (i < poe->numlower - 1 && ovl_is_opaquedir(this)) + opaque = true; /* * If this is a non-directory then stop here. * -- cgit v1.2.3 From 09e10322b71716adf567d453889ef0871cf226b9 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:45 +0100 Subject: ovl: lookup ENAMETOOLONG on lower means ENOENT "Suppose you have in one of the lower layers a filesystem with ->lookup()-enforced upper limit on name length. Pretty much every local fs has one, but... they are not all equal. 255 characters is the common upper limit, but e.g. jffs2 stops at 254, minixfs upper limit is somewhere from 14 to 60, depending upon version, etc. You are doing a lookup for something that is present in upper layer, but happens to be too long for one of the lower layers. Too bad - ENAMETOOLONG for you..." Reported-by: Al Viro Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 5dbc6789fd5f..110c968dcb3b 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -376,8 +376,14 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, opaque = false; this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name); err = PTR_ERR(this); - if (IS_ERR(this)) + if (IS_ERR(this)) { + /* + * If it's positive, then treat ENAMETOOLONG as ENOENT. + */ + if (err == -ENAMETOOLONG && (upperdentry || ctr)) + continue; goto out_put; + } if (!this) continue; if (ovl_is_whiteout(this)) { -- cgit v1.2.3 From 4ebc581828d5d0fe189ca06cef8b7a63cb4583d5 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:46 +0100 Subject: ovl: allow statfs if no upper layer Handle "no upper layer" case in statfs. Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 110c968dcb3b..cc7a0f3aa0dd 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -484,7 +484,7 @@ static void ovl_put_super(struct super_block *sb) * @buf: The struct kstatfs to fill in with stats * * Get the filesystem statistics. As writes always target the upper layer - * filesystem pass the statfs to the same filesystem. 
+ * filesystem pass the statfs to the upper filesystem (if it exists) */ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) { @@ -493,7 +493,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) struct path path; int err; - ovl_path_upper(root_dentry, &path); + ovl_path_real(root_dentry, &path); err = vfs_statfs(&path, buf); if (!err) { -- cgit v1.2.3 From 3b7a9a249a93e68b7bb318de40e64d3b68ba1a6d Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:48 +0100 Subject: ovl: mount: change order of initialization Move allocation of root entry above to where it's needed. Move initializations related to upperdir and workdir near each other. Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 70 ++++++++++++++++++++++++---------------------------- 1 file changed, 32 insertions(+), 38 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index cc7a0f3aa0dd..a17702833dd0 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -723,7 +723,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) struct path lowerpath; struct path upperpath; struct path workpath; - struct inode *root_inode; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; @@ -749,54 +748,49 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_free_config; } - err = -ENOMEM; - oe = ovl_alloc_entry(1); - if (oe == NULL) - goto out_free_config; - err = ovl_mount_dir(ufs->config.upperdir, &upperpath); if (err) - goto out_free_oe; + goto out_free_config; - err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); + err = ovl_mount_dir(ufs->config.workdir, &workpath); if (err) goto out_put_upperpath; - err = ovl_mount_dir(ufs->config.workdir, &workpath); + err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); if (err) - goto out_put_lowerpath; + goto out_put_workpath; err = -EINVAL; if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || !S_ISDIR(workpath.dentry->d_inode->i_mode)) { pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (!ovl_is_allowed_fs_type(upperpath.dentry)) { pr_err("overlayfs: filesystem of upperdir is not supported\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { pr_err("overlayfs: filesystem of lowerdir is not supported\n"); - goto out_put_workpath; + goto out_put_lowerpath; } err = vfs_statfs(&lowerpath, &statfs); if (err) { pr_err("overlayfs: statfs failed on lowerpath\n"); - goto out_put_workpath; + goto out_put_lowerpath; } ufs->lower_namelen = statfs.f_namelen; @@ -806,19 +800,27 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) err = -EINVAL; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("overlayfs: maximum fs stacking depth exceeded\n"); - goto out_put_workpath; + goto out_put_lowerpath; } ufs->upper_mnt = clone_private_mount(&upperpath); err = PTR_ERR(ufs->upper_mnt); if (IS_ERR(ufs->upper_mnt)) { pr_err("overlayfs: failed to clone upperpath\n"); - goto out_put_workpath; + goto 
out_put_lowerpath; + } + + ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); + err = PTR_ERR(ufs->workdir); + if (IS_ERR(ufs->workdir)) { + pr_err("overlayfs: failed to create directory %s/%s\n", + ufs->config.workdir, OVL_WORKDIR_NAME); + goto out_put_upper_mnt; } ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); if (ufs->lower_mnt == NULL) - goto out_put_upper_mnt; + goto out_put_workdir; mnt = clone_private_mount(&lowerpath); err = PTR_ERR(mnt); @@ -835,14 +837,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->lower_mnt[0] = mnt; ufs->numlower = 1; - ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); - err = PTR_ERR(ufs->workdir); - if (IS_ERR(ufs->workdir)) { - pr_err("overlayfs: failed to create directory %s/%s\n", - ufs->config.workdir, OVL_WORKDIR_NAME); - goto out_put_lower_mnt; - } - /* If the upper fs is r/o, we mark overlayfs r/o too */ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) sb->s_flags |= MS_RDONLY; @@ -850,13 +844,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; - root_inode = ovl_new_inode(sb, S_IFDIR, oe); - if (!root_inode) - goto out_put_workdir; + oe = ovl_alloc_entry(1); + if (!oe) + goto out_put_lower_mnt; - root_dentry = d_make_root(root_inode); + root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe)); if (!root_dentry) - goto out_put_workdir; + goto out_free_oe; mntput(upperpath.mnt); mntput(lowerpath.mnt); @@ -875,22 +869,22 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) return 0; -out_put_workdir: - dput(ufs->workdir); +out_free_oe: + kfree(oe); out_put_lower_mnt: for (i = 0; i < ufs->numlower; i++) mntput(ufs->lower_mnt[i]); kfree(ufs->lower_mnt); +out_put_workdir: + dput(ufs->workdir); out_put_upper_mnt: mntput(ufs->upper_mnt); -out_put_workpath: - path_put(&workpath); out_put_lowerpath: path_put(&lowerpath); +out_put_workpath: + path_put(&workpath); out_put_upperpath: path_put(&upperpath); -out_free_oe: - kfree(oe); out_free_config: kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); -- cgit v1.2.3 From ab508822cab4c84f07373cd6ad107a1fd1362831 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:49 +0100 Subject: ovl: improve mount helpers Move common checks into ovl_mount_dir() helper. Create helper for looking up lower directories. 
Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 125 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 73 insertions(+), 52 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index a17702833dd0..592370ff453a 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -669,24 +669,6 @@ static void ovl_unescape(char *s) } } -static int ovl_mount_dir(const char *name, struct path *path) -{ - int err; - char *tmp = kstrdup(name, GFP_KERNEL); - - if (!tmp) - return -ENOMEM; - - ovl_unescape(tmp); - err = kern_path(tmp, LOOKUP_FOLLOW, path); - if (err) { - pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err); - err = -EINVAL; - } - kfree(tmp); - return err; -} - static bool ovl_is_allowed_fs_type(struct dentry *root) { const struct dentry_operations *dop = root->d_op; @@ -706,6 +688,71 @@ static bool ovl_is_allowed_fs_type(struct dentry *root) return true; } +static int ovl_mount_dir_noesc(const char *name, struct path *path) +{ + int err; + + err = kern_path(name, LOOKUP_FOLLOW, path); + if (err) { + pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); + goto out; + } + err = -EINVAL; + if (!ovl_is_allowed_fs_type(path->dentry)) { + pr_err("overlayfs: filesystem on '%s' not supported\n", name); + goto out_put; + } + if (!S_ISDIR(path->dentry->d_inode->i_mode)) { + pr_err("overlayfs: '%s' not a directory\n", name); + goto out_put; + } + return 0; + +out_put: + path_put(path); +out: + return err; +} + +static int ovl_mount_dir(const char *name, struct path *path) +{ + int err = -ENOMEM; + char *tmp = kstrdup(name, GFP_KERNEL); + + if (tmp) { + ovl_unescape(tmp); + err = ovl_mount_dir_noesc(tmp, path); + kfree(tmp); + } + return err; +} + +static int ovl_lower_dir(const char *name, struct path *path, long *namelen, + int *stack_depth) +{ + int err; + struct kstatfs statfs; + + err = ovl_mount_dir(name, path); + if (err) + goto out; + + err = vfs_statfs(path, &statfs); + if (err) { + pr_err("overlayfs: statfs failed on '%s'\n", name); + goto out_put; + } + *namelen = max(*namelen, statfs.f_namelen); + *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); + + return 0; + +out_put: + path_put(path); +out: + return err; +} + /* Workdir should not be subdir of upperdir and vice versa */ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) { @@ -726,7 +773,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; - struct kstatfs statfs; struct vfsmount *mnt; unsigned int i; int err; @@ -756,48 +802,23 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_put_upperpath; - err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); - if (err) - goto out_put_workpath; - - err = -EINVAL; - if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || - !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || - !S_ISDIR(workpath.dentry->d_inode->i_mode)) { - pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); - goto out_put_lowerpath; - } - if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); - goto out_put_lowerpath; + goto out_put_workpath; } if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); - goto out_put_lowerpath; - } - - if (!ovl_is_allowed_fs_type(upperpath.dentry)) { - pr_err("overlayfs: filesystem of upperdir is not 
supported\n"); - goto out_put_lowerpath; - } - - if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { - pr_err("overlayfs: filesystem of lowerdir is not supported\n"); - goto out_put_lowerpath; - } - - err = vfs_statfs(&lowerpath, &statfs); - if (err) { - pr_err("overlayfs: statfs failed on lowerpath\n"); - goto out_put_lowerpath; + goto out_put_workpath; } - ufs->lower_namelen = statfs.f_namelen; + sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; - sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, - lowerpath.mnt->mnt_sb->s_stack_depth) + 1; + err = ovl_lower_dir(ufs->config.lowerdir, &lowerpath, + &ufs->lower_namelen, &sb->s_stack_depth); + if (err) + goto out_put_workpath; err = -EINVAL; + sb->s_stack_depth++; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("overlayfs: maximum fs stacking depth exceeded\n"); goto out_put_lowerpath; -- cgit v1.2.3 From 53a08cb9b8bccfe58f1228c7c27baf34a83da78b Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:51 +0100 Subject: ovl: make upperdir optional Make "upperdir=" mount option optional. If "upperdir=" is not given, then the "workdir=" option is also optional (and ignored if given). Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 83 +++++++++++++++++++++++++++++----------------------- 1 file changed, 47 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 592370ff453a..35bb0adf10cf 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -516,8 +516,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) struct ovl_fs *ufs = sb->s_fs_info; seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); - seq_printf(m, ",upperdir=%s", ufs->config.upperdir); - seq_printf(m, ",workdir=%s", ufs->config.workdir); + if (ufs->config.upperdir) { + seq_printf(m, ",upperdir=%s", ufs->config.upperdir); + seq_printf(m, ",workdir=%s", ufs->config.workdir); + } return 0; } @@ -768,8 +770,8 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) static int ovl_fill_super(struct super_block *sb, void *data, int silent) { struct path lowerpath; - struct path upperpath; - struct path workpath; + struct path upperpath = { NULL, NULL }; + struct path workpath = { NULL, NULL }; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; @@ -786,31 +788,38 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_free_config; - /* FIXME: workdir is not needed for a R/O mount */ err = -EINVAL; - if (!ufs->config.upperdir || !ufs->config.lowerdir || - !ufs->config.workdir) { - pr_err("overlayfs: missing upperdir or lowerdir or workdir\n"); + if (!ufs->config.lowerdir) { + pr_err("overlayfs: missing 'lowerdir'\n"); goto out_free_config; } - err = ovl_mount_dir(ufs->config.upperdir, &upperpath); - if (err) - goto out_free_config; + sb->s_stack_depth = 0; + if (ufs->config.upperdir) { + /* FIXME: workdir is not needed for a R/O mount */ + if (!ufs->config.workdir) { + pr_err("overlayfs: missing 'workdir'\n"); + goto out_free_config; + } - err = ovl_mount_dir(ufs->config.workdir, &workpath); - if (err) - goto out_put_upperpath; + err = ovl_mount_dir(ufs->config.upperdir, &upperpath); + if (err) + goto out_free_config; - if (upperpath.mnt != workpath.mnt) { - pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); - goto out_put_workpath; - } - if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { - pr_err("overlayfs: workdir and upperdir must be 
separate subtrees\n"); - goto out_put_workpath; + err = ovl_mount_dir(ufs->config.workdir, &workpath); + if (err) + goto out_put_upperpath; + + if (upperpath.mnt != workpath.mnt) { + pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); + goto out_put_workpath; + } + if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { + pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); + goto out_put_workpath; + } + sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; } - sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; err = ovl_lower_dir(ufs->config.lowerdir, &lowerpath, &ufs->lower_namelen, &sb->s_stack_depth); @@ -824,19 +833,21 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_put_lowerpath; } - ufs->upper_mnt = clone_private_mount(&upperpath); - err = PTR_ERR(ufs->upper_mnt); - if (IS_ERR(ufs->upper_mnt)) { - pr_err("overlayfs: failed to clone upperpath\n"); - goto out_put_lowerpath; - } + if (ufs->config.upperdir) { + ufs->upper_mnt = clone_private_mount(&upperpath); + err = PTR_ERR(ufs->upper_mnt); + if (IS_ERR(ufs->upper_mnt)) { + pr_err("overlayfs: failed to clone upperpath\n"); + goto out_put_lowerpath; + } - ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); - err = PTR_ERR(ufs->workdir); - if (IS_ERR(ufs->workdir)) { - pr_err("overlayfs: failed to create directory %s/%s\n", - ufs->config.workdir, OVL_WORKDIR_NAME); - goto out_put_upper_mnt; + ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); + err = PTR_ERR(ufs->workdir); + if (IS_ERR(ufs->workdir)) { + pr_err("overlayfs: failed to create directory %s/%s\n", + ufs->config.workdir, OVL_WORKDIR_NAME); + goto out_put_upper_mnt; + } } ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); @@ -858,8 +869,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->lower_mnt[0] = mnt; ufs->numlower = 1; - /* If the upper fs is r/o, we mark overlayfs r/o too */ - if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) + /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ + if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; sb->s_d_op = &ovl_dentry_operations; -- cgit v1.2.3 From a78d9f0d5d5ca9054703376c7c23c901807ddd87 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:52 +0100 Subject: ovl: support multiple lower layers Allow "lowerdir=" option to contain multiple lower directories separated by a colon (e.g. "lowerdir=/bin:/usr/bin"). Colon characters in filenames can be escaped with a backslash. Signed-off-by: Miklos Szeredi --- Documentation/filesystems/overlayfs.txt | 12 ++++ fs/overlayfs/super.c | 110 ++++++++++++++++++++++++-------- 2 files changed, 95 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index a27c950ece61..b37092886dcc 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -159,6 +159,18 @@ overlay filesystem (though an operation on the name of the file such as rename or unlink will of course be noticed and handled). +Multiple lower layers +--------------------- + +Multiple lower layers can now be given using the the colon (":") as a +separator character between the directory names. For example: + + mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged + +As the example shows, "upperdir=" and "workdir=" may be omitted. 
In that case +the overlay will be read-only. + + Non-standard behavior --------------------- diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 35bb0adf10cf..5c495a17a5a3 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -60,6 +60,8 @@ struct ovl_entry { struct path lowerstack[]; }; +#define OVL_MAX_STACK 500 + const char *ovl_opaque_xattr = "trusted.overlay.opaque"; static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe) @@ -692,8 +694,12 @@ static bool ovl_is_allowed_fs_type(struct dentry *root) static int ovl_mount_dir_noesc(const char *name, struct path *path) { - int err; + int err = -EINVAL; + if (!*name) { + pr_err("overlayfs: empty lowerdir\n"); + goto out; + } err = kern_path(name, LOOKUP_FOLLOW, path); if (err) { pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); @@ -735,7 +741,7 @@ static int ovl_lower_dir(const char *name, struct path *path, long *namelen, int err; struct kstatfs statfs; - err = ovl_mount_dir(name, path); + err = ovl_mount_dir_noesc(name, path); if (err) goto out; @@ -767,15 +773,38 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) return ok; } +static unsigned int ovl_split_lowerdirs(char *str) +{ + unsigned int ctr = 1; + char *s, *d; + + for (s = d = str;; s++, d++) { + if (*s == '\\') { + s++; + } else if (*s == ':') { + *d = '\0'; + ctr++; + continue; + } + *d = *s; + if (!*s) + break; + } + return ctr; +} + static int ovl_fill_super(struct super_block *sb, void *data, int silent) { - struct path lowerpath; struct path upperpath = { NULL, NULL }; struct path workpath = { NULL, NULL }; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; - struct vfsmount *mnt; + struct path *stack = NULL; + char *lowertmp; + char *lower; + unsigned int numlower; + unsigned int stacklen = 0; unsigned int i; int err; @@ -820,12 +849,30 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; } - - err = ovl_lower_dir(ufs->config.lowerdir, &lowerpath, - &ufs->lower_namelen, &sb->s_stack_depth); - if (err) + err = -ENOMEM; + lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL); + if (!lowertmp) goto out_put_workpath; + err = -EINVAL; + stacklen = ovl_split_lowerdirs(lowertmp); + if (stacklen > OVL_MAX_STACK) + goto out_free_lowertmp; + + stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); + if (!stack) + goto out_free_lowertmp; + + lower = lowertmp; + for (numlower = 0; numlower < stacklen; numlower++) { + err = ovl_lower_dir(lower, &stack[numlower], + &ufs->lower_namelen, &sb->s_stack_depth); + if (err) + goto out_put_lowerpath; + + lower = strchr(lower, '\0') + 1; + } + err = -EINVAL; sb->s_stack_depth++; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { @@ -850,24 +897,25 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } } - ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); + ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL); if (ufs->lower_mnt == NULL) goto out_put_workdir; + for (i = 0; i < numlower; i++) { + struct vfsmount *mnt = clone_private_mount(&stack[i]); - mnt = clone_private_mount(&lowerpath); - err = PTR_ERR(mnt); - if (IS_ERR(mnt)) { - pr_err("overlayfs: failed to clone lowerpath\n"); - goto out_put_lower_mnt; - } - /* - * Make lower_mnt R/O. That way fchmod/fchown on lower file - * will fail instead of modifying lower fs. 
- */ - mnt->mnt_flags |= MNT_READONLY; + if (IS_ERR(mnt)) { + pr_err("overlayfs: failed to clone lowerpath\n"); + goto out_put_lower_mnt; + } + /* + * Make lower_mnt R/O. That way fchmod/fchown on lower file + * will fail instead of modifying lower fs. + */ + mnt->mnt_flags |= MNT_READONLY; - ufs->lower_mnt[0] = mnt; - ufs->numlower = 1; + ufs->lower_mnt[ufs->numlower] = mnt; + ufs->numlower++; + } /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) @@ -876,7 +924,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; - oe = ovl_alloc_entry(1); + oe = ovl_alloc_entry(numlower); if (!oe) goto out_put_lower_mnt; @@ -885,12 +933,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_free_oe; mntput(upperpath.mnt); - mntput(lowerpath.mnt); + for (i = 0; i < numlower; i++) + mntput(stack[i].mnt); path_put(&workpath); + kfree(lowertmp); oe->__upperdentry = upperpath.dentry; - oe->lowerstack[0].dentry = lowerpath.dentry; - oe->lowerstack[0].mnt = ufs->lower_mnt[0]; + for (i = 0; i < numlower; i++) { + oe->lowerstack[i].dentry = stack[i].dentry; + oe->lowerstack[i].mnt = ufs->lower_mnt[i]; + } root_dentry->d_fsdata = oe; @@ -912,7 +964,11 @@ out_put_workdir: out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_lowerpath: - path_put(&lowerpath); + for (i = 0; i < numlower; i++) + path_put(&stack[i]); + kfree(stack); +out_free_lowertmp: + kfree(lowertmp); out_put_workpath: path_put(&workpath); out_put_upperpath: -- cgit v1.2.3 From 1ba38725a351f91769918b132c17fb7fcaf6c2f5 Mon Sep 17 00:00:00 2001 From: hujianyang Date: Wed, 26 Nov 2014 16:16:59 +0800 Subject: ovl: Cleanup redundant blank lines This patch removes redundant blanks lines in overlayfs. Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi --- fs/overlayfs/copy_up.c | 1 - fs/overlayfs/inode.c | 1 - fs/overlayfs/super.c | 1 - 3 files changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index a5bfd60f4f6f..24f640441bd9 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -191,7 +191,6 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) ovl_set_timestamps(upperdentry, stat); return err; - } static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 48492f1240ad..5ac1236afb15 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -433,5 +433,4 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, } return inode; - } diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 5c495a17a5a3..e9ce4a9e8749 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -106,7 +106,6 @@ void ovl_path_upper(struct dentry *dentry, struct path *path) enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) { - enum ovl_path_type type = ovl_path_type(dentry); if (!OVL_TYPE_UPPER(type)) -- cgit v1.2.3 From cead89bb08c0f64e23886f1c18df9bb98e97c55c Mon Sep 17 00:00:00 2001 From: hujianyang Date: Mon, 24 Nov 2014 18:25:21 +0800 Subject: ovl: Use macros to present ovl_xattr This patch adds two macros: OVL_XATTR_PRE_NAME and OVL_XATTR_PRE_LEN to present ovl_xattr name prefix and its length. Also, a new macro OVL_XATTR_OPAQUE is introduced to replace old *ovl_opaque_xattr*. Fix the length of "trusted.overlay." to *16*. 
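A quick illustration of why the prefix length matters (is_private_xattr() below is a userspace stand-in for the ovl_is_private_xattr() shown in the inode.c hunk of this patch, not kernel code): the old hard-coded 14 only covered "trusted.overla", so unrelated attribute names sharing that shorter prefix would have been misclassified as private.

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* Mirrors the macros added by this patch (illustrative copy). */
#define OVL_XATTR_PRE_NAME "trusted.overlay."
#define OVL_XATTR_PRE_LEN  16   /* strlen("trusted.overlay.") */

/* Userspace stand-in for ovl_is_private_xattr(). */
static bool is_private_xattr(const char *name)
{
        return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
}

int main(void)
{
        assert(strlen(OVL_XATTR_PRE_NAME) == OVL_XATTR_PRE_LEN);
        assert(is_private_xattr("trusted.overlay.opaque"));
        /* With the old length of 14 this name would wrongly match, since
         * only "trusted.overla" was compared. */
        assert(!is_private_xattr("trusted.overlaid"));
        assert(!is_private_xattr("user.overlay.opaque"));
        return 0;
}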
Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi --- fs/overlayfs/dir.c | 4 ++-- fs/overlayfs/inode.c | 2 +- fs/overlayfs/overlayfs.h | 4 +++- fs/overlayfs/super.c | 4 +--- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index dcae3ac5aa76..0dc4c33a0a1b 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -118,14 +118,14 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry, static int ovl_set_opaque(struct dentry *upperdentry) { - return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0); + return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0); } static void ovl_remove_opaque(struct dentry *upperdentry) { int err; - err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr); + err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE); if (err) { pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n", upperdentry->d_name.name, err); diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 5ac1236afb15..04f124884687 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -205,7 +205,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz) static bool ovl_is_private_xattr(const char *name) { - return strncmp(name, "trusted.overlay.", 14) == 0; + return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0; } int ovl_setxattr(struct dentry *dentry, const char *name, diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index d176b679f526..17ac5afc9ffb 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -23,7 +23,9 @@ enum ovl_path_type { #define OVL_TYPE_MERGE_OR_LOWER(type) \ (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type)) -extern const char *ovl_opaque_xattr; +#define OVL_XATTR_PRE_NAME "trusted.overlay." +#define OVL_XATTR_PRE_LEN 16 +#define OVL_XATTR_OPAQUE OVL_XATTR_PRE_NAME"opaque" static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) { diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index e9ce4a9e8749..84f3144e1b33 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -62,8 +62,6 @@ struct ovl_entry { #define OVL_MAX_STACK 500 -const char *ovl_opaque_xattr = "trusted.overlay.opaque"; - static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe) { return oe->numlower ? oe->lowerstack[0].dentry : NULL; @@ -254,7 +252,7 @@ static bool ovl_is_opaquedir(struct dentry *dentry) if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) return false; - res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1); + res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1); if (res == 1 && val == 'y') return true; -- cgit v1.2.3 From e1f1fe798d2f2eab904624125ba924d08205f960 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 19 Dec 2014 06:51:26 +0000 Subject: jfs: get rid of homegrown endianness helpers Get rid of le24 stuff, along with the bitfields use - all that stuff can be done with standard stuff, in sparse-verifiable manner. Moreover, that way (shift-and-mask) often generates better code - gcc optimizer sucks on bitfields... 
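A userspace sketch of the resulting layout may help: len_addr keeps the 24-bit extent length in its low bits and the top 8 bits of the 40-bit address in its high byte, with the remaining 32 address bits in addr2. The endianness conversions of the real helpers are dropped here, so this only demonstrates the shift-and-mask arithmetic (pxd_demo and the pxd_* names are illustrative, not the jfs code in jfs_types.h below):

#include <assert.h>
#include <stdint.h>

/* Illustrative analogue of pxd_t: len_addr packs a 24-bit extent length and
 * the top 8 bits of a 40-bit block address; addr2 holds the low 32 address
 * bits. The kernel helpers additionally apply cpu_to_le32/le32_to_cpu. */
struct pxd_demo {
        uint32_t len_addr;
        uint32_t addr2;
};

static void pxd_set_length(struct pxd_demo *pxd, uint32_t len)
{
        pxd->len_addr = (pxd->len_addr & ~0xffffffu) | (len & 0xffffffu);
}

static void pxd_set_address(struct pxd_demo *pxd, uint64_t addr)
{
        pxd->len_addr = (pxd->len_addr & 0xffffffu) |
                        (uint32_t)((addr >> 32) << 24);
        pxd->addr2 = (uint32_t)(addr & 0xffffffffu);
}

static uint32_t pxd_length(const struct pxd_demo *pxd)
{
        return pxd->len_addr & 0xffffffu;
}

static uint64_t pxd_address(const struct pxd_demo *pxd)
{
        uint64_t n = pxd->len_addr & ~0xffffffu; /* top 8 address bits << 24 */
        return (n << 8) + pxd->addr2;            /* move them to bits 32..39 */
}

int main(void)
{
        struct pxd_demo pxd = { 0, 0 };
        uint64_t addr = 0x123456789aULL; /* any 40-bit block address */
        uint32_t len = 0xabcdef;         /* any 24-bit extent length */

        pxd_set_length(&pxd, len);
        pxd_set_address(&pxd, addr);
        assert(pxd_length(&pxd) == len);
        assert(pxd_address(&pxd) == addr);
        return 0;
}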
Signed-off-by: Al Viro Signed-off-by: Dave Kleikamp ---- --- fs/jfs/endian24.h | 49 ------------------------------------------------ fs/jfs/jfs_dtree.c | 4 ++-- fs/jfs/jfs_types.h | 55 ++++++++++++++++++++++++++++++++---------------------- fs/jfs/jfs_xtree.h | 25 +++++++++---------------- 4 files changed, 44 insertions(+), 89 deletions(-) delete mode 100644 fs/jfs/endian24.h (limited to 'fs') diff --git a/fs/jfs/endian24.h b/fs/jfs/endian24.h deleted file mode 100644 index fa92f7f1d0d0..000000000000 --- a/fs/jfs/endian24.h +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (C) International Business Machines Corp., 2001 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See - * the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#ifndef _H_ENDIAN24 -#define _H_ENDIAN24 - -/* - * endian24.h: - * - * Endian conversion for 24-byte data - * - */ -#define __swab24(x) \ -({ \ - __u32 __x = (x); \ - ((__u32)( \ - ((__x & (__u32)0x000000ffUL) << 16) | \ - (__x & (__u32)0x0000ff00UL) | \ - ((__x & (__u32)0x00ff0000UL) >> 16) )); \ -}) - -#if (defined(__KERNEL__) && defined(__LITTLE_ENDIAN)) || (defined(__BYTE_ORDER) && (__BYTE_ORDER == __LITTLE_ENDIAN)) - #define __cpu_to_le24(x) ((__u32)(x)) - #define __le24_to_cpu(x) ((__u32)(x)) -#else - #define __cpu_to_le24(x) __swab24(x) - #define __le24_to_cpu(x) __swab24(x) -#endif - -#ifdef __KERNEL__ - #define cpu_to_le24 __cpu_to_le24 - #define le24_to_cpu __le24_to_cpu -#endif - -#endif /* !_H_ENDIAN24 */ diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 984c2bbf4f61..d88576e23fe4 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -1040,8 +1040,8 @@ static int dtSplitUp(tid_t tid, pxdlist.maxnpxd = 1; pxdlist.npxd = 0; pxd = &pxdlist.pxd[0]; - PXDaddress(pxd, nxaddr) - PXDlength(pxd, xlen + n); + PXDaddress(pxd, nxaddr); + PXDlength(pxd, xlen + n); split->pxdlist = &pxdlist; if ((rc = dtExtendPage(tid, ip, split, btstack))) { nxaddr = addressPXD(pxd); diff --git a/fs/jfs/jfs_types.h b/fs/jfs/jfs_types.h index 43ea3713c083..8f602dcb51fa 100644 --- a/fs/jfs/jfs_types.h +++ b/fs/jfs/jfs_types.h @@ -30,8 +30,6 @@ #include #include -#include "endian24.h" - /* * transaction and lock id's * @@ -59,26 +57,42 @@ struct timestruc_t { /* * physical xd (pxd) + * + * The leftmost 24 bits of len_addr are the extent length. 
+ * The rightmost 8 bits of len_addr are the most signficant bits of + * the extent address */ typedef struct { - unsigned len:24; - unsigned addr1:8; + __le32 len_addr; __le32 addr2; } pxd_t; /* xd_t field construction */ -#define PXDlength(pxd, length32) ((pxd)->len = __cpu_to_le24(length32)) -#define PXDaddress(pxd, address64)\ -{\ - (pxd)->addr1 = ((s64)address64) >> 32;\ - (pxd)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\ +static inline void PXDlength(pxd_t *pxd, __u32 len) +{ + pxd->len_addr = (pxd->len_addr & cpu_to_le32(~0xffffff)) | + cpu_to_le32(len & 0xffffff); +} + +static inline void PXDaddress(pxd_t *pxd, __u64 addr) +{ + pxd->len_addr = (pxd->len_addr & cpu_to_le32(0xffffff)) | + cpu_to_le32((addr >> 32)<<24); + pxd->addr2 = cpu_to_le32(addr & 0xffffffff); } /* xd_t field extraction */ -#define lengthPXD(pxd) __le24_to_cpu((pxd)->len) -#define addressPXD(pxd)\ - ( ((s64)((pxd)->addr1)) << 32 | __le32_to_cpu((pxd)->addr2)) +static inline __u32 lengthPXD(pxd_t *pxd) +{ + return le32_to_cpu((pxd)->len_addr) & 0xffffff; +} + +static inline __u64 addressPXD(pxd_t *pxd) +{ + __u64 n = le32_to_cpu(pxd->len_addr) & ~0xffffff; + return (n << 8) + le32_to_cpu(pxd->addr2); +} #define MAXTREEHEIGHT 8 /* pxd list */ @@ -93,12 +107,10 @@ struct pxdlist { * data extent descriptor (dxd) */ typedef struct { - unsigned flag:8; /* 1: flags */ - unsigned rsrvd:24; + __u8 flag; /* 1: flags */ + __u8 rsrvd[3]; __le32 size; /* 4: size in byte */ - unsigned len:24; /* 3: length in unit of fsblksize */ - unsigned addr1:8; /* 1: address in unit of fsblksize */ - __le32 addr2; /* 4: address in unit of fsblksize */ + pxd_t loc; /* 8: address and length in unit of fsblksize */ } dxd_t; /* - 16 - */ /* dxd_t flags */ @@ -109,12 +121,11 @@ typedef struct { #define DXD_CORRUPT 0x08 /* Inconsistency detected */ /* dxd_t field construction - * Conveniently, the PXD macros work for DXD */ -#define DXDlength PXDlength -#define DXDaddress PXDaddress -#define lengthDXD lengthPXD -#define addressDXD addressPXD +#define DXDlength(dxd, len) PXDlength(&(dxd)->loc, len) +#define DXDaddress(dxd, addr) PXDaddress(&(dxd)->loc, addr) +#define lengthDXD(dxd) lengthPXD(&(dxd)->loc) +#define addressDXD(dxd) addressPXD(&(dxd)->loc) #define DXDsize(dxd, size32) ((dxd)->size = cpu_to_le32(size32)) #define sizeDXD(dxd) le32_to_cpu((dxd)->size) diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h index 08c0c749b986..1e0987986d5f 100644 --- a/fs/jfs/jfs_xtree.h +++ b/fs/jfs/jfs_xtree.h @@ -29,13 +29,11 @@ * extent allocation descriptor (xad) */ typedef struct xad { - unsigned flag:8; /* 1: flag */ - unsigned rsvrd:16; /* 2: reserved */ - unsigned off1:8; /* 1: offset in unit of fsblksize */ - __le32 off2; /* 4: offset in unit of fsblksize */ - unsigned len:24; /* 3: length in unit of fsblksize */ - unsigned addr1:8; /* 1: address in unit of fsblksize */ - __le32 addr2; /* 4: address in unit of fsblksize */ + __u8 flag; /* 1: flag */ + __u8 rsvrd[2]; /* 2: reserved */ + __u8 off1; /* 1: offset in unit of fsblksize */ + __le32 off2; /* 4: offset in unit of fsblksize */ + pxd_t loc; /* 8: length and address in unit of fsblksize */ } xad_t; /* (16) */ #define MAXXLEN ((1 << 24) - 1) @@ -49,19 +47,14 @@ typedef struct xad { (xad)->off1 = ((u64)offset64) >> 32;\ (xad)->off2 = __cpu_to_le32((offset64) & 0xffffffff);\ } -#define XADaddress(xad, address64)\ -{\ - (xad)->addr1 = ((u64)address64) >> 32;\ - (xad)->addr2 = __cpu_to_le32((address64) & 0xffffffff);\ -} -#define XADlength(xad, length32) (xad)->len = 
__cpu_to_le24(length32) +#define XADaddress(xad, address64) PXDaddress(&(xad)->loc, address64) +#define XADlength(xad, length32) PXDlength(&(xad)->loc, length32) /* xad_t field extraction */ #define offsetXAD(xad)\ ( ((s64)((xad)->off1)) << 32 | __le32_to_cpu((xad)->off2)) -#define addressXAD(xad)\ - ( ((s64)((xad)->addr1)) << 32 | __le32_to_cpu((xad)->addr2)) -#define lengthXAD(xad) __le24_to_cpu((xad)->len) +#define addressXAD(xad) addressPXD(&(xad)->loc) +#define lengthXAD(xad) lengthPXD(&(xad)->loc) /* xad list */ struct xadlist { -- cgit v1.2.3 From 67db10344816c74709271c30905bb83781e7050c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sat, 13 Dec 2014 09:11:40 -0500 Subject: nfsd: fi_delegees doesn't need to be an atomic_t fi_delegees is always handled under the fi_lock, so there's no need to use an atomic_t for this field. Signed-off-by: Jeff Layton Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 8 ++++---- fs/nfsd/state.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c06a1ba80d73..277f8b8529d6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -688,7 +688,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp) struct file *filp = NULL; spin_lock(&fp->fi_lock); - if (fp->fi_deleg_file && atomic_dec_and_test(&fp->fi_delegees)) + if (fp->fi_deleg_file && --fp->fi_delegees == 0) swap(filp, fp->fi_deleg_file); spin_unlock(&fp->fi_lock); @@ -3855,12 +3855,12 @@ static int nfs4_setlease(struct nfs4_delegation *dp) /* Race breaker */ if (fp->fi_deleg_file) { status = 0; - atomic_inc(&fp->fi_delegees); + ++fp->fi_delegees; hash_delegation_locked(dp, fp); goto out_unlock; } fp->fi_deleg_file = filp; - atomic_set(&fp->fi_delegees, 1); + fp->fi_delegees = 1; hash_delegation_locked(dp, fp); spin_unlock(&fp->fi_lock); spin_unlock(&state_lock); @@ -3901,7 +3901,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, status = -EAGAIN; goto out_unlock; } - atomic_inc(&fp->fi_delegees); + ++fp->fi_delegees; hash_delegation_locked(dp, fp); status = 0; out_unlock: diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 9d3be371240a..dab6553ceea1 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -493,7 +493,7 @@ struct nfs4_file { atomic_t fi_access[2]; u32 fi_share_deny; struct file *fi_deleg_file; - atomic_t fi_delegees; + int fi_delegees; struct knfsd_fh fi_fhandle; bool fi_had_conflict; }; -- cgit v1.2.3 From 0ec016e3e02fe07e7250b87daffab611f219e7f1 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 19 Dec 2014 18:01:35 -0500 Subject: nfsd4: tweak rd_dircount accounting RFC 3530 14.2.24 says This value represents the length of the names of the directory entries and the cookie value for these entries. This length represents the XDR encoding of the data (names and cookies)... The "xdr encoding" of the name should probably include the 4 bytes for the length. But this is all just a hint so not worth e.g. backporting to stable. Also reshuffle some lines to more clearly group together the dircount-related code. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4xdr.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 15f7b73e0c0f..91f7a3644ffb 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2768,16 +2768,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, if (entry_bytes > cd->rd_maxcount) goto fail; cd->rd_maxcount -= entry_bytes; - if (!cd->rd_dircount) - goto fail; /* * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so * let's always let through the first entry, at least: */ - name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8; + if (!cd->rd_dircount) + goto fail; + name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8; if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) goto fail; cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); + cd->cookie_offset = cookie_offset; skip_entry: cd->common.err = nfs_ok; -- cgit v1.2.3 From 2f83fd8c2849a388082f30d755a75c1e67c4643b Mon Sep 17 00:00:00 2001 From: hujianyang Date: Tue, 6 Jan 2015 12:52:13 +0800 Subject: ovl: Fix kernel panic while mounting overlayfs The function ovl_fill_super() in recently multi-layer support version will incorrectly return 0 at error handling path and then cause kernel panic. This failure can be reproduced by mounting a overlayfs with upperdir and workdir in different mounts. And also, If the memory allocation of *lower_mnt* fail, this function may return an zero either. This patch fix this problem by setting *err* to proper error number before jumping to error handling path. Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 84f3144e1b33..6ca8ea8cb9b2 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -836,6 +836,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_put_upperpath; + err = -EINVAL; if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); goto out_put_workpath; @@ -894,12 +895,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } } + err = -ENOMEM; ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL); if (ufs->lower_mnt == NULL) goto out_put_workdir; for (i = 0; i < numlower; i++) { struct vfsmount *mnt = clone_private_mount(&stack[i]); + err = PTR_ERR(mnt); if (IS_ERR(mnt)) { pr_err("overlayfs: failed to clone lowerpath\n"); goto out_put_lower_mnt; -- cgit v1.2.3 From a425c037f3dd8a56469158ab5f37beb46402d958 Mon Sep 17 00:00:00 2001 From: hujianyang Date: Tue, 6 Jan 2015 16:10:01 +0800 Subject: ovl: Fix opaque regression in ovl_lookup Current multi-layer support overlayfs has a regression in .lookup(). If there is a directory in upperdir and a regular file has same name in lowerdir in a merged directory, lower file is hidden and upper directory is set to opaque in former case. But it is changed in present code. In lowerdir lookup path, if a found inode is not directory, the type checking of previous inode is missing. This inode will be copied to the lowerstack of ovl_entry directly. That will lead to several wrong conditions, for example, the reading of the directory in upperdir may return an error like: ls: reading directory .: Not a directory This patch makes the lowerdir lookup path check the opaque for non-directory file too. 
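Restated outside the kernel, the rule this fix restores looks roughly like the sketch below (merged_depth() is a hypothetical simplification, not the ovl_lookup() code): merging continues only while the previous and current entries are both directories; the first type mismatch ends the stack, and a non-directory found along the way is used but hides everything beneath it.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical simplification of the ovl_lookup() stacking rule: given the
 * file types found at each layer (top to bottom, true = directory), return
 * how many consecutive entries merge into one overlay entry. */
static size_t merged_depth(const bool *is_dir, size_t nfound)
{
        size_t i;

        for (i = 0; i < nfound; i++) {
                /* A type mismatch with the layer above ends the merge. */
                if (i > 0 && (!is_dir[i - 1] || !is_dir[i]))
                        break;
                /* A non-directory is used, but nothing below it merges. */
                if (!is_dir[i]) {
                        i++;
                        break;
                }
        }
        return i;
}

int main(void)
{
        /* Upper directory over a lower regular file: only the directory is
         * visible and the lower file stays hidden (the regression case). */
        bool dir_over_file[] = { true, false };
        assert(merged_depth(dir_over_file, 2) == 1);

        /* Regular file on top: used as-is, lower layers are ignored. */
        bool file_on_top[] = { false, true };
        assert(merged_depth(file_on_top, 2) == 1);

        /* Directories all the way down merge fully. */
        bool all_dirs[] = { true, true, true };
        assert(merged_depth(all_dirs, 3) == 3);
        return 0;
}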
Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 6ca8ea8cb9b2..9e94f4acef1a 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -372,7 +372,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, bool opaque = false; struct path lowerpath = poe->lowerstack[i]; - opaque = false; this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name); err = PTR_ERR(this); if (IS_ERR(this)) { @@ -395,20 +394,24 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, */ if (i < poe->numlower - 1 && ovl_is_opaquedir(this)) opaque = true; - /* - * If this is a non-directory then stop here. - * - * FIXME: check for opaqueness maybe better done in remove code. - */ - if (!S_ISDIR(this->d_inode->i_mode)) { - opaque = true; - } else if (prev && (!S_ISDIR(prev->d_inode->i_mode) || - !S_ISDIR(this->d_inode->i_mode))) { + + if (prev && (!S_ISDIR(prev->d_inode->i_mode) || + !S_ISDIR(this->d_inode->i_mode))) { + /* + * FIXME: check for upper-opaqueness maybe better done + * in remove code. + */ if (prev == upperdentry) upperopaque = true; dput(this); break; } + /* + * If this is a non-directory then stop here. + */ + if (!S_ISDIR(this->d_inode->i_mode)) + opaque = true; + stack[ctr].dentry = this; stack[ctr].mnt = lowerpath.mnt; ctr++; -- cgit v1.2.3 From 3cdf6fe91041b3afd6761f76254f7b6cbe8020fc Mon Sep 17 00:00:00 2001 From: Seunghun Lee Date: Sat, 3 Jan 2015 02:26:49 +0900 Subject: ovl: Prevent rw remount when it should be ro mount Overlayfs should be mounted read-only when upper-fs is read-only or nonexistent. But now it can be remounted read-write and this can cause kernel panic. So we should prevent read-write remount when the above situation happens. Signed-off-by: Seunghun Lee Signed-off-by: Miklos Szeredi --- fs/overlayfs/super.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'fs') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 9e94f4acef1a..b90952f528b1 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -525,10 +525,22 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) return 0; } +static int ovl_remount(struct super_block *sb, int *flags, char *data) +{ + struct ovl_fs *ufs = sb->s_fs_info; + + if (!(*flags & MS_RDONLY) && + (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))) + return -EROFS; + + return 0; +} + static const struct super_operations ovl_super_operations = { .put_super = ovl_put_super, .statfs = ovl_statfs, .show_options = ovl_show_options, + .remount_fs = ovl_remount, }; enum { -- cgit v1.2.3 From 4330397e4e8a662f36d101659e2a59ce32e76ff4 Mon Sep 17 00:00:00 2001 From: hujianyang Date: Thu, 11 Dec 2014 10:30:18 +0800 Subject: ovl: discard independent cursor in readdir() Since the ovl_dir_cache is stable during a directory reading, the cursor of struct ovl_dir_file don't need to be an independent entry in the list of a merged directory. This patch changes *cursor* to a pointer which points to the entry in the ovl_dir_cache. After this, we don't need to check *is_cursor* either. 
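The idea reads more simply outside the list_head API; a minimal sketch, assuming the cached list stays stable while it is walked (cache_entry, dir_state and emit_some are made-up names): the per-open cursor is just a pointer to the next cached entry, so a later read resumes where the previous one stopped without splicing a dummy element into the list.

#include <stdio.h>

struct cache_entry {
        const char *name;
        struct cache_entry *next;       /* NULL terminates the list */
};

struct dir_state {
        struct cache_entry *cursor;     /* where the next read resumes */
};

/* Emit up to 'count' names, like one ->iterate() call filling a buffer. */
static void emit_some(struct dir_state *od, int count)
{
        while (od->cursor && count--) {
                printf("%s\n", od->cursor->name);
                od->cursor = od->cursor->next;
        }
}

int main(void)
{
        struct cache_entry c = { "c", NULL }, b = { "b", &c }, a = { "a", &b };
        struct dir_state od = { &a };

        emit_some(&od, 2);      /* prints a, b */
        emit_some(&od, 2);      /* resumes at c */
        return 0;
}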
Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi --- fs/overlayfs/readdir.c | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 9df848f2e622..dcf1d412888d 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -24,7 +24,6 @@ struct ovl_cache_entry { struct list_head l_node; struct rb_node node; bool is_whiteout; - bool is_cursor; char name[]; }; @@ -49,7 +48,7 @@ struct ovl_dir_file { bool is_real; bool is_upper; struct ovl_dir_cache *cache; - struct ovl_cache_entry cursor; + struct list_head *cursor; struct file *realfile; struct file *upperfile; }; @@ -97,7 +96,6 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir, p->type = d_type; p->ino = ino; p->is_whiteout = false; - p->is_cursor = false; if (d_type == DT_CHR) { struct dentry *dentry; @@ -196,7 +194,6 @@ static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry) { struct ovl_dir_cache *cache = od->cache; - list_del_init(&od->cursor.l_node); WARN_ON(cache->refcount <= 0); cache->refcount--; if (!cache->refcount) { @@ -254,6 +251,7 @@ static void ovl_dir_reset(struct file *file) if (cache && ovl_dentry_version_get(dentry) != cache->version) { ovl_cache_put(od, dentry); od->cache = NULL; + od->cursor = NULL; } WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type)); if (od->is_real && OVL_TYPE_MERGE(type)) @@ -295,17 +293,16 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) { - struct ovl_cache_entry *p; + struct list_head *p; loff_t off = 0; - list_for_each_entry(p, &od->cache->entries, l_node) { - if (p->is_cursor) - continue; + list_for_each(p, &od->cache->entries) { if (off >= pos) break; off++; } - list_move_tail(&od->cursor.l_node, &p->l_node); + /* Cursor is safe since the cache is stable */ + od->cursor = p; } static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) @@ -344,6 +341,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) { struct ovl_dir_file *od = file->private_data; struct dentry *dentry = file->f_path.dentry; + struct ovl_cache_entry *p; if (!ctx->pos) ovl_dir_reset(file); @@ -362,19 +360,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) ovl_seek_cursor(od, ctx->pos); } - while (od->cursor.l_node.next != &od->cache->entries) { - struct ovl_cache_entry *p; - - p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node); - /* Skip cursors */ - if (!p->is_cursor) { - if (!p->is_whiteout) { - if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) - break; - } - ctx->pos++; - } - list_move(&od->cursor.l_node, &p->l_node); + while (od->cursor != &od->cache->entries) { + p = list_entry(od->cursor, struct ovl_cache_entry, l_node); + if (!p->is_whiteout) + if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) + break; + od->cursor = p->l_node.next; + ctx->pos++; } return 0; } @@ -493,11 +485,9 @@ static int ovl_dir_open(struct inode *inode, struct file *file) kfree(od); return PTR_ERR(realfile); } - INIT_LIST_HEAD(&od->cursor.l_node); od->realfile = realfile; od->is_real = !OVL_TYPE_MERGE(type); od->is_upper = OVL_TYPE_UPPER(type); - od->cursor.is_cursor = true; file->private_data = od; return 0; -- cgit v1.2.3 From cd52b6368f1301b55d0e484105c876930e443d83 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 10 Dec 2014 15:18:34 -0800 Subject: f2fs: remove checking dirty_exceed We don't need to 
force to write dirty_exceeded for f2fs_balance_fs_bg. This flag was only meaningful to write bypassing conditions. Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index f83326ca32ef..d6f073edd861 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -57,8 +57,6 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) } else if (type == INO_ENTRIES) { int i; - if (sbi->sb->s_bdi->dirty_exceeded) - return false; for (i = 0; i <= UPDATE_INO; i++) mem_size += (sbi->im[i].ino_num * sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT; -- cgit v1.2.3 From 88a70a69c088933011615fe26242e0335b012284 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 10 Dec 2014 15:20:48 -0800 Subject: f2fs: fix wrong condition check to trigger f2fs_sync_fs If there is not enough available memory, we need to trigger f2fs_sync_fs. Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 42607a679923..11e4b5c14616 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -290,7 +290,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) /* check the # of cached NAT entries and prefree segments */ if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) || excess_prefree_segs(sbi) || - available_free_memory(sbi, INO_ENTRIES)) + !available_free_memory(sbi, INO_ENTRIES)) f2fs_sync_fs(sbi->sb, true); } -- cgit v1.2.3 From 70c640b1d6fd8484d0629b90052d6f6f738023fb Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 10 Dec 2014 13:59:33 -0800 Subject: f2fs: don't need to call lock_op and lock_page for abort We don't need to call lock_op and lock_page at the aborting path. Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 35 ++++++++++++++++++++--------------- 1 file changed, 20 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 11e4b5c14616..3ce86c533604 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -240,33 +240,38 @@ void commit_inmem_pages(struct inode *inode, bool abort) * Otherwise, f2fs_gc in f2fs_balance_fs can wait forever until this * inode becomes free by iget_locked in f2fs_iget. 
*/ - if (!abort) + if (!abort) { f2fs_balance_fs(sbi); - - f2fs_lock_op(sbi); + f2fs_lock_op(sbi); + } mutex_lock(&fi->inmem_lock); list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { - lock_page(cur->page); - if (!abort && cur->page->mapping == inode->i_mapping) { - f2fs_wait_on_page_writeback(cur->page, DATA); - if (clear_page_dirty_for_io(cur->page)) - inode_dec_dirty_pages(inode); - do_write_data_page(cur->page, &fio); - submit_bio = true; + if (!abort) { + lock_page(cur->page); + if (cur->page->mapping == inode->i_mapping) { + f2fs_wait_on_page_writeback(cur->page, DATA); + if (clear_page_dirty_for_io(cur->page)) + inode_dec_dirty_pages(inode); + do_write_data_page(cur->page, &fio); + submit_bio = true; + } + f2fs_put_page(cur->page, 1); + } else { + put_page(cur->page); } radix_tree_delete(&fi->inmem_root, cur->page->index); - f2fs_put_page(cur->page, 1); list_del(&cur->list); kmem_cache_free(inmem_entry_slab, cur); dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); } - if (submit_bio) - f2fs_submit_merged_bio(sbi, DATA, WRITE); mutex_unlock(&fi->inmem_lock); - filemap_fdatawait_range(inode->i_mapping, 0, LLONG_MAX); - f2fs_unlock_op(sbi); + if (!abort) { + f2fs_unlock_op(sbi); + if (submit_bio) + f2fs_submit_merged_bio(sbi, DATA, WRITE); + } } /* -- cgit v1.2.3 From 1e84371ffeef451e8532e0cd04c2fe59ff10c514 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 9 Dec 2014 06:08:59 -0800 Subject: f2fs: change atomic and volatile write policies This patch adds two new ioctls to release inmemory pages grabbed by atomic writes. o f2fs_ioc_abort_volatile_write - If transaction was failed, all the grabbed pages and data should be written. o f2fs_ioc_release_volatile_write - This is to enhance the performance of PERSIST mode in sqlite. In order to avoid huge memory consumption which causes OOM, this patch changes volatile writes to use normal dirty pages, instead blocked flushing to the disk as long as system does not suffer from memory pressure. 
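A rough sketch of how a userspace transaction might drive these ioctls (the F2FS_IOC_* values mirror the f2fs.h hunk below, F2FS_IOCTL_MAGIC is taken to be 0xf5 as in fs/f2fs/f2fs.h, the path is made up and error handling is minimal):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <unistd.h>

/* Mirrors the definitions added to fs/f2fs/f2fs.h. */
#define F2FS_IOCTL_MAGIC                0xf5
#define F2FS_IOC_START_ATOMIC_WRITE     _IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE    _IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE   _IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE   _IO(F2FS_IOCTL_MAGIC, 5)

int main(void)
{
        int fd = open("/mnt/f2fs/db-journal", O_RDWR | O_CREAT, 0600); /* made-up path */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Atomic write: dirty pages are staged in memory and only reach
         * disk when the transaction is committed. */
        if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) == 0) {
                if (write(fd, "txn data", 8) == 8) {
                        ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
                } else {
                        /* Per this patch, aborting writes out whatever was
                         * grabbed for the failed transaction. */
                        ioctl(fd, F2FS_IOC_ABORT_VOLATILE_WRITE);
                }
        }

        /* Volatile write: data may stay cache-only; the release ioctl tells
         * f2fs the contents no longer need to be kept (the sqlite PERSIST
         * mode case the commit message mentions). */
        if (ioctl(fd, F2FS_IOC_START_VOLATILE_WRITE) == 0) {
                if (write(fd, "scratch", 7) < 0)
                        perror("write");
                ioctl(fd, F2FS_IOC_RELEASE_VOLATILE_WRITE);
        }

        close(fd);
        return 0;
}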
Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 9 +++++-- fs/f2fs/f2fs.h | 8 ++++++ fs/f2fs/file.c | 77 +++++++++++++++++++++++++++++++++++++++++++++++-------- fs/f2fs/inode.c | 2 +- fs/f2fs/node.c | 3 +++ fs/f2fs/node.h | 1 + fs/f2fs/segment.c | 2 +- 7 files changed, 88 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 7ec697b37f19..32264e3d524f 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -814,6 +814,11 @@ static int f2fs_write_data_page(struct page *page, write: if (unlikely(sbi->por_doing)) goto redirty_out; + if (f2fs_is_drop_cache(inode)) + goto out; + if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim && + available_free_memory(sbi, BASE_CHECK)) + goto redirty_out; /* Dentry blocks are controlled by checkpoint */ if (S_ISDIR(inode->i_mode)) { @@ -1109,7 +1114,7 @@ static void f2fs_invalidate_data_page(struct page *page, unsigned int offset, if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE) return; - if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode)) + if (f2fs_is_atomic_file(inode)) invalidate_inmem_page(inode, page); if (PageDirty(page)) @@ -1132,7 +1137,7 @@ static int f2fs_set_data_page_dirty(struct page *page) SetPageUptodate(page); - if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode)) { + if (f2fs_is_atomic_file(inode)) { register_inmem_page(inode, page); return 1; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index ec58bb2373fc..8c9bf3d6cb7d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -201,6 +201,8 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size, #define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1) #define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2) #define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3) +#define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4) +#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5) #if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* @@ -1113,6 +1115,7 @@ enum { FI_NEED_IPU, /* used for ipu per file */ FI_ATOMIC_FILE, /* indicate atomic file */ FI_VOLATILE_FILE, /* indicate volatile file */ + FI_DROP_CACHE, /* drop dirty page cache */ FI_DATA_EXIST, /* indicate data exists */ }; @@ -1220,6 +1223,11 @@ static inline bool f2fs_is_volatile_file(struct inode *inode) return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE); } +static inline bool f2fs_is_drop_cache(struct inode *inode) +{ + return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE); +} + static inline void *inline_data_addr(struct page *page) { struct f2fs_inode *ri = F2FS_INODE(page); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 3c27e0ecb3bc..5139f90e92c1 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -836,6 +836,19 @@ static long f2fs_fallocate(struct file *file, int mode, return ret; } +static int f2fs_release_file(struct inode *inode, struct file *filp) +{ + /* some remained atomic pages should discarded */ + if (f2fs_is_atomic_file(inode)) + commit_inmem_pages(inode, true); + if (f2fs_is_volatile_file(inode)) { + set_inode_flag(F2FS_I(inode), FI_DROP_CACHE); + filemap_fdatawrite(inode->i_mapping); + clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE); + } + return 0; +} + #define F2FS_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL)) #define F2FS_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL) @@ -909,26 +922,20 @@ out: static int f2fs_ioc_start_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (!inode_owner_or_capable(inode)) return 
-EACCES; - f2fs_balance_fs(sbi); + f2fs_balance_fs(F2FS_I_SB(inode)); + + if (f2fs_is_atomic_file(inode)) + return 0; set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); return f2fs_convert_inline_inode(inode); } -static int f2fs_release_file(struct inode *inode, struct file *filp) -{ - /* some remained atomic pages should discarded */ - if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode)) - commit_inmem_pages(inode, true); - return 0; -} - static int f2fs_ioc_commit_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); @@ -949,6 +956,7 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); mnt_drop_write_file(filp); + clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); return ret; } @@ -959,11 +967,56 @@ static int f2fs_ioc_start_volatile_write(struct file *filp) if (!inode_owner_or_capable(inode)) return -EACCES; + if (f2fs_is_volatile_file(inode)) + return 0; + set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); return f2fs_convert_inline_inode(inode); } +static int f2fs_ioc_release_volatile_write(struct file *filp) +{ + struct inode *inode = file_inode(filp); + + if (!inode_owner_or_capable(inode)) + return -EACCES; + + if (!f2fs_is_volatile_file(inode)) + return 0; + + punch_hole(inode, 0, F2FS_BLKSIZE); + return 0; +} + +static int f2fs_ioc_abort_volatile_write(struct file *filp) +{ + struct inode *inode = file_inode(filp); + int ret; + + if (!inode_owner_or_capable(inode)) + return -EACCES; + + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + f2fs_balance_fs(F2FS_I_SB(inode)); + + if (f2fs_is_atomic_file(inode)) { + commit_inmem_pages(inode, false); + clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); + } + + if (f2fs_is_volatile_file(inode)) { + clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); + filemap_fdatawrite(inode->i_mapping); + set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); + } + mnt_drop_write_file(filp); + return ret; +} + static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); @@ -1007,6 +1060,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_commit_atomic_write(filp); case F2FS_IOC_START_VOLATILE_WRITE: return f2fs_ioc_start_volatile_write(filp); + case F2FS_IOC_RELEASE_VOLATILE_WRITE: + return f2fs_ioc_release_volatile_write(filp); + case F2FS_IOC_ABORT_VOLATILE_WRITE: + return f2fs_ioc_abort_volatile_write(filp); case FITRIM: return f2fs_ioc_fitrim(filp, arg); default: diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 196cc7843aaf..3a8958d1684f 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -304,7 +304,7 @@ void f2fs_evict_inode(struct inode *inode) nid_t xnid = F2FS_I(inode)->i_xattr_nid; /* some remained atomic pages should discarded */ - if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode)) + if (f2fs_is_atomic_file(inode)) commit_inmem_pages(inode, true); trace_f2fs_evict_inode(inode); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index d6f073edd861..cabecee88377 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -61,6 +61,9 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) mem_size += (sbi->im[i].ino_num * sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); + } else { + if (sbi->sb->s_bdi->dirty_exceeded) + return false; } return res; } diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index d10b6448a671..036b887176ff 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -108,6 +108,7 @@ enum mem_type 
{ NAT_ENTRIES, /* indicates the cached nat entry */ DIRTY_DENTS, /* indicates dirty dentry pages */ INO_ENTRIES, /* indicates inode entries */ + BASE_CHECK, /* check kernel status */ }; struct nat_entry_set { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 3ce86c533604..de9c0700c88e 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -230,7 +230,7 @@ void commit_inmem_pages(struct inode *inode, bool abort) bool submit_bio = false; struct f2fs_io_info fio = { .type = DATA, - .rw = WRITE_SYNC, + .rw = WRITE_SYNC | REQ_PRIO, }; /* -- cgit v1.2.3 From d7bc2484b8d4e580370c66ad93c4319225bd104d Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 12 Dec 2014 13:53:41 -0800 Subject: f2fs: fix small discards not to issue redundantly The ckpt_valid_map and cur_valid_map are synced by seg_info_to_raw_sit. In the case of small discards, the candidates are selected before sync, while fitrim selects candidates after sync. So, for small discards, we need to add candidates only just being obsoleted. Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index de9c0700c88e..335418c9a06b 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -525,7 +525,8 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) bool force = (cpc->reason == CP_DISCARD); int i; - if (!force && !test_opt(sbi, DISCARD)) + if (!force && (!test_opt(sbi, DISCARD) || + SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)) return; if (force && !se->valid_blocks) { @@ -553,7 +554,8 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ for (i = 0; i < entries; i++) - dmap[i] = ~(cur_map[i] | ckpt_map[i]); + dmap[i] = force ? ~ckpt_map[i] : + (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) { start = __find_rev_next_bit(dmap, max_blocks, end + 1); @@ -1759,7 +1761,7 @@ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) se = get_seg_entry(sbi, segno); /* add discard candidates */ - if (SM_I(sbi)->nr_discards < SM_I(sbi)->max_discards) { + if (cpc->reason != CP_DISCARD) { cpc->trim_start = segno; add_discard_addrs(sbi, cpc); } -- cgit v1.2.3 From 042b7816aaebb1eb137b9889c20b595d951d15b7 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 12 Dec 2014 19:40:02 -0800 Subject: f2fs: remove unnecessary call to invalidate inmemory pages Now we use inmemory pages for atomic write only and provide abort procedure, we don't need to truncate them explicitly. 
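For context (condensed from the commit_inmem_pages() hunk applied earlier in this series, not a standalone excerpt): every registered in-memory page is already released on the abort path, so a separate invalidate hook has no user left.

	/* abort path in commit_inmem_pages(): drop each registered page */
	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		put_page(cur->page);
		radix_tree_delete(&fi->inmem_root, cur->page->index);
		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}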
Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 3 --- fs/f2fs/f2fs.h | 1 - fs/f2fs/segment.c | 17 ----------------- 3 files changed, 21 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 32264e3d524f..caa08e46697a 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1114,9 +1114,6 @@ static void f2fs_invalidate_data_page(struct page *page, unsigned int offset, if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE) return; - if (f2fs_is_atomic_file(inode)) - invalidate_inmem_page(inode, page); - if (PageDirty(page)) inode_dec_dirty_pages(inode); ClearPagePrivate(page); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 8c9bf3d6cb7d..d3699da17e6b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1397,7 +1397,6 @@ void destroy_node_manager_caches(void); * segment.c */ void register_inmem_page(struct inode *, struct page *); -void invalidate_inmem_page(struct inode *, struct page *); void commit_inmem_pages(struct inode *, bool); void f2fs_balance_fs(struct f2fs_sb_info *); void f2fs_balance_fs_bg(struct f2fs_sb_info *); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 335418c9a06b..3791fa93dc7b 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -205,23 +205,6 @@ retry: mutex_unlock(&fi->inmem_lock); } -void invalidate_inmem_page(struct inode *inode, struct page *page) -{ - struct f2fs_inode_info *fi = F2FS_I(inode); - struct inmem_pages *cur; - - mutex_lock(&fi->inmem_lock); - cur = radix_tree_lookup(&fi->inmem_root, page->index); - if (cur) { - radix_tree_delete(&fi->inmem_root, cur->page->index); - f2fs_put_page(cur->page, 0); - list_del(&cur->list); - kmem_cache_free(inmem_entry_slab, cur); - dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); - } - mutex_unlock(&fi->inmem_lock); -} - void commit_inmem_pages(struct inode *inode, bool abort) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); -- cgit v1.2.3 From 5df1f1da7a148c4a14d035b49c4d89790f59a57a Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Sat, 13 Dec 2014 13:13:11 -0800 Subject: f2fs: use missing the use of f2fs_kunmap_page This patch calls f2fs_kunmap_page which I missed before. Signed-off-by: Jaegeuk Kim --- fs/f2fs/dir.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index b1a7d5737cd0..b74097a7f6d9 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -286,8 +286,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de, f2fs_wait_on_page_writeback(page, type); de->ino = cpu_to_le32(inode->i_ino); set_de_type(de, inode); - if (!f2fs_has_inline_dentry(dir)) - kunmap(page); + f2fs_dentry_kunmap(dir, page); set_page_dirty(page); dir->i_mtime = dir->i_ctime = CURRENT_TIME; mark_inode_dirty(dir); -- cgit v1.2.3 From 3fa06d7bc9f579bd180e879fd1c9bdb6b1b0d9b7 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 9 Dec 2014 14:21:46 +0800 Subject: f2fs: readahead contiguous current summary blocks in checkpoint Let's add readahead code for reading contiguous compact/normal summary blocks in checkpoint, then we will gain better performance in mount procedure. Changes from v1 o remove inappropriate 'unlikely' in npages_for_summary_flush. 
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 2 +- fs/f2fs/f2fs.h | 2 +- fs/f2fs/segment.c | 21 ++++++++++++++++++--- 3 files changed, 20 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index e6c271fefaca..825158e4855a 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -922,7 +922,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ckpt->next_free_nid = cpu_to_le32(last_nid); /* 2 cp + n data seg summary + orphan inode blocks */ - data_sum_blocks = npages_for_summary_flush(sbi); + data_sum_blocks = npages_for_summary_flush(sbi, false); if (data_sum_blocks < NR_CURSEG_DATA_TYPE) set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); else diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d3699da17e6b..a8ccbceb869a 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1408,7 +1408,7 @@ void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t); void clear_prefree_segments(struct f2fs_sb_info *); void release_discard_addrs(struct f2fs_sb_info *); void discard_next_dnode(struct f2fs_sb_info *, block_t); -int npages_for_summary_flush(struct f2fs_sb_info *); +int npages_for_summary_flush(struct f2fs_sb_info *, bool); void allocate_new_segments(struct f2fs_sb_info *); int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *); struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 3791fa93dc7b..c950c9318e8e 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -725,7 +725,7 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type, /* * Calculate the number of current summary pages for writing */ -int npages_for_summary_flush(struct f2fs_sb_info *sbi) +int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) { int valid_sum_count = 0; int i, sum_in_page; @@ -733,8 +733,13 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi) for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { if (sbi->ckpt->alloc_type[i] == SSR) valid_sum_count += sbi->blocks_per_seg; - else - valid_sum_count += curseg_blkoff(sbi, i); + else { + if (for_ra) + valid_sum_count += le16_to_cpu( + F2FS_CKPT(sbi)->cur_data_blkoff[i]); + else + valid_sum_count += curseg_blkoff(sbi, i); + } } sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - @@ -1440,12 +1445,22 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi) int err; if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { + int npages = npages_for_summary_flush(sbi, true); + + if (npages >= 2) + ra_meta_pages(sbi, start_sum_block(sbi), npages, + META_CP); + /* restore for compacted data summary */ if (read_compacted_summaries(sbi)) return -EINVAL; type = CURSEG_HOT_NODE; } + if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) + ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type), + NR_CURSEG_TYPE - type, META_CP); + for (; type <= CURSEG_COLD_NODE; type++) { err = read_normal_summaries(sbi, type); if (err) -- cgit v1.2.3 From 5c27f4ee447b4ef1cd88d5313eeb838c56265571 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 18 Dec 2014 17:37:21 +0800 Subject: f2fs: merge two uchar variable in struct node_info to reduce memory cost This patch moves one member of struct nat_entry: _flag_ to struct node_info, so _version_ in struct node_info and _flag_ which are unsigned char type will merge to one 32-bit space in register/memory. 
So the size of nat_entry will be reduced from 28 bytes to 24 bytes (for 64-bit machine, reduce its size from 40 bytes to 32 bytes) and then slab memory using by f2fs will be reduced. changes from v2: o update description of memory usage gain for 64-bit machine suggested by Changman Lee. changes from v1: o introduce inline copy_node_info() to copy valid data from node info suggested by Jaegeuk Kim, it can avoid bug. Reviewed-by: Changman Lee Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 4 ++-- fs/f2fs/node.h | 33 ++++++++++++++++++++++----------- 2 files changed, 24 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index cabecee88377..e565b9638971 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -269,7 +269,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, e = __lookup_nat_cache(nm_i, ni->nid); if (!e) { e = grab_nat_entry(nm_i, ni->nid); - e->ni = *ni; + copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); } else if (new_blkaddr == NEW_ADDR) { /* @@ -277,7 +277,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, * previous nat entry can be remained in nat cache. * So, reinitialize it with new information. */ - e->ni = *ni; + copy_node_info(&e->ni, ni); f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); } diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index 036b887176ff..fa6f95968b42 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -29,6 +29,14 @@ /* return value for read_node_page */ #define LOCKED_PAGE 1 +/* For flag in struct node_info */ +enum { + IS_CHECKPOINTED, /* is it checkpointed before? */ + HAS_FSYNCED_INODE, /* is the inode fsynced before? */ + HAS_LAST_FSYNC, /* has the latest node fsync mark? */ + IS_DIRTY, /* this nat entry is dirty? */ +}; + /* * For node information */ @@ -37,18 +45,11 @@ struct node_info { nid_t ino; /* inode number of the node's owner */ block_t blk_addr; /* block address of the node */ unsigned char version; /* version of the node */ -}; - -enum { - IS_CHECKPOINTED, /* is it checkpointed before? */ - HAS_FSYNCED_INODE, /* is the inode fsynced before? */ - HAS_LAST_FSYNC, /* has the latest node fsync mark? */ - IS_DIRTY, /* this nat entry is dirty? 
*/ + unsigned char flag; /* for node information bits */ }; struct nat_entry { struct list_head list; /* for clean or dirty nat list */ - unsigned char flag; /* for node information bits */ struct node_info ni; /* in-memory node information */ }; @@ -63,20 +64,30 @@ struct nat_entry { #define inc_node_version(version) (++version) +static inline void copy_node_info(struct node_info *dst, + struct node_info *src) +{ + dst->nid = src->nid; + dst->ino = src->ino; + dst->blk_addr = src->blk_addr; + dst->version = src->version; + /* should not copy flag here */ +} + static inline void set_nat_flag(struct nat_entry *ne, unsigned int type, bool set) { unsigned char mask = 0x01 << type; if (set) - ne->flag |= mask; + ne->ni.flag |= mask; else - ne->flag &= ~mask; + ne->ni.flag &= ~mask; } static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type) { unsigned char mask = 0x01 << type; - return ne->flag & mask; + return ne->ni.flag & mask; } static inline void nat_reset_flag(struct nat_entry *ne) -- cgit v1.2.3 From 9ecf4b80bd32ad727d7fca56706bb5b059935c18 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 18 Dec 2014 18:29:05 +0800 Subject: f2fs: use ra_meta_pages to simplify readahead code in restore_node_summary Use more common function ra_meta_pages() with META_POR to readahead node blocks in restore_node_summary() instead of ra_sum_pages(), hence we can simplify the readahead code there, and also we can remove unused function ra_sum_pages(). changes from v2: o use invalidate_mapping_pages as before suggested by Changman Lee. changes from v1: o fix one bug when using truncate_inode_pages_range which is pointed out by Jaegeuk Kim. Reviewed-by: Changman Lee Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 65 ++++++++++++---------------------------------------------- 1 file changed, 13 insertions(+), 52 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index e565b9638971..bcfd67c80196 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1727,80 +1727,41 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) return 0; } -/* - * ra_sum_pages() merge contiguous pages into one bio and submit. - * these pre-read pages are allocated in bd_inode's mapping tree. 
- */ -static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages, - int start, int nrpages) -{ - struct inode *inode = sbi->sb->s_bdev->bd_inode; - struct address_space *mapping = inode->i_mapping; - int i, page_idx = start; - struct f2fs_io_info fio = { - .type = META, - .rw = READ_SYNC | REQ_META | REQ_PRIO - }; - - for (i = 0; page_idx < start + nrpages; page_idx++, i++) { - /* alloc page in bd_inode for reading node summary info */ - pages[i] = grab_cache_page(mapping, page_idx); - if (!pages[i]) - break; - f2fs_submit_page_mbio(sbi, pages[i], page_idx, &fio); - } - - f2fs_submit_merged_bio(sbi, META, READ); - return i; -} - int restore_node_summary(struct f2fs_sb_info *sbi, unsigned int segno, struct f2fs_summary_block *sum) { struct f2fs_node *rn; struct f2fs_summary *sum_entry; - struct inode *inode = sbi->sb->s_bdev->bd_inode; block_t addr; int bio_blocks = MAX_BIO_BLOCKS(sbi); - struct page *pages[bio_blocks]; - int i, idx, last_offset, nrpages, err = 0; + int i, idx, last_offset, nrpages; /* scan the node segment */ last_offset = sbi->blocks_per_seg; addr = START_BLOCK(sbi, segno); sum_entry = &sum->entries[0]; - for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) { + for (i = 0; i < last_offset; i += nrpages, addr += nrpages) { nrpages = min(last_offset - i, bio_blocks); /* readahead node pages */ - nrpages = ra_sum_pages(sbi, pages, addr, nrpages); - if (!nrpages) - return -ENOMEM; + ra_meta_pages(sbi, addr, nrpages, META_POR); - for (idx = 0; idx < nrpages; idx++) { - if (err) - goto skip; + for (idx = addr; idx < addr + nrpages; idx++) { + struct page *page = get_meta_page(sbi, idx); - lock_page(pages[idx]); - if (unlikely(!PageUptodate(pages[idx]))) { - err = -EIO; - } else { - rn = F2FS_NODE(pages[idx]); - sum_entry->nid = rn->footer.nid; - sum_entry->version = 0; - sum_entry->ofs_in_node = 0; - sum_entry++; - } - unlock_page(pages[idx]); -skip: - page_cache_release(pages[idx]); + rn = F2FS_NODE(page); + sum_entry->nid = rn->footer.nid; + sum_entry->version = 0; + sum_entry->ofs_in_node = 0; + sum_entry++; + f2fs_put_page(page, 1); } - invalidate_mapping_pages(inode->i_mapping, addr, + invalidate_mapping_pages(META_MAPPING(sbi), addr, addr + nrpages); } - return err; + return 0; } static void remove_nats_in_journal(struct f2fs_sb_info *sbi) -- cgit v1.2.3 From cf04e8eb55290c7b836c36f0b4e1a8d0fe8ee275 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 17 Dec 2014 19:33:13 -0800 Subject: f2fs: use f2fs_io_info to clean up messy parameters during IO path This patch cleans up parameters on IO paths. The key idea is to use f2fs_io_info adding a parameter, block address, and then use this structure as parameters. 
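For reference, a minimal before/after sketch of the new calling convention (condensed from the data.c hunks below, not a complete excerpt):

	/* before: block address and rw flags passed separately */
	f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, READ_SYNC);

	/* after: the block address travels inside struct f2fs_io_info */
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
		.blk_addr = dn.data_blkaddr,
	};
	f2fs_submit_page_bio(sbi, page, &fio);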
Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 24 ++++++++++--------- fs/f2fs/data.c | 66 +++++++++++++++++++++++++++++++--------------------- fs/f2fs/f2fs.h | 14 ++++++----- fs/f2fs/inline.c | 7 +++--- fs/f2fs/node.c | 14 +++++++---- fs/f2fs/segment.c | 28 +++++++++++----------- 6 files changed, 87 insertions(+), 66 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 825158e4855a..0ac7c39605ed 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -50,6 +50,11 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) { struct address_space *mapping = META_MAPPING(sbi); struct page *page; + struct f2fs_io_info fio = { + .type = META, + .rw = READ_SYNC | REQ_META | REQ_PRIO, + .blk_addr = index, + }; repeat: page = grab_cache_page(mapping, index); if (!page) { @@ -59,8 +64,7 @@ repeat: if (PageUptodate(page)) goto out; - if (f2fs_submit_page_bio(sbi, page, index, - READ_SYNC | REQ_META | REQ_PRIO)) + if (f2fs_submit_page_bio(sbi, page, &fio)) goto repeat; lock_page(page); @@ -112,14 +116,12 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type block_t prev_blk_addr = 0; struct page *page; block_t blkno = start; - struct f2fs_io_info fio = { .type = META, .rw = READ_SYNC | REQ_META | REQ_PRIO }; for (; nrpages-- > 0; blkno++) { - block_t blk_addr; if (!is_valid_blkaddr(sbi, blkno, type)) goto out; @@ -130,27 +132,27 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))) blkno = 0; /* get nat block addr */ - blk_addr = current_nat_addr(sbi, + fio.blk_addr = current_nat_addr(sbi, blkno * NAT_ENTRY_PER_BLOCK); break; case META_SIT: /* get sit block addr */ - blk_addr = current_sit_addr(sbi, + fio.blk_addr = current_sit_addr(sbi, blkno * SIT_ENTRY_PER_BLOCK); - if (blkno != start && prev_blk_addr + 1 != blk_addr) + if (blkno != start && prev_blk_addr + 1 != fio.blk_addr) goto out; - prev_blk_addr = blk_addr; + prev_blk_addr = fio.blk_addr; break; case META_SSA: case META_CP: case META_POR: - blk_addr = blkno; + fio.blk_addr = blkno; break; default: BUG(); } - page = grab_cache_page(META_MAPPING(sbi), blk_addr); + page = grab_cache_page(META_MAPPING(sbi), fio.blk_addr); if (!page) continue; if (PageUptodate(page)) { @@ -158,7 +160,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type continue; } - f2fs_submit_page_mbio(sbi, page, blk_addr, &fio); + f2fs_submit_page_mbio(sbi, page, &fio); f2fs_put_page(page, 0); } out: diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index caa08e46697a..d86f8b1413f1 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -132,14 +132,14 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, * Return unlocked page. 
*/ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, - block_t blk_addr, int rw) + struct f2fs_io_info *fio) { struct bio *bio; - trace_f2fs_submit_page_bio(page, blk_addr, rw); + trace_f2fs_submit_page_bio(page, fio->blk_addr, fio->rw); /* Allocate a new bio */ - bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw)); + bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw)); if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { bio_put(bio); @@ -147,12 +147,12 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, return -EFAULT; } - submit_bio(rw, bio); + submit_bio(fio->rw, bio); return 0; } void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page, - block_t blk_addr, struct f2fs_io_info *fio) + struct f2fs_io_info *fio) { enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); struct f2fs_bio_info *io; @@ -160,21 +160,21 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page, io = is_read ? &sbi->read_io : &sbi->write_io[btype]; - verify_block_addr(sbi, blk_addr); + verify_block_addr(sbi, fio->blk_addr); down_write(&io->io_rwsem); if (!is_read) inc_page_count(sbi, F2FS_WRITEBACK); - if (io->bio && (io->last_block_in_bio != blk_addr - 1 || + if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 || io->fio.rw != fio->rw)) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { int bio_blocks = MAX_BIO_BLOCKS(sbi); - io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read); + io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read); io->fio = *fio; } @@ -184,10 +184,10 @@ alloc_new: goto alloc_new; } - io->last_block_in_bio = blk_addr; + io->last_block_in_bio = fio->blk_addr; up_write(&io->io_rwsem); - trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr); + trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, fio->blk_addr); } /* @@ -376,6 +376,10 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) struct dnode_of_data dn; struct page *page; int err; + struct f2fs_io_info fio = { + .type = DATA, + .rw = sync ? READ_SYNC : READA, + }; page = find_get_page(mapping, index); if (page && PageUptodate(page)) @@ -404,8 +408,8 @@ struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync) return page; } - err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, dn.data_blkaddr, - sync ? 
READ_SYNC : READA); + fio.blk_addr = dn.data_blkaddr; + err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio); if (err) return ERR_PTR(err); @@ -430,7 +434,10 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index) struct dnode_of_data dn; struct page *page; int err; - + struct f2fs_io_info fio = { + .type = DATA, + .rw = READ_SYNC, + }; repeat: page = grab_cache_page(mapping, index); if (!page) @@ -464,8 +471,8 @@ repeat: return page; } - err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, - dn.data_blkaddr, READ_SYNC); + fio.blk_addr = dn.data_blkaddr; + err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio); if (err) return ERR_PTR(err); @@ -515,8 +522,12 @@ repeat: zero_user_segment(page, 0, PAGE_CACHE_SIZE); SetPageUptodate(page); } else { - err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, - dn.data_blkaddr, READ_SYNC); + struct f2fs_io_info fio = { + .type = DATA, + .rw = READ_SYNC, + .blk_addr = dn.data_blkaddr, + }; + err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio); if (err) goto put_err; @@ -745,7 +756,6 @@ static int f2fs_read_data_pages(struct file *file, int do_write_data_page(struct page *page, struct f2fs_io_info *fio) { struct inode *inode = page->mapping->host; - block_t old_blkaddr, new_blkaddr; struct dnode_of_data dn; int err = 0; @@ -754,10 +764,10 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio) if (err) return err; - old_blkaddr = dn.data_blkaddr; + fio->blk_addr = dn.data_blkaddr; /* This page is already truncated */ - if (old_blkaddr == NULL_ADDR) + if (fio->blk_addr == NULL_ADDR) goto out_writepage; set_page_writeback(page); @@ -766,14 +776,14 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio) * If current allocation needs SSR, * it had better in-place writes for updated data. 
*/ - if (unlikely(old_blkaddr != NEW_ADDR && + if (unlikely(fio->blk_addr != NEW_ADDR && !is_cold_data(page) && need_inplace_update(inode))) { - rewrite_data_page(page, old_blkaddr, fio); + rewrite_data_page(page, fio); set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE); } else { - write_data_page(page, &dn, &new_blkaddr, fio); - update_extent_cache(new_blkaddr, &dn); + write_data_page(page, &dn, fio); + update_extent_cache(fio->blk_addr, &dn); set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); } out_writepage: @@ -1007,8 +1017,12 @@ put_next: if (dn.data_blkaddr == NEW_ADDR) { zero_user_segment(page, 0, PAGE_CACHE_SIZE); } else { - err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr, - READ_SYNC); + struct f2fs_io_info fio = { + .type = DATA, + .rw = READ_SYNC, + .blk_addr = dn.data_blkaddr, + }; + err = f2fs_submit_page_bio(sbi, page, &fio); if (err) goto fail; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index a8ccbceb869a..3f07b504f6d8 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -491,6 +491,7 @@ enum page_type { struct f2fs_io_info { enum page_type type; /* contains DATA/NODE/META/META_FLUSH */ int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */ + block_t blk_addr; /* block address to be written */ }; #define is_read_io(rw) (((rw) & 1) == READ) @@ -1414,10 +1415,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *); struct page *get_sum_page(struct f2fs_sb_info *, unsigned int); void write_meta_page(struct f2fs_sb_info *, struct page *); void write_node_page(struct f2fs_sb_info *, struct page *, - struct f2fs_io_info *, unsigned int, block_t, block_t *); -void write_data_page(struct page *, struct dnode_of_data *, block_t *, - struct f2fs_io_info *); -void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *); + unsigned int, struct f2fs_io_info *); +void write_data_page(struct page *, struct dnode_of_data *, + struct f2fs_io_info *); +void rewrite_data_page(struct page *, struct f2fs_io_info *); void recover_data_page(struct f2fs_sb_info *, struct page *, struct f2fs_summary *, block_t, block_t); void allocate_data_block(struct f2fs_sb_info *, struct page *, @@ -1464,8 +1465,9 @@ void destroy_checkpoint_caches(void); * data.c */ void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int); -int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int); -void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t, +int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, + struct f2fs_io_info *); +void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, struct f2fs_io_info *); int reserve_new_block(struct dnode_of_data *); int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index f2d3c581e776..0c3f3f9b9f88 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -79,7 +79,6 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page) int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) { void *src_addr, *dst_addr; - block_t new_blk_addr; struct f2fs_io_info fio = { .type = DATA, .rw = WRITE_SYNC | REQ_PRIO, @@ -115,9 +114,9 @@ no_update: /* write data page to try to make data consistent */ set_page_writeback(page); - - write_data_page(page, dn, &new_blk_addr, &fio); - update_extent_cache(new_blk_addr, dn); + fio.blk_addr = dn->data_blkaddr; + write_data_page(page, dn, &fio); + update_extent_cache(fio.blk_addr, dn); f2fs_wait_on_page_writeback(page, DATA); if (dirty) inode_dec_dirty_pages(dn->inode); diff --git 
a/fs/f2fs/node.c b/fs/f2fs/node.c index bcfd67c80196..adc35c978306 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -977,6 +977,10 @@ static int read_node_page(struct page *page, int rw) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); struct node_info ni; + struct f2fs_io_info fio = { + .type = NODE, + .rw = rw, + }; get_node_info(sbi, page->index, &ni); @@ -988,7 +992,8 @@ static int read_node_page(struct page *page, int rw) if (PageUptodate(page)) return LOCKED_PAGE; - return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw); + fio.blk_addr = ni.blk_addr; + return f2fs_submit_page_bio(sbi, page, &fio); } /* @@ -1269,7 +1274,6 @@ static int f2fs_write_node_page(struct page *page, { struct f2fs_sb_info *sbi = F2FS_P_SB(page); nid_t nid; - block_t new_addr; struct node_info ni; struct f2fs_io_info fio = { .type = NODE, @@ -1304,9 +1308,11 @@ static int f2fs_write_node_page(struct page *page, } else { down_read(&sbi->node_write); } + set_page_writeback(page); - write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr); - set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page)); + fio.blk_addr = ni.blk_addr; + write_node_page(sbi, page, nid, &fio); + set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page)); dec_page_count(sbi, F2FS_DIRTY_NODES); up_read(&sbi->node_write); unlock_page(page); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index c950c9318e8e..c726f86c2ea0 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1182,39 +1182,39 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, } static void do_write_page(struct f2fs_sb_info *sbi, struct page *page, - block_t old_blkaddr, block_t *new_blkaddr, - struct f2fs_summary *sum, struct f2fs_io_info *fio) + struct f2fs_summary *sum, + struct f2fs_io_info *fio) { int type = __get_segment_type(page, fio->type); - allocate_data_block(sbi, page, old_blkaddr, new_blkaddr, sum, type); + allocate_data_block(sbi, page, fio->blk_addr, &fio->blk_addr, sum, type); /* writeout dirty page into bdev */ - f2fs_submit_page_mbio(sbi, page, *new_blkaddr, fio); + f2fs_submit_page_mbio(sbi, page, fio); } void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) { struct f2fs_io_info fio = { .type = META, - .rw = WRITE_SYNC | REQ_META | REQ_PRIO + .rw = WRITE_SYNC | REQ_META | REQ_PRIO, + .blk_addr = page->index, }; set_page_writeback(page); - f2fs_submit_page_mbio(sbi, page, page->index, &fio); + f2fs_submit_page_mbio(sbi, page, &fio); } void write_node_page(struct f2fs_sb_info *sbi, struct page *page, - struct f2fs_io_info *fio, - unsigned int nid, block_t old_blkaddr, block_t *new_blkaddr) + unsigned int nid, struct f2fs_io_info *fio) { struct f2fs_summary sum; set_summary(&sum, nid, 0, 0); - do_write_page(sbi, page, old_blkaddr, new_blkaddr, &sum, fio); + do_write_page(sbi, page, &sum, fio); } void write_data_page(struct page *page, struct dnode_of_data *dn, - block_t *new_blkaddr, struct f2fs_io_info *fio) + struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct f2fs_summary sum; @@ -1223,14 +1223,12 @@ void write_data_page(struct page *page, struct dnode_of_data *dn, f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); - - do_write_page(sbi, page, dn->data_blkaddr, new_blkaddr, &sum, fio); + do_write_page(sbi, page, &sum, fio); } -void rewrite_data_page(struct page *page, block_t old_blkaddr, - struct f2fs_io_info *fio) +void rewrite_data_page(struct page *page, struct f2fs_io_info *fio) { - 
f2fs_submit_page_mbio(F2FS_P_SB(page), page, old_blkaddr, fio); + f2fs_submit_page_mbio(F2FS_P_SB(page), page, fio); } void recover_data_page(struct f2fs_sb_info *sbi, -- cgit v1.2.3 From 63f92ddc8aee18838441179064338702a93326a9 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 17 Dec 2014 19:45:05 -0800 Subject: f2fs: add f2fs_io_tracer support This patch adds: o initial trace.c and trace.h with skeleton functions o Kconfig and Makefile to activate this feature Signed-off-by: Jaegeuk Kim --- fs/f2fs/Kconfig | 10 ++++++++++ fs/f2fs/Makefile | 1 + fs/f2fs/trace.c | 24 ++++++++++++++++++++++++ fs/f2fs/trace.h | 24 ++++++++++++++++++++++++ 4 files changed, 59 insertions(+) create mode 100644 fs/f2fs/trace.c create mode 100644 fs/f2fs/trace.h (limited to 'fs') diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 736a348509f7..94e2d2ffabe1 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -71,3 +71,13 @@ config F2FS_CHECK_FS Enables BUG_ONs which check the filesystem consistency in runtime. If you want to improve the performance, say N. + +config F2FS_IO_TRACE + bool "F2FS IO tracer" + depends on F2FS_FS + depends on FUNCTION_TRACER + help + F2FS IO trace is based on a function trace, which gathers process + information and block IO patterns in the filesystem level. + + If unsure, say N. diff --git a/fs/f2fs/Makefile b/fs/f2fs/Makefile index 2e35da12d292..d92397731db8 100644 --- a/fs/f2fs/Makefile +++ b/fs/f2fs/Makefile @@ -5,3 +5,4 @@ f2fs-y += checkpoint.o gc.o data.o node.o segment.o recovery.o f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o +f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c new file mode 100644 index 000000000000..4e4a0691d32f --- /dev/null +++ b/fs/f2fs/trace.c @@ -0,0 +1,24 @@ +/* + * f2fs IO tracer + * + * Copyright (c) 2014 Motorola Mobility + * Copyright (c) 2014 Jaegeuk Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +#include "f2fs.h" +#include "trace.h" + +void f2fs_trace_pid(struct page *page) +{ +} + +void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush) +{ +} diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h new file mode 100644 index 000000000000..08856a978d6b --- /dev/null +++ b/fs/f2fs/trace.h @@ -0,0 +1,24 @@ +/* + * f2fs IO tracer + * + * Copyright (c) 2014 Motorola Mobility + * Copyright (c) 2014 Jaegeuk Kim + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __F2FS_TRACE_H__ +#define __F2FS_TRACE_H__ + +#ifdef CONFIG_F2FS_IO_TRACE +#include + +extern void f2fs_trace_pid(struct page *); +extern void f2fs_trace_ios(struct page *, struct f2fs_io_info *, int); +#else +#define f2fs_trace_pid(p) +#define f2fs_trace_ios(p, i, n) + +#endif +#endif /* __F2FS_TRACE_H__ */ -- cgit v1.2.3 From 0e689d036952a6018ed5f5c1aee7697f5c57cea1 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 17 Dec 2014 19:51:57 -0800 Subject: f2fs: add key functions for f2fs_io_tracer This patch adds two key functions to trace process ids and IOs. The basic idea is to 1. remain process ids, pids, in page->private. 2. show pids in IO traces. So, later we can retrieve process information according to IO traces. 
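Decoding the two trace_printk() formats below: the first line type records a process once (device, pid, comm), the second records one merged run of IOs; contiguous requests from the same pid, device, file type and rw flags only bump last_io.len rather than emitting a new line. The columns of the IO line are, in order (all hex):

	major:minor  pid  ----------------  file_type  rw_flags  start_blk_addr  merged_len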
Signed-off-by: Jaegeuk Kim --- fs/f2fs/trace.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/f2fs/trace.h | 18 ++++++++++++ 2 files changed, 104 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c index 4e4a0691d32f..19f5216b9b9c 100644 --- a/fs/f2fs/trace.c +++ b/fs/f2fs/trace.c @@ -15,10 +15,96 @@ #include "f2fs.h" #include "trace.h" +RADIX_TREE(pids, GFP_NOIO); +struct last_io_info last_io; + +static inline void __print_last_io(void) +{ + if (!last_io.len) + return; + + trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n", + last_io.major, last_io.minor, + last_io.pid, "----------------", + last_io.type, + last_io.fio.rw, last_io.fio.blk_addr, + last_io.len); + memset(&last_io, 0, sizeof(last_io)); +} + +static int __file_type(struct inode *inode, pid_t pid) +{ + if (f2fs_is_atomic_file(inode)) + return __ATOMIC_FILE; + else if (f2fs_is_volatile_file(inode)) + return __VOLATILE_FILE; + else if (S_ISDIR(inode->i_mode)) + return __DIR_FILE; + else if (inode->i_ino == F2FS_NODE_INO(F2FS_I_SB(inode))) + return __NODE_FILE; + else if (inode->i_ino == F2FS_META_INO(F2FS_I_SB(inode))) + return __META_FILE; + else if (pid) + return __NORMAL_FILE; + else + return __MISC_FILE; +} + void f2fs_trace_pid(struct page *page) { + struct inode *inode = page->mapping->host; + pid_t pid = task_pid_nr(current); + void *p; + + page->private = pid; + + p = radix_tree_lookup(&pids, pid); + if (p == current) + return; + if (p) + radix_tree_delete(&pids, pid); + + f2fs_radix_tree_insert(&pids, pid, current); + + trace_printk("%3x:%3x %4x %-16s\n", + MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), + pid, current->comm); + } void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush) { + struct inode *inode; + pid_t pid; + int major, minor; + + if (flush) { + __print_last_io(); + return; + } + + inode = page->mapping->host; + pid = page_private(page); + + major = MAJOR(inode->i_sb->s_dev); + minor = MINOR(inode->i_sb->s_dev); + + if (last_io.major == major && last_io.minor == minor && + last_io.pid == pid && + last_io.type == __file_type(inode, pid) && + last_io.fio.rw == fio->rw && + last_io.fio.blk_addr + last_io.len == fio->blk_addr) { + last_io.len++; + return; + } + + __print_last_io(); + + last_io.major = major; + last_io.minor = minor; + last_io.pid = pid; + last_io.type = __file_type(inode, pid); + last_io.fio = *fio; + last_io.len = 1; + return; } diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h index 08856a978d6b..aa6663be528c 100644 --- a/fs/f2fs/trace.h +++ b/fs/f2fs/trace.h @@ -14,6 +14,24 @@ #ifdef CONFIG_F2FS_IO_TRACE #include +enum file_type { + __NORMAL_FILE, + __DIR_FILE, + __NODE_FILE, + __META_FILE, + __ATOMIC_FILE, + __VOLATILE_FILE, + __MISC_FILE, +}; + +struct last_io_info { + int major, minor; + pid_t pid; + enum file_type type; + struct f2fs_io_info fio; + block_t len; +}; + extern void f2fs_trace_pid(struct page *); extern void f2fs_trace_ios(struct page *, struct f2fs_io_info *, int); #else -- cgit v1.2.3 From 9e4ded3f309eb5b5a9be0ca2acd26e5ea7f00914 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 17 Dec 2014 19:58:58 -0800 Subject: f2fs: activate f2fs_trace_pid This patch activates f2fs_trace_pid. 
Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 3 +++ fs/f2fs/node.c | 2 ++ fs/f2fs/segment.c | 2 ++ 3 files changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 0ac7c39605ed..3999933f7a74 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -20,6 +20,7 @@ #include "f2fs.h" #include "node.h" #include "segment.h" +#include "trace.h" #include static struct kmem_cache *ino_entry_slab; @@ -301,6 +302,7 @@ static int f2fs_set_meta_page_dirty(struct page *page) if (!PageDirty(page)) { __set_page_dirty_nobuffers(page); inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META); + f2fs_trace_pid(page); return 1; } return 0; @@ -712,6 +714,7 @@ void update_dirty_page(struct inode *inode, struct page *page) kmem_cache_free(inode_entry_slab, new); out: SetPagePrivate(page); + f2fs_trace_pid(page); } void add_dirty_dir_inode(struct inode *inode) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index adc35c978306..a7cb0db2e3e8 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -19,6 +19,7 @@ #include "f2fs.h" #include "node.h" #include "segment.h" +#include "trace.h" #include #define on_build_free_nids(nmi) mutex_is_locked(&nm_i->build_lock) @@ -1362,6 +1363,7 @@ static int f2fs_set_node_page_dirty(struct page *page) __set_page_dirty_nobuffers(page); inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); SetPagePrivate(page); + f2fs_trace_pid(page); return 1; } return 0; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index c726f86c2ea0..b688991514fb 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -20,6 +20,7 @@ #include "f2fs.h" #include "segment.h" #include "node.h" +#include "trace.h" #include #define __reverse_ffz(x) __reverse_ffs(~(x)) @@ -181,6 +182,7 @@ void register_inmem_page(struct inode *inode, struct page *page) int err; SetPagePrivate(page); + f2fs_trace_pid(page); new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); -- cgit v1.2.3 From db9f7c1a9561e998d6227bcc1c19bc4c1fbbca1b Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 17 Dec 2014 20:04:08 -0800 Subject: f2fs: activate f2fs_trace_ios This patch activates f2fs_trace_ios. 
Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 3 +++ fs/f2fs/file.c | 2 ++ fs/f2fs/super.c | 2 ++ 3 files changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index d86f8b1413f1..20aa3c355a0e 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -22,6 +22,7 @@ #include "f2fs.h" #include "node.h" #include "segment.h" +#include "trace.h" #include static void f2fs_read_end_io(struct bio *bio, int err) @@ -137,6 +138,7 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, struct bio *bio; trace_f2fs_submit_page_bio(page, fio->blk_addr, fio->rw); + f2fs_trace_ios(page, fio, 0); /* Allocate a new bio */ bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw)); @@ -185,6 +187,7 @@ alloc_new: } io->last_block_in_bio = fio->blk_addr; + f2fs_trace_ios(page, fio, 0); up_write(&io->io_rwsem); trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, fio->blk_addr); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 5139f90e92c1..f172ddc4ecdd 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -26,6 +26,7 @@ #include "segment.h" #include "xattr.h" #include "acl.h" +#include "trace.h" #include static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, @@ -265,6 +266,7 @@ flush_out: ret = f2fs_issue_flush(sbi); out: trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret); + f2fs_trace_ios(NULL, NULL, 1); return ret; } diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index f71421d70475..cf68e44570bc 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -30,6 +30,7 @@ #include "segment.h" #include "xattr.h" #include "gc.h" +#include "trace.h" #define CREATE_TRACE_POINTS #include @@ -493,6 +494,7 @@ int f2fs_sync_fs(struct super_block *sb, int sync) } else { f2fs_balance_fs(sbi); } + f2fs_trace_ios(NULL, NULL, 1); return 0; } -- cgit v1.2.3 From dd802406e396c22dfb5aa0d16196f04d515be49e Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 18 Dec 2014 19:32:36 -0800 Subject: f2fs: avoid double lock for cp_rwsem The __f2fs_add_link is covered by cp_rwsem all the time. This calls init_inode_metadata, which conducts some acl operations including memory allocation with GFP_KERNEL previously. But, under memory pressure, f2fs_write_data_page can be called, which also grabs cp_rwsem too. In this case, this incurs a deadlock pointed by Chao. Thread #1 Thread #2 down_read down_write down_read -> here down_read should wait forever. Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/acl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 1ccb26bc2a0b..7f12d28ad94c 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -62,7 +62,7 @@ static struct posix_acl *f2fs_acl_from_disk(const char *value, size_t size) if (count == 0) return NULL; - acl = posix_acl_alloc(count, GFP_KERNEL); + acl = posix_acl_alloc(count, GFP_NOFS); if (!acl) return ERR_PTR(-ENOMEM); @@ -116,7 +116,7 @@ static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size) int i; f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count * - sizeof(struct f2fs_acl_entry), GFP_KERNEL); + sizeof(struct f2fs_acl_entry), GFP_NOFS); if (!f2fs_acl) return ERR_PTR(-ENOMEM); -- cgit v1.2.3 From b9a2c252071d44d4a22082611db84272be1f3b49 Mon Sep 17 00:00:00 2001 From: Changman Lee Date: Wed, 24 Dec 2014 02:16:54 +0900 Subject: f2fs: add block count by in-place-update in stat info This patch adds block count by in-place-update in stat. 
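With CONFIG_F2FS_STAT_FS enabled, the new counter shows up in the debugfs status output next to the existing allocation statistics, along these lines (numbers are placeholders):

	IPU: 23 blocks
	SSR: 1024 blocks in 8 segments
	LFS: 51200 blocks in 100 segments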
Signed-off-by: Changman Lee Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/debug.c | 4 ++++ fs/f2fs/f2fs.h | 6 +++++- fs/f2fs/segment.c | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 91e8f699ab30..2b6422156ea0 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -79,6 +79,8 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->segment_count[i] = sbi->segment_count[i]; si->block_count[i] = sbi->block_count[i]; } + + si->inplace_count = atomic_read(&sbi->inplace_count); } /* @@ -277,6 +279,7 @@ static int stat_show(struct seq_file *s, void *v) for (j = 0; j < si->util_free; j++) seq_putc(s, '-'); seq_puts(s, "]\n\n"); + seq_printf(s, "IPU: %u blocks\n", si->inplace_count); seq_printf(s, "SSR: %u blocks in %u segments\n", si->block_count[SSR], si->segment_count[SSR]); seq_printf(s, "LFS: %u blocks in %u segments\n", @@ -331,6 +334,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi) atomic_set(&sbi->inline_inode, 0); atomic_set(&sbi->inline_dir, 0); + atomic_set(&sbi->inplace_count, 0); mutex_lock(&f2fs_stat_mutex); list_add_tail(&si->stat_list, &f2fs_stat_list); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 3f07b504f6d8..3226af06e050 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -592,6 +592,7 @@ struct f2fs_sb_info { struct f2fs_stat_info *stat_info; /* FS status information */ unsigned int segment_count[2]; /* # of allocated segments */ unsigned int block_count[2]; /* # of allocated blocks */ + atomic_t inplace_count; /* # of inplace update */ int total_hit_ext, read_hit_ext; /* extent cache hit ratio */ atomic_t inline_inode; /* # of inline_data inodes */ atomic_t inline_dir; /* # of inline_dentry inodes */ @@ -1523,6 +1524,7 @@ struct f2fs_stat_info { unsigned int segment_count[2]; unsigned int block_count[2]; + unsigned int inplace_count; unsigned base_mem, cache_mem; }; @@ -1562,7 +1564,8 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) ((sbi)->segment_count[(curseg)->alloc_type]++) #define stat_inc_block_count(sbi, curseg) \ ((sbi)->block_count[(curseg)->alloc_type]++) - +#define stat_inc_inplace_blocks(sbi) \ + (atomic_inc(&(sbi)->inplace_count)) #define stat_inc_seg_count(sbi, type) \ do { \ struct f2fs_stat_info *si = F2FS_STAT(sbi); \ @@ -1608,6 +1611,7 @@ void f2fs_destroy_root_stats(void); #define stat_dec_inline_dir(inode) #define stat_inc_seg_type(sbi, curseg) #define stat_inc_block_count(sbi, curseg) +#define stat_inc_inplace_blocks(sbi) #define stat_inc_seg_count(si, type) #define stat_inc_tot_blk_count(si, blks) #define stat_inc_data_blk_count(si, blks) diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index b688991514fb..f995a850dfe7 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1230,6 +1230,7 @@ void write_data_page(struct page *page, struct dnode_of_data *dn, void rewrite_data_page(struct page *page, struct f2fs_io_info *fio) { + stat_inc_inplace_blocks(F2FS_P_SB(page)); f2fs_submit_page_mbio(F2FS_P_SB(page), page, fio); } -- cgit v1.2.3 From 09eb483e895f36fd002e88c878e9578c359aa468 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 23 Dec 2014 16:26:31 -0800 Subject: f2fs: fix missing cold bit during recovery In do_recover_data, we find and update previous node pages after updating its new block addresses. After then, we call fill_node_footer without reset field, we erase its cold bit so that this new cold node block is written to wrong log area. This patch fixes not to miss its old flag. 
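A worked example of the preserved bits (assuming the cold bit is the lowest flag bit and OFFSET_BIT_SHIFT is 3, per the 0x07 mask below):

	/* old footer: cold bit set; node re-filed at offset 5 in its inode */
	unsigned int old_flag = 0x01;
	unsigned int flag = (5 << OFFSET_BIT_SHIFT) |		/* 0x28 */
			    (old_flag & OFFSET_BIT_MASK);	/* 0x01 */
	/* flag == 0x29: offset updated, cold bit kept.  Previously the low
	 * bits were dropped, so the rewritten node lost its cold mark and
	 * went to the wrong node log during recovery. */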
Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.h | 10 +++++++++- include/linux/f2fs_fs.h | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index fa6f95968b42..cac8a3d9acbd 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -212,11 +212,19 @@ static inline void fill_node_footer(struct page *page, nid_t nid, nid_t ino, unsigned int ofs, bool reset) { struct f2fs_node *rn = F2FS_NODE(page); + unsigned int old_flag = 0; + if (reset) memset(rn, 0, sizeof(*rn)); + else + old_flag = le32_to_cpu(rn->footer.flag); + rn->footer.nid = cpu_to_le32(nid); rn->footer.ino = cpu_to_le32(ino); - rn->footer.flag = cpu_to_le32(ofs << OFFSET_BIT_SHIFT); + + /* should remain old flag bits such as COLD_BIT_SHIFT */ + rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) | + (old_flag & OFFSET_BIT_MASK)); } static inline void copy_node_footer(struct page *dst, struct page *src) diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 87f14e90e984..e993b0bc9abf 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -224,6 +224,8 @@ enum { OFFSET_BIT_SHIFT }; +#define OFFSET_BIT_MASK (0x07) /* (0x01 << OFFSET_BIT_SHIFT) - 1 */ + struct node_footer { __le32 nid; /* node id */ __le32 ino; /* inode nunmber */ -- cgit v1.2.3 From 3e1c8f125eeea0f8111e2b9131162bfba32c6381 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 23 Dec 2014 16:35:21 +0800 Subject: f2fs: cleanup trace event of f2fs_submit_page_{m,}bio with DECLARE_EVENT_CLASS This patch adds missing parameter _type_ for trace_f2fs_submit_page_bio, then use DECLARE_EVENT_CLASS/DEFINE_EVENT_CONDITION pair to cleanup some trace event code related to f2fs_submit_page_{m,}bio. Additionally, after we remove redundant code, size of code can be reduced: text data bss dec hex filename 176787 8712 56 185555 2d4d3 f2fs.ko.org 174408 8648 56 183112 2cb48 f2fs.ko Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 4 +- include/trace/events/f2fs.h | 115 ++++++++++++++++++++------------------------ 2 files changed, 53 insertions(+), 66 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 20aa3c355a0e..7953bc279205 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -137,7 +137,7 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, { struct bio *bio; - trace_f2fs_submit_page_bio(page, fio->blk_addr, fio->rw); + trace_f2fs_submit_page_bio(page, fio->blk_addr, fio->rw, fio->type); f2fs_trace_ios(page, fio, 0); /* Allocate a new bio */ @@ -190,7 +190,7 @@ alloc_new: f2fs_trace_ios(page, fio, 0); up_write(&io->io_rwsem); - trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, fio->blk_addr); + trace_f2fs_submit_page_mbio(page, fio->blk_addr, fio->rw, fio->type); } /* diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index bbc4de9baef7..553311f14f6c 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -440,38 +440,6 @@ TRACE_EVENT(f2fs_truncate_partial_nodes, __entry->err) ); -TRACE_EVENT_CONDITION(f2fs_submit_page_bio, - - TP_PROTO(struct page *page, sector_t blkaddr, int type), - - TP_ARGS(page, blkaddr, type), - - TP_CONDITION(page->mapping), - - TP_STRUCT__entry( - __field(dev_t, dev) - __field(ino_t, ino) - __field(pgoff_t, index) - __field(sector_t, blkaddr) - __field(int, type) - ), - - TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->ino = page->mapping->host->i_ino; - __entry->index = page->index; - __entry->blkaddr = blkaddr; - 
__entry->type = type; - ), - - TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " - "blkaddr = 0x%llx, bio_type = %s%s", - show_dev_ino(__entry), - (unsigned long)__entry->index, - (unsigned long long)__entry->blkaddr, - show_bio_type(__entry->type)) -); - TRACE_EVENT(f2fs_get_data_block, TP_PROTO(struct inode *inode, sector_t iblock, struct buffer_head *bh, int ret), @@ -680,6 +648,57 @@ TRACE_EVENT(f2fs_reserve_new_block, __entry->ofs_in_node) ); +DECLARE_EVENT_CLASS(f2fs__submit_page_bio, + + TP_PROTO(struct page *page, block_t blkaddr, int rw, int type), + + TP_ARGS(page, blkaddr, rw, type), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(pgoff_t, index) + __field(block_t, blkaddr) + __field(int, rw) + __field(int, type) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->ino = page->mapping->host->i_ino; + __entry->index = page->index; + __entry->blkaddr = blkaddr; + __entry->rw = rw; + __entry->type = type; + ), + + TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " + "blkaddr = 0x%llx, rw = %s%s, type = %s", + show_dev_ino(__entry), + (unsigned long)__entry->index, + (unsigned long long)__entry->blkaddr, + show_bio_type(__entry->rw), + show_block_type(__entry->type)) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio, + + TP_PROTO(struct page *page, block_t blkaddr, int rw, int type), + + TP_ARGS(page, blkaddr, rw, type), + + TP_CONDITION(page->mapping) +); + +DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio, + + TP_PROTO(struct page *page, block_t blkaddr, int rw, int type), + + TP_ARGS(page, blkaddr, rw, type), + + TP_CONDITION(page->mapping) +); + DECLARE_EVENT_CLASS(f2fs__submit_bio, TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), @@ -916,38 +935,6 @@ TRACE_EVENT(f2fs_writepages, __entry->for_sync) ); -TRACE_EVENT(f2fs_submit_page_mbio, - - TP_PROTO(struct page *page, int rw, int type, block_t blk_addr), - - TP_ARGS(page, rw, type, blk_addr), - - TP_STRUCT__entry( - __field(dev_t, dev) - __field(ino_t, ino) - __field(int, rw) - __field(int, type) - __field(pgoff_t, index) - __field(block_t, block) - ), - - TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->ino = page->mapping->host->i_ino; - __entry->rw = rw; - __entry->type = type; - __entry->index = page->index; - __entry->block = blk_addr; - ), - - TP_printk("dev = (%d,%d), ino = %lu, %s%s, %s, index = %lu, blkaddr = 0x%llx", - show_dev_ino(__entry), - show_bio_type(__entry->rw), - show_block_type(__entry->type), - (unsigned long)__entry->index, - (unsigned long long)__entry->block) -); - TRACE_EVENT(f2fs_write_checkpoint, TP_PROTO(struct super_block *sb, int reason, char *msg), -- cgit v1.2.3 From 2ace38e00e54f5c722d8c5eba36d1172548a3466 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 24 Dec 2014 16:08:14 +0800 Subject: f2fs: cleanup parameters for trace_f2fs_submit_{read_,write_,page_,page_m}bio with fio Cleanup parameters for trace_f2fs_submit_{read_,write_,page_,page_m}bio with fio as one parameter. 
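At the call sites this amounts to collapsing the argument list into the fio descriptor that is already in hand, roughly:

        /* before */
        trace_f2fs_submit_page_bio(page, fio->blk_addr, fio->rw, fio->type);
        /* after  */
        trace_f2fs_submit_page_bio(page, fio);

with the shared event class reading blk_addr, rw and type out of fio in TP_fast_assign instead, as the hunks below show.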
Suggested-by: Jaegeuk Kim Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 10 ++++------ include/trace/events/f2fs.h | 37 ++++++++++++++++++++----------------- 2 files changed, 24 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 7953bc279205..6308435028f6 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -96,11 +96,9 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) return; if (is_read_io(fio->rw)) - trace_f2fs_submit_read_bio(io->sbi->sb, fio->rw, - fio->type, io->bio); + trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); else - trace_f2fs_submit_write_bio(io->sbi->sb, fio->rw, - fio->type, io->bio); + trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); submit_bio(fio->rw, io->bio); io->bio = NULL; @@ -137,7 +135,7 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page, { struct bio *bio; - trace_f2fs_submit_page_bio(page, fio->blk_addr, fio->rw, fio->type); + trace_f2fs_submit_page_bio(page, fio); f2fs_trace_ios(page, fio, 0); /* Allocate a new bio */ @@ -190,7 +188,7 @@ alloc_new: f2fs_trace_ios(page, fio, 0); up_write(&io->io_rwsem); - trace_f2fs_submit_page_mbio(page, fio->blk_addr, fio->rw, fio->type); + trace_f2fs_submit_page_mbio(page, fio); } /* diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 553311f14f6c..13992f3c1445 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -650,9 +650,9 @@ TRACE_EVENT(f2fs_reserve_new_block, DECLARE_EVENT_CLASS(f2fs__submit_page_bio, - TP_PROTO(struct page *page, block_t blkaddr, int rw, int type), + TP_PROTO(struct page *page, struct f2fs_io_info *fio), - TP_ARGS(page, blkaddr, rw, type), + TP_ARGS(page, fio), TP_STRUCT__entry( __field(dev_t, dev) @@ -667,9 +667,9 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, __entry->dev = page->mapping->host->i_sb->s_dev; __entry->ino = page->mapping->host->i_ino; __entry->index = page->index; - __entry->blkaddr = blkaddr; - __entry->rw = rw; - __entry->type = type; + __entry->blkaddr = fio->blk_addr; + __entry->rw = fio->rw; + __entry->type = fio->type; ), TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " @@ -683,27 +683,28 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio, DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio, - TP_PROTO(struct page *page, block_t blkaddr, int rw, int type), + TP_PROTO(struct page *page, struct f2fs_io_info *fio), - TP_ARGS(page, blkaddr, rw, type), + TP_ARGS(page, fio), TP_CONDITION(page->mapping) ); DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_mbio, - TP_PROTO(struct page *page, block_t blkaddr, int rw, int type), + TP_PROTO(struct page *page, struct f2fs_io_info *fio), - TP_ARGS(page, blkaddr, rw, type), + TP_ARGS(page, fio), TP_CONDITION(page->mapping) ); DECLARE_EVENT_CLASS(f2fs__submit_bio, - TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, + struct bio *bio), - TP_ARGS(sb, rw, type, bio), + TP_ARGS(sb, fio, bio), TP_STRUCT__entry( __field(dev_t, dev) @@ -715,8 +716,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, TP_fast_assign( __entry->dev = sb->s_dev; - __entry->rw = rw; - __entry->type = type; + __entry->rw = fio->rw; + __entry->type = fio->type; __entry->sector = bio->bi_iter.bi_sector; __entry->size = bio->bi_iter.bi_size; ), @@ -731,18 +732,20 @@ DECLARE_EVENT_CLASS(f2fs__submit_bio, DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_write_bio, - TP_PROTO(struct super_block *sb, int rw, int 
type, struct bio *bio), + TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, + struct bio *bio), - TP_ARGS(sb, rw, type, bio), + TP_ARGS(sb, fio, bio), TP_CONDITION(bio) ); DEFINE_EVENT_CONDITION(f2fs__submit_bio, f2fs_submit_read_bio, - TP_PROTO(struct super_block *sb, int rw, int type, struct bio *bio), + TP_PROTO(struct super_block *sb, struct f2fs_io_info *fio, + struct bio *bio), - TP_ARGS(sb, rw, type, bio), + TP_ARGS(sb, fio, bio), TP_CONDITION(bio) ); -- cgit v1.2.3 From 062920734c0de9dd4f0a9bdc36fdcabc2751eb34 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 29 Dec 2014 15:56:18 +0800 Subject: f2fs: reuse inode_entry_slab in gc procedure for using slab more effectively There are two slab cache inode_entry_slab and winode_slab using the same structure as below: struct dir_inode_entry { struct list_head list; /* list head */ struct inode *inode; /* vfs inode pointer */ }; struct inode_entry { struct list_head list; struct inode *inode; }; It's a little waste that the two cache can not share their memory space for each other. So in this patch we remove one redundant winode_slab slab cache, then use more universal name struct inode_entry as remaining data structure name of slab, finally we reuse the inode_entry_slab to store dirty dir item and gc item for more effective. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 18 +++++++++--------- fs/f2fs/debug.c | 2 +- fs/f2fs/f2fs.h | 14 +++++++++----- fs/f2fs/gc.c | 20 ++------------------ fs/f2fs/gc.h | 7 ++----- fs/f2fs/super.c | 8 +------- 6 files changed, 24 insertions(+), 45 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 3999933f7a74..9f5317c9ad72 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -24,7 +24,7 @@ #include static struct kmem_cache *ino_entry_slab; -static struct kmem_cache *inode_entry_slab; +struct kmem_cache *inode_entry_slab; /* * We guarantee no failure on the returned page. 
@@ -673,7 +673,7 @@ fail_no_cp: return -EINVAL; } -static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new) +static int __add_dirty_inode(struct inode *inode, struct inode_entry *new) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -690,7 +690,7 @@ static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new) void update_dirty_page(struct inode *inode, struct page *page) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct dir_inode_entry *new; + struct inode_entry *new; int ret = 0; if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode)) @@ -720,7 +720,7 @@ out: void add_dirty_dir_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct dir_inode_entry *new = + struct inode_entry *new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS); int ret = 0; @@ -738,7 +738,7 @@ void add_dirty_dir_inode(struct inode *inode) void remove_dirty_dir_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct dir_inode_entry *entry; + struct inode_entry *entry; if (!S_ISDIR(inode->i_mode)) return; @@ -768,7 +768,7 @@ void remove_dirty_dir_inode(struct inode *inode) void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi) { struct list_head *head; - struct dir_inode_entry *entry; + struct inode_entry *entry; struct inode *inode; retry: if (unlikely(f2fs_cp_error(sbi))) @@ -781,7 +781,7 @@ retry: spin_unlock(&sbi->dir_inode_lock); return; } - entry = list_entry(head->next, struct dir_inode_entry, list); + entry = list_entry(head->next, struct inode_entry, list); inode = igrab(entry->inode); spin_unlock(&sbi->dir_inode_lock); if (inode) { @@ -1107,8 +1107,8 @@ int __init create_checkpoint_caches(void) sizeof(struct ino_entry)); if (!ino_entry_slab) return -ENOMEM; - inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry", - sizeof(struct dir_inode_entry)); + inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry", + sizeof(struct inode_entry)); if (!inode_entry_slab) { kmem_cache_destroy(ino_entry_slab); return -ENOMEM; diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 2b6422156ea0..dd7835b27498 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -172,7 +172,7 @@ get_cache: si->cache_mem += npages << PAGE_CACHE_SHIFT; npages = META_MAPPING(sbi)->nrpages; si->cache_mem += npages << PAGE_CACHE_SHIFT; - si->cache_mem += sbi->n_dirty_dirs * sizeof(struct dir_inode_entry); + si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry); for (i = 0; i <= UPDATE_INO; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 3226af06e050..c48847e06dae 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -136,8 +136,14 @@ struct ino_entry { nid_t ino; /* inode number */ }; -/* for the list of directory inodes */ -struct dir_inode_entry { +/* + * for the list of directory inodes or gc inodes. + * NOTE: there are two slab users for this structure, if we add/modify/delete + * fields in structure for one of slab users, it may affect fields or size of + * other one, in this condition, it's better to split both of slab and related + * data structure. 
+ */ +struct inode_entry { struct list_head list; /* list head */ struct inode *inode; /* vfs inode pointer */ }; @@ -297,7 +303,7 @@ struct f2fs_inode_info { nid_t i_xattr_nid; /* node id that contains xattrs */ unsigned long long xattr_ver; /* cp version of xattr modification */ struct extent_info ext; /* in-memory extent cache entry */ - struct dir_inode_entry *dirty_dir; /* the pointer of dirty dir */ + struct inode_entry *dirty_dir; /* the pointer of dirty dir */ struct radix_tree_root inmem_root; /* radix tree for inmem pages */ struct list_head inmem_pages; /* inmemory pages managed by f2fs */ @@ -1487,8 +1493,6 @@ void stop_gc_thread(struct f2fs_sb_info *); block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *); int f2fs_gc(struct f2fs_sb_info *); void build_gc_manager(struct f2fs_sb_info *); -int __init create_gc_caches(void); -void destroy_gc_caches(void); /* * recovery.c diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index eec0933a4819..40887d3c9d01 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -24,8 +24,6 @@ #include "gc.h" #include -static struct kmem_cache *winode_slab; - static int gc_thread_func(void *data) { struct f2fs_sb_info *sbi = data; @@ -356,7 +354,7 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode) iput(inode); return; } - new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS); + new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS); new_ie->inode = inode; retry: if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) { @@ -373,7 +371,7 @@ static void put_gc_inode(struct gc_inode_list *gc_list) radix_tree_delete(&gc_list->iroot, ie->inode->i_ino); iput(ie->inode); list_del(&ie->list); - kmem_cache_free(winode_slab, ie); + kmem_cache_free(inode_entry_slab, ie); } } @@ -750,17 +748,3 @@ void build_gc_manager(struct f2fs_sb_info *sbi) { DIRTY_I(sbi)->v_ops = &default_v_ops; } - -int __init create_gc_caches(void) -{ - winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", - sizeof(struct inode_entry)); - if (!winode_slab) - return -ENOMEM; - return 0; -} - -void destroy_gc_caches(void) -{ - kmem_cache_destroy(winode_slab); -} diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 6ff7ad38463e..524543a6a34a 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -35,16 +35,13 @@ struct f2fs_gc_kthread { unsigned int gc_idle; }; -struct inode_entry { - struct list_head list; - struct inode *inode; -}; - struct gc_inode_list { struct list_head ilist; struct radix_tree_root iroot; }; +extern struct kmem_cache *inode_entry_slab; + /* * inline functions */ diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index cf68e44570bc..0ae6a2fbdb2a 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1231,12 +1231,9 @@ static int __init init_f2fs_fs(void) err = create_segment_manager_caches(); if (err) goto free_node_manager_caches; - err = create_gc_caches(); - if (err) - goto free_segment_manager_caches; err = create_checkpoint_caches(); if (err) - goto free_gc_caches; + goto free_segment_manager_caches; f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj); if (!f2fs_kset) { err = -ENOMEM; @@ -1253,8 +1250,6 @@ free_kset: kset_unregister(f2fs_kset); free_checkpoint_caches: destroy_checkpoint_caches(); -free_gc_caches: - destroy_gc_caches(); free_segment_manager_caches: destroy_segment_manager_caches(); free_node_manager_caches: @@ -1271,7 +1266,6 @@ static void __exit exit_f2fs_fs(void) f2fs_destroy_root_stats(); unregister_filesystem(&f2fs_fs_type); destroy_checkpoint_caches(); - destroy_gc_caches(); destroy_segment_manager_caches(); 
destroy_node_manager_caches(); destroy_inodecache(); -- cgit v1.2.3 From e1509cf294cc670cda1fedd430f0ff175c42b591 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 30 Dec 2014 22:57:55 -0800 Subject: f2fs: clean up to remove parameter This patch uses dn->data_blkaddr as a parameter for the destination block address. Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 27 ++++++++++++++------------- fs/f2fs/f2fs.h | 2 +- fs/f2fs/file.c | 3 ++- fs/f2fs/inline.c | 2 +- fs/f2fs/recovery.c | 3 ++- fs/f2fs/segment.c | 1 + 6 files changed, 21 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 6308435028f6..2c0cb6617918 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -197,7 +197,7 @@ alloc_new: * ->node_page * update block addresses in the node page */ -static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr) +static void __set_data_blkaddr(struct dnode_of_data *dn) { struct f2fs_node *rn; __le32 *addr_array; @@ -210,7 +210,7 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr) /* Get physical address of data block */ addr_array = blkaddr_in_node(rn); - addr_array[ofs_in_node] = cpu_to_le32(new_addr); + addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr); set_page_dirty(node_page); } @@ -225,8 +225,8 @@ int reserve_new_block(struct dnode_of_data *dn) trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node); - __set_data_blkaddr(dn, NEW_ADDR); dn->data_blkaddr = NEW_ADDR; + __set_data_blkaddr(dn); mark_inode_dirty(dn->inode); sync_inode_page(dn); return 0; @@ -291,19 +291,19 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, return 0; } -void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn) +void update_extent_cache(struct dnode_of_data *dn) { struct f2fs_inode_info *fi = F2FS_I(dn->inode); pgoff_t fofs, start_fofs, end_fofs; block_t start_blkaddr, end_blkaddr; int need_update = true; - f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR); + f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR); fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + dn->ofs_in_node; /* Update the page address in the parent node */ - __set_data_blkaddr(dn, blk_addr); + __set_data_blkaddr(dn); if (is_inode_flag_set(fi, FI_NO_EXTENT)) return; @@ -321,16 +321,16 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn) /* Initial extent */ if (fi->ext.len == 0) { - if (blk_addr != NULL_ADDR) { + if (dn->data_blkaddr != NULL_ADDR) { fi->ext.fofs = fofs; - fi->ext.blk_addr = blk_addr; + fi->ext.blk_addr = dn->data_blkaddr; fi->ext.len = 1; } goto end_update; } /* Front merge */ - if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) { + if (fofs == start_fofs - 1 && dn->data_blkaddr == start_blkaddr - 1) { fi->ext.fofs--; fi->ext.blk_addr--; fi->ext.len++; @@ -338,7 +338,7 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn) } /* Back merge */ - if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) { + if (fofs == end_fofs + 1 && dn->data_blkaddr == end_blkaddr + 1) { fi->ext.len++; goto end_update; } @@ -572,8 +572,8 @@ static int __allocate_data_block(struct dnode_of_data *dn) if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) return -ENOSPC; - __set_data_blkaddr(dn, NEW_ADDR); dn->data_blkaddr = NEW_ADDR; + __set_data_blkaddr(dn); get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); @@ -584,7 +584,8 @@ static int __allocate_data_block(struct dnode_of_data *dn) /* direct IO 
doesn't use extent cache to maximize the performance */ set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT); - update_extent_cache(new_blkaddr, dn); + dn->data_blkaddr = new_blkaddr; + update_extent_cache(dn); clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT); /* update i_size */ @@ -784,7 +785,7 @@ int do_write_data_page(struct page *page, struct f2fs_io_info *fio) set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE); } else { write_data_page(page, &dn, fio); - update_extent_cache(fio->blk_addr, &dn); + update_extent_cache(&dn); set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); } out_writepage: diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index c48847e06dae..37e935a362cd 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1478,7 +1478,7 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, struct f2fs_io_info *); int reserve_new_block(struct dnode_of_data *); int f2fs_reserve_block(struct dnode_of_data *, pgoff_t); -void update_extent_cache(block_t, struct dnode_of_data *); +void update_extent_cache(struct dnode_of_data *); struct page *find_data_page(struct inode *, pgoff_t, bool); struct page *get_lock_data_page(struct inode *, pgoff_t); struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index f172ddc4ecdd..5df336757d6c 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -429,7 +429,8 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count) if (blkaddr == NULL_ADDR) continue; - update_extent_cache(NULL_ADDR, dn); + dn->data_blkaddr = NULL_ADDR; + update_extent_cache(dn); invalidate_blocks(sbi, blkaddr); nr_free++; } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 0c3f3f9b9f88..fa2aa2f7bb96 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -116,7 +116,7 @@ no_update: set_page_writeback(page); fio.blk_addr = dn->data_blkaddr; write_data_page(page, dn, &fio); - update_extent_cache(fio.blk_addr, dn); + update_extent_cache(dn); f2fs_wait_on_page_writeback(page, DATA); if (dirty) inode_dec_dirty_pages(dn->inode); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 9160a37e1c7a..c4211a5862df 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -396,7 +396,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, /* write dummy data page */ recover_data_page(sbi, NULL, &sum, src, dest); - update_extent_cache(dest, &dn); + dn.data_blkaddr = dest; + update_extent_cache(&dn); recovered++; } dn.ofs_in_node++; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index f995a850dfe7..8144a30b1567 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1226,6 +1226,7 @@ void write_data_page(struct page *page, struct dnode_of_data *dn, get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); do_write_page(sbi, page, &sum, fio); + dn->data_blkaddr = fio->blk_addr; } void rewrite_data_page(struct page *page, struct f2fs_io_info *fio) -- cgit v1.2.3 From 3547ea961dd66a474c6f709c4f5e8a2472289df9 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 30 Dec 2014 23:08:26 -0800 Subject: f2fs: avoid potential unnecessary codes This patch relocates some operations to avoid unnecessary execution. 
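Both hunks below apply the same idea: do not pay for work on paths that exit early. update_extent_cache() now computes fofs only after the FI_NO_EXTENT bail-out, and get_node_info() zeroes its on-stack f2fs_nat_entry only once the nat-cache lookup has missed. A minimal sketch of the pattern (hypothetical names, not f2fs code):

        static int lookup_info(u32 key, struct info *out)
        {
                if (cache_lookup(key, out))     /* hypothetical fast path */
                        return 0;
                /* only the miss path needs the zeroed scratch area */
                memset(out, 0, sizeof(*out));
                return read_info_from_disk(key, out);   /* hypothetical */
        }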
Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 5 +++-- fs/f2fs/node.c | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 2c0cb6617918..155885bf714c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -299,8 +299,6 @@ void update_extent_cache(struct dnode_of_data *dn) int need_update = true; f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR); - fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + - dn->ofs_in_node; /* Update the page address in the parent node */ __set_data_blkaddr(dn); @@ -308,6 +306,9 @@ void update_extent_cache(struct dnode_of_data *dn) if (is_inode_flag_set(fi, FI_NO_EXTENT)) return; + fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + + dn->ofs_in_node; + write_lock(&fi->ext.ext_lock); start_fofs = fi->ext.fofs; diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index a7cb0db2e3e8..9bed0161efee 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -348,7 +348,6 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) struct nat_entry *e; int i; - memset(&ne, 0, sizeof(struct f2fs_nat_entry)); ni->nid = nid; /* Check nat cache */ @@ -363,6 +362,8 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) if (e) return; + memset(&ne, 0, sizeof(struct f2fs_nat_entry)); + /* Check current segment summary */ mutex_lock(&curseg->curseg_mutex); i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0); -- cgit v1.2.3 From 41ef94b35c8df8e01780b4a3362246c51f3aa79b Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 30 Dec 2014 23:12:11 -0800 Subject: f2fs: remove uncovered code path This patch removes unnecessary function calls. Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 155885bf714c..3e0f5f303c97 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -563,31 +563,22 @@ static int __allocate_data_block(struct dnode_of_data *dn) struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct f2fs_inode_info *fi = F2FS_I(dn->inode); struct f2fs_summary sum; - block_t new_blkaddr; struct node_info ni; pgoff_t fofs; - int type; if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) return -EPERM; if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) return -ENOSPC; - dn->data_blkaddr = NEW_ADDR; - __set_data_blkaddr(dn); - get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); - type = CURSEG_WARM_DATA; - - allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type); + allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, + CURSEG_WARM_DATA); /* direct IO doesn't use extent cache to maximize the performance */ - set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT); - dn->data_blkaddr = new_blkaddr; - update_extent_cache(dn); - clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT); + __set_data_blkaddr(dn); /* update i_size */ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) + @@ -595,7 +586,6 @@ static int __allocate_data_block(struct dnode_of_data *dn) if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT)) i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT)); - dn->data_blkaddr = new_blkaddr; return 0; } -- cgit v1.2.3 From 38aa0889b2504bbe68e47f51cf73bf7f0a7246bd Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 5 Jan 2015 16:02:20 -0800 Subject: f2fs: align direct_io'ed data to section This patch aligns the start block 
address of a file for direct io to the f2fs's section size. Some flash devices manage an over 4KB-sized page as a write unit, and if the direct_io'ed data are written but not aligned to that unit, the performance can be degraded due to the partial page copies. Thus, since f2fs has a section that is well aligned to FTL units, we can align the block address to the section size so that f2fs avoids this misalignment. Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 7 +++++-- fs/f2fs/f2fs.h | 3 ++- fs/f2fs/segment.c | 27 +++++++++++++++++++-------- 3 files changed, 26 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 3e0f5f303c97..b48b355104c3 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -564,6 +564,7 @@ static int __allocate_data_block(struct dnode_of_data *dn) struct f2fs_inode_info *fi = F2FS_I(dn->inode); struct f2fs_summary sum; struct node_info ni; + int seg = CURSEG_WARM_DATA; pgoff_t fofs; if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) @@ -574,8 +575,10 @@ static int __allocate_data_block(struct dnode_of_data *dn) get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); - allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, - CURSEG_WARM_DATA); + if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page) + seg = CURSEG_DIRECT_IO; + + allocate_data_block(sbi, NULL, NULL_ADDR, &dn->data_blkaddr, &sum, seg); /* direct IO doesn't use extent cache to maximize the performance */ __set_data_blkaddr(dn); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 37e935a362cd..ba30218770da 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -406,7 +406,8 @@ enum { CURSEG_HOT_NODE, /* direct node blocks of directory files */ CURSEG_WARM_NODE, /* direct node blocks of normal files */ CURSEG_COLD_NODE, /* indirect node blocks */ - NO_CHECK_TYPE + NO_CHECK_TYPE, + CURSEG_DIRECT_IO, /* to use for the direct IO path */ }; struct flush_cmd { diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 8144a30b1567..31c4e5702c7d 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1024,18 +1024,22 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, stat_inc_seg_type(sbi, curseg); } +static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type) +{ + struct curseg_info *curseg = CURSEG_I(sbi, type); + unsigned int old_segno; + + old_segno = curseg->segno; + SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); + locate_dirty_segment(sbi, old_segno); +} + void allocate_new_segments(struct f2fs_sb_info *sbi) { - struct curseg_info *curseg; - unsigned int old_curseg; int i; - for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { - curseg = CURSEG_I(sbi, i); - old_curseg = curseg->segno; - SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); - locate_dirty_segment(sbi, old_curseg); - } + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) + __allocate_new_segments(sbi, i); } static const struct segment_allocation default_salloc_ops = { @@ -1148,11 +1152,18 @@ void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg; + bool direct_io = (type == CURSEG_DIRECT_IO); + + type = direct_io ? 
CURSEG_WARM_DATA : type; curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); + /* direct_io'ed data is aligned to the segment for better performance */ + if (direct_io && curseg->next_blkoff) + __allocate_new_segments(sbi, type); + *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); /* -- cgit v1.2.3 From 9e5ba77fdbe3fea86b1e36903dd696b24b1c4607 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Tue, 6 Jan 2015 14:28:43 +0800 Subject: f2fs: get rid of kzalloc in __recover_inline_status We use kzalloc to allocate memory in __recover_inline_status, and use this all-zero memory to check the inline date content of inode page by comparing them. This is low effective and not needed, let's check inline date content directly. Signed-off-by: Chao Yu [Jaegeuk Kim: make the code more neat] Signed-off-by: Jaegeuk Kim --- fs/f2fs/inode.c | 35 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 3a8958d1684f..2d002e3738a7 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -67,29 +67,23 @@ static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri) } } -static int __recover_inline_status(struct inode *inode, struct page *ipage) +static void __recover_inline_status(struct inode *inode, struct page *ipage) { void *inline_data = inline_data_addr(ipage); - struct f2fs_inode *ri; - void *zbuf; + __le32 *start = inline_data; + __le32 *end = start + MAX_INLINE_DATA / sizeof(__le32); - zbuf = kzalloc(MAX_INLINE_DATA, GFP_NOFS); - if (!zbuf) - return -ENOMEM; + while (start < end) { + if (*start++) { + f2fs_wait_on_page_writeback(ipage, NODE); - if (!memcmp(zbuf, inline_data, MAX_INLINE_DATA)) { - kfree(zbuf); - return 0; + set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); + set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage)); + set_page_dirty(ipage); + return; + } } - kfree(zbuf); - - f2fs_wait_on_page_writeback(ipage, NODE); - set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); - - ri = F2FS_INODE(ipage); - set_raw_inline(F2FS_I(inode), ri); - set_page_dirty(ipage); - return 0; + return; } static int do_read_inode(struct inode *inode) @@ -98,7 +92,6 @@ static int do_read_inode(struct inode *inode) struct f2fs_inode_info *fi = F2FS_I(inode); struct page *node_page; struct f2fs_inode *ri; - int err = 0; /* Check if ino is within scope */ if (check_nid_range(sbi, inode->i_ino)) { @@ -142,7 +135,7 @@ static int do_read_inode(struct inode *inode) /* check data exist */ if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode)) - err = __recover_inline_status(inode, node_page); + __recover_inline_status(inode, node_page); /* get rdev by using inline_info */ __get_inode_rdev(inode, ri); @@ -152,7 +145,7 @@ static int do_read_inode(struct inode *inode) stat_inc_inline_inode(inode); stat_inc_inline_dir(inode); - return err; + return 0; } struct inode *f2fs_iget(struct super_block *sb, unsigned long ino) -- cgit v1.2.3 From df1991391f9301efbd5e46fb776bf5103d4c59f8 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 6 Jan 2015 16:00:03 -0800 Subject: f2fs: fix wrong unlock_page call This patch removes wrongly called unlock_page. 
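The reasoning, presumably, is that the function's common exit path already unlocks the page, so unlocking it again on the checkpoint-error path would drop the lock twice; schematically (a sketch of the assumed structure, not the literal function):

        if (unlikely(f2fs_cp_error(sbi))) {
                SetPageError(page);
                unlock_page(page);      /* removed: the out: path unlocks too */
                goto out;
        }
        ...
out:
        /* normal completion/redirty handling, which unlocks the page */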
Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b48b355104c3..a7b905cb05f8 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -837,7 +837,6 @@ write: /* we should bypass data pages to proceed the kworkder jobs */ if (unlikely(f2fs_cp_error(sbi))) { SetPageError(page); - unlock_page(page); goto out; } -- cgit v1.2.3 From 7aed0d45376e531330cf20af581a76edc0347d06 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 7 Jan 2015 10:47:57 -0800 Subject: f2fs: free radix_tree_nodes used by nat_set entries In the normal case, the radix_tree_nodes are freed successfully. But, when cp_error was detected, we should destroy them forcefully. Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 21 +++++++++++++++++++-- fs/f2fs/node.h | 1 + 2 files changed, 20 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 9bed0161efee..d7c143626705 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1894,7 +1894,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); struct f2fs_summary_block *sum = curseg->sum_blk; - struct nat_entry_set *setvec[NATVEC_SIZE]; + struct nat_entry_set *setvec[SETVEC_SIZE]; struct nat_entry_set *set, *tmp; unsigned int found; nid_t set_idx = 0; @@ -1911,7 +1911,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi) remove_nats_in_journal(sbi); while ((found = __gang_lookup_nat_set(nm_i, - set_idx, NATVEC_SIZE, setvec))) { + set_idx, SETVEC_SIZE, setvec))) { unsigned idx; set_idx = setvec[found - 1]->set + 1; for (idx = 0; idx < found; idx++) @@ -1991,6 +1991,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i, *next_i; struct nat_entry *natvec[NATVEC_SIZE]; + struct nat_entry_set *setvec[SETVEC_SIZE]; nid_t nid = 0; unsigned int found; @@ -2015,11 +2016,27 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) while ((found = __gang_lookup_nat_cache(nm_i, nid, NATVEC_SIZE, natvec))) { unsigned idx; + nid = nat_get_nid(natvec[found - 1]) + 1; for (idx = 0; idx < found; idx++) __del_from_nat_cache(nm_i, natvec[idx]); } f2fs_bug_on(sbi, nm_i->nat_cnt); + + /* destroy nat set cache */ + nid = 0; + while ((found = __gang_lookup_nat_set(nm_i, + nid, SETVEC_SIZE, setvec))) { + unsigned idx; + + nid = setvec[found - 1]->set + 1; + for (idx = 0; idx < found; idx++) { + /* entry_cnt is not zero, when cp_error was occurred */ + f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); + radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); + kmem_cache_free(nat_entry_set_slab, setvec[idx]); + } + } up_write(&nm_i->nat_tree_lock); kfree(nm_i->nat_bitmap); diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h index cac8a3d9acbd..f405bbf2435a 100644 --- a/fs/f2fs/node.h +++ b/fs/f2fs/node.h @@ -25,6 +25,7 @@ /* vector size for gang look-up from nat cache that consists of radix tree */ #define NATVEC_SIZE 64 +#define SETVEC_SIZE 32 /* return value for read_node_page */ #define LOCKED_PAGE 1 -- cgit v1.2.3 From dd4e4b59b1a4a7e69083e7dc2abbedb5186fd850 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 7 Jan 2015 11:09:37 -0800 Subject: f2fs: add nat/sit entries into status This patch adds NAT/SIT entry informations. 
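Note that si->sits now carries the total number of SIT entries (MAIN_SEGS) while the previous dirty count moves to dirty_sits, so both families are reported as dirty/total. With the format string added below, the status lines come out roughly as (numbers invented):

         - NATs:        12/     2048
         - SITs:         3/     1024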
Signed-off-by: Jaegeuk Kim --- fs/f2fs/debug.c | 8 +++++--- fs/f2fs/f2fs.h | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index dd7835b27498..1f0fb58072e7 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -57,7 +57,9 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->node_pages = NODE_MAPPING(sbi)->nrpages; si->meta_pages = META_MAPPING(sbi)->nrpages; si->nats = NM_I(sbi)->nat_cnt; - si->sits = SIT_I(sbi)->dirty_sentries; + si->dirty_nats = NM_I(sbi)->dirty_nat_cnt; + si->sits = MAIN_SEGS(sbi); + si->dirty_sits = SIT_I(sbi)->dirty_sentries; si->fnids = NM_I(sbi)->fcnt; si->bg_gc = sbi->bg_gc; si->util_free = (int)(free_user_blocks(sbi) >> sbi->log_blocks_per_seg) @@ -260,8 +262,8 @@ static int stat_show(struct seq_file *s, void *v) si->ndirty_dent, si->ndirty_dirs); seq_printf(s, " - meta: %4d in %4d\n", si->ndirty_meta, si->meta_pages); - seq_printf(s, " - NATs: %9d\n - SITs: %9d\n", - si->nats, si->sits); + seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n", + si->dirty_nats, si->nats, si->dirty_sits, si->sits); seq_printf(s, " - free_nids: %9d\n", si->fnids); seq_puts(s, "\nDistribution of User Blocks:"); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index ba30218770da..209532cbee43 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1512,7 +1512,7 @@ struct f2fs_stat_info { int main_area_segs, main_area_sections, main_area_zones; int hit_ext, total_ext; int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta; - int nats, sits, fnids; + int nats, dirty_nats, sits, dirty_sits, fnids; int total_count, utilization; int bg_gc, inline_inode, inline_dir, inmem_pages; unsigned int valid_count, valid_node_count, valid_inode_count; -- cgit v1.2.3 From c05086506f2204bcf02c9d3eb7950cdf0d8a3bef Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 7 Jan 2015 14:07:36 -0800 Subject: f2fs: add spin_lock to cover radix operations in IO tracer This patch adds spin_lock to cover radix tree operations in IO tracer. 
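The locking follows the usual radix-tree recipe: do the sleeping radix_tree_preload(GFP_NOFS) outside the lock, switch the tree's own allocations to GFP_ATOMIC, and keep the lookup/delete/update of the pids tree under the new spinlock; in outline:

        if (radix_tree_preload(GFP_NOFS))       /* may sleep, so done unlocked */
                return;
        spin_lock(&pids_lock);
        /* lookup, delete and any (re)insert on &pids happen here */
        spin_unlock(&pids_lock);
        radix_tree_preload_end();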
Signed-off-by: Jaegeuk Kim --- fs/f2fs/super.c | 2 ++ fs/f2fs/trace.c | 18 +++++++++++++++--- fs/f2fs/trace.h | 2 ++ 3 files changed, 19 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 0ae6a2fbdb2a..e6f035c868d3 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1222,6 +1222,8 @@ static int __init init_f2fs_fs(void) { int err; + f2fs_build_trace_ios(); + err = init_inodecache(); if (err) goto fail; diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c index 19f5216b9b9c..92fa38a47e63 100644 --- a/fs/f2fs/trace.c +++ b/fs/f2fs/trace.c @@ -15,7 +15,8 @@ #include "f2fs.h" #include "trace.h" -RADIX_TREE(pids, GFP_NOIO); +RADIX_TREE(pids, GFP_ATOMIC); +spinlock_t pids_lock; struct last_io_info last_io; static inline void __print_last_io(void) @@ -58,9 +59,13 @@ void f2fs_trace_pid(struct page *page) page->private = pid; + if (radix_tree_preload(GFP_NOFS)) + return; + + spin_lock(&pids_lock); p = radix_tree_lookup(&pids, pid); if (p == current) - return; + goto out; if (p) radix_tree_delete(&pids, pid); @@ -69,7 +74,9 @@ void f2fs_trace_pid(struct page *page) trace_printk("%3x:%3x %4x %-16s\n", MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), pid, current->comm); - +out: + spin_unlock(&pids_lock); + radix_tree_preload_end(); } void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush) @@ -108,3 +115,8 @@ void f2fs_trace_ios(struct page *page, struct f2fs_io_info *fio, int flush) last_io.len = 1; return; } + +void f2fs_build_trace_ios(void) +{ + spin_lock_init(&pids_lock); +} diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h index aa6663be528c..eb39fa08cd45 100644 --- a/fs/f2fs/trace.h +++ b/fs/f2fs/trace.h @@ -34,9 +34,11 @@ struct last_io_info { extern void f2fs_trace_pid(struct page *); extern void f2fs_trace_ios(struct page *, struct f2fs_io_info *, int); +extern void f2fs_build_trace_ios(void); #else #define f2fs_trace_pid(p) #define f2fs_trace_ios(p, i, n) +#define f2fs_build_trace_ios() #endif #endif /* __F2FS_TRACE_H__ */ -- cgit v1.2.3 From 351f4fba843db3303a92897b21a3d4dff4b6c717 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 7 Jan 2015 14:09:48 -0800 Subject: f2fs: add f2fs_destroy_trace_ios to free radix tree This patch removes radix tree after finishing tracing IOs. 
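Since there is no single call here that frees every slot of a radix tree, the teardown below walks the pids tree in PIDVEC_SIZE batches and deletes the entries one by one, the same gang-lookup-then-delete shape used earlier in this series for the nat set caches; condensed:

        next_pid = 0;
        while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
                next_pid = pid[found - 1] + 1;
                for (idx = 0; idx < found; idx++)
                        radix_tree_delete(&pids, pid[idx]);
        }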
Signed-off-by: Jaegeuk Kim --- fs/f2fs/super.c | 1 + fs/f2fs/trace.c | 37 +++++++++++++++++++++++++++++++++++++ fs/f2fs/trace.h | 2 ++ 3 files changed, 40 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index e6f035c868d3..0e97974bbbbd 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1272,6 +1272,7 @@ static void __exit exit_f2fs_fs(void) destroy_node_manager_caches(); destroy_inodecache(); kset_unregister(f2fs_kset); + f2fs_destroy_trace_ios(); } module_init(init_f2fs_fs) diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c index 92fa38a47e63..b3570dcca945 100644 --- a/fs/f2fs/trace.c +++ b/fs/f2fs/trace.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "f2fs.h" #include "trace.h" @@ -120,3 +121,39 @@ void f2fs_build_trace_ios(void) { spin_lock_init(&pids_lock); } + +#define PIDVEC_SIZE 128 +static unsigned int gang_lookup_pids(pid_t *results, unsigned long first_index, + unsigned int max_items) +{ + struct radix_tree_iter iter; + void **slot; + unsigned int ret = 0; + + if (unlikely(!max_items)) + return 0; + + radix_tree_for_each_slot(slot, &pids, &iter, first_index) { + results[ret] = iter.index; + if (++ret == PIDVEC_SIZE) + break; + } + return ret; +} + +void f2fs_destroy_trace_ios(void) +{ + pid_t pid[PIDVEC_SIZE]; + pid_t next_pid = 0; + unsigned int found; + + spin_lock(&pids_lock); + while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) { + unsigned idx; + + next_pid = pid[found - 1] + 1; + for (idx = 0; idx < found; idx++) + radix_tree_delete(&pids, pid[idx]); + } + spin_unlock(&pids_lock); +} diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h index eb39fa08cd45..1041dbeb52ae 100644 --- a/fs/f2fs/trace.h +++ b/fs/f2fs/trace.h @@ -35,10 +35,12 @@ struct last_io_info { extern void f2fs_trace_pid(struct page *); extern void f2fs_trace_ios(struct page *, struct f2fs_io_info *, int); extern void f2fs_build_trace_ios(void); +extern void f2fs_destroy_trace_ios(void); #else #define f2fs_trace_pid(p) #define f2fs_trace_ios(p, i, n) #define f2fs_build_trace_ios() +#define f2fs_destroy_trace_ios() #endif #endif /* __F2FS_TRACE_H__ */ -- cgit v1.2.3 From 08e4126e1e91b01bea5c71980e7008ec5075d701 Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Fri, 9 Jan 2015 03:38:38 +0800 Subject: f2fs: pids_lock can be static fs/f2fs/trace.c:19:12: sparse: symbol 'pids_lock' was not declared. Should it be static? Signed-off-by: Fengguang Wu Signed-off-by: Jaegeuk Kim --- fs/f2fs/trace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c index b3570dcca945..ce01a2c903bd 100644 --- a/fs/f2fs/trace.c +++ b/fs/f2fs/trace.c @@ -17,7 +17,7 @@ #include "trace.h" RADIX_TREE(pids, GFP_ATOMIC); -spinlock_t pids_lock; +static spinlock_t pids_lock; struct last_io_info last_io; static inline void __print_last_io(void) -- cgit v1.2.3 From e4999bbe0802ddff82b3fcbbab3b6274b789aad7 Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sun, 11 Jan 2015 17:36:48 +0100 Subject: jffs2: compr_rubin: Remove unused function Remove the function pulledbits() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. 
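For reference, findings like this typically come from cppcheck's unusedFunction check; an illustrative invocation (exact flags may vary by cppcheck version) would be:

        cppcheck --enable=unusedFunction fs/jffs2/

which reports functions that are defined but never called within the analyzed sources.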
Signed-off-by: Rickard Strandqvist Reviewed-by: Richard Weinberger Signed-off-by: Brian Norris --- fs/jffs2/compr_rubin.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'fs') diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c index 92e0644bf867..556de100ebd5 100644 --- a/fs/jffs2/compr_rubin.c +++ b/fs/jffs2/compr_rubin.c @@ -84,11 +84,6 @@ static inline int pullbit(struct pushpull *pp) return bit; } -static inline int pulledbits(struct pushpull *pp) -{ - return pp->ofs; -} - static void init_rubin(struct rubin_state *rs, int div, int *bits) { -- cgit v1.2.3 From dd22f551ac0ad366f92f601835f6623b83adc331 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Wed, 7 Jan 2015 18:05:34 +0200 Subject: block: Change direct_access calling convention In order to support accesses to larger chunks of memory, pass in a 'size' parameter (counted in bytes), and return the amount available at that address. Add a new helper function, bdev_direct_access(), to handle common functionality including partition handling, checking the length requested is positive, checking for the sector being page-aligned, and checking the length of the request does not pass the end of the partition. Signed-off-by: Matthew Wilcox Reviewed-by: Jan Kara Reviewed-by: Boaz Harrosh Signed-off-by: Jens Axboe --- Documentation/filesystems/xip.txt | 15 +++++++++------ arch/powerpc/sysdev/axonram.c | 17 ++++------------- drivers/block/brd.c | 14 +++++++------- drivers/s390/block/dcssblk.c | 21 +++++++++----------- fs/block_dev.c | 40 +++++++++++++++++++++++++++++++++++++++ fs/ext2/xip.c | 31 +++++++++++++----------------- include/linux/blkdev.h | 6 ++++-- 7 files changed, 86 insertions(+), 58 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/xip.txt b/Documentation/filesystems/xip.txt index 0466ee569278..b77472949ede 100644 --- a/Documentation/filesystems/xip.txt +++ b/Documentation/filesystems/xip.txt @@ -28,12 +28,15 @@ Implementation Execute-in-place is implemented in three steps: block device operation, address space operation, and file operations. -A block device operation named direct_access is used to retrieve a -reference (pointer) to a block on-disk. The reference is supposed to be -cpu-addressable, physical address and remain valid until the release operation -is performed. A struct block_device reference is used to address the device, -and a sector_t argument is used to identify the individual block. As an -alternative, memory technology devices can be used for this. +A block device operation named direct_access is used to translate the +block device sector number to a page frame number (pfn) that identifies +the physical page for the memory. It also returns a kernel virtual +address that can be used to access the memory. + +The direct_access method takes a 'size' parameter that indicates the +number of bytes being requested. The function should return the number +of bytes that can be contiguously accessed at that offset. It may also +return a negative errno if an error occurs. 
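A minimal sketch of a caller using the new convention (error handling abbreviated; the signature and the errno values are the ones established by bdev_direct_access() later in this patch):

        void *kaddr;
        unsigned long pfn;
        long avail;

        /* ask for one page at 'sector'; fewer bytes may be available */
        avail = bdev_direct_access(bdev, sector, &kaddr, &pfn, PAGE_SIZE);
        if (avail < 0)
                return avail;   /* -EOPNOTSUPP, -EINVAL, -ERANGE, ... */
        /* up to 'avail' contiguous bytes at kaddr may be dereferenced */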
The block device operation is optional, these block devices support it as of today: diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index f532c92bf99d..20f8afe855d1 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -139,26 +139,17 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio) * axon_ram_direct_access - direct_access() method for block device * @device, @sector, @data: see block_device_operations method */ -static int +static long axon_ram_direct_access(struct block_device *device, sector_t sector, - void **kaddr, unsigned long *pfn) + void **kaddr, unsigned long *pfn, long size) { struct axon_ram_bank *bank = device->bd_disk->private_data; - loff_t offset; - - offset = sector; - if (device->bd_part != NULL) - offset += device->bd_part->start_sect; - offset <<= AXON_RAM_SECTOR_SHIFT; - if (offset >= bank->size) { - dev_err(&bank->device->dev, "Access outside of address space\n"); - return -ERANGE; - } + loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT; *kaddr = (void *)(bank->ph_addr + offset); *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT; - return 0; + return bank->size - offset; } static const struct block_device_operations axon_ram_devops = { diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 3598110d2cef..89e90ec52f28 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -370,25 +370,25 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, } #ifdef CONFIG_BLK_DEV_XIP -static int brd_direct_access(struct block_device *bdev, sector_t sector, - void **kaddr, unsigned long *pfn) +static long brd_direct_access(struct block_device *bdev, sector_t sector, + void **kaddr, unsigned long *pfn, long size) { struct brd_device *brd = bdev->bd_disk->private_data; struct page *page; if (!brd) return -ENODEV; - if (sector & (PAGE_SECTORS-1)) - return -EINVAL; - if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk)) - return -ERANGE; page = brd_insert_page(brd, sector); if (!page) return -ENOSPC; *kaddr = page_address(page); *pfn = page_to_pfn(page); - return 0; + /* + * TODO: If size > PAGE_SIZE, we could look to see if the next page in + * the file happens to be mapped to the next page of physical RAM. 
+ */ + return PAGE_SIZE; } #endif diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index b550c8c8d010..31d6884f3351 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -28,8 +28,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); static void dcssblk_make_request(struct request_queue *q, struct bio *bio); -static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, - void **kaddr, unsigned long *pfn); +static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, + void **kaddr, unsigned long *pfn, long size); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; @@ -866,25 +866,22 @@ fail: bio_io_error(bio); } -static int +static long dcssblk_direct_access (struct block_device *bdev, sector_t secnum, - void **kaddr, unsigned long *pfn) + void **kaddr, unsigned long *pfn, long size) { struct dcssblk_dev_info *dev_info; - unsigned long pgoff; + unsigned long offset, dev_sz; dev_info = bdev->bd_disk->private_data; if (!dev_info) return -ENODEV; - if (secnum % (PAGE_SIZE/512)) - return -EINVAL; - pgoff = secnum / (PAGE_SIZE / 512); - if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start) - return -ERANGE; - *kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE); + dev_sz = dev_info->end - dev_info->start; + offset = secnum * 512; + *kaddr = (void *) (dev_info->start + offset); *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT; - return 0; + return dev_sz - offset; } static void diff --git a/fs/block_dev.c b/fs/block_dev.c index b48c41bf0f86..f314c2c0567d 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -429,6 +429,46 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, } EXPORT_SYMBOL_GPL(bdev_write_page); +/** + * bdev_direct_access() - Get the address for directly-accessibly memory + * @bdev: The device containing the memory + * @sector: The offset within the device + * @addr: Where to put the address of the memory + * @pfn: The Page Frame Number for the memory + * @size: The number of bytes requested + * + * If a block device is made up of directly addressable memory, this function + * will tell the caller the PFN and the address of the memory. The address + * may be directly dereferenced within the kernel without the need to call + * ioremap(), kmap() or similar. The PFN is suitable for inserting into + * page tables. + * + * Return: negative errno if an error occurs, otherwise the number of bytes + * accessible at this address. 
+ */ +long bdev_direct_access(struct block_device *bdev, sector_t sector, + void **addr, unsigned long *pfn, long size) +{ + long avail; + const struct block_device_operations *ops = bdev->bd_disk->fops; + + if (size < 0) + return size; + if (!ops->direct_access) + return -EOPNOTSUPP; + if ((sector + DIV_ROUND_UP(size, 512)) > + part_nr_sects_read(bdev->bd_part)) + return -ERANGE; + sector += get_start_sect(bdev); + if (sector % (PAGE_SIZE / 512)) + return -EINVAL; + avail = ops->direct_access(bdev, sector, addr, pfn, size); + if (!avail) + return -ERANGE; + return min(avail, size); +} +EXPORT_SYMBOL_GPL(bdev_direct_access); + /* * pseudo-fs */ diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c index e98171a11cfe..bbc5fec6ff7f 100644 --- a/fs/ext2/xip.c +++ b/fs/ext2/xip.c @@ -13,18 +13,12 @@ #include "ext2.h" #include "xip.h" -static inline int -__inode_direct_access(struct inode *inode, sector_t block, - void **kaddr, unsigned long *pfn) +static inline long __inode_direct_access(struct inode *inode, sector_t block, + void **kaddr, unsigned long *pfn, long size) { struct block_device *bdev = inode->i_sb->s_bdev; - const struct block_device_operations *ops = bdev->bd_disk->fops; - sector_t sector; - - sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */ - - BUG_ON(!ops->direct_access); - return ops->direct_access(bdev, sector, kaddr, pfn); + sector_t sector = block * (PAGE_SIZE / 512); + return bdev_direct_access(bdev, sector, kaddr, pfn, size); } static inline int @@ -53,12 +47,13 @@ ext2_clear_xip_target(struct inode *inode, sector_t block) { void *kaddr; unsigned long pfn; - int rc; + long size; - rc = __inode_direct_access(inode, block, &kaddr, &pfn); - if (!rc) - clear_page(kaddr); - return rc; + size = __inode_direct_access(inode, block, &kaddr, &pfn, PAGE_SIZE); + if (size < 0) + return size; + clear_page(kaddr); + return 0; } void ext2_xip_verify_sb(struct super_block *sb) @@ -77,7 +72,7 @@ void ext2_xip_verify_sb(struct super_block *sb) int ext2_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, int create, void **kmem, unsigned long *pfn) { - int rc; + long rc; sector_t block; /* first, retrieve the sector number */ @@ -86,6 +81,6 @@ int ext2_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, int create, return rc; /* retrieve address of the target data */ - rc = __inode_direct_access(mapping->host, block, kmem, pfn); - return rc; + rc = __inode_direct_access(mapping->host, block, kmem, pfn, PAGE_SIZE); + return (rc < 0) ? 
rc : 0; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 92f4b4b288dd..e9086be6d9a0 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1601,8 +1601,8 @@ struct block_device_operations { int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); - int (*direct_access) (struct block_device *, sector_t, - void **, unsigned long *); + long (*direct_access)(struct block_device *, sector_t, + void **, unsigned long *pfn, long size); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); /* ->media_changed() is DEPRECATED, use ->check_events() instead */ @@ -1620,6 +1620,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, extern int bdev_read_page(struct block_device *, sector_t, struct page *); extern int bdev_write_page(struct block_device *, sector_t, struct page *, struct writeback_control *); +extern long bdev_direct_access(struct block_device *, sector_t, void **addr, + unsigned long *pfn, long size); #else /* CONFIG_BLOCK */ struct block_device; -- cgit v1.2.3 From 381cf6587f8a8a8e981bc0c1aaaa8859b51dc756 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 18:45:16 +0100 Subject: btrfs: fix leak of path in btrfs_find_item If btrfs_find_item is called with NULL path it allocates one locally but does not free it. Affected paths are inserting an orphan item for a file and for a subvol root. Move the path allocation to the callers. CC: # 3.14+ Fixes: 3f870c289900 ("btrfs: expand btrfs_find_item() to include find_orphan_item functionality") Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 17 ++++------------- fs/btrfs/disk-io.c | 9 ++++++++- fs/btrfs/tree-log.c | 11 ++++++++++- 3 files changed, 22 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 14a72ed14ef7..f54511dd287e 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2609,32 +2609,23 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key, return 0; } -int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path, +int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, u64 iobjectid, u64 ioff, u8 key_type, struct btrfs_key *found_key) { int ret; struct btrfs_key key; struct extent_buffer *eb; - struct btrfs_path *path; + + ASSERT(path); key.type = key_type; key.objectid = iobjectid; key.offset = ioff; - if (found_path == NULL) { - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - } else - path = found_path; - ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); - if ((ret < 0) || (found_key == NULL)) { - if (path != found_path) - btrfs_free_path(path); + if ((ret < 0) || (found_key == NULL)) return ret; - } eb = path->nodes[0]; if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8c63419a7f70..6182e5493d0f 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1630,6 +1630,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, bool check_ref) { struct btrfs_root *root; + struct btrfs_path *path; int ret; if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) @@ -1669,8 +1670,14 @@ again: if (ret) goto fail; - ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID, + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto fail; + } + ret = 
btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID, location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL); + btrfs_free_path(path); if (ret < 0) goto fail; if (ret == 0) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a02da16f2be..5be45c12dd71 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1257,10 +1257,19 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 offset) { int ret; - ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID, + struct btrfs_path *path; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID, offset, BTRFS_ORPHAN_ITEM_KEY, NULL); if (ret > 0) ret = btrfs_insert_orphan_item(trans, root, offset); + + btrfs_free_path(path); + return ret; } -- cgit v1.2.3 From 14692cc150d3ce10ea8766ccb2a8f483b77b49f0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 18:55:46 +0100 Subject: btrfs: cleanup, remove inode_item_info helper It's only a simple wrapper around btrfs_find_item, the locally defined key is not used. Signed-off-by: David Sterba --- fs/btrfs/backref.c | 11 ----------- fs/btrfs/backref.h | 3 --- fs/btrfs/scrub.c | 6 +++++- 3 files changed, 5 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 8729cf68d2fe..6fdf82bb03d4 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1246,17 +1246,6 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans, return ret; } -/* - * this makes the path point to (inum INODE_ITEM ioff) - */ -int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, - struct btrfs_path *path) -{ - struct btrfs_key key; - return btrfs_find_item(fs_root, path, inum, ioff, - BTRFS_INODE_ITEM_KEY, &key); -} - static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, struct btrfs_path *path, struct btrfs_key *found_key) diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 2a1ac6bfc724..9c41fbac3009 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -32,9 +32,6 @@ struct inode_fs_paths { typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root, void *ctx); -int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, - struct btrfs_path *path); - int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, struct btrfs_path *path, struct btrfs_key *found_key, u64 *flags); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9e1569ffbf6e..4846f66391a4 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -530,7 +530,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, goto err; } - ret = inode_item_info(inum, 0, local_root, swarn->path); + /* + * this makes the path point to (inum INODE_ITEM ioff) + */ + ret = btrfs_find_item(local_root, swarn->path, inum, 0, + BTRFS_INODE_ITEM_KEY, NULL); if (ret) { btrfs_release_path(swarn->path); goto err; -- cgit v1.2.3 From c234a24de91837db6f00aeebe28e3daddc53c1b7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 19:03:17 +0100 Subject: btrfs: cleanup, remove inode_ref_info helper A simple wrapper around btrfs_find_item. 
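The removed wrapper added nothing beyond hard-coding the key type; as the deleted helper below shows,

        inode_ref_info(inum, ioff, fs_root, path, &found_key);

was exactly

        btrfs_find_item(fs_root, path, inum, ioff, BTRFS_INODE_REF_KEY, &found_key);

so callers now simply spell out BTRFS_INODE_REF_KEY at the call site.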
Signed-off-by: David Sterba --- fs/btrfs/backref.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 6fdf82bb03d4..f55721ff9385 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1246,14 +1246,6 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans, return ret; } -static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, - struct btrfs_path *path, - struct btrfs_key *found_key) -{ - return btrfs_find_item(fs_root, path, inum, ioff, - BTRFS_INODE_REF_KEY, found_key); -} - int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, u64 start_off, struct btrfs_path *path, struct btrfs_inode_extref **ret_extref, @@ -1363,7 +1355,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); } - ret = inode_ref_info(parent, 0, fs_root, path, &found_key); + ret = btrfs_find_item(fs_root, path, parent, 0, + BTRFS_INODE_REF_KEY, &found_key); if (ret > 0) ret = -ENOENT; if (ret) @@ -1716,8 +1709,10 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root, struct btrfs_key found_key; while (!ret) { - ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path, - &found_key); + ret = btrfs_find_item(fs_root, path, inum, + parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY, + &found_key); + if (ret < 0) break; if (ret) { -- cgit v1.2.3 From 9c4f61f01d269815bb7c37be3ede59c5587747c6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 19:12:57 +0100 Subject: btrfs: simplify insert_orphan_item We can search and add the orphan item in one go, btrfs_insert_orphan_item will find out if the item already exists. Signed-off-by: David Sterba --- fs/btrfs/tree-log.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 5be45c12dd71..25a1c363a5f4 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1254,21 +1254,13 @@ out: } static int insert_orphan_item(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 offset) + struct btrfs_root *root, u64 ino) { int ret; - struct btrfs_path *path; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID, - offset, BTRFS_ORPHAN_ITEM_KEY, NULL); - if (ret > 0) - ret = btrfs_insert_orphan_item(trans, root, offset); - - btrfs_free_path(path); + ret = btrfs_insert_orphan_item(trans, root, ino); + if (ret == -EEXIST) + ret = 0; return ret; } -- cgit v1.2.3 From 1d4c08e0a60be356134d0c466744d0d4e16ebab0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 19:36:14 +0100 Subject: btrfs: expand btrfs_find_item if found_key is NULL If the found_key is NULL, then btrfs_find_item becomes a verbose wrapper for simple btrfs_search_slot. After we've removed all such callers, passing a NULL key is not valid anymore. 
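Callers that relied on the NULL-found_key shortcut now build the key themselves and call btrfs_search_slot() directly, as the hunks below do. A minimal sketch of the converted orphan-item lookup in disk-io.c (error handling omitted):

    struct btrfs_key key;

    key.objectid = BTRFS_ORPHAN_OBJECTID;
    key.type = BTRFS_ORPHAN_ITEM_KEY;
    key.offset = location->objectid;

    /* read-only search: no transaction, no found_key needed */
    ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);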
Signed-off-by: David Sterba --- fs/btrfs/ctree.c | 3 ++- fs/btrfs/disk-io.c | 8 ++++++-- fs/btrfs/inode.c | 10 +++++++--- fs/btrfs/scrub.c | 8 ++++++-- 4 files changed, 21 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f54511dd287e..20d1f2b0403d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2618,13 +2618,14 @@ int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, struct extent_buffer *eb; ASSERT(path); + ASSERT(found_key); key.type = key_type; key.objectid = iobjectid; key.offset = ioff; ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); - if ((ret < 0) || (found_key == NULL)) + if (ret < 0) return ret; eb = path->nodes[0]; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6182e5493d0f..8d486603e8a3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1631,6 +1631,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, { struct btrfs_root *root; struct btrfs_path *path; + struct btrfs_key key; int ret; if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) @@ -1675,8 +1676,11 @@ again: ret = -ENOMEM; goto fail; } - ret = btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID, - location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL); + key.objectid = BTRFS_ORPHAN_OBJECTID; + key.type = BTRFS_ORPHAN_ITEM_KEY; + key.offset = location->objectid; + + ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); btrfs_free_path(path); if (ret < 0) goto fail; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8bf326affb94..370f23416080 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5009,6 +5009,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, struct btrfs_root *new_root; struct btrfs_root_ref *ref; struct extent_buffer *leaf; + struct btrfs_key key; int ret; int err = 0; @@ -5019,9 +5020,12 @@ static int fixup_tree_root_location(struct btrfs_root *root, } err = -ENOENT; - ret = btrfs_find_item(root->fs_info->tree_root, path, - BTRFS_I(dir)->root->root_key.objectid, - location->objectid, BTRFS_ROOT_REF_KEY, NULL); + key.objectid = BTRFS_I(dir)->root->root_key.objectid; + key.type = BTRFS_ROOT_REF_KEY; + key.offset = location->objectid; + + ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path, + 0, 0); if (ret) { if (ret < 0) err = ret; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 4846f66391a4..53575a45f7d1 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -520,6 +520,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; struct btrfs_key root_key; + struct btrfs_key key; root_key.objectid = root; root_key.type = BTRFS_ROOT_ITEM_KEY; @@ -533,8 +534,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, /* * this makes the path point to (inum INODE_ITEM ioff) */ - ret = btrfs_find_item(local_root, swarn->path, inum, 0, - BTRFS_INODE_ITEM_KEY, NULL); + key.objectid = inum; + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; + + ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); if (ret) { btrfs_release_path(swarn->path); goto err; -- cgit v1.2.3 From 4cb7208a4105a74581ccd4541af75cfd772e99fb Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sat, 10 Jan 2015 18:02:42 +0100 Subject: lockd: xdr: Remove unused function Remove the function nlm_encode_fh() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. 
Signed-off-by: Rickard Strandqvist Signed-off-by: J. Bruce Fields --- fs/lockd/xdr.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'fs') diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 9340e7e10ef6..5b651daad518 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -95,14 +95,6 @@ nlm_decode_fh(__be32 *p, struct nfs_fh *f) return p + XDR_QUADLEN(NFS2_FHSIZE); } -static inline __be32 * -nlm_encode_fh(__be32 *p, struct nfs_fh *f) -{ - *p++ = htonl(NFS2_FHSIZE); - memcpy(p, f->data, NFS2_FHSIZE); - return p + XDR_QUADLEN(NFS2_FHSIZE); -} - /* * Encode and decode owner handle */ -- cgit v1.2.3 From 917937025a955e239e5cdcc62b6ca9a5ef9e5e48 Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Tue, 13 Jan 2015 21:57:24 +0100 Subject: nfsd: nfs4state: Remove unused function Remove the function renew_client() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 277f8b8529d6..f924f0618cb5 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -150,16 +150,6 @@ renew_client_locked(struct nfs4_client *clp) clp->cl_time = get_seconds(); } -static inline void -renew_client(struct nfs4_client *clp) -{ - struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); - - spin_lock(&nn->client_lock); - renew_client_locked(clp); - spin_unlock(&nn->client_lock); -} - static void put_client_renew_locked(struct nfs4_client *clp) { struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); -- cgit v1.2.3 From dbaffde76405012778b8815b7721554b1302038e Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Tue, 6 Jan 2015 11:18:24 -0800 Subject: pstore: Use scnprintf() in pstore_mkfile() No guarantees that the names will not exceed the name buffer with future adjustments. Signed-off-by: Mark Salyzyn Acked-by: Kees Cook Signed-off-by: Tony Luck --- fs/pstore/inode.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index 50416602774d..d69586f09ffd 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -338,32 +338,35 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count, switch (type) { case PSTORE_TYPE_DMESG: - sprintf(name, "dmesg-%s-%lld%s", psname, id, - compressed ? ".enc.z" : ""); + scnprintf(name, sizeof(name), "dmesg-%s-%lld%s", + psname, id, compressed ? 
".enc.z" : ""); break; case PSTORE_TYPE_CONSOLE: - sprintf(name, "console-%s-%lld", psname, id); + scnprintf(name, sizeof(name), "console-%s-%lld", psname, id); break; case PSTORE_TYPE_FTRACE: - sprintf(name, "ftrace-%s-%lld", psname, id); + scnprintf(name, sizeof(name), "ftrace-%s-%lld", psname, id); break; case PSTORE_TYPE_MCE: - sprintf(name, "mce-%s-%lld", psname, id); + scnprintf(name, sizeof(name), "mce-%s-%lld", psname, id); break; case PSTORE_TYPE_PPC_RTAS: - sprintf(name, "rtas-%s-%lld", psname, id); + scnprintf(name, sizeof(name), "rtas-%s-%lld", psname, id); break; case PSTORE_TYPE_PPC_OF: - sprintf(name, "powerpc-ofw-%s-%lld", psname, id); + scnprintf(name, sizeof(name), "powerpc-ofw-%s-%lld", + psname, id); break; case PSTORE_TYPE_PPC_COMMON: - sprintf(name, "powerpc-common-%s-%lld", psname, id); + scnprintf(name, sizeof(name), "powerpc-common-%s-%lld", + psname, id); break; case PSTORE_TYPE_UNKNOWN: - sprintf(name, "unknown-%s-%lld", psname, id); + scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id); break; default: - sprintf(name, "type%d-%s-%lld", type, psname, id); + scnprintf(name, sizeof(name), "type%d-%s-%lld", + type, psname, id); break; } -- cgit v1.2.3 From ff6bf6e8024f073ecea7dbf5c6afe6bd3872a569 Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Tue, 13 Jan 2015 14:32:35 -0800 Subject: pstore: Remove superfluous memory size check All previous checks will fail with error if memory size is not sufficient to register a zone, so this legacy check has become redundant. Signed-off-by: Mark Salyzyn Acked-by: Kees Cook Signed-off-by: Tony Luck --- fs/pstore/ram.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'fs') diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 8613e5b35c22..34ed8f860e23 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -474,14 +474,6 @@ static int ramoops_probe(struct platform_device *pdev) if (err) goto fail_init_fprz; - if (!cxt->przs && !cxt->cprz && !cxt->fprz) { - pr_err("memory size too small, minimum is %zu\n", - cxt->console_size + cxt->record_size + - cxt->ftrace_size); - err = -EINVAL; - goto fail_cnt; - } - cxt->pstore.data = cxt; /* * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we @@ -525,7 +517,6 @@ fail_buf: kfree(cxt->pstore.buf); fail_clear: cxt->pstore.bufsize = 0; -fail_cnt: kfree(cxt->fprz); fail_init_fprz: kfree(cxt->cprz); -- cgit v1.2.3 From f44f96528a8cd4134a4f2c1a9d8c785600aa4888 Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Tue, 13 Jan 2015 14:32:50 -0800 Subject: pstore: Handle zero-sized prz in series ramoops_pstore_read fails to return the next in a prz series after first zero-sized entry, not venturing to the next non-zero entry. 
Signed-off-by: Mark Salyzyn Acked-by: Kees Cook Signed-off-by: Tony Luck --- fs/pstore/ram.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 34ed8f860e23..6150e54eed30 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -164,6 +164,12 @@ static int ramoops_read_kmsg_hdr(char *buffer, struct timespec *time, return header_length; } +static bool prz_ok(struct persistent_ram_zone *prz) +{ + return !!prz && !!(persistent_ram_old_size(prz) + + persistent_ram_ecc_string(prz, NULL, 0)); +} + static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type, int *count, struct timespec *time, char **buf, bool *compressed, @@ -178,13 +184,13 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type, prz = ramoops_get_next_prz(cxt->przs, &cxt->dump_read_cnt, cxt->max_dump_cnt, id, type, PSTORE_TYPE_DMESG, 1); - if (!prz) + if (!prz_ok(prz)) prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt, 1, id, type, PSTORE_TYPE_CONSOLE, 0); - if (!prz) + if (!prz_ok(prz)) prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt, 1, id, type, PSTORE_TYPE_FTRACE, 0); - if (!prz) + if (!prz_ok(prz)) return 0; if (!persistent_ram_old(prz)) -- cgit v1.2.3 From 9d5438f462abd6398cdb7b3211bdcec271873a3b Mon Sep 17 00:00:00 2001 From: Mark Salyzyn Date: Fri, 16 Jan 2015 16:01:10 -0800 Subject: pstore: Add pmsg - user-space accessible pstore object A secured user-space accessible pstore object. Writes to /dev/pmsg0 are appended to the buffer, on reboot the persistent contents are available in /sys/fs/pstore/pmsg-ramoops-[ID]. One possible use is syslogd, or other daemon, can write messages, then on reboot provides a means to triage user-space activities leading up to a panic as a companion to the pstore dmesg or console logs. Signed-off-by: Mark Salyzyn Acked-by: Kees Cook Signed-off-by: Tony Luck --- fs/pstore/Kconfig | 10 ++++ fs/pstore/Makefile | 2 + fs/pstore/inode.c | 3 ++ fs/pstore/internal.h | 6 +++ fs/pstore/platform.c | 1 + fs/pstore/pmsg.c | 114 +++++++++++++++++++++++++++++++++++++++++++++ fs/pstore/ram.c | 34 +++++++++++++- include/linux/pstore.h | 1 + include/linux/pstore_ram.h | 1 + 9 files changed, 170 insertions(+), 2 deletions(-) create mode 100644 fs/pstore/pmsg.c (limited to 'fs') diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig index 983d9510becc..916b8e23d968 100644 --- a/fs/pstore/Kconfig +++ b/fs/pstore/Kconfig @@ -21,6 +21,16 @@ config PSTORE_CONSOLE When the option is enabled, pstore will log all kernel messages, even if no oops or panic happened. +config PSTORE_PMSG + bool "Log user space messages" + depends on PSTORE + help + When the option is enabled, pstore will export a character + interface /dev/pmsg0 to log user space messages. On reboot + data can be retrieved from /sys/fs/pstore/pmsg-ramoops-[ID]. + + If unsure, say N. 
+ config PSTORE_FTRACE bool "Persistent function tracer" depends on PSTORE diff --git a/fs/pstore/Makefile b/fs/pstore/Makefile index 4c9095c2781e..e647d8e81712 100644 --- a/fs/pstore/Makefile +++ b/fs/pstore/Makefile @@ -7,5 +7,7 @@ obj-y += pstore.o pstore-objs += inode.o platform.o obj-$(CONFIG_PSTORE_FTRACE) += ftrace.o +obj-$(CONFIG_PSTORE_PMSG) += pmsg.o + ramoops-objs += ram.o ram_core.o obj-$(CONFIG_PSTORE_RAM) += ramoops.o diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index d69586f09ffd..b32ce53d24ee 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -361,6 +361,9 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count, scnprintf(name, sizeof(name), "powerpc-common-%s-%lld", psname, id); break; + case PSTORE_TYPE_PMSG: + scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id); + break; case PSTORE_TYPE_UNKNOWN: scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id); break; diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h index 3b3d305277c4..c36ba2cd0b5d 100644 --- a/fs/pstore/internal.h +++ b/fs/pstore/internal.h @@ -45,6 +45,12 @@ extern void pstore_register_ftrace(void); static inline void pstore_register_ftrace(void) {} #endif +#ifdef CONFIG_PSTORE_PMSG +extern void pstore_register_pmsg(void); +#else +static inline void pstore_register_pmsg(void) {} +#endif + extern struct pstore_info *psinfo; extern void pstore_set_kmsg_bytes(int); diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 0a9b72cdfeca..15ee78c5020b 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -447,6 +447,7 @@ int pstore_register(struct pstore_info *psi) if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) { pstore_register_console(); pstore_register_ftrace(); + pstore_register_pmsg(); } if (pstore_update_ms >= 0) { diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c new file mode 100644 index 000000000000..feb5dd2948b4 --- /dev/null +++ b/fs/pstore/pmsg.c @@ -0,0 +1,114 @@ +/* + * Copyright 2014 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include "internal.h" + +static DEFINE_MUTEX(pmsg_lock); +#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE) + +static ssize_t write_pmsg(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + size_t i, buffer_size; + char *buffer; + + if (!count) + return 0; + + if (!access_ok(VERIFY_READ, buf, count)) + return -EFAULT; + + buffer_size = count; + if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE) + buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE; + buffer = vmalloc(buffer_size); + + mutex_lock(&pmsg_lock); + for (i = 0; i < count; ) { + size_t c = min(count - i, buffer_size); + u64 id; + long ret; + + ret = __copy_from_user(buffer, buf + i, c); + if (unlikely(ret != 0)) { + mutex_unlock(&pmsg_lock); + vfree(buffer); + return -EFAULT; + } + psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c, + psinfo); + + i += c; + } + + mutex_unlock(&pmsg_lock); + vfree(buffer); + return count; +} + +static const struct file_operations pmsg_fops = { + .owner = THIS_MODULE, + .llseek = noop_llseek, + .write = write_pmsg, +}; + +static struct class *pmsg_class; +static int pmsg_major; +#define PMSG_NAME "pmsg" +#undef pr_fmt +#define pr_fmt(fmt) PMSG_NAME ": " fmt + +static char *pmsg_devnode(struct device *dev, umode_t *mode) +{ + if (mode) + *mode = 0220; + return NULL; +} + +void pstore_register_pmsg(void) +{ + struct device *pmsg_device; + + pmsg_major = register_chrdev(0, PMSG_NAME, &pmsg_fops); + if (pmsg_major < 0) { + pr_err("register_chrdev failed\n"); + goto err; + } + + pmsg_class = class_create(THIS_MODULE, PMSG_NAME); + if (IS_ERR(pmsg_class)) { + pr_err("device class file already in use\n"); + goto err_class; + } + pmsg_class->devnode = pmsg_devnode; + + pmsg_device = device_create(pmsg_class, NULL, MKDEV(pmsg_major, 0), + NULL, "%s%d", PMSG_NAME, 0); + if (IS_ERR(pmsg_device)) { + pr_err("failed to create device\n"); + goto err_device; + } + return; + +err_device: + class_destroy(pmsg_class); +err_class: + unregister_chrdev(pmsg_major, PMSG_NAME); +err: + return; +} diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c index 6150e54eed30..39d1373128e9 100644 --- a/fs/pstore/ram.c +++ b/fs/pstore/ram.c @@ -51,6 +51,10 @@ static ulong ramoops_ftrace_size = MIN_MEM_SIZE; module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400); MODULE_PARM_DESC(ftrace_size, "size of ftrace log"); +static ulong ramoops_pmsg_size = MIN_MEM_SIZE; +module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400); +MODULE_PARM_DESC(pmsg_size, "size of user space message log"); + static ulong mem_address; module_param(mem_address, ulong, 0400); MODULE_PARM_DESC(mem_address, @@ -82,12 +86,14 @@ struct ramoops_context { struct persistent_ram_zone **przs; struct persistent_ram_zone *cprz; struct persistent_ram_zone *fprz; + struct persistent_ram_zone *mprz; phys_addr_t phys_addr; unsigned long size; unsigned int memtype; size_t record_size; size_t console_size; size_t ftrace_size; + size_t pmsg_size; int dump_oops; struct persistent_ram_ecc_info ecc_info; unsigned int max_dump_cnt; @@ -96,6 +102,7 @@ struct ramoops_context { unsigned int dump_read_cnt; unsigned int console_read_cnt; unsigned int ftrace_read_cnt; + unsigned int pmsg_read_cnt; struct pstore_info pstore; }; @@ -109,6 +116,7 @@ static int ramoops_pstore_open(struct pstore_info *psi) cxt->dump_read_cnt = 0; cxt->console_read_cnt = 0; cxt->ftrace_read_cnt = 0; + cxt->pmsg_read_cnt = 0; return 0; } @@ -190,6 +198,9 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id 
*type, if (!prz_ok(prz)) prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt, 1, id, type, PSTORE_TYPE_FTRACE, 0); + if (!prz_ok(prz)) + prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt, + 1, id, type, PSTORE_TYPE_PMSG, 0); if (!prz_ok(prz)) return 0; @@ -258,6 +269,11 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type, return -ENOMEM; persistent_ram_write(cxt->fprz, buf, size); return 0; + } else if (type == PSTORE_TYPE_PMSG) { + if (!cxt->mprz) + return -ENOMEM; + persistent_ram_write(cxt->mprz, buf, size); + return 0; } if (type != PSTORE_TYPE_DMESG) @@ -315,6 +331,9 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count, case PSTORE_TYPE_FTRACE: prz = cxt->fprz; break; + case PSTORE_TYPE_PMSG: + prz = cxt->mprz; + break; default: return -EINVAL; } @@ -441,7 +460,7 @@ static int ramoops_probe(struct platform_device *pdev) goto fail_out; if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size && - !pdata->ftrace_size)) { + !pdata->ftrace_size && !pdata->pmsg_size)) { pr_err("The memory size and the record/console size must be " "non-zero\n"); goto fail_out; @@ -453,6 +472,8 @@ static int ramoops_probe(struct platform_device *pdev) pdata->console_size = rounddown_pow_of_two(pdata->console_size); if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size)) pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size); + if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size)) + pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size); cxt->size = pdata->mem_size; cxt->phys_addr = pdata->mem_address; @@ -460,12 +481,14 @@ static int ramoops_probe(struct platform_device *pdev) cxt->record_size = pdata->record_size; cxt->console_size = pdata->console_size; cxt->ftrace_size = pdata->ftrace_size; + cxt->pmsg_size = pdata->pmsg_size; cxt->dump_oops = pdata->dump_oops; cxt->ecc_info = pdata->ecc_info; paddr = cxt->phys_addr; - dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size; + dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size + - cxt->pmsg_size; err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz); if (err) goto fail_out; @@ -480,6 +503,10 @@ static int ramoops_probe(struct platform_device *pdev) if (err) goto fail_init_fprz; + err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0); + if (err) + goto fail_init_mprz; + cxt->pstore.data = cxt; /* * Console can handle any buffer size, so prefer LOG_LINE_MAX. 
If we @@ -523,6 +550,8 @@ fail_buf: kfree(cxt->pstore.buf); fail_clear: cxt->pstore.bufsize = 0; + kfree(cxt->mprz); +fail_init_mprz: kfree(cxt->fprz); fail_init_fprz: kfree(cxt->cprz); @@ -580,6 +609,7 @@ static void ramoops_register_dummy(void) dummy_data->record_size = record_size; dummy_data->console_size = ramoops_console_size; dummy_data->ftrace_size = ramoops_ftrace_size; + dummy_data->pmsg_size = ramoops_pmsg_size; dummy_data->dump_oops = dump_oops; /* * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC diff --git a/include/linux/pstore.h b/include/linux/pstore.h index ece0c6bbfcc5..8884f6e507f7 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -39,6 +39,7 @@ enum pstore_type_id { PSTORE_TYPE_PPC_RTAS = 4, PSTORE_TYPE_PPC_OF = 5, PSTORE_TYPE_PPC_COMMON = 6, + PSTORE_TYPE_PMSG = 7, PSTORE_TYPE_UNKNOWN = 255 }; diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 4af3fdc85b01..9c9d6c154c8e 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -81,6 +81,7 @@ struct ramoops_platform_data { unsigned long record_size; unsigned long console_size; unsigned long ftrace_size; + unsigned long pmsg_size; int dump_oops; struct persistent_ram_ecc_info ecc_info; }; -- cgit v1.2.3 From a6b8978c54b771308f6f1692b9735ac0bb087cc2 Mon Sep 17 00:00:00 2001 From: alex chen Date: Fri, 16 Jan 2015 15:52:03 -0800 Subject: pstore: Fix sprintf format specifier in pstore_dump() We should use sprintf format specifier "%u" instead of "%d" for argument of type 'unsigned int' in pstore_dump(). Signed-off-by: Alex Chen Reviewed-by: Joseph Qi Acked-by: Kees Cook Signed-off-by: Tony Luck --- fs/pstore/platform.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index 15ee78c5020b..c4c9a10c5760 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ -301,7 +301,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, if (big_oops_buf) { dst = big_oops_buf; - hsize = sprintf(dst, "%s#%d Part%d\n", why, + hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount, part); size = big_oops_buf_sz - hsize; @@ -321,7 +321,7 @@ static void pstore_dump(struct kmsg_dumper *dumper, } } else { dst = psinfo->buf; - hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, + hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount, part); size = psinfo->bufsize - hsize; dst += hsize; -- cgit v1.2.3 From 053c095a82cf773075e83d7233b5cc19a1f73ece Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 16 Jan 2015 22:09:00 +0100 Subject: netlink: make nlmsg_end() and genlmsg_end() void Contrary to common expectations for an "int" return, these functions return only a positive value -- if used correctly they cannot even return 0 because the message header will necessarily be in the skb. This makes the very common pattern of if (genlmsg_end(...) < 0) { ... } be a whole bunch of dead code. Many places also simply do return nlmsg_end(...); and the caller is expected to deal with it. This also commonly (at least for me) causes errors, because it is very common to write if (my_function(...)) /* error condition */ and if my_function() does "return nlmsg_end()" this is of course wrong. Additionally, there's not a single place in the kernel that actually needs the message length returned, and if anyone needs it later then it'll be very easy to just use skb->len there. Remove this, and make the functions void. This removes a bunch of dead code as described above. 
The patch adds lines because I did - return nlmsg_end(...); + nlmsg_end(...); + return 0; I could have preserved all the function's return values by returning skb->len, but instead I've audited all the places calling the affected functions and found that none cared. A few places actually compared the return value with <= 0 in dump functionality, but that could just be changed to < 0 with no change in behaviour, so I opted for the more efficient version. One instance of the error I've made numerous times now is also present in net/phonet/pn_netlink.c in the route_dumpit() function - it didn't check for <0 or <=0 and thus broke out of the loop every single time. I've preserved this since it will (I think) have caused the messages to userspace to be formatted differently with just a single message for every SKB returned to userspace. It's possible that this isn't needed for the tools that actually use this, but I don't even know what they are so couldn't test that changing this behaviour would be acceptable. Signed-off-by: Johannes Berg Signed-off-by: David S. Miller --- drivers/acpi/event.c | 7 +------ drivers/net/ethernet/rocker/rocker.c | 3 ++- drivers/net/vxlan.c | 3 ++- drivers/net/wireless/mac80211_hwsim.c | 3 ++- drivers/scsi/pmcraid.c | 8 +------- drivers/target/target_core_user.c | 4 +--- drivers/thermal/thermal_core.c | 6 +----- fs/dlm/netlink.c | 7 +------ include/net/genetlink.h | 4 ++-- include/net/netlink.h | 6 +----- kernel/taskstats.c | 13 ++----------- net/bridge/br_fdb.c | 3 ++- net/bridge/br_mdb.c | 3 ++- net/bridge/br_netlink.c | 3 ++- net/can/gw.c | 3 ++- net/core/fib_rules.c | 3 ++- net/core/neighbour.c | 12 ++++++++---- net/core/rtnetlink.c | 9 ++++++--- net/decnet/dn_dev.c | 3 ++- net/decnet/dn_route.c | 3 ++- net/decnet/dn_table.c | 3 ++- net/ieee802154/netlink.c | 12 ++---------- net/ieee802154/nl-mac.c | 3 ++- net/ieee802154/nl-phy.c | 3 ++- net/ieee802154/nl802154.c | 6 ++++-- net/ipv4/devinet.c | 8 +++++--- net/ipv4/fib_semantics.c | 3 ++- net/ipv4/inet_diag.c | 9 ++++++--- net/ipv4/ipmr.c | 3 ++- net/ipv4/route.c | 3 ++- net/ipv4/tcp_metrics.c | 3 ++- net/ipv6/addrconf.c | 32 +++++++++++++++++++------------- net/ipv6/addrlabel.c | 5 +++-- net/ipv6/ip6_fib.c | 1 - net/ipv6/ip6mr.c | 3 ++- net/ipv6/route.c | 3 ++- net/l2tp/l2tp_netlink.c | 10 ++++++---- net/netfilter/ipvs/ip_vs_ctl.c | 9 ++++++--- net/netfilter/nf_tables_api.c | 18 ++++++++++++------ net/netlabel/netlabel_cipso_v4.c | 3 ++- net/netlabel/netlabel_mgmt.c | 6 ++++-- net/netlabel/netlabel_unlabeled.c | 3 ++- net/netlink/diag.c | 3 ++- net/netlink/genetlink.c | 6 ++++-- net/nfc/netlink.c | 12 +++++++----- net/openvswitch/datapath.c | 9 ++++++--- net/packet/diag.c | 3 ++- net/phonet/pn_netlink.c | 16 +++++++++++----- net/unix/diag.c | 3 ++- net/wireless/nl80211.c | 27 ++++++++++++++++++--------- net/xfrm/xfrm_user.c | 27 ++++++++++++++++++--------- 51 files changed, 203 insertions(+), 158 deletions(-) (limited to 'fs') diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index ef2d730734dc..e24ea4e796e4 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -100,7 +100,6 @@ int acpi_bus_generate_netlink_event(const char *device_class, struct acpi_genl_event *event; void *msg_header; int size; - int result; /* allocate memory */ size = nla_total_size(sizeof(struct acpi_genl_event)) + @@ -137,11 +136,7 @@ int acpi_bus_generate_netlink_event(const char *device_class, event->data = data; /* send multicast genetlink message */ - result = genlmsg_end(skb, msg_header); - if (result < 0) { - 
nlmsg_free(skb); - return result; - } + genlmsg_end(skb, msg_header); genlmsg_multicast(&acpi_event_genl_family, skb, 0, 0, GFP_ATOMIC); return 0; diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 964d719b150f..d54781e71cd4 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -3674,7 +3674,8 @@ static int rocker_fdb_fill_info(struct sk_buff *skb, if (vid && nla_put_u16(skb, NDA_VLAN, vid)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6b6b45622a0a..c5f79e7513a6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -363,7 +363,8 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 494e7335aa64..4a4c6586a8d2 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2557,7 +2557,8 @@ static int mac80211_hwsim_get_radio(struct sk_buff *skb, if (res < 0) goto out_err; - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; out_err: genlmsg_cancel(skb, hdr); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 8c27b6a77ec4..cf222f46eac5 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1473,13 +1473,7 @@ static int pmcraid_notify_aen( } /* send genetlink multicast message to notify appplications */ - result = genlmsg_end(skb, msg_header); - - if (result < 0) { - pmcraid_err("genlmsg_end failed\n"); - nlmsg_free(skb); - return result; - } + genlmsg_end(skb, msg_header); result = genlmsg_multicast(&pmcraid_event_family, skb, 0, 0, GFP_ATOMIC); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 1157b559683b..1a1bcf71ec9d 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -784,9 +784,7 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino if (ret < 0) goto free_skb; - ret = genlmsg_end(skb, msg_header); - if (ret < 0) - goto free_skb; + genlmsg_end(skb, msg_header); ret = genlmsg_multicast(&tcmu_genl_family, skb, 0, TCMU_MCGRP_CONFIG, GFP_KERNEL); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 87e0b0782023..48491d1a81d6 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -1759,11 +1759,7 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz, thermal_event->event = event; /* send multicast genetlink message */ - result = genlmsg_end(skb, msg_header); - if (result < 0) { - nlmsg_free(skb); - return result; - } + genlmsg_end(skb, msg_header); result = genlmsg_multicast(&thermal_event_genl_family, skb, 0, 0, GFP_ATOMIC); diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c index e7cfbaf8d0e2..1e6e227134d7 100644 --- a/fs/dlm/netlink.c +++ b/fs/dlm/netlink.c @@ -56,13 +56,8 @@ static int send_data(struct sk_buff *skb) { struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); void *data = genlmsg_data(genlhdr); - int rv; - rv = genlmsg_end(skb, data); - if (rv < 0) { - nlmsg_free(skb); - return rv; - } + genlmsg_end(skb, data); return genlmsg_unicast(&init_net, skb, listener_nlportid); } diff 
--git a/include/net/genetlink.h b/include/net/genetlink.h index 84125088c309..f24aa83b80b6 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -245,9 +245,9 @@ static inline void *genlmsg_put_reply(struct sk_buff *skb, * @skb: socket buffer the message is stored in * @hdr: user specific header */ -static inline int genlmsg_end(struct sk_buff *skb, void *hdr) +static inline void genlmsg_end(struct sk_buff *skb, void *hdr) { - return nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); + nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); } /** diff --git a/include/net/netlink.h b/include/net/netlink.h index d5869b90bfbb..e010ee8da41d 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -490,14 +490,10 @@ static inline struct sk_buff *nlmsg_new(size_t payload, gfp_t flags) * Corrects the netlink message header to include the appeneded * attributes. Only necessary if attributes have been added to * the message. - * - * Returns the total data length of the skb. */ -static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh) +static inline void nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh) { nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; - - return skb->len; } /** diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 670fff88a961..21f82c29c914 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -111,13 +111,8 @@ static int send_reply(struct sk_buff *skb, struct genl_info *info) { struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb)); void *reply = genlmsg_data(genlhdr); - int rc; - rc = genlmsg_end(skb, reply); - if (rc < 0) { - nlmsg_free(skb); - return rc; - } + genlmsg_end(skb, reply); return genlmsg_reply(skb, info); } @@ -134,11 +129,7 @@ static void send_cpu_listeners(struct sk_buff *skb, void *reply = genlmsg_data(genlhdr); int rc, delcount = 0; - rc = genlmsg_end(skb, reply); - if (rc < 0) { - nlmsg_free(skb); - return; - } + genlmsg_end(skb, reply); rc = 0; down_read(&listeners->sem); diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 03667e65cc29..08bf04bdac58 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -633,7 +633,8 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index fed61c971b17..409608960899 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -190,7 +190,8 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, nla_nest_end(skb, nest2); nla_nest_end(skb, nest); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; end: nla_nest_end(skb, nest); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 163950b10d8c..528cf2790a5f 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -263,7 +263,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, } done: - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/can/gw.c b/net/can/gw.c index 295f62e62eb3..a6f448e18ea8 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -575,7 +575,8 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, goto cancel; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; cancel: nlmsg_cancel(skb, nlh); diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 
185c341fafbd..44706e81b2e0 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -609,7 +609,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, if (ops->fill(rule, skb, frh) < 0) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8d614c93f86a..d36d564f149f 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1884,7 +1884,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, goto nla_put_failure; read_unlock_bh(&tbl->lock); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: read_unlock_bh(&tbl->lock); @@ -1917,7 +1918,8 @@ static int neightbl_fill_param_info(struct sk_buff *skb, goto errout; read_unlock_bh(&tbl->lock); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; errout: read_unlock_bh(&tbl->lock); nlmsg_cancel(skb, nlh); @@ -2202,7 +2204,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh, nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -2232,7 +2235,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index eadc5c0e2dfa..e13b9dbdf154 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1199,7 +1199,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, nla_nest_end(skb, af_spec); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -2326,7 +2327,8 @@ static int nlmsg_populate_fdb_fill(struct sk_buff *skb, if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -2809,7 +2811,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, nla_nest_end(skb, protinfo); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 4400da7739da..b2c26b081134 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -702,7 +702,8 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa, nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) || nla_put_u32(skb, IFA_FLAGS, ifa_flags)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index daccc4a36d80..812e5e6e88fb 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1616,7 +1616,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq, nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0) goto errout; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; errout: nlmsg_cancel(skb, nlh); diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c index 3f19fcbf126d..1540b506e3e0 100644 --- a/net/decnet/dn_table.c +++ b/net/decnet/dn_table.c @@ -367,7 +367,8 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, nla_nest_end(skb, 
mp_head); } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; errout: nlmsg_cancel(skb, nlh); diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c index fa1464762d0d..c8133c07ceee 100644 --- a/net/ieee802154/netlink.c +++ b/net/ieee802154/netlink.c @@ -63,13 +63,9 @@ int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group) struct nlmsghdr *nlh = nlmsg_hdr(msg); void *hdr = genlmsg_data(nlmsg_data(nlh)); - if (genlmsg_end(msg, hdr) < 0) - goto out; + genlmsg_end(msg, hdr); return genlmsg_multicast(&nl802154_family, msg, 0, group, GFP_ATOMIC); -out: - nlmsg_free(msg); - return -ENOBUFS; } struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info, @@ -96,13 +92,9 @@ int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info) struct nlmsghdr *nlh = nlmsg_hdr(msg); void *hdr = genlmsg_data(nlmsg_data(nlh)); - if (genlmsg_end(msg, hdr) < 0) - goto out; + genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); -out: - nlmsg_free(msg); - return -ENOBUFS; } static const struct genl_ops ieee8021154_ops[] = { diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c index 3c902e9516fb..9105265920fe 100644 --- a/net/ieee802154/nl-mac.c +++ b/net/ieee802154/nl-mac.c @@ -136,7 +136,8 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid, } wpan_phy_put(phy); - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: wpan_phy_put(phy); diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c index 7baf98b14611..1b9d25f6e898 100644 --- a/net/ieee802154/nl-phy.c +++ b/net/ieee802154/nl-phy.c @@ -65,7 +65,8 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid, goto nla_put_failure; mutex_unlock(&phy->pib_lock); kfree(buf); - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: mutex_unlock(&phy->pib_lock); diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index a25b9bbd077b..a4daf91b8d0a 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -306,7 +306,8 @@ static int nl802154_send_wpan_phy(struct cfg802154_registered_device *rdev, goto nla_put_failure; finish: - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -489,7 +490,8 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, if (nla_put_u8(msg, NL802154_ATTR_LBT_MODE, wpan_dev->lbt)) goto nla_put_failure; - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 214882e7d6de..5f344eb3fc25 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1522,7 +1522,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa, preferred, valid)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -1566,7 +1567,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - RTM_NEWADDR, NLM_F_MULTI) <= 0) { + RTM_NEWADDR, NLM_F_MULTI) < 0) { rcu_read_unlock(); goto done; } @@ -1749,7 +1750,8 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex, IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/ipv4/fib_semantics.c 
b/net/ipv4/fib_semantics.c index d2b7b5521b1b..265cb72b7c1b 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1091,7 +1091,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, nla_nest_end(skb, mp); } #endif - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index e34dccbc4d70..81751f12645f 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -203,7 +203,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, icsk->icsk_ca_ops->get_info(sk, ext, skb); out: - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; errout: nlmsg_cancel(skb, nlh); @@ -271,7 +272,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, } #endif - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, @@ -758,7 +760,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, } #endif - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index c8034587859d..9d78427652d2 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -2290,7 +2290,8 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, if (err < 0 && err != -ENOENT) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ce112d0f2698..f6e43ca5e641 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2390,7 +2390,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index ed9c9a91851c..e5f41bd5ec1b 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -886,7 +886,8 @@ static int tcp_metrics_dump_info(struct sk_buff *skb, if (tcp_metrics_fill_info(skb, tm) < 0) goto nla_put_failure; - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f7c8bbeb27b7..8975d9501d50 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -489,7 +489,8 @@ static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex, nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -619,7 +620,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb, cb->nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, - -1) <= 0) { + -1) < 0) { rcu_read_unlock(); goto done; } @@ -635,7 +636,7 @@ cont: NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, - -1) <= 0) + -1) < 0) goto done; else h++; @@ -646,7 +647,7 @@ cont: NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWNETCONF, NLM_F_MULTI, - -1) <= 0) + -1) < 0) goto done; else h++; @@ -4047,7 +4048,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa, if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0) goto error; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; error: 
nlmsg_cancel(skb, nlh); @@ -4076,7 +4078,8 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca, return -EMSGSIZE; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, @@ -4101,7 +4104,8 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca, return -EMSGSIZE; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } enum addr_type_t { @@ -4134,7 +4138,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, cb->nlh->nlmsg_seq, RTM_NEWADDR, NLM_F_MULTI); - if (err <= 0) + if (err < 0) break; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); } @@ -4151,7 +4155,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, cb->nlh->nlmsg_seq, RTM_GETMULTICAST, NLM_F_MULTI); - if (err <= 0) + if (err < 0) break; } break; @@ -4166,7 +4170,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb, cb->nlh->nlmsg_seq, RTM_GETANYCAST, NLM_F_MULTI); - if (err <= 0) + if (err < 0) break; } break; @@ -4638,7 +4642,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, goto nla_put_failure; nla_nest_end(skb, protoinfo); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -4670,7 +4675,7 @@ static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) if (inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - RTM_NEWLINK, NLM_F_MULTI) <= 0) + RTM_NEWLINK, NLM_F_MULTI) < 0) goto out; cont: idx++; @@ -4747,7 +4752,8 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev, ci.valid_time = ntohl(pinfo->valid); if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index fd0dc47f471d..e43e79d0a612 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -490,7 +490,8 @@ static int ip6addrlbl_fill(struct sk_buff *skb, return -EMSGSIZE; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) @@ -510,7 +511,7 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb) cb->nlh->nlmsg_seq, RTM_NEWADDRLABEL, NLM_F_MULTI); - if (err <= 0) + if (err < 0) break; } idx++; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 03c520a4ebeb..53775ee7d376 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -277,7 +277,6 @@ static int fib6_dump_node(struct fib6_walker *w) w->leaf = rt; return 1; } - WARN_ON(res == 0); } w->leaf = NULL; return 0; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 722669754bbf..34b682617f50 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2388,7 +2388,8 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, if (err < 0 && err != -ENOENT) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 34dcbb59df75..c60f15775c53 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2725,7 +2725,8 @@ static int rt6_fill_node(struct net *net, if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; 
nla_put_failure: nlmsg_cancel(skb, nlh); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 6b16598f31d5..b4e923f77954 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -390,7 +390,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla } out: - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); @@ -451,7 +452,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - tunnel, L2TP_CMD_TUNNEL_GET) <= 0) + tunnel, L2TP_CMD_TUNNEL_GET) < 0) goto out; ti++; @@ -752,7 +753,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl goto nla_put_failure; nla_nest_end(skb, nest); - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); @@ -816,7 +818,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, - session, L2TP_CMD_SESSION_GET) <= 0) + session, L2TP_CMD_SESSION_GET) < 0) break; si++; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b8295a430a56..e55759056361 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2887,7 +2887,8 @@ static int ip_vs_genl_dump_service(struct sk_buff *skb, if (ip_vs_genl_fill_service(skb, svc) < 0) goto nla_put_failure; - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); @@ -3079,7 +3080,8 @@ static int ip_vs_genl_dump_dest(struct sk_buff *skb, struct ip_vs_dest *dest, if (ip_vs_genl_fill_dest(skb, dest) < 0) goto nla_put_failure; - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); @@ -3215,7 +3217,8 @@ static int ip_vs_genl_dump_daemon(struct sk_buff *skb, __u32 state, if (ip_vs_genl_fill_daemon(skb, state, mcast_ifn, syncid)) goto nla_put_failure; - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 3b3ddb4fb9ee..70f697827b9b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -427,7 +427,8 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, nla_put_be32(skb, NFTA_TABLE_USE, htonl(table->use))) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_trim(skb, nlh); @@ -971,7 +972,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use))) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_trim(skb, nlh); @@ -1707,7 +1709,8 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, nla_put(skb, NFTA_RULE_USERDATA, rule->ulen, nft_userdata(rule))) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_trim(skb, nlh); @@ -2361,7 +2364,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, goto nla_put_failure; nla_nest_end(skb, desc); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_trim(skb, nlh); @@ -3035,7 +3039,8 @@ 
static int nf_tables_fill_setelem_info(struct sk_buff *skb, nla_nest_end(skb, nest); - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_trim(skb, nlh); @@ -3324,7 +3329,8 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq))) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_trim(skb, nlh); diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c index c2f2a53a4879..179625353cac 100644 --- a/net/netlabel/netlabel_cipso_v4.c +++ b/net/netlabel/netlabel_cipso_v4.c @@ -641,7 +641,8 @@ static int netlbl_cipsov4_listall_cb(struct cipso_v4_doi *doi_def, void *arg) if (ret_val != 0) goto listall_cb_failure; - return genlmsg_end(cb_arg->skb, data); + genlmsg_end(cb_arg->skb, data); + return 0; listall_cb_failure: genlmsg_cancel(cb_arg->skb, data); diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c index e66e977ef2fa..8b3b789c43c2 100644 --- a/net/netlabel/netlabel_mgmt.c +++ b/net/netlabel/netlabel_mgmt.c @@ -456,7 +456,8 @@ static int netlbl_mgmt_listall_cb(struct netlbl_dom_map *entry, void *arg) goto listall_cb_failure; cb_arg->seq++; - return genlmsg_end(cb_arg->skb, data); + genlmsg_end(cb_arg->skb, data); + return 0; listall_cb_failure: genlmsg_cancel(cb_arg->skb, data); @@ -620,7 +621,8 @@ static int netlbl_mgmt_protocols_cb(struct sk_buff *skb, if (ret_val != 0) goto protocols_cb_failure; - return genlmsg_end(skb, data); + genlmsg_end(skb, data); + return 0; protocols_cb_failure: genlmsg_cancel(skb, data); diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 78a63c18779e..aec7994f78cf 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1163,7 +1163,8 @@ static int netlbl_unlabel_staticlist_gen(u32 cmd, goto list_cb_failure; cb_arg->seq++; - return genlmsg_end(cb_arg->skb, data); + genlmsg_end(cb_arg->skb, data); + return 0; list_cb_failure: genlmsg_cancel(cb_arg->skb, data); diff --git a/net/netlink/diag.c b/net/netlink/diag.c index bb59a7ed0859..3ee63a3cff30 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -91,7 +91,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, sk_diag_put_rings_cfg(sk, skb)) goto out_nlmsg_trim; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; out_nlmsg_trim: nlmsg_cancel(skb, nlh); diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 2e11061ef885..f52a7d5734cd 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -756,7 +756,8 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq, nla_nest_end(skb, nla_grps); } - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); @@ -796,7 +797,8 @@ static int ctrl_fill_mcgrp_info(struct genl_family *family, nla_nest_end(skb, nest); nla_nest_end(skb, nla_grps); - return genlmsg_end(skb, hdr); + genlmsg_end(skb, hdr); + return 0; nla_put_failure: genlmsg_cancel(skb, hdr); diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 44989fc8cddf..be387e6219a0 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -102,7 +102,8 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target, goto nla_put_failure; } - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -518,7 +519,8 @@ static int 
nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev, nla_put_u8(msg, NFC_ATTR_RF_MODE, dev->rf_mode)) goto nla_put_failure; - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -908,7 +910,8 @@ static int nfc_genl_send_params(struct sk_buff *msg, nla_put_u16(msg, NFC_ATTR_LLC_PARAM_MIUX, be16_to_cpu(local->miux))) goto nla_put_failure; - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: @@ -1247,8 +1250,7 @@ static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev, nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type)) goto nla_put_failure; - if (genlmsg_end(msg, hdr) < 0) - goto nla_put_failure; + genlmsg_end(msg, hdr); } return 0; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 8bda3cc12344..f45f1bf4422c 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -799,7 +799,8 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, if (err) goto error; - return genlmsg_end(skb, ovs_header); + genlmsg_end(skb, ovs_header); + return 0; error: genlmsg_cancel(skb, ovs_header); @@ -1349,7 +1350,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features)) goto nla_put_failure; - return genlmsg_end(skb, ovs_header); + genlmsg_end(skb, ovs_header); + return 0; nla_put_failure: genlmsg_cancel(skb, ovs_header); @@ -1723,7 +1725,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, if (err == -EMSGSIZE) goto error; - return genlmsg_end(skb, ovs_header); + genlmsg_end(skb, ovs_header); + return 0; nla_put_failure: err = -EMSGSIZE; diff --git a/net/packet/diag.c b/net/packet/diag.c index 92f2c7107eec..0ed68f0238bf 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c @@ -177,7 +177,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, PACKET_DIAG_FILTER)) goto out_nlmsg_trim; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; out_nlmsg_trim: nlmsg_cancel(skb, nlh); diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index b64151ade6b3..54d766842c2b 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -121,7 +121,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr, ifm->ifa_index = dev->ifindex; if (nla_put_u8(skb, IFA_LOCAL, addr)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -190,7 +191,8 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst, if (nla_put_u8(skb, RTA_DST, dst) || nla_put_u32(skb, RTA_OIF, dev->ifindex)) goto nla_put_failure; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; nla_put_failure: nlmsg_cancel(skb, nlh); @@ -282,9 +284,13 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb) if (addr_idx++ < addr_start_idx) continue; - if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, RTM_NEWROUTE)) - goto out; + fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, RTM_NEWROUTE); + /* fill_route() used to return > 0 (or negative errors) but + * never 0 - ignore the return value and just go out to + * call dumpit again from outside to preserve the behavior + */ + goto out; } out: diff --git a/net/unix/diag.c b/net/unix/diag.c index 86fa0f3b2caf..ef542fbca9fe 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -155,7 
+155,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown)) goto out_nlmsg_trim; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; out_nlmsg_trim: nlmsg_cancel(skb, nlh); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 380784378df8..4ed9039bd5f9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -1721,7 +1721,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, break; } finish: - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -2404,7 +2405,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag goto nla_put_failure; } - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -3825,7 +3827,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, sinfo->assoc_req_ies)) goto nla_put_failure; - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -4555,7 +4558,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 portid, u32 seq, nla_nest_end(msg, pinfoattr); - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -5507,7 +5511,8 @@ static int nl80211_send_regdom(struct sk_buff *msg, struct netlink_callback *cb, nla_put_flag(msg, NL80211_ATTR_WIPHY_SELF_MANAGED_REG)) goto nla_put_failure; - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -6577,7 +6582,8 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, nla_nest_end(msg, bss); - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; fail_unlock_rcu: rcu_read_unlock(); @@ -6686,7 +6692,8 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq, nla_nest_end(msg, infoattr); - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -11025,7 +11032,8 @@ static int nl80211_send_scan_msg(struct sk_buff *msg, /* ignore errors and send incomplete event anyway */ nl80211_add_scan_req(msg, rdev); - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); @@ -11048,7 +11056,8 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg, nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) goto nla_put_failure; - return genlmsg_end(msg, hdr); + genlmsg_end(msg, hdr); + return 0; nla_put_failure: genlmsg_cancel(msg, hdr); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 8128594ab379..7de2ed9ec46d 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1019,7 +1019,8 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net, return err; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -1121,7 +1122,8 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net, return err; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, @@ -1842,7 +1844,8 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct if (err) goto out_cancel; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; out_cancel: nlmsg_cancel(skb, nlh); @@ -2282,7 +2285,8 
@@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m, goto out_cancel; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; out_cancel: nlmsg_cancel(skb, nlh); @@ -2490,7 +2494,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct if (err) return err; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c) @@ -2712,7 +2717,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, return err; } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, @@ -2827,7 +2833,8 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, } upe->hard = !!hard; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) @@ -2986,7 +2993,8 @@ static int build_report(struct sk_buff *skb, u8 proto, return err; } } - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int xfrm_send_report(struct net *net, u8 proto, @@ -3031,7 +3039,8 @@ static int build_mapping(struct sk_buff *skb, struct xfrm_state *x, um->old_sport = x->encap->encap_sport; um->reqid = x->props.reqid; - return nlmsg_end(skb, nlh); + nlmsg_end(skb, nlh); + return 0; } static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, -- cgit v1.2.3 From b6924225c292593189e90604c395f87cbd4130ba Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 19 Jan 2015 15:59:58 -0500 Subject: jbd2: complain about descriptor block checksum errors We should complain in dmesg when journal recovery fails on account of the descriptor block being corrupt, so that the diagnostic data can be recovered. Signed-off-by: Darrick J. 
Wong Signed-off-by: Theodore Ts'o --- fs/jbd2/recovery.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index bcbef08a4d8f..b5128c6e63ad 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -524,6 +524,9 @@ static int do_one_pass(journal_t *journal, if (descr_csum_size > 0 && !jbd2_descr_block_csum_verify(journal, bh->b_data)) { + printk(KERN_ERR "JBD2: Invalid checksum " + "recovering block %lu in log\n", + next_log_block); err = -EIO; brelse(bh); goto failed; -- cgit v1.2.3 From 3edc18d84540b94c0eba9226d01a8cbe4c162b55 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 19 Jan 2015 16:00:58 -0500 Subject: ext4: reserve codepoints used by the ext4 encryption feature Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index a75fba67bb1f..b7f393df2e4c 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -364,7 +364,8 @@ struct flex_groups { #define EXT4_DIRTY_FL 0x00000100 #define EXT4_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ #define EXT4_NOCOMPR_FL 0x00000400 /* Don't compress */ -#define EXT4_ECOMPR_FL 0x00000800 /* Compression error */ + /* nb: was previously EXT2_ECOMPR_FL */ +#define EXT4_ENCRYPT_FL 0x00000800 /* encrypted file */ /* End compression flags --- maybe not all used */ #define EXT4_INDEX_FL 0x00001000 /* hash-indexed directory */ #define EXT4_IMAGIC_FL 0x00002000 /* AFS directory */ @@ -421,7 +422,7 @@ enum { EXT4_INODE_DIRTY = 8, EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */ EXT4_INODE_NOCOMPR = 10, /* Don't compress */ - EXT4_INODE_ECOMPR = 11, /* Compression error */ + EXT4_INODE_ENCRYPT = 11, /* Compression error */ /* End compression flags --- maybe not all used */ EXT4_INODE_INDEX = 12, /* hash-indexed directory */ EXT4_INODE_IMAGIC = 13, /* AFS directory */ @@ -466,7 +467,7 @@ static inline void ext4_check_flag_values(void) CHECK_FLAG_VALUE(DIRTY); CHECK_FLAG_VALUE(COMPRBLK); CHECK_FLAG_VALUE(NOCOMPR); - CHECK_FLAG_VALUE(ECOMPR); + CHECK_FLAG_VALUE(ENCRYPT); CHECK_FLAG_VALUE(INDEX); CHECK_FLAG_VALUE(IMAGIC); CHECK_FLAG_VALUE(JOURNAL_DATA); @@ -1043,6 +1044,12 @@ extern void ext4_set_bits(void *bm, int cur, int len); /* Metadata checksum algorithm codes */ #define EXT4_CRC32C_CHKSUM 1 +/* Encryption algorithms */ +#define EXT4_ENCRYPTION_MODE_INVALID 0 +#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1 +#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2 +#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3 + /* * Structure of the super block */ @@ -1156,7 +1163,8 @@ struct ext4_super_block { __le32 s_grp_quota_inum; /* inode for tracking group quota */ __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */ - __le32 s_reserved[106]; /* Padding to the end of the block */ + __u8 s_encrypt_algos[4]; /* Encryption algorithms in use */ + __le32 s_reserved[105]; /* Padding to the end of the block */ __le32 s_checksum; /* crc32c(superblock) */ }; @@ -1537,6 +1545,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */ #define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */ #define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */ +#define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000 #define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR #define EXT2_FEATURE_INCOMPAT_SUPP 
(EXT4_FEATURE_INCOMPAT_FILETYPE| \ -- cgit v1.2.3 From a7a2c680a2ad81b3181a335ee76e23d5195007ee Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:30 +0100 Subject: fs: deduplicate noop_backing_dev_info hugetlbfs, kernfs and dlmfs can simply use noop_backing_dev_info instead of creating a local duplicate. Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Acked-by: Tejun Heo Signed-off-by: Jens Axboe --- fs/hugetlbfs/inode.c | 14 +------------- fs/kernfs/inode.c | 14 +------------- fs/kernfs/kernfs-internal.h | 1 - fs/kernfs/mount.c | 1 - fs/ocfs2/dlmfs/dlmfs.c | 16 ++-------------- 5 files changed, 4 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 5eba47f593f8..de7c95c7d840 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -62,12 +62,6 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); } -static struct backing_dev_info hugetlbfs_backing_dev_info = { - .name = "hugetlbfs", - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, -}; - int sysctl_hugetlb_shm_group; enum { @@ -498,7 +492,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, &hugetlbfs_i_mmap_rwsem_key); inode->i_mapping->a_ops = &hugetlbfs_aops; - inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_mapping->private_data = resv_map; info = HUGETLBFS_I(inode); @@ -1032,10 +1026,6 @@ static int __init init_hugetlbfs_fs(void) return -ENOTSUPP; } - error = bdi_init(&hugetlbfs_backing_dev_info); - if (error) - return error; - error = -ENOMEM; hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", sizeof(struct hugetlbfs_inode_info), @@ -1071,7 +1061,6 @@ static int __init init_hugetlbfs_fs(void) out: kmem_cache_destroy(hugetlbfs_inode_cachep); out2: - bdi_destroy(&hugetlbfs_backing_dev_info); return error; } @@ -1091,7 +1080,6 @@ static void __exit exit_hugetlbfs_fs(void) for_each_hstate(h) kern_unmount(hugetlbfs_vfsmount[i++]); unregister_filesystem(&hugetlbfs_fs_type); - bdi_destroy(&hugetlbfs_backing_dev_info); } module_init(init_hugetlbfs_fs) diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index 985217626e66..06f06887b2d2 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -24,12 +24,6 @@ static const struct address_space_operations kernfs_aops = { .write_end = simple_write_end, }; -static struct backing_dev_info kernfs_bdi = { - .name = "kernfs", - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, -}; - static const struct inode_operations kernfs_iops = { .permission = kernfs_iop_permission, .setattr = kernfs_iop_setattr, @@ -40,12 +34,6 @@ static const struct inode_operations kernfs_iops = { .listxattr = kernfs_iop_listxattr, }; -void __init kernfs_inode_init(void) -{ - if (bdi_init(&kernfs_bdi)) - panic("failed to init kernfs_bdi"); -} - static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn) { static DEFINE_MUTEX(iattr_mutex); @@ -298,7 +286,7 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode) kernfs_get(kn); inode->i_private = kn; inode->i_mapping->a_ops = &kernfs_aops; - inode->i_mapping->backing_dev_info = &kernfs_bdi; + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; 
inode->i_op = &kernfs_iops; set_default_inode_attr(inode, kn->mode); diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index dc84a3ef9ca2..af9fa7499919 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -88,7 +88,6 @@ int kernfs_iop_removexattr(struct dentry *dentry, const char *name); ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf, size_t size); ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size); -void kernfs_inode_init(void); /* * dir.c diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index f973ae9b05f1..8eaf417187f1 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -246,5 +246,4 @@ void __init kernfs_init(void) kernfs_node_cache = kmem_cache_create("kernfs_node_cache", sizeof(struct kernfs_node), 0, SLAB_PANIC, NULL); - kernfs_inode_init(); } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 57c40e34f56f..6000d3029b26 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -390,12 +390,6 @@ clear_fields: ip->ip_conn = NULL; } -static struct backing_dev_info dlmfs_backing_dev_info = { - .name = "ocfs2-dlmfs", - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, -}; - static struct inode *dlmfs_get_root_inode(struct super_block *sb) { struct inode *inode = new_inode(sb); @@ -404,7 +398,7 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb) if (inode) { inode->i_ino = get_next_ino(); inode_init_owner(inode, NULL, mode); - inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inc_nlink(inode); @@ -428,7 +422,7 @@ static struct inode *dlmfs_get_inode(struct inode *parent, inode->i_ino = get_next_ino(); inode_init_owner(inode, parent, mode); - inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info; + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ip = DLMFS_I(inode); @@ -643,10 +637,6 @@ static int __init init_dlmfs_fs(void) int status; int cleanup_inode = 0, cleanup_worker = 0; - status = bdi_init(&dlmfs_backing_dev_info); - if (status) - return status; - dlmfs_inode_cache = kmem_cache_create("dlmfs_inode_cache", sizeof(struct dlmfs_inode_private), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| @@ -673,7 +663,6 @@ bail: kmem_cache_destroy(dlmfs_inode_cache); if (cleanup_worker) destroy_workqueue(user_dlm_worker); - bdi_destroy(&dlmfs_backing_dev_info); } else printk("OCFS2 User DLM kernel interface loaded\n"); return status; @@ -693,7 +682,6 @@ static void __exit exit_dlmfs_fs(void) rcu_barrier(); kmem_cache_destroy(dlmfs_inode_cache); - bdi_destroy(&dlmfs_backing_dev_info); } MODULE_AUTHOR("Oracle"); -- cgit v1.2.3 From b4caecd48005fbed3949dde6c1cb233142fd69e9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:32 +0100 Subject: fs: introduce f_op->mmap_capabilities for nommu mmap support Since "BDI: Provide backing device capability information [try #3]" the backing_dev_info structure also provides flags for the kind of mmap operation available in a nommu environment, which is entirely unrelated to it's original purpose. Introduce a new nommu-only file operation to provide this information to the nommu mmap code instead. 
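
(Illustration only, not part of this patch: with the new hook, a !MMU driver that wants to allow direct read/write/exec mappings would express that roughly as below. The example_* names are made up; the actual in-tree conversions follow in the diff.)

#ifndef CONFIG_MMU
/* advertise which kinds of !MMU mapping this file supports */
static unsigned example_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ |
	       NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}
#endif

static const struct file_operations example_fops = {
	/* ... usual operations (open, read, mmap, ...) omitted ... */
#ifndef CONFIG_MMU
	.mmap_capabilities	= example_mmap_capabilities,
#endif
};
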
Splitting this from the backing_dev_info structure allows to remove lots of backing_dev_info instance that aren't otherwise needed, and entirely gets rid of the concept of providing a backing_dev_info for a character device. It also removes the need for the mtd_inodefs filesystem. Signed-off-by: Christoph Hellwig Reviewed-by: Tejun Heo Acked-by: Brian Norris Signed-off-by: Jens Axboe --- Documentation/nommu-mmap.txt | 8 +-- block/blk-core.c | 2 +- drivers/char/mem.c | 64 ++++++++++---------- drivers/mtd/mtdchar.c | 72 ++++------------------ drivers/mtd/mtdconcat.c | 10 ---- drivers/mtd/mtdcore.c | 80 +++++++------------------ drivers/mtd/mtdpart.c | 1 - drivers/staging/lustre/lustre/llite/llite_lib.c | 2 +- fs/9p/v9fs.c | 2 +- fs/afs/volume.c | 2 +- fs/aio.c | 14 +---- fs/btrfs/disk-io.c | 3 +- fs/char_dev.c | 24 -------- fs/cifs/connect.c | 2 +- fs/coda/inode.c | 2 +- fs/configfs/configfs_internal.h | 2 - fs/configfs/inode.c | 18 +----- fs/configfs/mount.c | 11 +--- fs/ecryptfs/main.c | 2 +- fs/exofs/super.c | 2 +- fs/ncpfs/inode.c | 2 +- fs/ramfs/file-nommu.c | 7 +++ fs/ramfs/inode.c | 22 +------ fs/romfs/mmap-nommu.c | 10 ++++ fs/ubifs/super.c | 2 +- include/linux/backing-dev.h | 33 ++-------- include/linux/cdev.h | 2 - include/linux/fs.h | 23 +++++++ include/linux/mtd/mtd.h | 2 + mm/backing-dev.c | 7 +-- mm/nommu.c | 69 ++++++++++----------- security/security.c | 13 ++-- 32 files changed, 169 insertions(+), 346 deletions(-) (limited to 'fs') diff --git a/Documentation/nommu-mmap.txt b/Documentation/nommu-mmap.txt index 8e1ddec2c78a..ae57b9ea0d41 100644 --- a/Documentation/nommu-mmap.txt +++ b/Documentation/nommu-mmap.txt @@ -43,12 +43,12 @@ and it's also much more restricted in the latter case: even if this was created by another process. - If possible, the file mapping will be directly on the backing device - if the backing device has the BDI_CAP_MAP_DIRECT capability and + if the backing device has the NOMMU_MAP_DIRECT capability and appropriate mapping protection capabilities. Ramfs, romfs, cramfs and mtd might all permit this. - If the backing device device can't or won't permit direct sharing, - but does have the BDI_CAP_MAP_COPY capability, then a copy of the + but does have the NOMMU_MAP_COPY capability, then a copy of the appropriate bit of the file will be read into a contiguous bit of memory and any extraneous space beyond the EOF will be cleared @@ -220,7 +220,7 @@ directly (can't be copied). The file->f_op->mmap() operation will be called to actually inaugurate the mapping. It can be rejected at that point. Returning the ENOSYS error will -cause the mapping to be copied instead if BDI_CAP_MAP_COPY is specified. +cause the mapping to be copied instead if NOMMU_MAP_COPY is specified. The vm_ops->close() routine will be invoked when the last mapping on a chardev is removed. An existing mapping will be shared, partially or not, if possible @@ -232,7 +232,7 @@ want to handle it, despite the fact it's got an operation. For instance, it might try directing the call to a secondary driver which turns out not to implement it. Such is the case for the framebuffer driver which attempts to direct the call to the device-specific driver. Under such circumstances, the -mapping request will be rejected if BDI_CAP_MAP_COPY is not specified, and a +mapping request will be rejected if NOMMU_MAP_COPY is not specified, and a copy mapped otherwise. 
IMPORTANT NOTE: diff --git a/block/blk-core.c b/block/blk-core.c index 3ad405571dcc..928aac29bccd 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -607,7 +607,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; q->backing_dev_info.state = 0; - q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; + q->backing_dev_info.capabilities = 0; q->backing_dev_info.name = "block"; q->node = node_id; diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 4c58333b4257..9a6b63783a94 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -287,13 +287,24 @@ static unsigned long get_unmapped_area_mem(struct file *file, return pgoff << PAGE_SHIFT; } +/* permit direct mmap, for read, write or exec */ +static unsigned memory_mmap_capabilities(struct file *file) +{ + return NOMMU_MAP_DIRECT | + NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; +} + +static unsigned zero_mmap_capabilities(struct file *file) +{ + return NOMMU_MAP_COPY; +} + /* can't do an in-place private mapping if there's no MMU */ static inline int private_mapping_ok(struct vm_area_struct *vma) { return vma->vm_flags & VM_MAYSHARE; } #else -#define get_unmapped_area_mem NULL static inline int private_mapping_ok(struct vm_area_struct *vma) { @@ -721,7 +732,10 @@ static const struct file_operations mem_fops = { .write = write_mem, .mmap = mmap_mem, .open = open_mem, +#ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, + .mmap_capabilities = memory_mmap_capabilities, +#endif }; #ifdef CONFIG_DEVKMEM @@ -731,7 +745,10 @@ static const struct file_operations kmem_fops = { .write = write_kmem, .mmap = mmap_kmem, .open = open_kmem, +#ifndef CONFIG_MMU .get_unmapped_area = get_unmapped_area_mem, + .mmap_capabilities = memory_mmap_capabilities, +#endif }; #endif @@ -760,16 +777,9 @@ static const struct file_operations zero_fops = { .read_iter = read_iter_zero, .aio_write = aio_write_zero, .mmap = mmap_zero, -}; - -/* - * capabilities for /dev/zero - * - permits private mappings, "copies" are taken of the source of zeros - * - no writeback happens - */ -static struct backing_dev_info zero_bdi = { - .name = "char/mem", - .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK, +#ifndef CONFIG_MMU + .mmap_capabilities = zero_mmap_capabilities, +#endif }; static const struct file_operations full_fops = { @@ -783,22 +793,22 @@ static const struct memdev { const char *name; umode_t mode; const struct file_operations *fops; - struct backing_dev_info *dev_info; + fmode_t fmode; } devlist[] = { - [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi }, + [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET }, #ifdef CONFIG_DEVKMEM - [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi }, + [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET }, #endif - [3] = { "null", 0666, &null_fops, NULL }, + [3] = { "null", 0666, &null_fops, 0 }, #ifdef CONFIG_DEVPORT - [4] = { "port", 0, &port_fops, NULL }, + [4] = { "port", 0, &port_fops, 0 }, #endif - [5] = { "zero", 0666, &zero_fops, &zero_bdi }, - [7] = { "full", 0666, &full_fops, NULL }, - [8] = { "random", 0666, &random_fops, NULL }, - [9] = { "urandom", 0666, &urandom_fops, NULL }, + [5] = { "zero", 0666, &zero_fops, 0 }, + [7] = { "full", 0666, &full_fops, 0 }, + [8] = { "random", 0666, &random_fops, 0 }, + [9] = { "urandom", 0666, &urandom_fops, 0 }, #ifdef CONFIG_PRINTK - [11] = { "kmsg", 0644, &kmsg_fops, NULL }, + [11] = { "kmsg", 0644, &kmsg_fops, 0 }, #endif }; @@ 
-816,12 +826,7 @@ static int memory_open(struct inode *inode, struct file *filp) return -ENXIO; filp->f_op = dev->fops; - if (dev->dev_info) - filp->f_mapping->backing_dev_info = dev->dev_info; - - /* Is /dev/mem or /dev/kmem ? */ - if (dev->dev_info == &directly_mappable_cdev_bdi) - filp->f_mode |= FMODE_UNSIGNED_OFFSET; + filp->f_mode |= dev->fmode; if (dev->fops->open) return dev->fops->open(inode, filp); @@ -846,11 +851,6 @@ static struct class *mem_class; static int __init chr_dev_init(void) { int minor; - int err; - - err = bdi_init(&zero_bdi); - if (err) - return err; if (register_chrdev(MEM_MAJOR, "mem", &memory_fops)) printk("unable to get major %d for memory devs\n", MEM_MAJOR); diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 53563955931b..55fa27ecf4e1 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -49,7 +49,6 @@ static DEFINE_MUTEX(mtd_mutex); */ struct mtd_file_info { struct mtd_info *mtd; - struct inode *ino; enum mtd_file_modes mode; }; @@ -59,10 +58,6 @@ static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig) return fixed_size_llseek(file, offset, orig, mfi->mtd->size); } -static int count; -static struct vfsmount *mnt; -static struct file_system_type mtd_inodefs_type; - static int mtdchar_open(struct inode *inode, struct file *file) { int minor = iminor(inode); @@ -70,7 +65,6 @@ static int mtdchar_open(struct inode *inode, struct file *file) int ret = 0; struct mtd_info *mtd; struct mtd_file_info *mfi; - struct inode *mtd_ino; pr_debug("MTD_open\n"); @@ -78,10 +72,6 @@ static int mtdchar_open(struct inode *inode, struct file *file) if ((file->f_mode & FMODE_WRITE) && (minor & 1)) return -EACCES; - ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count); - if (ret) - return ret; - mutex_lock(&mtd_mutex); mtd = get_mtd_device(NULL, devnum); @@ -95,43 +85,26 @@ static int mtdchar_open(struct inode *inode, struct file *file) goto out1; } - mtd_ino = iget_locked(mnt->mnt_sb, devnum); - if (!mtd_ino) { - ret = -ENOMEM; - goto out1; - } - if (mtd_ino->i_state & I_NEW) { - mtd_ino->i_private = mtd; - mtd_ino->i_mode = S_IFCHR; - mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info; - unlock_new_inode(mtd_ino); - } - file->f_mapping = mtd_ino->i_mapping; - /* You can't open it RW if it's not a writeable device */ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) { ret = -EACCES; - goto out2; + goto out1; } mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); if (!mfi) { ret = -ENOMEM; - goto out2; + goto out1; } - mfi->ino = mtd_ino; mfi->mtd = mtd; file->private_data = mfi; mutex_unlock(&mtd_mutex); return 0; -out2: - iput(mtd_ino); out1: put_mtd_device(mtd); out: mutex_unlock(&mtd_mutex); - simple_release_fs(&mnt, &count); return ret; } /* mtdchar_open */ @@ -148,12 +121,9 @@ static int mtdchar_close(struct inode *inode, struct file *file) if ((file->f_mode & FMODE_WRITE)) mtd_sync(mtd); - iput(mfi->ino); - put_mtd_device(mtd); file->private_data = NULL; kfree(mfi); - simple_release_fs(&mnt, &count); return 0; } /* mtdchar_close */ @@ -1117,6 +1087,13 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file, ret = mtd_get_unmapped_area(mtd, len, offset, flags); return ret == -EOPNOTSUPP ? 
-ENODEV : ret; } + +static unsigned mtdchar_mmap_capabilities(struct file *file) +{ + struct mtd_file_info *mfi = file->private_data; + + return mtd_mmap_capabilities(mfi->mtd); +} #endif /* @@ -1160,27 +1137,10 @@ static const struct file_operations mtd_fops = { .mmap = mtdchar_mmap, #ifndef CONFIG_MMU .get_unmapped_area = mtdchar_get_unmapped_area, + .mmap_capabilities = mtdchar_mmap_capabilities, #endif }; -static const struct super_operations mtd_ops = { - .drop_inode = generic_delete_inode, - .statfs = simple_statfs, -}; - -static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) -{ - return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC); -} - -static struct file_system_type mtd_inodefs_type = { - .name = "mtd_inodefs", - .mount = mtd_inodefs_mount, - .kill_sb = kill_anon_super, -}; -MODULE_ALIAS_FS("mtd_inodefs"); - int __init init_mtdchar(void) { int ret; @@ -1193,23 +1153,11 @@ int __init init_mtdchar(void) return ret; } - ret = register_filesystem(&mtd_inodefs_type); - if (ret) { - pr_err("Can't register mtd_inodefs filesystem, error %d\n", - ret); - goto err_unregister_chdev; - } - - return ret; - -err_unregister_chdev: - __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); return ret; } void __exit cleanup_mtdchar(void) { - unregister_filesystem(&mtd_inodefs_type); __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); } diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index b9000563b9f4..eacc3aac7327 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -732,8 +732,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; - concat->mtd.backing_dev_info = subdev[0]->backing_dev_info; - concat->subdev[0] = subdev[0]; for (i = 1; i < num_devs; i++) { @@ -761,14 +759,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c subdev[i]->flags & MTD_WRITEABLE; } - /* only permit direct mapping if the BDIs are all the same - * - copy-mapping is still permitted - */ - if (concat->mtd.backing_dev_info != - subdev[i]->backing_dev_info) - concat->mtd.backing_dev_info = - &default_backing_dev_info; - concat->mtd.size += subdev[i]->size; concat->mtd.ecc_stats.badblocks += subdev[i]->ecc_stats.badblocks; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 4c611871d7e6..ff38a1df22f7 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -43,33 +43,7 @@ #include "mtdcore.h" -/* - * backing device capabilities for non-mappable devices (such as NAND flash) - * - permits private mappings, copies are taken of the data - */ -static struct backing_dev_info mtd_bdi_unmappable = { - .capabilities = BDI_CAP_MAP_COPY, -}; - -/* - * backing device capabilities for R/O mappable devices (such as ROM) - * - permits private mappings, copies are taken of the data - * - permits non-writable shared mappings - */ -static struct backing_dev_info mtd_bdi_ro_mappable = { - .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | - BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), -}; - -/* - * backing device capabilities for writable mappable devices (such as RAM) - * - permits private mappings, copies are taken of the data - * - permits non-writable shared mappings - */ -static struct backing_dev_info mtd_bdi_rw_mappable = { - .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | - BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | - BDI_CAP_WRITE_MAP), 
+static struct backing_dev_info mtd_bdi = { }; static int mtd_cls_suspend(struct device *dev, pm_message_t state); @@ -365,6 +339,22 @@ static struct device_type mtd_devtype = { .release = mtd_release, }; +#ifndef CONFIG_MMU +unsigned mtd_mmap_capabilities(struct mtd_info *mtd) +{ + switch (mtd->type) { + case MTD_RAM: + return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | + NOMMU_MAP_READ | NOMMU_MAP_WRITE; + case MTD_ROM: + return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC | + NOMMU_MAP_READ; + default: + return NOMMU_MAP_COPY; + } +} +#endif + /** * add_mtd_device - register an MTD device * @mtd: pointer to new MTD device info structure @@ -380,19 +370,7 @@ int add_mtd_device(struct mtd_info *mtd) struct mtd_notifier *not; int i, error; - if (!mtd->backing_dev_info) { - switch (mtd->type) { - case MTD_RAM: - mtd->backing_dev_info = &mtd_bdi_rw_mappable; - break; - case MTD_ROM: - mtd->backing_dev_info = &mtd_bdi_ro_mappable; - break; - default: - mtd->backing_dev_info = &mtd_bdi_unmappable; - break; - } - } + mtd->backing_dev_info = &mtd_bdi; BUG_ON(mtd->writesize == 0); mutex_lock(&mtd_table_mutex); @@ -1237,17 +1215,9 @@ static int __init init_mtd(void) if (ret) goto err_reg; - ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap"); - if (ret) - goto err_bdi1; - - ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap"); - if (ret) - goto err_bdi2; - - ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap"); + ret = mtd_bdi_init(&mtd_bdi, "mtd"); if (ret) - goto err_bdi3; + goto err_bdi; proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops); @@ -1260,11 +1230,7 @@ static int __init init_mtd(void) out_procfs: if (proc_mtd) remove_proc_entry("mtd", NULL); -err_bdi3: - bdi_destroy(&mtd_bdi_ro_mappable); -err_bdi2: - bdi_destroy(&mtd_bdi_unmappable); -err_bdi1: +err_bdi: class_unregister(&mtd_class); err_reg: pr_err("Error registering mtd class or bdi: %d\n", ret); @@ -1277,9 +1243,7 @@ static void __exit cleanup_mtd(void) if (proc_mtd) remove_proc_entry("mtd", NULL); class_unregister(&mtd_class); - bdi_destroy(&mtd_bdi_unmappable); - bdi_destroy(&mtd_bdi_ro_mappable); - bdi_destroy(&mtd_bdi_rw_mappable); + bdi_destroy(&mtd_bdi); } module_init(init_mtd); diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index a3e3a7d074d5..e779de315ade 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -378,7 +378,6 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, slave->mtd.name = name; slave->mtd.owner = master->owner; - slave->mtd.backing_dev_info = master->backing_dev_info; /* NOTE: we don't arrange MTDs as a tree; it'd be error-prone * to have the same data be in two different partitions. 
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index a3367bfb1456..d5b149c5542d 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -987,7 +987,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt) if (err) goto out_free; lsi->lsi_flags |= LSI_BDI_INITIALIZED; - lsi->lsi_bdi.capabilities = BDI_CAP_MAP_COPY; + lsi->lsi_bdi.capabilities = 0; err = ll_bdi_register(&lsi->lsi_bdi); if (err) goto out_free; diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 6894b085f0ee..620d93489539 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -335,7 +335,7 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, } init_rwsem(&v9ses->rename_sem); - rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); + rc = bdi_setup_and_register(&v9ses->bdi, "9p"); if (rc) { kfree(v9ses->aname); kfree(v9ses->uname); diff --git a/fs/afs/volume.c b/fs/afs/volume.c index 2b607257820c..d142a2449e65 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -106,7 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params) volume->cell = params->cell; volume->vid = vlocation->vldb.vid[params->type]; - ret = bdi_setup_and_register(&volume->bdi, "afs", BDI_CAP_MAP_COPY); + ret = bdi_setup_and_register(&volume->bdi, "afs"); if (ret) goto error_bdi; diff --git a/fs/aio.c b/fs/aio.c index 1b7893ecc296..6f13d3fab07f 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -165,15 +165,6 @@ static struct vfsmount *aio_mnt; static const struct file_operations aio_ring_fops; static const struct address_space_operations aio_ctx_aops; -/* Backing dev info for aio fs. - * -no dirty page accounting or writeback happens - */ -static struct backing_dev_info aio_fs_backing_dev_info = { - .name = "aiofs", - .state = 0, - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY, -}; - static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) { struct qstr this = QSTR_INIT("[aio]", 5); @@ -185,7 +176,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) inode->i_mapping->a_ops = &aio_ctx_aops; inode->i_mapping->private_data = ctx; - inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info; + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_size = PAGE_SIZE * nr_pages; path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); @@ -230,9 +221,6 @@ static int __init aio_setup(void) if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); - if (bdi_init(&aio_fs_backing_dev_info)) - panic("Failed to init aio fs backing dev info."); - kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8c63419a7f70..afc4092989cd 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1715,8 +1715,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) { int err; - bdi->capabilities = BDI_CAP_MAP_COPY; - err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY); + err = bdi_setup_and_register(bdi, "btrfs"); if (err) return err; diff --git a/fs/char_dev.c b/fs/char_dev.c index 67b2007f10fe..ea06a3d0364c 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -24,27 +24,6 @@ #include "internal.h" -/* - * capabilities for /dev/mem, /dev/kmem and similar directly mappable character - * devices - * - permits shared-mmap for read, write and/or exec - * - does not permit private 
mmap in NOMMU mode (can't do COW) - * - no readahead or I/O queue unplugging required - */ -struct backing_dev_info directly_mappable_cdev_bdi = { - .name = "char", - .capabilities = ( -#ifdef CONFIG_MMU - /* permit private copies of the data to be taken */ - BDI_CAP_MAP_COPY | -#endif - /* permit direct mmap, for read, write or exec */ - BDI_CAP_MAP_DIRECT | - BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP | - /* no writeback happens */ - BDI_CAP_NO_ACCT_AND_WRITEBACK), -}; - static struct kobj_map *cdev_map; static DEFINE_MUTEX(chrdevs_lock); @@ -575,8 +554,6 @@ static struct kobject *base_probe(dev_t dev, int *part, void *data) void __init chrdev_init(void) { cdev_map = kobj_map_init(base_probe, &chrdevs_lock); - if (bdi_init(&directly_mappable_cdev_bdi)) - panic("Failed to init directly mappable cdev bdi"); } @@ -590,4 +567,3 @@ EXPORT_SYMBOL(cdev_del); EXPORT_SYMBOL(cdev_add); EXPORT_SYMBOL(__register_chrdev); EXPORT_SYMBOL(__unregister_chrdev); -EXPORT_SYMBOL(directly_mappable_cdev_bdi); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 2a772da16b83..d3aa999ab785 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3446,7 +3446,7 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) int referral_walks_count = 0; #endif - rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); + rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs"); if (rc) return rc; diff --git a/fs/coda/inode.c b/fs/coda/inode.c index b945410bfcd5..82ec68b59208 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -183,7 +183,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) goto unlock_out; } - error = bdi_setup_and_register(&vc->bdi, "coda", BDI_CAP_MAP_COPY); + error = bdi_setup_and_register(&vc->bdi, "coda"); if (error) goto unlock_out; diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index bd4a3c167091..a315677e44d3 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -70,8 +70,6 @@ extern int configfs_is_root(struct config_item *item); extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); -extern int configfs_inode_init(void); -extern void configfs_inode_exit(void); extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); extern int configfs_make_dirent(struct configfs_dirent *, diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 5946ad98053f..0ad6b4d6de00 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -50,12 +50,6 @@ static const struct address_space_operations configfs_aops = { .write_end = simple_write_end, }; -static struct backing_dev_info configfs_backing_dev_info = { - .name = "configfs", - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, -}; - static const struct inode_operations configfs_inode_operations ={ .setattr = configfs_setattr, }; @@ -137,7 +131,7 @@ struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd, if (inode) { inode->i_ino = get_next_ino(); inode->i_mapping->a_ops = &configfs_aops; - inode->i_mapping->backing_dev_info = &configfs_backing_dev_info; + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_op = &configfs_inode_operations; if (sd->s_iattr) { @@ -283,13 +277,3 @@ void configfs_hash_and_remove(struct dentry * dir, const char * name) } mutex_unlock(&dir->d_inode->i_mutex); } 
- -int __init configfs_inode_init(void) -{ - return bdi_init(&configfs_backing_dev_info); -} - -void configfs_inode_exit(void) -{ - bdi_destroy(&configfs_backing_dev_info); -} diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index f6c285833390..da94e41bdbf6 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -145,19 +145,13 @@ static int __init configfs_init(void) if (!config_kobj) goto out2; - err = configfs_inode_init(); - if (err) - goto out3; - err = register_filesystem(&configfs_fs_type); if (err) - goto out4; + goto out3; return 0; -out4: - pr_err("Unable to register filesystem!\n"); - configfs_inode_exit(); out3: + pr_err("Unable to register filesystem!\n"); kobject_put(config_kobj); out2: kmem_cache_destroy(configfs_dir_cachep); @@ -172,7 +166,6 @@ static void __exit configfs_exit(void) kobject_put(config_kobj); kmem_cache_destroy(configfs_dir_cachep); configfs_dir_cachep = NULL; - configfs_inode_exit(); } MODULE_AUTHOR("Oracle"); diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index d9eb84bda559..1895d60f4122 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -520,7 +520,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags goto out; } - rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); + rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs"); if (rc) goto out1; diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 95965503afcb..fcc2e565f540 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -836,7 +836,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent) goto free_sbi; } - ret = bdi_setup_and_register(&sbi->bdi, "exofs", BDI_CAP_MAP_COPY); + ret = bdi_setup_and_register(&sbi->bdi, "exofs"); if (ret) { EXOFS_DBGMSG("Failed to bdi_setup_and_register\n"); dput(sb->s_root); diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index e31e589369a4..a699a3fc62c0 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -560,7 +560,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) server = NCP_SBP(sb); memset(server, 0, sizeof(*server)); - error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY); + error = bdi_setup_and_register(&server->bdi, "ncpfs"); if (error) goto out_fput; diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index bbafbde3471a..f6ab41b39612 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -34,7 +34,14 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, unsigned long flags); static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma); +static unsigned ramfs_mmap_capabilities(struct file *file) +{ + return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ | + NOMMU_MAP_WRITE | NOMMU_MAP_EXEC; +} + const struct file_operations ramfs_file_operations = { + .mmap_capabilities = ramfs_mmap_capabilities, .mmap = ramfs_nommu_mmap, .get_unmapped_area = ramfs_nommu_get_unmapped_area, .read = new_sync_read, diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index d365b1c4eb3c..ad4d712002f4 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -50,14 +50,6 @@ static const struct address_space_operations ramfs_aops = { .set_page_dirty = __set_page_dirty_no_writeback, }; -static struct backing_dev_info ramfs_backing_dev_info = { - .name = "ramfs", - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | - BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY | - BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP, -}; - struct inode 
*ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev) { @@ -67,7 +59,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); inode->i_mapping->a_ops = &ramfs_aops; - inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info; + inode->i_mapping->backing_dev_info = &noop_backing_dev_info; mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); mapping_set_unevictable(inode->i_mapping); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; @@ -267,19 +259,9 @@ static struct file_system_type ramfs_fs_type = { int __init init_ramfs_fs(void) { static unsigned long once; - int err; if (test_and_set_bit(0, &once)) return 0; - - err = bdi_init(&ramfs_backing_dev_info); - if (err) - return err; - - err = register_filesystem(&ramfs_fs_type); - if (err) - bdi_destroy(&ramfs_backing_dev_info); - - return err; + return register_filesystem(&ramfs_fs_type); } fs_initcall(init_ramfs_fs); diff --git a/fs/romfs/mmap-nommu.c b/fs/romfs/mmap-nommu.c index ea06c7554860..7da9e2153953 100644 --- a/fs/romfs/mmap-nommu.c +++ b/fs/romfs/mmap-nommu.c @@ -70,6 +70,15 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma) return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -ENOSYS; } +static unsigned romfs_mmap_capabilities(struct file *file) +{ + struct mtd_info *mtd = file_inode(file)->i_sb->s_mtd; + + if (!mtd) + return NOMMU_MAP_COPY; + return mtd_mmap_capabilities(mtd); +} + const struct file_operations romfs_ro_fops = { .llseek = generic_file_llseek, .read = new_sync_read, @@ -77,4 +86,5 @@ const struct file_operations romfs_ro_fops = { .splice_read = generic_file_splice_read, .mmap = romfs_mmap, .get_unmapped_area = romfs_get_unmapped_area, + .mmap_capabilities = romfs_mmap_capabilities, }; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 106bf20629ce..ed93dc6ae245 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2017,7 +2017,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) * Read-ahead will be disabled because @c->bdi.ra_pages is 0. */ c->bdi.name = "ubifs", - c->bdi.capabilities = BDI_CAP_MAP_COPY; + c->bdi.capabilities = 0; err = bdi_init(&c->bdi); if (err) goto out_close; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e936cea856dc..478f95d92d73 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -114,7 +114,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, const char *fmt, ...); int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); void bdi_unregister(struct backing_dev_info *bdi); -int __must_check bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); +int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, enum wb_reason reason); void bdi_start_background_writeback(struct backing_dev_info *bdi); @@ -228,42 +228,17 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); * BDI_CAP_NO_ACCT_DIRTY: Dirty pages shouldn't contribute to accounting * BDI_CAP_NO_WRITEBACK: Don't write pages back * BDI_CAP_NO_ACCT_WB: Don't automatically account writeback pages - * - * These flags let !MMU mmap() govern direct device mapping vs immediate - * copying more easily for MAP_PRIVATE, especially for ROM filesystems. 
- * - * BDI_CAP_MAP_COPY: Copy can be mapped (MAP_PRIVATE) - * BDI_CAP_MAP_DIRECT: Can be mapped directly (MAP_SHARED) - * BDI_CAP_READ_MAP: Can be mapped for reading - * BDI_CAP_WRITE_MAP: Can be mapped for writing - * BDI_CAP_EXEC_MAP: Can be mapped for execution - * * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold. */ #define BDI_CAP_NO_ACCT_DIRTY 0x00000001 #define BDI_CAP_NO_WRITEBACK 0x00000002 -#define BDI_CAP_MAP_COPY 0x00000004 -#define BDI_CAP_MAP_DIRECT 0x00000008 -#define BDI_CAP_READ_MAP 0x00000010 -#define BDI_CAP_WRITE_MAP 0x00000020 -#define BDI_CAP_EXEC_MAP 0x00000040 -#define BDI_CAP_NO_ACCT_WB 0x00000080 -#define BDI_CAP_STABLE_WRITES 0x00000200 -#define BDI_CAP_STRICTLIMIT 0x00000400 - -#define BDI_CAP_VMFLAGS \ - (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP) +#define BDI_CAP_NO_ACCT_WB 0x00000004 +#define BDI_CAP_STABLE_WRITES 0x00000008 +#define BDI_CAP_STRICTLIMIT 0x00000010 #define BDI_CAP_NO_ACCT_AND_WRITEBACK \ (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) -#if defined(VM_MAYREAD) && \ - (BDI_CAP_READ_MAP != VM_MAYREAD || \ - BDI_CAP_WRITE_MAP != VM_MAYWRITE || \ - BDI_CAP_EXEC_MAP != VM_MAYEXEC) -#error please change backing_dev_info::capabilities flags -#endif - extern struct backing_dev_info default_backing_dev_info; extern struct backing_dev_info noop_backing_dev_info; diff --git a/include/linux/cdev.h b/include/linux/cdev.h index fb4591977b03..f8763615a5f2 100644 --- a/include/linux/cdev.h +++ b/include/linux/cdev.h @@ -30,6 +30,4 @@ void cdev_del(struct cdev *); void cd_forget(struct inode *); -extern struct backing_dev_info directly_mappable_cdev_bdi; - #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index 42efe13077b6..1dada399aa23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1502,6 +1502,26 @@ struct block_device_operations; #define HAVE_COMPAT_IOCTL 1 #define HAVE_UNLOCKED_IOCTL 1 +/* + * These flags let !MMU mmap() govern direct device mapping vs immediate + * copying more easily for MAP_PRIVATE, especially for ROM filesystems. 
+ * + * NOMMU_MAP_COPY: Copy can be mapped (MAP_PRIVATE) + * NOMMU_MAP_DIRECT: Can be mapped directly (MAP_SHARED) + * NOMMU_MAP_READ: Can be mapped for reading + * NOMMU_MAP_WRITE: Can be mapped for writing + * NOMMU_MAP_EXEC: Can be mapped for execution + */ +#define NOMMU_MAP_COPY 0x00000001 +#define NOMMU_MAP_DIRECT 0x00000008 +#define NOMMU_MAP_READ VM_MAYREAD +#define NOMMU_MAP_WRITE VM_MAYWRITE +#define NOMMU_MAP_EXEC VM_MAYEXEC + +#define NOMMU_VMFLAGS \ + (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC) + + struct iov_iter; struct file_operations { @@ -1536,6 +1556,9 @@ struct file_operations { long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len); void (*show_fdinfo)(struct seq_file *m, struct file *f); +#ifndef CONFIG_MMU + unsigned (*mmap_capabilities)(struct file *); +#endif }; struct inode_operations { diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 031ff3a9a0bd..3301c4c289d6 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -408,4 +408,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) { return mtd_is_bitflip(err) || mtd_is_eccerr(err); } +unsigned mtd_mmap_capabilities(struct mtd_info *mtd); + #endif /* __MTD_MTD_H__ */ diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 0ae0df55000b..16c68958aeda 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -17,8 +17,6 @@ static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); struct backing_dev_info default_backing_dev_info = { .name = "default", .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, - .state = 0, - .capabilities = BDI_CAP_MAP_COPY, }; EXPORT_SYMBOL_GPL(default_backing_dev_info); @@ -513,13 +511,12 @@ EXPORT_SYMBOL(bdi_destroy); * For use from filesystems to quickly init and register a bdi associated * with dirty writeback */ -int bdi_setup_and_register(struct backing_dev_info *bdi, char *name, - unsigned int cap) +int bdi_setup_and_register(struct backing_dev_info *bdi, char *name) { int err; bdi->name = name; - bdi->capabilities = cap; + bdi->capabilities = 0; err = bdi_init(bdi); if (err) return err; diff --git a/mm/nommu.c b/mm/nommu.c index b51eadf6d952..13af96f35a4b 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -946,9 +946,6 @@ static int validate_mmap_request(struct file *file, return -EOVERFLOW; if (file) { - /* validate file mapping requests */ - struct address_space *mapping; - /* files must support mmap */ if (!file->f_op->mmap) return -ENODEV; @@ -957,28 +954,22 @@ static int validate_mmap_request(struct file *file, * - we support chardevs that provide their own "memory" * - we support files/blockdevs that are memory backed */ - mapping = file->f_mapping; - if (!mapping) - mapping = file_inode(file)->i_mapping; - - capabilities = 0; - if (mapping && mapping->backing_dev_info) - capabilities = mapping->backing_dev_info->capabilities; - - if (!capabilities) { + if (file->f_op->mmap_capabilities) { + capabilities = file->f_op->mmap_capabilities(file); + } else { /* no explicit capabilities set, so assume some * defaults */ switch (file_inode(file)->i_mode & S_IFMT) { case S_IFREG: case S_IFBLK: - capabilities = BDI_CAP_MAP_COPY; + capabilities = NOMMU_MAP_COPY; break; case S_IFCHR: capabilities = - BDI_CAP_MAP_DIRECT | - BDI_CAP_READ_MAP | - BDI_CAP_WRITE_MAP; + NOMMU_MAP_DIRECT | + NOMMU_MAP_READ | + NOMMU_MAP_WRITE; break; default: @@ -989,9 +980,9 @@ static int validate_mmap_request(struct file *file, /* eliminate any capabilities that we can't support on this * device */ if (!file->f_op->get_unmapped_area) - capabilities &= 
~BDI_CAP_MAP_DIRECT; + capabilities &= ~NOMMU_MAP_DIRECT; if (!file->f_op->read) - capabilities &= ~BDI_CAP_MAP_COPY; + capabilities &= ~NOMMU_MAP_COPY; /* The file shall have been opened with read permission. */ if (!(file->f_mode & FMODE_READ)) @@ -1010,29 +1001,29 @@ static int validate_mmap_request(struct file *file, if (locks_verify_locked(file)) return -EAGAIN; - if (!(capabilities & BDI_CAP_MAP_DIRECT)) + if (!(capabilities & NOMMU_MAP_DIRECT)) return -ENODEV; /* we mustn't privatise shared mappings */ - capabilities &= ~BDI_CAP_MAP_COPY; + capabilities &= ~NOMMU_MAP_COPY; } else { /* we're going to read the file into private memory we * allocate */ - if (!(capabilities & BDI_CAP_MAP_COPY)) + if (!(capabilities & NOMMU_MAP_COPY)) return -ENODEV; /* we don't permit a private writable mapping to be * shared with the backing device */ if (prot & PROT_WRITE) - capabilities &= ~BDI_CAP_MAP_DIRECT; + capabilities &= ~NOMMU_MAP_DIRECT; } - if (capabilities & BDI_CAP_MAP_DIRECT) { - if (((prot & PROT_READ) && !(capabilities & BDI_CAP_READ_MAP)) || - ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) || - ((prot & PROT_EXEC) && !(capabilities & BDI_CAP_EXEC_MAP)) + if (capabilities & NOMMU_MAP_DIRECT) { + if (((prot & PROT_READ) && !(capabilities & NOMMU_MAP_READ)) || + ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) || + ((prot & PROT_EXEC) && !(capabilities & NOMMU_MAP_EXEC)) ) { - capabilities &= ~BDI_CAP_MAP_DIRECT; + capabilities &= ~NOMMU_MAP_DIRECT; if (flags & MAP_SHARED) { printk(KERN_WARNING "MAP_SHARED not completely supported on !MMU\n"); @@ -1049,21 +1040,21 @@ static int validate_mmap_request(struct file *file, } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) { /* handle implication of PROT_EXEC by PROT_READ */ if (current->personality & READ_IMPLIES_EXEC) { - if (capabilities & BDI_CAP_EXEC_MAP) + if (capabilities & NOMMU_MAP_EXEC) prot |= PROT_EXEC; } } else if ((prot & PROT_READ) && (prot & PROT_EXEC) && - !(capabilities & BDI_CAP_EXEC_MAP) + !(capabilities & NOMMU_MAP_EXEC) ) { /* backing file is not executable, try to copy */ - capabilities &= ~BDI_CAP_MAP_DIRECT; + capabilities &= ~NOMMU_MAP_DIRECT; } } else { /* anonymous mappings are always memory backed and can be * privately mapped */ - capabilities = BDI_CAP_MAP_COPY; + capabilities = NOMMU_MAP_COPY; /* handle PROT_EXEC implication by PROT_READ */ if ((prot & PROT_READ) && @@ -1095,7 +1086,7 @@ static unsigned long determine_vm_flags(struct file *file, vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags); /* vm_flags |= mm->def_flags; */ - if (!(capabilities & BDI_CAP_MAP_DIRECT)) { + if (!(capabilities & NOMMU_MAP_DIRECT)) { /* attempt to share read-only copies of mapped file chunks */ vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (file && !(prot & PROT_WRITE)) @@ -1104,7 +1095,7 @@ static unsigned long determine_vm_flags(struct file *file, /* overlay a shareable mapping on the backing device or inode * if possible - used for chardevs, ramfs/tmpfs/shmfs and * romfs/cramfs */ - vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS); + vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS); if (flags & MAP_SHARED) vm_flags |= VM_SHARED; } @@ -1157,7 +1148,7 @@ static int do_mmap_private(struct vm_area_struct *vma, * shared mappings on devices or memory * - VM_MAYSHARE will be set if it may attempt to share */ - if (capabilities & BDI_CAP_MAP_DIRECT) { + if (capabilities & NOMMU_MAP_DIRECT) { ret = vma->vm_file->f_op->mmap(vma->vm_file, vma); if (ret == 0) { /* shouldn't 
return success if we're not sharing */ @@ -1346,7 +1337,7 @@ unsigned long do_mmap_pgoff(struct file *file, if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { /* new mapping is not a subset of the region */ - if (!(capabilities & BDI_CAP_MAP_DIRECT)) + if (!(capabilities & NOMMU_MAP_DIRECT)) goto sharing_violation; continue; } @@ -1385,7 +1376,7 @@ unsigned long do_mmap_pgoff(struct file *file, * - this is the hook for quasi-memory character devices to * tell us the location of a shared mapping */ - if (capabilities & BDI_CAP_MAP_DIRECT) { + if (capabilities & NOMMU_MAP_DIRECT) { addr = file->f_op->get_unmapped_area(file, addr, len, pgoff, flags); if (IS_ERR_VALUE(addr)) { @@ -1397,10 +1388,10 @@ unsigned long do_mmap_pgoff(struct file *file, * the mapping so we'll have to attempt to copy * it */ ret = -ENODEV; - if (!(capabilities & BDI_CAP_MAP_COPY)) + if (!(capabilities & NOMMU_MAP_COPY)) goto error_just_free; - capabilities &= ~BDI_CAP_MAP_DIRECT; + capabilities &= ~NOMMU_MAP_DIRECT; } else { vma->vm_start = region->vm_start = addr; vma->vm_end = region->vm_end = addr + len; @@ -1411,7 +1402,7 @@ unsigned long do_mmap_pgoff(struct file *file, vma->vm_region = region; /* set up the mapping - * - the region is filled in if BDI_CAP_MAP_DIRECT is still set + * - the region is filled in if NOMMU_MAP_DIRECT is still set */ if (file && vma->vm_flags & VM_SHARED) ret = do_mmap_shared_file(vma); diff --git a/security/security.c b/security/security.c index 18b35c63fc0c..a0442b20f001 100644 --- a/security/security.c +++ b/security/security.c @@ -726,16 +726,15 @@ static inline unsigned long mmap_prot(struct file *file, unsigned long prot) return prot | PROT_EXEC; /* * ditto if it's not on noexec mount, except that on !MMU we need - * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case + * NOMMU_MAP_EXEC (== VM_MAYEXEC) in this case */ if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) { #ifndef CONFIG_MMU - unsigned long caps = 0; - struct address_space *mapping = file->f_mapping; - if (mapping && mapping->backing_dev_info) - caps = mapping->backing_dev_info->capabilities; - if (!(caps & BDI_CAP_EXEC_MAP)) - return prot; + if (file->f_op->mmap_capabilities) { + unsigned caps = file->f_op->mmap_capabilities(file); + if (!(caps & NOMMU_MAP_EXEC)) + return prot; + } #endif return prot | PROT_EXEC; } -- cgit v1.2.3 From 564f00f6c03a9339c259eb71a5b3aa8ef7ff1c2e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:33 +0100 Subject: block_dev: only write bdev inode on close Since 018a17bdc865 ("bdi: reimplement bdev_inode_switch_bdi()") the block device code writes out all dirty data whenever switching the backing_dev_info for a block device inode. But a block device inode can only be dirtied when it is in use, which means we only have to write it out on the final blkdev_put, but not when doing a blkdev_get. Factoring out the write out from the bdi list switch prepares for removing the list switch later in the series.
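As a rough illustration of that invariant, here is a minimal userspace sketch (not kernel code; all toy_* names are invented for the example): opening a block device never needs a flush, only the final put does, and the flush loop has to re-check the dirty state because the write-out runs with the lock dropped.

/*
 * Userspace sketch of the rule described above. The lock stands in for
 * inode->i_lock, "dirty" for I_DIRTY, and toy_write_inode() for
 * write_inode_now(); none of this is the actual kernel implementation.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_bdev {
	pthread_mutex_t lock;	/* stands in for inode->i_lock */
	int holders;		/* stands in for bdev->bd_holders */
	bool dirty;		/* stands in for inode->i_state & I_DIRTY */
};

/* Model of the write-out: clears the dirty state under the lock. */
static void toy_write_inode(struct toy_bdev *b)
{
	pthread_mutex_lock(&b->lock);
	b->dirty = false;
	pthread_mutex_unlock(&b->lock);
}

static void toy_bdev_write_inode(struct toy_bdev *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->dirty) {
		/* drop the lock for the (potentially blocking) write-out */
		pthread_mutex_unlock(&b->lock);
		toy_write_inode(b);
		pthread_mutex_lock(&b->lock);	/* re-check: may be dirty again */
	}
	pthread_mutex_unlock(&b->lock);
}

static void toy_blkdev_get(struct toy_bdev *b)
{
	b->holders++;			/* opening never needs a flush */
}

static void toy_blkdev_put(struct toy_bdev *b)
{
	if (--b->holders == 0)
		toy_bdev_write_inode(b);	/* only the final put flushes */
}

int main(void)
{
	struct toy_bdev b = { .lock = PTHREAD_MUTEX_INITIALIZER,
			      .holders = 0, .dirty = false };

	toy_blkdev_get(&b);
	b.dirty = true;			/* the inode is only dirtied while in use */
	toy_blkdev_put(&b);		/* last put: dirty data is written back */
	printf("dirty after final put: %d\n", b.dirty);
	return 0;
}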
Signed-off-by: Christoph Hellwig Acked-by: Tejun Heo Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- fs/block_dev.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index b48c41bf0f86..026ca7b8431c 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -49,6 +49,17 @@ inline struct block_device *I_BDEV(struct inode *inode) } EXPORT_SYMBOL(I_BDEV); +static void bdev_write_inode(struct inode *inode) +{ + spin_lock(&inode->i_lock); + while (inode->i_state & I_DIRTY) { + spin_unlock(&inode->i_lock); + WARN_ON_ONCE(write_inode_now(inode, true)); + spin_lock(&inode->i_lock); + } + spin_unlock(&inode->i_lock); +} + /* * Move the inode from its current bdi to a new bdi. Make sure the inode * is clean before moving so that it doesn't linger on the old bdi. @@ -56,16 +67,10 @@ EXPORT_SYMBOL(I_BDEV); static void bdev_inode_switch_bdi(struct inode *inode, struct backing_dev_info *dst) { - while (true) { - spin_lock(&inode->i_lock); - if (!(inode->i_state & I_DIRTY)) { - inode->i_data.backing_dev_info = dst; - spin_unlock(&inode->i_lock); - return; - } - spin_unlock(&inode->i_lock); - WARN_ON_ONCE(write_inode_now(inode, true)); - } + spin_lock(&inode->i_lock); + WARN_ON_ONCE(inode->i_state & I_DIRTY); + inode->i_data.backing_dev_info = dst; + spin_unlock(&inode->i_lock); } /* Kill _all_ buffers and pagecache , dirty or not.. */ @@ -1464,9 +1469,11 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); - /* ->release can cause the old bdi to disappear, - * so must switch it out first + /* + * ->release can cause the queue to disappear, so flush all + * dirty data before. */ + bdev_write_inode(bdev->bd_inode); bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info); } -- cgit v1.2.3 From 495a276e1ca96af622edb67ad0f85431935b20d2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:34 +0100 Subject: block_dev: get bdev inode bdi directly from the block device Directly grab the backing_dev_info from the request_queue instead of detouring through the address_space. Signed-off-by: Christoph Hellwig Reviewed-by: Tejun Heo Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- fs/fs-writeback.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 2d609a5fbfea..e8116a44cc29 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -69,10 +69,10 @@ EXPORT_SYMBOL(writeback_in_progress); static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) { struct super_block *sb = inode->i_sb; - +#ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) - return inode->i_mapping->backing_dev_info; - + return blk_get_backing_dev_info(I_BDEV(inode)); +#endif return sb->s_bdi; } -- cgit v1.2.3 From 26ff13047e3dc6c0230a629867e8dbd4a15a7626 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:35 +0100 Subject: nilfs2: set up s_bdi like the generic mount_bdev code mapping->backing_dev_info will go away, so don't rely on it. 
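A compilable toy model of the lookup rule the two patches above rely on (simplified stand-in types with invented toy_* names, not the kernel's structures): a block-device inode resolves its backing_dev_info through the device's request queue, every other inode simply uses the bdi its filesystem stored in sb->s_bdi at mount time, with no detour through the address_space.

/*
 * Userspace sketch only; is_blkdev_sb models sb_is_blkdev_sb() and the
 * queue field models what blk_get_backing_dev_info() would return.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_bdi { const char *name; };

struct toy_queue { struct toy_bdi backing_dev_info; };

struct toy_super_block {
	bool is_blkdev_sb;	/* models sb_is_blkdev_sb() */
	struct toy_bdi *s_bdi;	/* models sb->s_bdi */
};

struct toy_inode {
	struct toy_super_block *i_sb;
	struct toy_queue *queue;	/* only meaningful for blockdev inodes */
};

static struct toy_bdi *toy_inode_to_bdi(struct toy_inode *inode)
{
	/* blockdev special case: go straight to the queue's bdi */
	if (inode->i_sb->is_blkdev_sb)
		return &inode->queue->backing_dev_info;
	/* everyone else: the bdi the filesystem set up at mount time */
	return inode->i_sb->s_bdi;
}

int main(void)
{
	struct toy_bdi fs_bdi = { "fs-bdi" };
	struct toy_queue q = { { "queue-bdi" } };
	struct toy_super_block fs_sb = { false, &fs_bdi };
	struct toy_super_block bdev_sb = { true, NULL };
	struct toy_inode file_inode = { &fs_sb, NULL };
	struct toy_inode bdev_inode = { &bdev_sb, &q };

	printf("%s\n", toy_inode_to_bdi(&file_inode)->name);	/* fs-bdi */
	printf("%s\n", toy_inode_to_bdi(&bdev_inode)->name);	/* queue-bdi */
	return 0;
}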
Signed-off-by: Christoph Hellwig Acked-by: Ryusuke Konishi Reviewed-by: Tejun Heo Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- fs/nilfs2/super.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 2e5b3ec85b8f..3d4bbac36bea 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1057,7 +1057,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) { struct the_nilfs *nilfs; struct nilfs_root *fsroot; - struct backing_dev_info *bdi; __u64 cno; int err; @@ -1077,8 +1076,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_time_gran = 1; sb->s_max_links = NILFS_LINK_MAX; - bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; - sb->s_bdi = bdi ? : &default_backing_dev_info; + sb->s_bdi = &bdev_get_queue(sb->s_bdev)->backing_dev_info; err = load_nilfs(nilfs, sb); if (err) -- cgit v1.2.3 From de1414a654e66b81b5348dbc5259ecf2fb61655e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:36 +0100 Subject: fs: export inode_to_bdi and use it in favor of mapping->backing_dev_info Now that we got rid of the bdi abuse on character devices we can always use sb->s_bdi to get at the backing_dev_info for a file, except for the block device special case. Export inode_to_bdi and replace uses of mapping->backing_dev_info with it to prepare for the removal of mapping->backing_dev_info. Signed-off-by: Christoph Hellwig Reviewed-by: Tejun Heo Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- fs/btrfs/file.c | 2 +- fs/ceph/file.c | 2 +- fs/ext2/ialloc.c | 2 +- fs/ext4/super.c | 2 +- fs/fs-writeback.c | 3 ++- fs/fuse/file.c | 10 +++++----- fs/gfs2/aops.c | 2 +- fs/gfs2/super.c | 2 +- fs/nfs/filelayout/filelayout.c | 2 +- fs/nfs/write.c | 6 +++--- fs/ntfs/file.c | 3 ++- fs/ocfs2/file.c | 2 +- fs/xfs/xfs_file.c | 2 +- include/linux/backing-dev.h | 6 ++++-- include/trace/events/writeback.h | 6 +++--- mm/fadvise.c | 4 ++-- mm/filemap.c | 4 ++-- mm/filemap_xip.c | 3 ++- mm/page-writeback.c | 29 +++++++++++++---------------- mm/readahead.c | 4 ++-- mm/truncate.c | 2 +- mm/vmscan.c | 4 ++-- 22 files changed, 52 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e4090259569b..835c04a874fd 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1746,7 +1746,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, mutex_lock(&inode->i_mutex); - current->backing_dev_info = inode->i_mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) { mutex_unlock(&inode->i_mutex); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ce74b394b49d..905986dd4c3c 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -945,7 +945,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) mutex_lock(&inode->i_mutex); /* We can write back this queue in page reclaim */ - current->backing_dev_info = file->f_mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index 7d66fb0e4cca..6c14bb8322fa 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -170,7 +170,7 @@ static void ext2_preread_inode(struct inode *inode) struct ext2_group_desc * gdp; struct backing_dev_info *bdi; - bdi = inode->i_mapping->backing_dev_info; + bdi = inode_to_bdi(inode); if 
(bdi_read_congested(bdi)) return; if (bdi_write_congested(bdi)) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 74c5f53595fb..ad88e601a6cd 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -334,7 +334,7 @@ static void save_error_info(struct super_block *sb, const char *func, static int block_device_ejected(struct super_block *sb) { struct inode *bd_inode = sb->s_bdev->bd_inode; - struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(bd_inode); return bdi->dev == NULL; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index e8116a44cc29..a20b1145f4d5 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -66,7 +66,7 @@ int writeback_in_progress(struct backing_dev_info *bdi) } EXPORT_SYMBOL(writeback_in_progress); -static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) +struct backing_dev_info *inode_to_bdi(struct inode *inode) { struct super_block *sb = inode->i_sb; #ifdef CONFIG_BLOCK @@ -75,6 +75,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) #endif return sb->s_bdi; } +EXPORT_SYMBOL_GPL(inode_to_bdi); static inline struct inode *wb_inode(struct list_head *head) { diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 760b2c552197..19d80b82d344 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1159,7 +1159,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) mutex_lock(&inode->i_mutex); /* We can write back this queue in page reclaim */ - current->backing_dev_info = mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) @@ -1464,7 +1464,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) { struct inode *inode = req->inode; struct fuse_inode *fi = get_fuse_inode(inode); - struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(inode); int i; list_del(&req->writepages_entry); @@ -1658,7 +1658,7 @@ static int fuse_writepage_locked(struct page *page) req->end = fuse_writepage_end; req->inode = inode; - inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK); + inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK); inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); spin_lock(&fc->lock); @@ -1768,7 +1768,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT || old_req->state == FUSE_REQ_PENDING)) { - struct backing_dev_info *bdi = page->mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host); copy_highpage(old_req->pages[0], page); spin_unlock(&fc->lock); @@ -1872,7 +1872,7 @@ static int fuse_writepages_fill(struct page *page, req->page_descs[req->num_pages].offset = 0; req->page_descs[req->num_pages].length = PAGE_SIZE; - inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK); + inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK); inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); err = 0; diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 805b37fed638..4ad4f94edebe 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -289,7 +289,7 @@ continue_unlock: if (!clear_page_dirty_for_io(page)) goto continue_unlock; - trace_wbc_writepage(wbc, mapping->backing_dev_info); + trace_wbc_writepage(wbc, inode_to_bdi(inode)); ret = __gfs2_jdata_writepage(page, wbc); if (unlikely(ret)) { diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 
5b327f837de7..1666382b198d 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -743,7 +743,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc) struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); - struct backing_dev_info *bdi = metamapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(metamapping->host); int ret = 0; if (wbc->sync_mode == WB_SYNC_ALL) diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 7afb52f6a25a..51aa889611cf 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -1081,7 +1081,7 @@ mds_commit: spin_unlock(cinfo->lock); if (!cinfo->dreq) { inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, + inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), BDI_RECLAIMABLE); __mark_inode_dirty(req->wb_context->dentry->d_inode, I_DIRTY_DATASYNC); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index af3af685a9e3..298abcc5281b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -786,7 +786,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, spin_unlock(cinfo->lock); if (!cinfo->dreq) { inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, + inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), BDI_RECLAIMABLE); __mark_inode_dirty(req->wb_context->dentry->d_inode, I_DIRTY_DATASYNC); @@ -853,7 +853,7 @@ static void nfs_clear_page_commit(struct page *page) { dec_zone_page_state(page, NR_UNSTABLE_NFS); - dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE); + dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE); } /* Called holding inode (/cinfo) lock */ @@ -1564,7 +1564,7 @@ void nfs_retry_commit(struct list_head *page_list, nfs_mark_request_commit(req, lseg, cinfo); if (!cinfo->dreq) { dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, + dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), BDI_RECLAIMABLE); } nfs_unlock_and_release_request(req); diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 643faa44f22b..1da9b2d184dc 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -19,6 +19,7 @@ * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include @@ -2091,7 +2092,7 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb, count = iov_length(iov, nr_segs); pos = *ppos; /* We can write back this queue in page reclaim. 
*/ - current->backing_dev_info = mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); written = 0; err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 3950693dd0f6..abe7d98d6178 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2363,7 +2363,7 @@ relock: goto out_dio; } } else { - current->backing_dev_info = file->f_mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); written = generic_perform_write(file, from, *ppos); if (likely(written >= 0)) iocb->ki_pos = *ppos + written; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 13e974e6a889..5684ac3e7d18 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -699,7 +699,7 @@ xfs_file_buffered_aio_write( iov_iter_truncate(from, count); /* We can write back this queue in page reclaim */ - current->backing_dev_info = mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); write_retry: trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 478f95d92d73..ed59dee03a71 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -106,6 +106,8 @@ struct backing_dev_info { #endif }; +struct backing_dev_info *inode_to_bdi(struct inode *inode); + int __must_check bdi_init(struct backing_dev_info *bdi); void bdi_destroy(struct backing_dev_info *bdi); @@ -303,12 +305,12 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi) static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) { - return bdi_cap_writeback_dirty(mapping->backing_dev_info); + return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host)); } static inline bool mapping_cap_account_dirty(struct address_space *mapping) { - return bdi_cap_account_dirty(mapping->backing_dev_info); + return bdi_cap_account_dirty(inode_to_bdi(mapping->host)); } static inline int bdi_sched_wait(void *word) diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index cee02d65ab3f..74f5207bd090 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -47,7 +47,7 @@ TRACE_EVENT(writeback_dirty_page, TP_fast_assign( strncpy(__entry->name, - mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32); + mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32); __entry->ino = mapping ? 
mapping->host->i_ino : 0; __entry->index = page->index; ), @@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, ), TP_fast_assign( - struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(inode); /* may be called for files on pseudo FSes w/ unregistered bdi */ strncpy(__entry->name, @@ -116,7 +116,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template, TP_fast_assign( strncpy(__entry->name, - dev_name(inode->i_mapping->backing_dev_info->dev), 32); + dev_name(inode_to_bdi(inode)->dev), 32); __entry->ino = inode->i_ino; __entry->sync_mode = wbc->sync_mode; ), diff --git a/mm/fadvise.c b/mm/fadvise.c index 2ad7adf4f0a4..fac23ecf8d72 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -73,7 +73,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) else endbyte--; /* inclusive */ - bdi = mapping->backing_dev_info; + bdi = inode_to_bdi(mapping->host); switch (advice) { case POSIX_FADV_NORMAL: @@ -113,7 +113,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) case POSIX_FADV_NOREUSE: break; case POSIX_FADV_DONTNEED: - if (!bdi_write_congested(mapping->backing_dev_info)) + if (!bdi_write_congested(bdi)) __filemap_fdatawrite_range(mapping, offset, endbyte, WB_SYNC_NONE); diff --git a/mm/filemap.c b/mm/filemap.c index 673e4581a2e5..5d7c23c26f81 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -211,7 +211,7 @@ void __delete_from_page_cache(struct page *page, void *shadow) */ if (PageDirty(page) && mapping_cap_account_dirty(mapping)) { dec_zone_page_state(page, NR_FILE_DIRTY); - dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); + dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE); } } @@ -2565,7 +2565,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) size_t count = iov_iter_count(from); /* We can write back this queue in page reclaim */ - current->backing_dev_info = mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (err) goto out; diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 0d105aeff82f..26897fbfbe19 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -9,6 +9,7 @@ */ #include +#include #include #include #include @@ -410,7 +411,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len, count = len; /* We can write back this queue in page reclaim */ - current->backing_dev_info = mapping->backing_dev_info; + current->backing_dev_info = inode_to_bdi(inode); ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode)); if (ret) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6f4335238e33..d4cbb4bd7d1c 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1351,7 +1351,7 @@ static void balance_dirty_pages(struct address_space *mapping, unsigned long task_ratelimit; unsigned long dirty_ratelimit; unsigned long pos_ratio; - struct backing_dev_info *bdi = mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(mapping->host); bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT; unsigned long start_time = jiffies; @@ -1574,7 +1574,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0; */ void balance_dirty_pages_ratelimited(struct address_space *mapping) { - struct backing_dev_info *bdi = mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(mapping->host); int ratelimit; int *p; @@ -1929,7 +1929,7 @@ continue_unlock: if 
(!clear_page_dirty_for_io(page)) goto continue_unlock; - trace_wbc_writepage(wbc, mapping->backing_dev_info); + trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); ret = (*writepage)(page, wbc, data); if (unlikely(ret)) { if (ret == AOP_WRITEPAGE_ACTIVATE) { @@ -2094,10 +2094,12 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) trace_writeback_dirty_page(page, mapping); if (mapping_cap_account_dirty(mapping)) { + struct backing_dev_info *bdi = inode_to_bdi(mapping->host); + __inc_zone_page_state(page, NR_FILE_DIRTY); __inc_zone_page_state(page, NR_DIRTIED); - __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); - __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); + __inc_bdi_stat(bdi, BDI_RECLAIMABLE); + __inc_bdi_stat(bdi, BDI_DIRTIED); task_io_account_write(PAGE_CACHE_SIZE); current->nr_dirtied++; this_cpu_inc(bdp_ratelimits); @@ -2156,7 +2158,7 @@ void account_page_redirty(struct page *page) if (mapping && mapping_cap_account_dirty(mapping)) { current->nr_dirtied--; dec_zone_page_state(page, NR_DIRTIED); - dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); + dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED); } } EXPORT_SYMBOL(account_page_redirty); @@ -2295,7 +2297,7 @@ int clear_page_dirty_for_io(struct page *page) */ if (TestClearPageDirty(page)) { dec_zone_page_state(page, NR_FILE_DIRTY); - dec_bdi_stat(mapping->backing_dev_info, + dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE); return 1; } @@ -2315,7 +2317,7 @@ int test_clear_page_writeback(struct page *page) memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags); if (mapping) { - struct backing_dev_info *bdi = mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long flags; spin_lock_irqsave(&mapping->tree_lock, flags); @@ -2352,7 +2354,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write) memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags); if (mapping) { - struct backing_dev_info *bdi = mapping->backing_dev_info; + struct backing_dev_info *bdi = inode_to_bdi(mapping->host); unsigned long flags; spin_lock_irqsave(&mapping->tree_lock, flags); @@ -2406,12 +2408,7 @@ EXPORT_SYMBOL(mapping_tagged); */ void wait_for_stable_page(struct page *page) { - struct address_space *mapping = page_mapping(page); - struct backing_dev_info *bdi = mapping->backing_dev_info; - - if (!bdi_cap_stable_pages_required(bdi)) - return; - - wait_on_page_writeback(page); + if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host))) + wait_on_page_writeback(page); } EXPORT_SYMBOL_GPL(wait_for_stable_page); diff --git a/mm/readahead.c b/mm/readahead.c index 17b9172ec37f..935675844b2e 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -27,7 +27,7 @@ void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) { - ra->ra_pages = mapping->backing_dev_info->ra_pages; + ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; ra->prev_pos = -1; } EXPORT_SYMBOL_GPL(file_ra_state_init); @@ -541,7 +541,7 @@ page_cache_async_readahead(struct address_space *mapping, /* * Defer asynchronous read-ahead on IO congestion. 
*/ - if (bdi_read_congested(mapping->backing_dev_info)) + if (bdi_read_congested(inode_to_bdi(mapping->host))) return; /* do read-ahead */ diff --git a/mm/truncate.c b/mm/truncate.c index f1e4d6052369..ddec5a5966d7 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -112,7 +112,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size) struct address_space *mapping = page->mapping; if (mapping && mapping_cap_account_dirty(mapping)) { dec_zone_page_state(page, NR_FILE_DIRTY); - dec_bdi_stat(mapping->backing_dev_info, + dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE); if (account_size) task_io_account_cancelled_write(account_size); diff --git a/mm/vmscan.c b/mm/vmscan.c index ab2505c3ef54..e00a16393f21 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -497,7 +497,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping, } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; - if (!may_write_to_queue(mapping->backing_dev_info, sc)) + if (!may_write_to_queue(inode_to_bdi(mapping->host), sc)) return PAGE_KEEP; if (clear_page_dirty_for_io(page)) { @@ -876,7 +876,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, */ mapping = page_mapping(page); if (((dirty || writeback) && mapping && - bdi_write_congested(mapping->backing_dev_info)) || + bdi_write_congested(inode_to_bdi(mapping->host))) || (writeback && PageReclaim(page))) nr_congested++; -- cgit v1.2.3 From b83ae6d421435c6204150300f1c25bfbd39cd62b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:37 +0100 Subject: fs: remove mapping->backing_dev_info Now that we never use the backing_dev_info pointer in struct address_space we can simply remove it and save 4 to 8 bytes in every inode. Signed-off-by: Christoph Hellwig Acked-by: Ryusuke Konishi Reviewed-by: Tejun Heo Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- drivers/char/raw.c | 4 +--- fs/aio.c | 1 - fs/block_dev.c | 26 +------------------------- fs/btrfs/disk-io.c | 1 - fs/btrfs/inode.c | 6 ------ fs/ceph/inode.c | 2 -- fs/cifs/inode.c | 2 -- fs/configfs/inode.c | 1 - fs/ecryptfs/inode.c | 1 - fs/exofs/inode.c | 2 -- fs/fuse/inode.c | 1 - fs/gfs2/glock.c | 1 - fs/gfs2/ops_fstype.c | 1 - fs/hugetlbfs/inode.c | 1 - fs/inode.c | 13 ------------- fs/kernfs/inode.c | 1 - fs/ncpfs/inode.c | 1 - fs/nfs/inode.c | 1 - fs/nilfs2/gcinode.c | 1 - fs/nilfs2/mdt.c | 6 ++---- fs/nilfs2/page.c | 4 +--- fs/nilfs2/page.h | 3 +-- fs/nilfs2/super.c | 2 +- fs/ocfs2/dlmfs/dlmfs.c | 2 -- fs/ramfs/inode.c | 1 - fs/romfs/super.c | 3 --- fs/ubifs/dir.c | 2 -- fs/ubifs/super.c | 3 --- include/linux/fs.h | 3 +-- mm/backing-dev.c | 1 - mm/shmem.c | 1 - mm/swap_state.c | 1 - 32 files changed, 8 insertions(+), 91 deletions(-) (limited to 'fs') diff --git a/drivers/char/raw.c b/drivers/char/raw.c index a24891b97547..6e29bf2db536 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -104,11 +104,9 @@ static int raw_release(struct inode *inode, struct file *filp) mutex_lock(&raw_mutex); bdev = raw_devices[minor].binding; - if (--raw_devices[minor].inuse == 0) { + if (--raw_devices[minor].inuse == 0) /* Here inode->i_mapping == bdev->bd_inode->i_mapping */ inode->i_mapping = &inode->i_data; - inode->i_mapping->backing_dev_info = &default_backing_dev_info; - } mutex_unlock(&raw_mutex); blkdev_put(bdev, filp->f_mode | FMODE_EXCL); diff --git a/fs/aio.c b/fs/aio.c index 6f13d3fab07f..3bf8b1d250c3 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -176,7 +176,6 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t 
nr_pages) inode->i_mapping->a_ops = &aio_ctx_aops; inode->i_mapping->private_data = ctx; - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_size = PAGE_SIZE * nr_pages; path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); diff --git a/fs/block_dev.c b/fs/block_dev.c index 026ca7b8431c..a9f92794d7a0 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -60,19 +60,6 @@ static void bdev_write_inode(struct inode *inode) spin_unlock(&inode->i_lock); } -/* - * Move the inode from its current bdi to a new bdi. Make sure the inode - * is clean before moving so that it doesn't linger on the old bdi. - */ -static void bdev_inode_switch_bdi(struct inode *inode, - struct backing_dev_info *dst) -{ - spin_lock(&inode->i_lock); - WARN_ON_ONCE(inode->i_state & I_DIRTY); - inode->i_data.backing_dev_info = dst; - spin_unlock(&inode->i_lock); -} - /* Kill _all_ buffers and pagecache , dirty or not.. */ void kill_bdev(struct block_device *bdev) { @@ -589,7 +576,6 @@ struct block_device *bdget(dev_t dev) inode->i_bdev = bdev; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); - inode->i_data.backing_dev_info = &default_backing_dev_info; spin_lock(&bdev_lock); list_add(&bdev->bd_list, &all_bdevs); spin_unlock(&bdev_lock); @@ -1150,8 +1136,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_queue = disk->queue; bdev->bd_contains = bdev; if (!partno) { - struct backing_dev_info *bdi; - ret = -ENXIO; bdev->bd_part = disk_get_part(disk, partno); if (!bdev->bd_part) @@ -1177,11 +1161,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) } } - if (!ret) { + if (!ret) bd_set_size(bdev,(loff_t)get_capacity(disk)<<9); - bdi = blk_get_backing_dev_info(bdev); - bdev_inode_switch_bdi(bdev->bd_inode, bdi); - } /* * If the device is invalidated, rescan partition @@ -1208,8 +1189,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (ret) goto out_clear; bdev->bd_contains = whole; - bdev_inode_switch_bdi(bdev->bd_inode, - whole->bd_inode->i_data.backing_dev_info); bdev->bd_part = disk_get_part(disk, partno); if (!(disk->flags & GENHD_FL_UP) || !bdev->bd_part || !bdev->bd_part->nr_sects) { @@ -1249,7 +1228,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_disk = NULL; bdev->bd_part = NULL; bdev->bd_queue = NULL; - bdev_inode_switch_bdi(bdev->bd_inode, &default_backing_dev_info); if (bdev != bdev->bd_contains) __blkdev_put(bdev->bd_contains, mode, 1); bdev->bd_contains = NULL; @@ -1474,8 +1452,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) * dirty data before. 
*/ bdev_write_inode(bdev->bd_inode); - bdev_inode_switch_bdi(bdev->bd_inode, - &default_backing_dev_info); } if (bdev->bd_contains == bdev) { if (disk->fops->release) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index afc4092989cd..1ec872e3a926 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2318,7 +2318,6 @@ int open_ctree(struct super_block *sb, */ fs_info->btree_inode->i_size = OFFSET_MAX; fs_info->btree_inode->i_mapping->a_ops = &btree_aops; - fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi; RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8bf326affb94..54bcf639d1cf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3608,7 +3608,6 @@ cache_acl: switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_mapping->a_ops = &btrfs_aops; - inode->i_mapping->backing_dev_info = &root->fs_info->bdi; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; inode->i_fop = &btrfs_file_operations; inode->i_op = &btrfs_file_inode_operations; @@ -3623,7 +3622,6 @@ cache_acl: case S_IFLNK: inode->i_op = &btrfs_symlink_inode_operations; inode->i_mapping->a_ops = &btrfs_symlink_aops; - inode->i_mapping->backing_dev_info = &root->fs_info->bdi; break; default: inode->i_op = &btrfs_special_inode_operations; @@ -6088,7 +6086,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, inode->i_fop = &btrfs_file_operations; inode->i_op = &btrfs_file_inode_operations; inode->i_mapping->a_ops = &btrfs_aops; - inode->i_mapping->backing_dev_info = &root->fs_info->bdi; err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); if (err) @@ -9203,7 +9200,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_fop = &btrfs_file_operations; inode->i_op = &btrfs_file_inode_operations; inode->i_mapping->a_ops = &btrfs_aops; - inode->i_mapping->backing_dev_info = &root->fs_info->bdi; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); @@ -9247,7 +9243,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode->i_op = &btrfs_symlink_inode_operations; inode->i_mapping->a_ops = &btrfs_symlink_aops; - inode->i_mapping->backing_dev_info = &root->fs_info->bdi; inode_set_bytes(inode, name_len); btrfs_i_size_write(inode, name_len); err = btrfs_update_inode(trans, root, inode); @@ -9459,7 +9454,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_op = &btrfs_file_inode_operations; inode->i_mapping->a_ops = &btrfs_aops; - inode->i_mapping->backing_dev_info = &root->fs_info->bdi; BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; ret = btrfs_init_inode_security(trans, inode, dir, NULL); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index f61a74115beb..6b5173605154 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -783,8 +783,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page, } inode->i_mapping->a_ops = &ceph_aops; - inode->i_mapping->backing_dev_info = - &ceph_sb_to_client(inode->i_sb)->backing_dev_info; switch (inode->i_mode & S_IFMT) { case S_IFIFO: diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0c3ce464cae4..2d4f37235ed0 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -937,8 +937,6 @@ retry_iget5_locked: inode->i_flags |= S_NOATIME | S_NOCMTIME; if (inode->i_state & I_NEW) { inode->i_ino = hash; - if (S_ISREG(inode->i_mode)) - inode->i_data.backing_dev_info 
= sb->s_bdi; #ifdef CONFIG_CIFS_FSCACHE /* initialize per-inode cache cookie pointer */ CIFS_I(inode)->fscache = NULL; diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 0ad6b4d6de00..65af86147154 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -131,7 +131,6 @@ struct inode *configfs_new_inode(umode_t mode, struct configfs_dirent *sd, if (inode) { inode->i_ino = get_next_ino(); inode->i_mapping->a_ops = &configfs_aops; - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_op = &configfs_inode_operations; if (sd->s_iattr) { diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 1686dc2da9fd..34b36a504059 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -67,7 +67,6 @@ static int ecryptfs_inode_set(struct inode *inode, void *opaque) inode->i_ino = lower_inode->i_ino; inode->i_version++; inode->i_mapping->a_ops = &ecryptfs_aops; - inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi; if (S_ISLNK(inode->i_mode)) inode->i_op = &ecryptfs_symlink_iops; diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index f1d3d4eb8c4f..6fc91df99ff8 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -1214,7 +1214,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino) memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data)); } - inode->i_mapping->backing_dev_info = sb->s_bdi; if (S_ISREG(inode->i_mode)) { inode->i_op = &exofs_file_inode_operations; inode->i_fop = &exofs_file_operations; @@ -1314,7 +1313,6 @@ struct inode *exofs_new_inode(struct inode *dir, umode_t mode) set_obj_2bcreated(oi); - inode->i_mapping->backing_dev_info = sb->s_bdi; inode_init_owner(inode, dir, mode); inode->i_ino = sbi->s_nextid++; inode->i_blkbits = EXOFS_BLKSHIFT; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index f38256e4476e..e8799c11424b 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -308,7 +308,6 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, if (!fc->writeback_cache || !S_ISREG(attr->mode)) inode->i_flags |= S_NOCMTIME; inode->i_generation = generation; - inode->i_data.backing_dev_info = &fc->bdi; fuse_init_inode(inode, attr); unlock_new_inode(inode); } else if ((inode->i_mode ^ attr->mode) & S_IFMT) { diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index a23524aa3eac..08ea717981f7 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -775,7 +775,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_NOFS); mapping->private_data = NULL; - mapping->backing_dev_info = s->s_bdi; mapping->writeback_index = 0; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 8633ad328ee2..efc8e254787c 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -112,7 +112,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_NOFS); mapping->private_data = NULL; - mapping->backing_dev_info = sb->s_bdi; mapping->writeback_index = 0; spin_lock_init(&sdp->sd_log_lock); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index de7c95c7d840..c274aca8e8dc 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -492,7 +492,6 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, &hugetlbfs_i_mmap_rwsem_key); inode->i_mapping->a_ops = &hugetlbfs_aops; - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_mapping->private_data = resv_map; info = HUGETLBFS_I(inode); 
diff --git a/fs/inode.c b/fs/inode.c index aa149e7262ac..e4e8caa7464c 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -170,20 +170,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) atomic_set(&mapping->i_mmap_writable, 0); mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); mapping->private_data = NULL; - mapping->backing_dev_info = &default_backing_dev_info; mapping->writeback_index = 0; - - /* - * If the block_device provides a backing_dev_info for client - * inodes then use that. Otherwise the inode share the bdev's - * backing_dev_info. - */ - if (sb->s_bdev) { - struct backing_dev_info *bdi; - - bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info; - mapping->backing_dev_info = bdi; - } inode->i_private = NULL; inode->i_mapping = mapping; INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */ diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index 06f06887b2d2..9000874a945b 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -286,7 +286,6 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode) kernfs_get(kn); inode->i_private = kn; inode->i_mapping->a_ops = &kernfs_aops; - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_op = &kernfs_iops; set_default_inode_attr(inode, kn->mode); diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index a699a3fc62c0..01a9e16e9782 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -267,7 +267,6 @@ ncp_iget(struct super_block *sb, struct ncp_entry_info *info) if (inode) { atomic_set(&NCP_FINFO(inode)->opened, info->opened); - inode->i_mapping->backing_dev_info = sb->s_bdi; inode->i_ino = info->ino; ncp_set_attr(inode, info); if (S_ISREG(inode->i_mode)) { diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 4bffe637ea32..24aac72420f4 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -387,7 +387,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st if (S_ISREG(inode->i_mode)) { inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops; inode->i_data.a_ops = &nfs_file_aops; - inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops; inode->i_fop = &nfs_dir_operations; diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 57ceaf33d177..748ca238915a 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -172,7 +172,6 @@ int nilfs_init_gcinode(struct inode *inode) inode->i_mode = S_IFREG; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); inode->i_mapping->a_ops = &empty_aops; - inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi; ii->i_flags = 0; nilfs_bmap_init_gc(ii->i_bmap); diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index c4dcd1db57ee..892cf5ffdb8e 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -429,7 +429,6 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) inode->i_mode = S_IFREG; mapping_set_gfp_mask(inode->i_mapping, gfp_mask); - inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi; inode->i_op = &def_mdt_iops; inode->i_fop = &def_mdt_fops; @@ -457,13 +456,12 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct nilfs_shadow_map *shadow) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); - struct backing_dev_info *bdi = inode->i_sb->s_bdi; INIT_LIST_HEAD(&shadow->frozen_buffers); address_space_init_once(&shadow->frozen_data); - nilfs_mapping_init(&shadow->frozen_data, inode, bdi); + nilfs_mapping_init(&shadow->frozen_data, inode); 
address_space_init_once(&shadow->frozen_btnodes); - nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi); + nilfs_mapping_init(&shadow->frozen_btnodes, inode); mi->mi_shadow = shadow; return 0; } diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index da276640f776..700ecbcca55d 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -461,14 +461,12 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, return nc; } -void nilfs_mapping_init(struct address_space *mapping, struct inode *inode, - struct backing_dev_info *bdi) +void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) { mapping->host = inode; mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_NOFS); mapping->private_data = NULL; - mapping->backing_dev_info = bdi; mapping->a_ops = &empty_aops; } diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index ef30c5c2426f..a43b8287d012 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -57,8 +57,7 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); void nilfs_copy_back_pages(struct address_space *, struct address_space *); void nilfs_clear_dirty_page(struct page *, bool); void nilfs_clear_dirty_pages(struct address_space *, bool); -void nilfs_mapping_init(struct address_space *mapping, struct inode *inode, - struct backing_dev_info *bdi); +void nilfs_mapping_init(struct address_space *mapping, struct inode *inode); unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); unsigned long nilfs_find_uncommitted_extent(struct inode *inode, sector_t start_blk, diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 3d4bbac36bea..5bc2a1cf73c3 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -166,7 +166,7 @@ struct inode *nilfs_alloc_inode(struct super_block *sb) ii->i_state = 0; ii->i_cno = 0; ii->vfs_inode.i_version = 1; - nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode, sb->s_bdi); + nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode); return &ii->vfs_inode; } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 6000d3029b26..061ba6a91bf2 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -398,7 +398,6 @@ static struct inode *dlmfs_get_root_inode(struct super_block *sb) if (inode) { inode->i_ino = get_next_ino(); inode_init_owner(inode, NULL, mode); - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inc_nlink(inode); @@ -422,7 +421,6 @@ static struct inode *dlmfs_get_inode(struct inode *parent, inode->i_ino = get_next_ino(); inode_init_owner(inode, parent, mode); - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; ip = DLMFS_I(inode); diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index ad4d712002f4..889d558b4e05 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -59,7 +59,6 @@ struct inode *ramfs_get_inode(struct super_block *sb, inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); inode->i_mapping->a_ops = &ramfs_aops; - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); mapping_set_unevictable(inode->i_mapping); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; diff --git a/fs/romfs/super.c b/fs/romfs/super.c index e98dd88197d5..268733cda397 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -355,9 +355,6 @@ static struct inode *romfs_iget(struct super_block *sb, unsigned long pos) case ROMFH_REG: i->i_fop 
= &romfs_ro_fops; i->i_data.a_ops = &romfs_aops; - if (i->i_sb->s_mtd) - i->i_data.backing_dev_info = - i->i_sb->s_mtd->backing_dev_info; if (nextfh & ROMFH_EXEC) mode |= S_IXUGO; break; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ea41649e4ca5..c49b1981ac95 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -108,8 +108,6 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir, inode->i_mtime = inode->i_atime = inode->i_ctime = ubifs_current_time(inode); inode->i_mapping->nrpages = 0; - /* Disable readahead */ - inode->i_mapping->backing_dev_info = &c->bdi; switch (mode & S_IFMT) { case S_IFREG: diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ed93dc6ae245..6197154f36ca 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -156,9 +156,6 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum) if (err) goto out_invalid; - /* Disable read-ahead */ - inode->i_mapping->backing_dev_info = &c->bdi; - switch (inode->i_mode & S_IFMT) { case S_IFREG: inode->i_mapping->a_ops = &ubifs_file_address_operations; diff --git a/include/linux/fs.h b/include/linux/fs.h index 1dada399aa23..65d02de342e1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -34,6 +34,7 @@ #include #include +struct backing_dev_info; struct export_operations; struct hd_geometry; struct iovec; @@ -394,7 +395,6 @@ int pagecache_write_end(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata); -struct backing_dev_info; struct address_space { struct inode *host; /* owner: inode, block_device */ struct radix_tree_root page_tree; /* radix tree of all pages */ @@ -409,7 +409,6 @@ struct address_space { pgoff_t writeback_index;/* writeback starts here */ const struct address_space_operations *a_ops; /* methods */ unsigned long flags; /* error bits/gfp mask */ - struct backing_dev_info *backing_dev_info; /* device readahead, etc */ spinlock_t private_lock; /* for use by the address_space */ struct list_head private_list; /* ditto */ void *private_data; /* ditto */ diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 16c68958aeda..52e0c7652448 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -24,7 +24,6 @@ struct backing_dev_info noop_backing_dev_info = { .name = "noop", .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, }; -EXPORT_SYMBOL_GPL(noop_backing_dev_info); static struct class *bdi_class; diff --git a/mm/shmem.c b/mm/shmem.c index 1b77eaf589fd..4c61d3d5bfb4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1410,7 +1410,6 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode inode->i_ino = get_next_ino(); inode_init_owner(inode, dir, mode); inode->i_blocks = 0; - inode->i_mapping->backing_dev_info = &noop_backing_dev_info; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_generation = get_seconds(); info = SHMEM_I(inode); diff --git a/mm/swap_state.c b/mm/swap_state.c index 1c137b69ecde..405923f77334 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -37,7 +37,6 @@ struct address_space swapper_spaces[MAX_SWAPFILES] = { .page_tree = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN), .i_mmap_writable = ATOMIC_INIT(0), .a_ops = &swap_aops, - .backing_dev_info = &noop_backing_dev_info, } }; -- cgit v1.2.3 From e4d2750909aa26129bd0bf1b0dc832e0845aae90 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:38 +0100 Subject: ceph: remove call to bdi_unregister bdi_destroy already does all the work, and if we delay freeing the anon bdev we can get 
away with just that single call. Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- fs/ceph/super.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 50f06cddc94b..e350cc1611e4 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -40,17 +40,6 @@ static void ceph_put_super(struct super_block *s) dout("put_super\n"); ceph_mdsc_close_sessions(fsc->mdsc); - - /* - * ensure we release the bdi before put_anon_super releases - * the device name. - */ - if (s->s_bdi == &fsc->backing_dev_info) { - bdi_unregister(&fsc->backing_dev_info); - s->s_bdi = NULL; - } - - return; } static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) @@ -1002,11 +991,16 @@ out_final: static void ceph_kill_sb(struct super_block *s) { struct ceph_fs_client *fsc = ceph_sb_to_client(s); + dev_t dev = s->s_dev; + dout("kill_sb %p\n", s); + ceph_mdsc_pre_umount(fsc->mdsc); - kill_anon_super(s); /* will call put_super after sb is r/o */ + generic_shutdown_super(s); ceph_mdsc_destroy(fsc); + destroy_fs_client(fsc); + free_anon_bdev(dev); } static struct file_system_type ceph_fs_type = { -- cgit v1.2.3 From 7b14a213890a81473ec97ad7e322d5c2f19854ae Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:39 +0100 Subject: nfs: don't call bdi_unregister bdi_destroy already does all the work, and if we delay freeing the anon bdev we can get away with just that single call. Additionally remove the call during mount failure, as deactivate_locked_super will already call ->kill_sb and clean up the bdi for us. Signed-off-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- fs/nfs/internal.h | 1 - fs/nfs/nfs4super.c | 1 - fs/nfs/super.c | 24 ++++++------------------ 3 files changed, 6 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index efaa31c70fbe..f519d4187332 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -416,7 +416,6 @@ int nfs_show_options(struct seq_file *, struct dentry *); int nfs_show_devname(struct seq_file *, struct dentry *); int nfs_show_path(struct seq_file *, struct dentry *); int nfs_show_stats(struct seq_file *, struct dentry *); -void nfs_put_super(struct super_block *); int nfs_remount(struct super_block *sb, int *flags, char *raw_data); /* write.c */ diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 6f340f02f2ba..ab30a3a637de 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -53,7 +53,6 @@ static const struct super_operations nfs4_sops = { .destroy_inode = nfs_destroy_inode, .write_inode = nfs4_write_inode, .drop_inode = nfs_drop_inode, - .put_super = nfs_put_super, .statfs = nfs_statfs, .evict_inode = nfs4_evict_inode, .umount_begin = nfs_umount_begin, diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 31a11b0e885d..6ec4fe23b756 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -311,7 +311,6 @@ const struct super_operations nfs_sops = { .destroy_inode = nfs_destroy_inode, .write_inode = nfs_write_inode, .drop_inode = nfs_drop_inode, - .put_super = nfs_put_super, .statfs = nfs_statfs, .evict_inode = nfs_evict_inode, .umount_begin = nfs_umount_begin, @@ -2569,7 +2568,7 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server, error = nfs_bdi_register(server); if (error) { mntroot = ERR_PTR(error); - goto error_splat_bdi; + goto error_splat_super; } server->super = s; } @@ -2601,9 +2600,6 @@ error_splat_root: dput(mntroot); mntroot = ERR_PTR(error);
error_splat_super: - if (server && !s->s_root) - bdi_unregister(&server->backing_dev_info); -error_splat_bdi: deactivate_locked_super(s); goto out; } @@ -2650,28 +2646,20 @@ out: } EXPORT_SYMBOL_GPL(nfs_fs_mount); -/* - * Ensure that we unregister the bdi before kill_anon_super - * releases the device name - */ -void nfs_put_super(struct super_block *s) -{ - struct nfs_server *server = NFS_SB(s); - - bdi_unregister(&server->backing_dev_info); -} -EXPORT_SYMBOL_GPL(nfs_put_super); - /* * Destroy an NFS2/3 superblock */ void nfs_kill_super(struct super_block *s) { struct nfs_server *server = NFS_SB(s); + dev_t dev = s->s_dev; + + generic_shutdown_super(s); - kill_anon_super(s); nfs_fscache_release_super_cookie(s); + nfs_free_server(server); + free_anon_bdev(dev); } EXPORT_SYMBOL_GPL(nfs_kill_super); -- cgit v1.2.3 From df0ce26cb4ee8bc233d50213b97213532aff0a3c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 14 Jan 2015 10:42:41 +0100 Subject: fs: remove default_backing_dev_info Now that default_backing_dev_info is not used for writeback purposes we can get rid of it easily: - instead of using its name for tracing unregistered bdi we just use "unknown" - btrfs and ceph can just assign the default read ahead window themselves like several other filesystems already do. - we can assign noop_backing_dev_info as the default one in alloc_super. All filesystems already either assigned their own or noop_backing_dev_info. Signed-off-by: Christoph Hellwig Reviewed-by: Tejun Heo Reviewed-by: Jan Kara Signed-off-by: Jens Axboe --- fs/btrfs/disk-io.c | 2 +- fs/ceph/super.c | 2 +- fs/super.c | 8 ++------ include/linux/backing-dev.h | 1 - include/trace/events/writeback.h | 6 ++---- mm/backing-dev.c | 9 --------- 6 files changed, 6 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1ec872e3a926..1afb18226da8 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1719,7 +1719,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) if (err) return err; - bdi->ra_pages = default_backing_dev_info.ra_pages; + bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; bdi->congested_fn = btrfs_congested_fn; bdi->congested_data = info; return 0; } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index e350cc1611e4..5ae62587a71d 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -899,7 +899,7 @@ static int ceph_register_bdi(struct super_block *sb, >> PAGE_SHIFT; else fsc->backing_dev_info.ra_pages = - default_backing_dev_info.ra_pages; + VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", atomic_long_inc_return(&bdi_seq)); diff --git a/fs/super.c b/fs/super.c index eae088f6aaae..3b4dadafdd60 100644 --- a/fs/super.c +++ b/fs/super.c @@ -185,8 +185,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) } init_waitqueue_head(&s->s_writers.wait); init_waitqueue_head(&s->s_writers.wait_unfrozen); + s->s_bdi = &noop_backing_dev_info; s->s_flags = flags; - s->s_bdi = &default_backing_dev_info; INIT_HLIST_NODE(&s->s_instances); INIT_HLIST_BL_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); @@ -863,10 +863,7 @@ EXPORT_SYMBOL(free_anon_bdev); int set_anon_super(struct super_block *s, void *data) { - int error = get_anon_bdev(&s->s_dev); - if (!error) - s->s_bdi = &noop_backing_dev_info; - return error; + return get_anon_bdev(&s->s_dev); } EXPORT_SYMBOL(set_anon_super); @@ -1111,7 +1108,6 @@ mount_fs(struct file_system_type *type, int flags,
const char *name, void *data) sb = root->d_sb; BUG_ON(!sb); WARN_ON(!sb->s_bdi); - WARN_ON(sb->s_bdi == &default_backing_dev_info); sb->s_flags |= MS_BORN; error = security_sb_kern_mount(sb, flags, secdata); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index ed59dee03a71..d94077fea1f8 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -241,7 +241,6 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio); #define BDI_CAP_NO_ACCT_AND_WRITEBACK \ (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB) -extern struct backing_dev_info default_backing_dev_info; extern struct backing_dev_info noop_backing_dev_info; int writeback_in_progress(struct backing_dev_info *bdi); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 74f5207bd090..0e9310905413 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -156,10 +156,8 @@ DECLARE_EVENT_CLASS(writeback_work_class, __field(int, reason) ), TP_fast_assign( - struct device *dev = bdi->dev; - if (!dev) - dev = default_backing_dev_info.dev; - strncpy(__entry->name, dev_name(dev), 32); + strncpy(__entry->name, + bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); __entry->nr_pages = work->nr_pages; __entry->sb_dev = work->sb ? work->sb->s_dev : 0; __entry->sync_mode = work->sync_mode; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 1725adb242e0..7690ec77c722 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -14,12 +14,6 @@ static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); -struct backing_dev_info default_backing_dev_info = { - .name = "default", - .ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE, -}; -EXPORT_SYMBOL_GPL(default_backing_dev_info); - struct backing_dev_info noop_backing_dev_info = { .name = "noop", .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, @@ -250,9 +244,6 @@ static int __init default_bdi_init(void) if (!bdi_wq) return -ENOMEM; - err = bdi_init(&default_backing_dev_info); - if (!err) - bdi_register(&default_backing_dev_info, NULL, "default"); err = bdi_init(&noop_backing_dev_info); return err; -- cgit v1.2.3 From e7070be198b34c26f39bd9010a29ce6462dc4f3e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 16 Dec 2014 08:54:43 -0800 Subject: Btrfs: change how we track dirty roots I've been overloading root->dirty_list to keep track of dirty roots and which roots need to have their commit roots switched at transaction commit time. This could cause us to lose an update to the root which could corrupt the file system. To fix this use a state bit to know if the root is dirty, and if it isn't set we go ahead and move the root to the dirty list. This way if we re-dirty the root after adding it to the switch_commit list we make sure to update it. This also makes it so that the extent root is always the last root on the dirty list to try and keep the amount of churn down at this point in the commit. 
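The queueing rule reads more easily as a small userspace toy (illustrative names only, not the btrfs implementation): a root is queued at most once per transaction, guarded by a dirty state bit, and the extent root is kept at the tail of the list so it is committed after the other roots.

/*
 * Userspace sketch; the state bit guard corresponds to test_and_set_bit()
 * taken under trans_lock in the real code, and the head/tail choice mirrors
 * list_move() vs. list_move_tail().
 */
#include <stdbool.h>
#include <stdio.h>

enum { ROOT_TRACK_DIRTY = 1u << 0, ROOT_DIRTY = 1u << 1 };

struct toy_root {
	const char *name;
	unsigned int state;
	bool is_extent_root;
	struct toy_root *next;	/* dirty list, head is committed first */
};

static struct toy_root *dirty_head;

static void toy_add_root_to_dirty_list(struct toy_root *root)
{
	/* already queued this transaction, or not a tracked cow-only root */
	if ((root->state & ROOT_DIRTY) || !(root->state & ROOT_TRACK_DIRTY))
		return;
	root->state |= ROOT_DIRTY;

	if (root->is_extent_root) {
		/* extent root last, so it picks up the other roots' updates */
		struct toy_root **pos = &dirty_head;

		while (*pos)
			pos = &(*pos)->next;
		root->next = NULL;
		*pos = root;
	} else {
		root->next = dirty_head;	/* everyone else goes to the front */
		dirty_head = root;
	}
}

int main(void)
{
	struct toy_root extent = { "extent", ROOT_TRACK_DIRTY, true, NULL };
	struct toy_root chunk = { "chunk", ROOT_TRACK_DIRTY, false, NULL };
	struct toy_root dev = { "dev", ROOT_TRACK_DIRTY, false, NULL };
	struct toy_root *r;

	toy_add_root_to_dirty_list(&extent);
	toy_add_root_to_dirty_list(&chunk);
	toy_add_root_to_dirty_list(&extent);	/* no-op: the bit is already set */
	toy_add_root_to_dirty_list(&dev);

	for (r = dirty_head; r; r = r->next)
		printf("%s\n", r->name);	/* dev, chunk, extent */
	return 0;
}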
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 16 ++++++++++++---- fs/btrfs/ctree.h | 1 + fs/btrfs/transaction.c | 9 ++++++++- 3 files changed, 21 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 14a72ed14ef7..97a98fc07cfc 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -213,11 +213,19 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) */ static void add_root_to_dirty_list(struct btrfs_root *root) { + if (test_bit(BTRFS_ROOT_DIRTY, &root->state) || + !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state)) + return; + spin_lock(&root->fs_info->trans_lock); - if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) && - list_empty(&root->dirty_list)) { - list_add(&root->dirty_list, - &root->fs_info->dirty_cowonly_roots); + if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) { + /* Want the extent tree to be the last on the list */ + if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID) + list_move_tail(&root->dirty_list, + &root->fs_info->dirty_cowonly_roots); + else + list_move(&root->dirty_list, + &root->fs_info->dirty_cowonly_roots); } spin_unlock(&root->fs_info->trans_lock); } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 7e607416755a..45ed4dc6a0ce 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1775,6 +1775,7 @@ struct btrfs_subvolume_writers { #define BTRFS_ROOT_DEFRAG_RUNNING 6 #define BTRFS_ROOT_FORCE_COW 7 #define BTRFS_ROOT_MULTI_LOG_TASKS 8 +#define BTRFS_ROOT_DIRTY 9 /* * in ram representation of the tree. extent_root is used for all allocations diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a605d4e2f2bc..aa2219ebecc9 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1020,6 +1020,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, u64 old_root_bytenr; u64 old_root_used; struct btrfs_root *tree_root = root->fs_info->tree_root; + bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID); old_root_used = btrfs_root_used(&root->root_item); btrfs_write_dirty_block_groups(trans, root); @@ -1038,7 +1039,12 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, return ret; old_root_used = btrfs_root_used(&root->root_item); - ret = btrfs_write_dirty_block_groups(trans, root); + if (extent_root) { + ret = btrfs_write_dirty_block_groups(trans, root); + if (ret) + return ret; + } + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); if (ret) return ret; } @@ -1097,6 +1103,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, next = fs_info->dirty_cowonly_roots.next; list_del_init(next); root = list_entry(next, struct btrfs_root, dirty_list); + clear_bit(BTRFS_ROOT_DIRTY, &root->state); if (root != fs_info->extent_root) list_add_tail(&root->dirty_list, -- cgit v1.2.3 From ce93ec548cfa02f9cd6b70d546d5f36f4d160f57 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 17 Nov 2014 15:45:48 -0500 Subject: Btrfs: track dirty block groups on their own list Currently any time we try to update the block groups on disk we will walk _all_ block groups and check for the ->dirty flag to see if it is set. This function can get called several times during a commit. So if you have several terabytes of data you will be a very sad panda as we will loop through _all_ of the block groups several times, which makes the commit take a while which slows down the rest of the file system operations. 
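The core of the replacement, described next, is a per-transaction list guarded by a spinlock: a block group is queued the first time it is dirtied, and commit only walks that list. A condensed sketch of the two hunks below:

	/* update_block_group(): queue the block group on first modification */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (list_empty(&cache->dirty_list)) {
		list_add_tail(&cache->dirty_list,
			      &trans->transaction->dirty_bgs);
		btrfs_get_block_group(cache);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	/* btrfs_write_dirty_block_groups(): only dirtied groups are visited */
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);
		list_del_init(&cache->dirty_list);
		/* ... write out the space cache and the block group item ... */
		btrfs_put_block_group(cache);
	}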
This patch introduces a dirty list for the block groups that we get added to when we dirty the block group for the first time. Then we simply update any block groups that have been dirtied since the last time we called btrfs_write_dirty_block_groups. This allows us to clean up how we write the free space cache out so it is much cleaner. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 5 +- fs/btrfs/extent-tree.c | 167 ++++++++++++++------------------------------ fs/btrfs/free-space-cache.c | 8 ++- fs/btrfs/transaction.c | 14 ++-- fs/btrfs/transaction.h | 2 + 5 files changed, 72 insertions(+), 124 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 45ed4dc6a0ce..0b4683f560c8 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1238,7 +1238,6 @@ enum btrfs_disk_cache_state { BTRFS_DC_ERROR = 1, BTRFS_DC_CLEAR = 2, BTRFS_DC_SETUP = 3, - BTRFS_DC_NEED_WRITE = 4, }; struct btrfs_caching_control { @@ -1276,7 +1275,6 @@ struct btrfs_block_group_cache { unsigned long full_stripe_len; unsigned int ro:1; - unsigned int dirty:1; unsigned int iref:1; unsigned int has_caching_ctl:1; unsigned int removed:1; @@ -1314,6 +1312,9 @@ struct btrfs_block_group_cache { struct list_head ro_list; atomic_t trimming; + + /* For dirty block groups */ + struct list_head dirty_list; }; /* delayed seq elem */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 15116585e714..21c373fe256c 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -74,8 +74,9 @@ enum { RESERVE_ALLOC_NO_ACCOUNT = 2, }; -static int update_block_group(struct btrfs_root *root, - u64 bytenr, u64 num_bytes, int alloc); +static int update_block_group(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, int alloc); static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, @@ -3315,120 +3316,42 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_block_group_cache *cache; - int err = 0; + struct btrfs_transaction *cur_trans = trans->transaction; + int ret = 0; struct btrfs_path *path; - u64 last = 0; + + if (list_empty(&cur_trans->dirty_bgs)) + return 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; -again: - while (1) { - cache = btrfs_lookup_first_block_group(root->fs_info, last); - while (cache) { - if (cache->disk_cache_state == BTRFS_DC_CLEAR) - break; - cache = next_block_group(root, cache); - } - if (!cache) { - if (last == 0) - break; - last = 0; - continue; - } - err = cache_save_setup(cache, trans, path); - last = cache->key.objectid + cache->key.offset; - btrfs_put_block_group(cache); - } - - while (1) { - if (last == 0) { - err = btrfs_run_delayed_refs(trans, root, - (unsigned long)-1); - if (err) /* File system offline */ - goto out; - } - - cache = btrfs_lookup_first_block_group(root->fs_info, last); - while (cache) { - if (cache->disk_cache_state == BTRFS_DC_CLEAR) { - btrfs_put_block_group(cache); - goto again; - } - - if (cache->dirty) - break; - cache = next_block_group(root, cache); - } - if (!cache) { - if (last == 0) - break; - last = 0; - continue; - } - - if (cache->disk_cache_state == BTRFS_DC_SETUP) - cache->disk_cache_state = BTRFS_DC_NEED_WRITE; - cache->dirty = 0; - last = cache->key.objectid + cache->key.offset; - - err = write_one_cache_group(trans, root, path, cache); - btrfs_put_block_group(cache); - if (err) /* File system offline */ - goto out; - 
} - - while (1) { - /* - * I don't think this is needed since we're just marking our - * preallocated extent as written, but just in case it can't - * hurt. - */ - if (last == 0) { - err = btrfs_run_delayed_refs(trans, root, - (unsigned long)-1); - if (err) /* File system offline */ - goto out; - } - - cache = btrfs_lookup_first_block_group(root->fs_info, last); - while (cache) { - /* - * Really this shouldn't happen, but it could if we - * couldn't write the entire preallocated extent and - * splitting the extent resulted in a new block. - */ - if (cache->dirty) { - btrfs_put_block_group(cache); - goto again; - } - if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) - break; - cache = next_block_group(root, cache); - } - if (!cache) { - if (last == 0) - break; - last = 0; - continue; - } - - err = btrfs_write_out_cache(root, trans, cache, path); - - /* - * If we didn't have an error then the cache state is still - * NEED_WRITE, so we can set it to WRITTEN. - */ - if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE) - cache->disk_cache_state = BTRFS_DC_WRITTEN; - last = cache->key.objectid + cache->key.offset; + /* + * We don't need the lock here since we are protected by the transaction + * commit. We want to do the cache_save_setup first and then run the + * delayed refs to make sure we have the best chance at doing this all + * in one shot. + */ + while (!list_empty(&cur_trans->dirty_bgs)) { + cache = list_first_entry(&cur_trans->dirty_bgs, + struct btrfs_block_group_cache, + dirty_list); + list_del_init(&cache->dirty_list); + if (cache->disk_cache_state == BTRFS_DC_CLEAR) + cache_save_setup(cache, trans, path); + if (!ret) + ret = btrfs_run_delayed_refs(trans, root, + (unsigned long) -1); + if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) + btrfs_write_out_cache(root, trans, cache, path); + if (!ret) + ret = write_one_cache_group(trans, root, path, cache); btrfs_put_block_group(cache); } -out: btrfs_free_path(path); - return err; + return ret; } int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) @@ -5375,8 +5298,9 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) btrfs_free_reserved_data_space(inode, num_bytes); } -static int update_block_group(struct btrfs_root *root, - u64 bytenr, u64 num_bytes, int alloc) +static int update_block_group(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, int alloc) { struct btrfs_block_group_cache *cache = NULL; struct btrfs_fs_info *info = root->fs_info; @@ -5414,6 +5338,14 @@ static int update_block_group(struct btrfs_root *root, if (!alloc && cache->cached == BTRFS_CACHE_NO) cache_block_group(cache, 1); + spin_lock(&trans->transaction->dirty_bgs_lock); + if (list_empty(&cache->dirty_list)) { + list_add_tail(&cache->dirty_list, + &trans->transaction->dirty_bgs); + btrfs_get_block_group(cache); + } + spin_unlock(&trans->transaction->dirty_bgs_lock); + byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); @@ -5424,7 +5356,6 @@ static int update_block_group(struct btrfs_root *root, cache->disk_cache_state < BTRFS_DC_CLEAR) cache->disk_cache_state = BTRFS_DC_CLEAR; - cache->dirty = 1; old_val = btrfs_block_group_used(&cache->item); num_bytes = min(total, cache->key.offset - byte_in_group); if (alloc) { @@ -6103,7 +6034,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, } } - ret = update_block_group(root, bytenr, num_bytes, 0); + ret = update_block_group(trans, root, bytenr, num_bytes, 0); if (ret) { 
btrfs_abort_transaction(trans, extent_root, ret); goto out; @@ -7063,7 +6994,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, if (ret) return ret; - ret = update_block_group(root, ins->objectid, ins->offset, 1); + ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); if (ret) { /* -ENOENT, logic error */ btrfs_err(fs_info, "update block group failed for %llu %llu", ins->objectid, ins->offset); @@ -7152,7 +7083,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, return ret; } - ret = update_block_group(root, ins->objectid, root->nodesize, 1); + ret = update_block_group(trans, root, ins->objectid, root->nodesize, + 1); if (ret) { /* -ENOENT, logic error */ btrfs_err(fs_info, "update block group failed for %llu %llu", ins->objectid, ins->offset); @@ -9005,6 +8937,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size) INIT_LIST_HEAD(&cache->cluster_list); INIT_LIST_HEAD(&cache->bg_list); INIT_LIST_HEAD(&cache->ro_list); + INIT_LIST_HEAD(&cache->dirty_list); btrfs_init_free_space_ctl(cache); atomic_set(&cache->trimming, 0); @@ -9068,9 +9001,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) * b) Setting 'dirty flag' makes sure that we flush * the new space cache info onto disk. */ - cache->disk_cache_state = BTRFS_DC_CLEAR; if (btrfs_test_opt(root, SPACE_CACHE)) - cache->dirty = 1; + cache->disk_cache_state = BTRFS_DC_CLEAR; } read_extent_buffer(leaf, &cache->item, @@ -9461,6 +9393,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, } } + spin_lock(&trans->transaction->dirty_bgs_lock); + if (!list_empty(&block_group->dirty_list)) { + list_del_init(&block_group->dirty_list); + btrfs_put_block_group(block_group); + } + spin_unlock(&trans->transaction->dirty_bgs_lock); + btrfs_remove_free_space_cache(block_group); spin_lock(&block_group->space_info->lock); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d6c03f7f136b..80a3141463e7 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1243,6 +1243,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct inode *inode; int ret = 0; + enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN; root = root->fs_info->tree_root; @@ -1266,9 +1267,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, path, block_group->key.objectid); if (ret) { - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_ERROR; - spin_unlock(&block_group->lock); + dcs = BTRFS_DC_ERROR; ret = 0; #ifdef DEBUG btrfs_err(root->fs_info, @@ -1277,6 +1276,9 @@ int btrfs_write_out_cache(struct btrfs_root *root, #endif } + spin_lock(&block_group->lock); + block_group->disk_cache_state = dcs; + spin_unlock(&block_group->lock); iput(inode); return ret; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index aa2219ebecc9..e0faf803513a 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -248,6 +248,8 @@ loop: INIT_LIST_HEAD(&cur_trans->pending_chunks); INIT_LIST_HEAD(&cur_trans->switch_commits); INIT_LIST_HEAD(&cur_trans->pending_ordered); + INIT_LIST_HEAD(&cur_trans->dirty_bgs); + spin_lock_init(&cur_trans->dirty_bgs_lock); list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(&cur_trans->dirty_pages, fs_info->btree_inode->i_mapping); @@ -1028,7 +1030,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, 
while (1) { old_root_bytenr = btrfs_root_bytenr(&root->root_item); if (old_root_bytenr == root->node->start && - old_root_used == btrfs_root_used(&root->root_item)) + old_root_used == btrfs_root_used(&root->root_item) && + (!extent_root || + list_empty(&trans->transaction->dirty_bgs))) break; btrfs_set_root_node(&root->root_item, root->node); @@ -1047,6 +1051,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); if (ret) return ret; + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); + if (ret) + return ret; } return 0; @@ -1067,10 +1074,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, struct extent_buffer *eb; int ret; - ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); - if (ret) - return ret; - eb = btrfs_lock_root_node(fs_info->tree_root); ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb); @@ -1990,6 +1993,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, switch_commit_roots(cur_trans, root->fs_info); assert_qgroups_uptodate(trans); + ASSERT(list_empty(&cur_trans->dirty_bgs)); update_super_roots(root); btrfs_set_super_log_root(root->fs_info->super_copy, 0); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 00ed29c4b3f9..3305451451ca 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -58,6 +58,8 @@ struct btrfs_transaction { struct list_head pending_chunks; struct list_head pending_ordered; struct list_head switch_commits; + struct list_head dirty_bgs; + spinlock_t dirty_bgs_lock; struct btrfs_delayed_ref_root delayed_refs; int aborted; }; -- cgit v1.2.3 From a8df6fe666f9a3a7c08df70f94351f967462dd95 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 20 Jan 2015 12:40:53 +0000 Subject: Btrfs: fix setup_leaf_for_split() to avoid leaf corruption We were incorrectly detecting when the target key didn't exist anymore after releasing the path and re-searching the tree. This could make us split or duplicate (btrfs_split_item() and btrfs_duplicate_item() are its only callers at the moment) an item when we should not. For the case of duplicating an item, we currently only duplicate checksum items (csum tree) and file extent items (fs/subvol trees). For the checksum items we end up overriding the item completely, but for file extent items we update only some of their fields in the copy (done in __btrfs_drop_extents), which means we can end up having a logical corruption for some values. Also for the case where we duplicate a file extent item it will make us produce a leaf with a wrong key order, as btrfs_duplicate_item() advances us to the next slot and then its caller sets a smaller key on the new item at that slot (like in __btrfs_drop_extents() e.g.). Alternatively if the tree search in setup_leaf_for_split() leaves with path->slots[0] == btrfs_header_nritems(path->nodes[0]), we end up accessing beyond the leaf's end (when we check if the item's size has changed) and make our caller insert an item at the invalid slot btrfs_header_nritems(path->nodes[0]) + 1, causing an invalid memory access if the leaf is full or nearly full. 
This issue has been present since the introduction of this function in 2009: Btrfs: Add btrfs_duplicate_item commit ad48fd754676bfae4139be1a897b1ea58f9aaf21 Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6b2ec9089229..fdd8fb4c6075 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -4353,13 +4353,15 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, path->search_for_split = 1; ret = btrfs_search_slot(trans, root, &key, path, 0, 1); path->search_for_split = 0; + if (ret > 0) + ret = -EAGAIN; if (ret < 0) goto err; ret = -EAGAIN; leaf = path->nodes[0]; - /* if our item isn't there or got smaller, return now */ - if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0])) + /* if our item isn't there, return now */ + if (item_size != btrfs_item_size_nr(leaf, path->slots[0])) goto err; /* the leaf has changed, it now has room. return now */ -- cgit v1.2.3 From 68b663d13ceba777ee6e250154f0e3ab28bd1181 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:37 +0100 Subject: btrfs: update message levels for errors Several messages that point to some internal problem, level INFO is wrong here. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 9 +++++---- fs/btrfs/inode.c | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 65384de10375..2a3e2252ddbf 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -367,7 +367,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, ret = 0; goto out; } - printk_ratelimited(KERN_INFO "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n", + printk_ratelimited(KERN_ERR + "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n", eb->fs_info->sb->s_id, eb->start, parent_transid, btrfs_header_generation(eb)); ret = 1; @@ -633,21 +634,21 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, found_start = btrfs_header_bytenr(eb); if (found_start != eb->start) { - printk_ratelimited(KERN_INFO "BTRFS (device %s): bad tree block start " + printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start " "%llu %llu\n", eb->fs_info->sb->s_id, found_start, eb->start); ret = -EIO; goto err; } if (check_tree_block_fsid(root, eb)) { - printk_ratelimited(KERN_INFO "BTRFS (device %s): bad fsid on block %llu\n", + printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n", eb->fs_info->sb->s_id, eb->start); ret = -EIO; goto err; } found_level = btrfs_header_level(eb); if (found_level >= BTRFS_MAX_LEVEL) { - btrfs_info(root->fs_info, "bad tree block level %d", + btrfs_err(root->fs_info, "bad tree block level %d", (int)btrfs_header_level(eb)); ret = -EIO; goto err; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 370f23416080..29892ebc798b 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3407,7 +3407,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) out: if (ret) - btrfs_crit(root->fs_info, + btrfs_err(root->fs_info, "could not do orphan cleanup %d", ret); btrfs_free_path(path); return ret; -- cgit v1.2.3 From aa8ee31209d644a3f4716cb0c43aba7889fbef31 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:41 +0100 Subject: btrfs: update message levels during failed mount All error conditions from open_ctree shall 
be ERR. Warning would suggest that something's wrong and we can continue. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2a3e2252ddbf..7ff24cbcf656 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2533,7 +2533,7 @@ int open_ctree(struct super_block *sb, */ if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && (sectorsize != nodesize)) { - printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes " + printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes " "are not allowed for mixed block groups on %s\n", sb->s_id); goto fail_alloc; @@ -2641,12 +2641,12 @@ int open_ctree(struct super_block *sb, sb->s_blocksize_bits = blksize_bits(sectorsize); if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) { - printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id); + printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id); goto fail_sb_buffer; } if (sectorsize != PAGE_SIZE) { - printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) " + printk(KERN_ERR "BTRFS: incompatible sector size (%lu) " "found on %s\n", (unsigned long)sectorsize, sb->s_id); goto fail_sb_buffer; } @@ -2655,7 +2655,7 @@ int open_ctree(struct super_block *sb, ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { - printk(KERN_WARNING "BTRFS: failed to read the system " + printk(KERN_ERR "BTRFS: failed to read the system " "array on %s\n", sb->s_id); goto fail_sb_buffer; } @@ -2670,7 +2670,7 @@ int open_ctree(struct super_block *sb, generation); if (!chunk_root->node || !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { - printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n", + printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); goto fail_tree_roots; } @@ -2682,7 +2682,7 @@ int open_ctree(struct super_block *sb, ret = btrfs_read_chunk_tree(chunk_root); if (ret) { - printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n", + printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n", sb->s_id); goto fail_tree_roots; } @@ -2694,7 +2694,7 @@ int open_ctree(struct super_block *sb, btrfs_close_extra_devices(fs_info, fs_devices, 0); if (!fs_devices->latest_bdev) { - printk(KERN_CRIT "BTRFS: failed to read devices on %s\n", + printk(KERN_ERR "BTRFS: failed to read devices on %s\n", sb->s_id); goto fail_tree_roots; } @@ -2778,7 +2778,7 @@ retry_root_backup: ret = btrfs_recover_balance(fs_info); if (ret) { - printk(KERN_WARNING "BTRFS: failed to recover balance\n"); + printk(KERN_ERR "BTRFS: failed to recover balance\n"); goto fail_block_groups; } -- cgit v1.2.3 From f0954c66377b1610a512fd05f4d33afa54441c3b Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:44 +0100 Subject: btrfs: update message levels after checksum errors The errors are worth noting and might get missed with INFO level. 
Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 +- fs/btrfs/inode.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7ff24cbcf656..20da68206fba 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -318,7 +318,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); - printk_ratelimited(KERN_INFO + printk_ratelimited(KERN_WARNING "BTRFS: %s checksum verify failed on %llu wanted %X found %X " "level %d\n", root->fs_info->sb->s_id, buf->start, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 29892ebc798b..5e529208ffea 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2945,7 +2945,7 @@ static int __readpage_endio_check(struct inode *inode, return 0; zeroit: if (__ratelimit(&_rs)) - btrfs_info(BTRFS_I(inode)->root->fs_info, + btrfs_warn(BTRFS_I(inode)->root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u", btrfs_ino(inode), start, csum, csum_expected); memset(kaddr + pgoff, 1, len); -- cgit v1.2.3 From 5efa0490cc94aee06cd8d282683e22a8ce0a0026 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:47 +0100 Subject: btrfs: set proper message level for skinny metadata This has been confusing people for too long, the message is really just informative. CC: # 3.10+ Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 20da68206fba..0696a103751e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2509,7 +2509,7 @@ int open_ctree(struct super_block *sb, features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) - printk(KERN_ERR "BTRFS: has skinny extents\n"); + printk(KERN_INFO "BTRFS: has skinny extents\n"); /* * flag our filesystem as having big metadata blocks if -- cgit v1.2.3 From 9ee49a047dc53fd21808cbb7f3b6a3345463e834 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 14 Jan 2015 19:52:13 +0100 Subject: btrfs: switch extent_state state to unsigned Currently there's a 4B hole in the structure between refs and state and there are only 16 bits used so we can make it unsigned. This will get a better packing and may save some stack space for local variables. The size of extent_state gets reduced by 8B and there are usually a lot of slab objects. 
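The saving is easy to reproduce in a standalone program; the structs here are only stand-ins for the tail of extent_state (refs, state, private), not the real definition. The pahole layout of the unpatched struct follows.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct tail_before {		/* atomic_t refs; unsigned long state; u64 private; */
	int32_t  refs;
	unsigned long state;	/* 8 bytes on 64-bit, forces a 4-byte hole after refs */
	uint64_t priv;
};

struct tail_after {		/* atomic_t refs; unsigned state; u64 private; */
	int32_t  refs;
	unsigned int state;	/* packs into the 4 bytes right after refs */
	uint64_t priv;
};

int main(void)
{
	printf("before: size %zu, state at offset %zu\n",
	       sizeof(struct tail_before), offsetof(struct tail_before, state));
	printf("after:  size %zu, state at offset %zu\n",
	       sizeof(struct tail_after), offsetof(struct tail_after, state));
	return 0;	/* on x86_64: 24 vs 16 bytes, i.e. the 8 byte saving */
}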
struct extent_state { u64 start; /* 0 8 */ u64 end; /* 8 8 */ struct rb_node rb_node; /* 16 24 */ wait_queue_head_t wq; /* 40 24 */ /* --- cacheline 1 boundary (64 bytes) --- */ atomic_t refs; /* 64 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int state; /* 72 8 */ u64 private; /* 80 8 */ /* size: 88, cachelines: 2, members: 7 */ /* sum members: 84, holes: 1, sum holes: 4 */ /* last cacheline: 24 bytes */ }; Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 45 ++++++++++++++++--------------- fs/btrfs/extent_io.h | 58 ++++++++++++++++++++-------------------- fs/btrfs/inode.c | 4 +-- fs/btrfs/tests/extent-io-tests.c | 3 +-- 4 files changed, 55 insertions(+), 55 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c4ca90ab687e..65bd285ef361 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -64,7 +64,7 @@ void btrfs_leak_debug_check(void) while (!list_empty(&states)) { state = list_entry(states.next, struct extent_state, leak_list); - pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n", + pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", state->start, state->end, state->state, extent_state_in_tree(state), atomic_read(&state->refs)); @@ -396,21 +396,21 @@ static void merge_state(struct extent_io_tree *tree, } static void set_state_cb(struct extent_io_tree *tree, - struct extent_state *state, unsigned long *bits) + struct extent_state *state, unsigned *bits) { if (tree->ops && tree->ops->set_bit_hook) tree->ops->set_bit_hook(tree->mapping->host, state, bits); } static void clear_state_cb(struct extent_io_tree *tree, - struct extent_state *state, unsigned long *bits) + struct extent_state *state, unsigned *bits) { if (tree->ops && tree->ops->clear_bit_hook) tree->ops->clear_bit_hook(tree->mapping->host, state, bits); } static void set_state_bits(struct extent_io_tree *tree, - struct extent_state *state, unsigned long *bits); + struct extent_state *state, unsigned *bits); /* * insert an extent_state struct into the tree. 'bits' are set on the @@ -426,7 +426,7 @@ static int insert_state(struct extent_io_tree *tree, struct extent_state *state, u64 start, u64 end, struct rb_node ***p, struct rb_node **parent, - unsigned long *bits) + unsigned *bits) { struct rb_node *node; @@ -511,10 +511,10 @@ static struct extent_state *next_state(struct extent_state *state) */ static struct extent_state *clear_state_bit(struct extent_io_tree *tree, struct extent_state *state, - unsigned long *bits, int wake) + unsigned *bits, int wake) { struct extent_state *next; - unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS; + unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS; if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { u64 range = state->end - state->start + 1; @@ -570,7 +570,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err) * This takes the tree lock, and returns 0 on success and < 0 on error. 
*/ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int wake, int delete, + unsigned bits, int wake, int delete, struct extent_state **cached_state, gfp_t mask) { @@ -789,9 +789,9 @@ out: static void set_state_bits(struct extent_io_tree *tree, struct extent_state *state, - unsigned long *bits) + unsigned *bits) { - unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS; + unsigned bits_to_set = *bits & ~EXTENT_CTLBITS; set_state_cb(tree, state, bits); if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { @@ -803,7 +803,7 @@ static void set_state_bits(struct extent_io_tree *tree, static void cache_state_if_flags(struct extent_state *state, struct extent_state **cached_ptr, - const u64 flags) + unsigned flags) { if (cached_ptr && !(*cached_ptr)) { if (!flags || (state->state & flags)) { @@ -833,7 +833,7 @@ static void cache_state(struct extent_state *state, static int __must_check __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, unsigned long exclusive_bits, + unsigned bits, unsigned exclusive_bits, u64 *failed_start, struct extent_state **cached_state, gfp_t mask) { @@ -1034,7 +1034,7 @@ search_again: } int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, u64 * failed_start, + unsigned bits, u64 * failed_start, struct extent_state **cached_state, gfp_t mask) { return __set_extent_bit(tree, start, end, bits, 0, failed_start, @@ -1060,7 +1060,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, * boundary bits like LOCK. */ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, unsigned long clear_bits, + unsigned bits, unsigned clear_bits, struct extent_state **cached_state, gfp_t mask) { struct extent_state *state; @@ -1268,14 +1268,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, } int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask) + unsigned bits, gfp_t mask) { return set_extent_bit(tree, start, end, bits, NULL, NULL, mask); } int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask) + unsigned bits, gfp_t mask) { return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); } @@ -1330,10 +1330,11 @@ int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, * us if waiting is desired. */ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, struct extent_state **cached_state) + unsigned bits, struct extent_state **cached_state) { int err; u64 failed_start; + while (1) { err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, EXTENT_LOCKED, &failed_start, @@ -1440,7 +1441,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) */ static struct extent_state * find_first_extent_bit_state(struct extent_io_tree *tree, - u64 start, unsigned long bits) + u64 start, unsigned bits) { struct rb_node *node; struct extent_state *state; @@ -1474,7 +1475,7 @@ out: * If nothing was found, 1 is returned. If found something, return 0. 
*/ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned long bits, + u64 *start_ret, u64 *end_ret, unsigned bits, struct extent_state **cached_state) { struct extent_state *state; @@ -1753,7 +1754,7 @@ out_failed: int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, struct page *locked_page, - unsigned long clear_bits, + unsigned clear_bits, unsigned long page_ops) { struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; @@ -1810,7 +1811,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, - unsigned long bits, int contig) + unsigned bits, int contig) { struct rb_node *node; struct extent_state *state; @@ -1928,7 +1929,7 @@ out: * range is found set. */ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int filled, struct extent_state *cached) + unsigned bits, int filled, struct extent_state *cached) { struct extent_state *state = NULL; struct rb_node *node; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 71268e508b7a..695b0ccfb755 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -4,22 +4,22 @@ #include /* bits for the extent state */ -#define EXTENT_DIRTY 1 -#define EXTENT_WRITEBACK (1 << 1) -#define EXTENT_UPTODATE (1 << 2) -#define EXTENT_LOCKED (1 << 3) -#define EXTENT_NEW (1 << 4) -#define EXTENT_DELALLOC (1 << 5) -#define EXTENT_DEFRAG (1 << 6) -#define EXTENT_BOUNDARY (1 << 9) -#define EXTENT_NODATASUM (1 << 10) -#define EXTENT_DO_ACCOUNTING (1 << 11) -#define EXTENT_FIRST_DELALLOC (1 << 12) -#define EXTENT_NEED_WAIT (1 << 13) -#define EXTENT_DAMAGED (1 << 14) -#define EXTENT_NORESERVE (1 << 15) -#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) -#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) +#define EXTENT_DIRTY (1U << 0) +#define EXTENT_WRITEBACK (1U << 1) +#define EXTENT_UPTODATE (1U << 2) +#define EXTENT_LOCKED (1U << 3) +#define EXTENT_NEW (1U << 4) +#define EXTENT_DELALLOC (1U << 5) +#define EXTENT_DEFRAG (1U << 6) +#define EXTENT_BOUNDARY (1U << 9) +#define EXTENT_NODATASUM (1U << 10) +#define EXTENT_DO_ACCOUNTING (1U << 11) +#define EXTENT_FIRST_DELALLOC (1U << 12) +#define EXTENT_NEED_WAIT (1U << 13) +#define EXTENT_DAMAGED (1U << 14) +#define EXTENT_NORESERVE (1U << 15) +#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) +#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) /* * flags for bio submission. 
The high bits indicate the compression @@ -81,9 +81,9 @@ struct extent_io_ops { int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate); void (*set_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned long *bits); + unsigned *bits); void (*clear_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned long *bits); + unsigned *bits); void (*merge_extent_hook)(struct inode *inode, struct extent_state *new, struct extent_state *other); @@ -108,7 +108,7 @@ struct extent_state { /* ADD NEW ELEMENTS AFTER THIS */ wait_queue_head_t wq; atomic_t refs; - unsigned long state; + unsigned state; /* for use by the FS */ u64 private; @@ -188,7 +188,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, int try_release_extent_buffer(struct page *page); int lock_extent(struct extent_io_tree *tree, u64 start, u64 end); int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, struct extent_state **cached); + unsigned bits, struct extent_state **cached); int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end); int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached, gfp_t mask); @@ -202,21 +202,21 @@ void extent_io_exit(void); u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, - u64 max_bytes, unsigned long bits, int contig); + u64 max_bytes, unsigned bits, int contig); void free_extent_state(struct extent_state *state); int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int filled, + unsigned bits, int filled, struct extent_state *cached_state); int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask); + unsigned bits, gfp_t mask); int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int wake, int delete, + unsigned bits, int wake, int delete, struct extent_state **cached, gfp_t mask); int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask); + unsigned bits, gfp_t mask); int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, u64 *failed_start, + unsigned bits, u64 *failed_start, struct extent_state **cached_state, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); @@ -229,14 +229,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, unsigned long clear_bits, + unsigned bits, unsigned clear_bits, struct extent_state **cached_state, gfp_t mask); int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned long bits, + u64 *start_ret, u64 *end_ret, unsigned bits, struct extent_state **cached_state); int extent_invalidatepage(struct extent_io_tree *tree, struct page *page, unsigned long offset); @@ -323,7 +323,7 @@ int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); int extent_range_redirty_for_io(struct inode 
*inode, u64 start, u64 end); int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, struct page *locked_page, - unsigned long bits_to_clear, + unsigned bits_to_clear, unsigned long page_ops); struct bio * btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5e529208ffea..220f0c3f2df6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1604,7 +1604,7 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root, * have pending delalloc work to be done. */ static void btrfs_set_bit_hook(struct inode *inode, - struct extent_state *state, unsigned long *bits) + struct extent_state *state, unsigned *bits) { if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) @@ -1645,7 +1645,7 @@ static void btrfs_set_bit_hook(struct inode *inode, */ static void btrfs_clear_bit_hook(struct inode *inode, struct extent_state *state, - unsigned long *bits) + unsigned *bits) { u64 len = state->end + 1 - state->start; diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 7e99c2f98dd0..9e9f2368177d 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -258,8 +258,7 @@ static int test_find_delalloc(void) } ret = 0; out_bits: - clear_extent_bits(&tmp, 0, total_dirty - 1, - (unsigned long)-1, GFP_NOFS); + clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS); out: if (locked_page) page_cache_release(locked_page); -- cgit v1.2.3 From 730a78c741df4eaa24589015191f03c2d1240091 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 20 Jan 2015 19:05:37 +0100 Subject: btrfs: remove a no-op unfreeze superbock callback Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/super.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 60f7cbe815e9..100a0442c413 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1948,11 +1948,6 @@ static int btrfs_freeze(struct super_block *sb) return btrfs_commit_transaction(trans, root); } -static int btrfs_unfreeze(struct super_block *sb) -{ - return 0; -} - static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); @@ -2001,7 +1996,6 @@ static const struct super_operations btrfs_super_ops = { .statfs = btrfs_statfs, .remount_fs = btrfs_remount, .freeze_fs = btrfs_freeze, - .unfreeze_fs = btrfs_unfreeze, }; static const struct file_operations btrfs_ctl_fops = { -- cgit v1.2.3 From 6219872dc6e56529159f04e73587ed0fcd63eb20 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 6 Jan 2015 20:18:45 +0000 Subject: Btrfs: lookup for block group only if needed when freeing a tree block Very often our extent buffer's header generation doesn't match the current transaction's id or it is also referenced by other trees (snapshots), so we don't need the corresponding block group cache object. Therefore only search for it if we are going to use it, so we avoid an unnecessary search in the block groups rbtree (and acquiring and releasing its spinlock). Freeing a tree block is performed when COWing or deleting a node/leaf, which implies we are holding the node/leaf's parent node lock, therefore reducing the amount of time spent when freeing a tree block helps reducing the amount of time we are holding the parent node's lock. For example, for a run of xfstests/generic/083, the block group cache object was needed only 682 times for a total of 226691 calls to free a tree block. 
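In outline, btrfs_free_tree_block() now defers the lookup until it is known the buffer will actually be freed back to a block group. A sketch of the reordering applied below:

	if (btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group_cache *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		/* only look the block group up once it is really needed */
		cache = btrfs_lookup_block_group(root->fs_info, buf->start);
		/* ... pin or free the extent ... */
		btrfs_put_block_group(cache);
	}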
Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1c591d6eae58..3af53f8313af 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6136,7 +6136,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, struct extent_buffer *buf, u64 parent, int last_ref) { - struct btrfs_block_group_cache *cache = NULL; int pin = 1; int ret; @@ -6152,17 +6151,20 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, if (!last_ref) return; - cache = btrfs_lookup_block_group(root->fs_info, buf->start); - if (btrfs_header_generation(buf) == trans->transid) { + struct btrfs_block_group_cache *cache; + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { ret = check_ref_cleanup(trans, root, buf->start); if (!ret) goto out; } + cache = btrfs_lookup_block_group(root->fs_info, buf->start); + if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { pin_down_extent(root, cache, buf->start, buf->len, 1); + btrfs_put_block_group(cache); goto out; } @@ -6170,6 +6172,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, btrfs_add_free_space(cache, buf->start, buf->len); btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0); + btrfs_put_block_group(cache); trace_btrfs_reserved_extent_free(root, buf->start, buf->len); pin = 0; } @@ -6184,7 +6187,6 @@ out: * anymore. */ clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); - btrfs_put_block_group(cache); } /* Can return -ENOMEM */ -- cgit v1.2.3 From d36808e0d4fc4802892dbd1dba964912cddf1323 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 10 Jan 2015 10:56:48 +0000 Subject: Btrfs: fix directory inconsistency after fsync log replay If we have an inode (file) with a link count greater than 1, remove one of its hard links, fsync the inode, power fail/crash and then replay the fsync log on the next mount, we end up getting the parent directory's metadata inconsistent - its i_size still reflects the deleted hard link and has dangling index entries (with no matching inode reference entries). This prevents the directory from ever being deletable, as its i_size can never decrease to BTRFS_EMPTY_DIR_SIZE even if all of its children inodes are deleted, and the dangling index entries can never be removed (as they point to an inode that does not exist anymore). This is easy to reproduce with the following excerpt from the test case for xfstests that I just made: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create a test file with 2 hard links in the same directory. mkdir -p $SCRATCH_MNT/a/b echo "hello world" > $SCRATCH_MNT/a/b/foo ln $SCRATCH_MNT/a/b/foo $SCRATCH_MNT/a/b/bar # Make sure all metadata and data are durably persisted. sync # Now remove one of the hard links and fsync the inode. rm -f $SCRATCH_MNT/a/b/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/a/b/foo # Simulate a crash/power loss. This makes sure the next mount # will see an fsync log and will replay that log. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # Remove the last hard link of the file and attempt to remove its parent # directory - this failed in btrfs because the fsync log and replay code # didn't decrement the parent directory's i_size and left dangling directory # index entries - this made the btrfs rmdir implementation always fail with # the error -ENOTEMPTY. 
# # The dangling directory index entries were visible to user space, but it was # impossible to do anything on them (unlink, open, read, write, stat, etc) # because the inode they pointed to did not exist anymore. # # The parent directory's metadata inconsistency (stale index entries) was # also detected by btrfs' fsck tool, which is run automatically by the fstests # framework when the test finishes. The error message reported by fsck was: # # root 5 inode 259 errors 2001, no inode item, link count wrong # unresolved ref dir 258 index 3 namelen 3 name bar filetype 1 errors 4, no inode ref # rm -f $SCRATCH_MNT/a/b/* rmdir $SCRATCH_MNT/a/b rmdir $SCRATCH_MNT/a To fix this just make sure that after an unlink, if the inode is fsync'ed, he parent inode is fully logged in the fsync log. A test case for xfstests follows soon. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 67e5bf709dca..60e1d0083faa 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4273,6 +4273,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, struct dentry *old_parent = NULL; int ret = 0; u64 last_committed = root->fs_info->last_trans_committed; + const struct dentry * const first_parent = parent; + const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans > + last_committed); sb = inode->i_sb; @@ -4328,7 +4331,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, goto end_trans; } - inode_only = LOG_INODE_EXISTS; while (1) { if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) break; @@ -4337,8 +4339,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (root != BTRFS_I(inode)->root) break; + /* + * On unlink we must make sure our immediate parent directory + * inode is fully logged. This is to prevent leaving dangling + * directory index entries and a wrong directory inode's i_size. + * Not doing so can result in a directory being impossible to + * delete after log replay (rmdir will always fail with error + * -ENOTEMPTY). + */ + if (did_unlink && parent == first_parent) + inode_only = LOG_INODE_ALL; + else + inode_only = LOG_INODE_EXISTS; + if (BTRFS_I(inode)->generation > - root->fs_info->last_trans_committed) { + root->fs_info->last_trans_committed || + inode_only == LOG_INODE_ALL) { ret = btrfs_log_inode(trans, root, inode, inode_only, 0, LLONG_MAX, ctx); if (ret) -- cgit v1.2.3 From 2c2c452b0cafdc27442796c526ac929443654b0b Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 13 Jan 2015 16:40:04 +0000 Subject: Btrfs: fix fsync when extend references are added to an inode If we added an extended reference to an inode and fsync'ed it, the log replay code would make our inode have an incorrect link count, which was lower then the expected/correct count. This resulted in stale directory index entries after deleting some of the hard links, and any access to the dangling directory entries resulted in -ESTALE errors because the entries pointed to inode items that don't exist anymore. This is easy to reproduce with the test case I made for xfstests, and the bulk of that test is: _scratch_mkfs "-O extref" >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create a test file with 3001 hard links. This number is large enough to # make btrfs start using extrefs at some point even if the fs has the maximum # possible leaf/node size (64Kb). 
echo "hello world" > $SCRATCH_MNT/foo for i in `seq 1 3000`; do ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_`printf "%04d" $i` done # Make sure all metadata and data are durably persisted. sync # Add one more link to the inode that ends up being a btrfs extref and fsync # the inode. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3001 $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Simulate a crash/power loss. This makes sure the next mount # will see an fsync log and will replay that log. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # Now after the fsync log replay btrfs left our inode with a wrong link count N, # which was smaller than the correct link count M (N < M). # So after removing N hard links, the remaining M - N directory entries were # still visible to user space but it was impossible to do anything with them # because they pointed to an inode that didn't exist anymore. This resulted in # stale file handle errors (-ESTALE) when accessing those dentries for example. # # So remove all hard links except the first one and then attempt to read the # file, to verify we don't get an -ESTALE error when accessing the inodel # # The btrfs fsck tool also detected the incorrect inode link count and it # reported an error message like the following: # # root 5 inode 257 errors 2001, no inode item, link count wrong # unresolved ref dir 256 index 2978 namelen 13 name foo_link_2976 filetype 1 errors 4, no inode ref # # The fstests framework automatically calls fsck after a test is run, so we # don't need to call fsck explicitly here. rm -f $SCRATCH_MNT/foo_link_* cat $SCRATCH_MNT/foo status=0 exit So make sure an fsync always flushes the delayed inode item, so that the fsync log contains it (needed in order to trigger the link count fixup code) and fix the extref counting function, which always return -ENOENT to its caller (and made it assume there were always 0 extrefs). This issue has been present since the introduction of the extrefs feature (2012). A test case for xfstests follows soon. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 60e1d0083faa..533cdb02978a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1288,6 +1288,7 @@ static int count_inode_extrefs(struct btrfs_root *root, leaf = path->nodes[0]; item_size = btrfs_item_size_nr(leaf, path->slots[0]); ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); + cur_offset = 0; while (cur_offset < item_size) { extref = (struct btrfs_inode_extref *) (ptr + cur_offset); @@ -1303,7 +1304,7 @@ static int count_inode_extrefs(struct btrfs_root *root, } btrfs_release_path(path); - if (ret < 0) + if (ret < 0 && ret != -ENOENT) return ret; return nlink; } @@ -1395,9 +1396,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, nlink = ret; ret = count_inode_extrefs(root, inode, path); - if (ret == -ENOENT) - ret = 0; - if (ret < 0) goto out; @@ -3966,15 +3964,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.type = (u8)-1; max_key.offset = (u64)-1; - /* Only run delayed items if we are a dir or a new file */ + /* + * Only run delayed items if we are a dir or a new file. 
+ * Otherwise commit the delayed inode only, which is needed in + * order for the log replay code to mark inodes for link count + * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). + */ if (S_ISDIR(inode->i_mode) || - BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) { + BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) ret = btrfs_commit_inode_delayed_items(trans, inode); - if (ret) { - btrfs_free_path(path); - btrfs_free_path(dst_path); - return ret; - } + else + ret = btrfs_commit_inode_delayed_inode(inode); + + if (ret) { + btrfs_free_path(path); + btrfs_free_path(dst_path); + return ret; } mutex_lock(&BTRFS_I(inode)->log_mutex); -- cgit v1.2.3 From df8d116ffa379f3ef09d7ff28da0f0c921cc9fa1 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 14 Jan 2015 01:52:25 +0000 Subject: Btrfs: fix fsync log replay for inodes with a mix of regular refs and extrefs If we have an inode with a large number of hard links, some of which may be extrefs, turn a regular ref into an extref, fsync the inode and then replay the fsync log (after a crash/reboot), we can endup with an fsync log that makes the replay code always fail with -EOVERFLOW when processing the inode's references. This is easy to reproduce with the test case I made for xfstests. Its steps are the following: _scratch_mkfs "-O extref" >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create a test file with 3001 hard links. This number is large enough to # make btrfs start using extrefs at some point even if the fs has the maximum # possible leaf/node size (64Kb). echo "hello world" > $SCRATCH_MNT/foo for i in `seq 1 3000`; do ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_`printf "%04d" $i` done # Make sure all metadata and data are durably persisted. sync # Now remove one link, add a new one with a new name, add another new one with # the same name as the one we just removed and fsync the inode. rm -f $SCRATCH_MNT/foo_link_0001 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3001 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_0001 rm -f $SCRATCH_MNT/foo_link_0002 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3002 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3003 $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Simulate a crash/power loss. This makes sure the next mount # will see an fsync log and will replay that log. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # Check that the number of hard links is correct, we are able to remove all # the hard links and read the file's data. This is just to verify we don't # get stale file handle errors (due to dangling directory index entries that # point to inodes that no longer exist). echo "Link count: $(stat --format=%h $SCRATCH_MNT/foo)" [ -f $SCRATCH_MNT/foo ] || echo "Link foo is missing" for ((i = 1; i <= 3003; i++)); do name=foo_link_`printf "%04d" $i` if [ $i -eq 2 ]; then [ -f $SCRATCH_MNT/$name ] && echo "Link $name found" else [ -f $SCRATCH_MNT/$name ] || echo "Link $name is missing" fi done rm -f $SCRATCH_MNT/foo_link_* cat $SCRATCH_MNT/foo rm -f $SCRATCH_MNT/foo status=0 exit The fix is simply to correct the overflow condition when overwriting a reference item because it was wrong, trying to increase the item in the fs/subvol tree by an impossible amount. 
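In overwrite_item() that boils down to keeping the path on -EOVERFLOW and treating it like -EEXIST, i.e. resizing the existing item instead of failing. A sketch of the hunk applied below:

	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path, key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		/* ... grow or truncate the item to item_size and overwrite it ... */
	}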
Also ensure that we don't insert one normal ref and one ext ref for the same dentry - this happened because processing a dir index entry from the parent in the log happened when the normal ref item was full, which made the logic insert an extref and later when the normal ref had enough room, it would be inserted again when processing the ref item from the child inode in the log. This issue has been present since the introduction of the extrefs feature (2012). A test case for xfstests follows soon. This test only passes if the previous patch titled "Btrfs: fix fsync when extend references are added to an inode" is applied too. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/inode-item.c | 9 +++++++-- fs/btrfs/tree-log.c | 39 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 8ffa4783cbf4..265e03c73f4d 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -344,6 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; + path->skip_release_on_error = 1; ret = btrfs_insert_empty_item(trans, root, path, &key, ins_len); if (ret == -EEXIST) { @@ -362,8 +363,12 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, ptr = (unsigned long)(ref + 1); ret = 0; } else if (ret < 0) { - if (ret == -EOVERFLOW) - ret = -EMLINK; + if (ret == -EOVERFLOW) { + if (find_name_in_backref(path, name, name_len, &ref)) + ret = -EEXIST; + else + ret = -EMLINK; + } goto out; } else { ref = btrfs_item_ptr(path->nodes[0], path->slots[0], diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 533cdb02978a..a26658756537 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -453,11 +453,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, insert: btrfs_release_path(path); /* try to insert the key into the destination tree */ + path->skip_release_on_error = 1; ret = btrfs_insert_empty_item(trans, root, path, key, item_size); + path->skip_release_on_error = 0; /* make sure any existing item is the correct size */ - if (ret == -EEXIST) { + if (ret == -EEXIST || ret == -EOVERFLOW) { u32 found_size; found_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); @@ -844,7 +846,7 @@ out: static noinline int backref_in_log(struct btrfs_root *log, struct btrfs_key *key, u64 ref_objectid, - char *name, int namelen) + const char *name, int namelen) { struct btrfs_path *path; struct btrfs_inode_ref *ref; @@ -1555,6 +1557,30 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, return ret; } +/* + * Return true if an inode reference exists in the log for the given name, + * inode and parent inode. + */ +static bool name_in_log_ref(struct btrfs_root *log_root, + const char *name, const int name_len, + const u64 dirid, const u64 ino) +{ + struct btrfs_key search_key; + + search_key.objectid = ino; + search_key.type = BTRFS_INODE_REF_KEY; + search_key.offset = dirid; + if (backref_in_log(log_root, &search_key, dirid, name, name_len)) + return true; + + search_key.type = BTRFS_INODE_EXTREF_KEY; + search_key.offset = btrfs_extref_hash(dirid, name, name_len); + if (backref_in_log(log_root, &search_key, dirid, name, name_len)) + return true; + + return false; +} + /* * take a single entry in a log directory item and replay it into * the subvolume. 
@@ -1665,10 +1691,17 @@ out: return ret; insert: + if (name_in_log_ref(root->log_root, name, name_len, + key->objectid, log_key.objectid)) { + /* The dentry will be added later. */ + ret = 0; + update_size = false; + goto out; + } btrfs_release_path(path); ret = insert_one_name(trans, root, path, key->objectid, key->offset, name, name_len, log_type, &log_key); - if (ret && ret != -ENOENT) + if (ret && ret != -ENOENT && ret != -EEXIST) goto out; update_size = false; ret = 0; -- cgit v1.2.3
From e34c330d639177bbb345bf2bde16613b00cc6e6b Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:31 +0800 Subject: Btrfs: fix an out-of-bounds access of raid_map
We add the number of stripes on the target devices into bbio->num_stripes when a device replace is running, but we only sort the raid_map entries of the stripes that are not on the target devices, so when we need the real raid_map we have to skip the stripes that are on the target devices.
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 53575a45f7d1..673e32be88fa 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1299,7 +1299,9 @@ out: static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) { if (raid_map) { - if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE) + int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; + + if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) return 3; else return 2; @@ -1420,7 +1422,8 @@ leave_nomem: scrub_stripe_index_and_offset(logical, raid_map, mapped_length, - bbio->num_stripes, + bbio->num_stripes - + bbio->num_tgtdevs, mirror_index, &stripe_index, &stripe_offset); -- cgit v1.2.3
From cc7539edea6dd02536d56f0a3405b8bb7ae24168 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:32 +0800 Subject: Btrfs: sort raid_map before adding tgtdev stripes
Sorting the raid_map before the target-device stripes are added avoids the complex calculation of the real stripe count inside the sort; moreover, we can remove the code that keeps tgtdev_map sorted, because it will already be in order.
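A minimal userspace sketch of the idea, assuming (for illustration only) that parity slots are marked with large sentinel values and that any replace-target stripes would simply be appended after the sort; the actual patch just moves the sort_parity_stripes() call ahead of the tgtdev handling, as the hunks below show:

    #include <stdint.h>
    #include <stdio.h>

    #define P_SENTINEL (UINT64_MAX - 1)     /* models RAID5_P_STRIPE */
    #define Q_SENTINEL (UINT64_MAX)         /* models RAID6_Q_STRIPE */

    static int parity_smaller(uint64_t a, uint64_t b)
    {
            return a > b;   /* sentinels sort after data stripe starts */
    }

    /* Bubble-sort only the first num_stripes entries; parity ends up last. */
    static void sort_parity(uint64_t *raid_map, int num_stripes)
    {
            int again = 1;

            while (again) {
                    again = 0;
                    for (int i = 0; i < num_stripes - 1; i++) {
                            if (parity_smaller(raid_map[i], raid_map[i + 1])) {
                                    uint64_t t = raid_map[i];

                                    raid_map[i] = raid_map[i + 1];
                                    raid_map[i + 1] = t;
                                    again = 1;
                            }
                    }
            }
    }

    int main(void)
    {
            /* unsorted map of a 4-disk RAID6 full stripe: Q, data, P, data */
            uint64_t raid_map[4] = { Q_SENTINEL, 65536, P_SENTINEL, 0 };
            int num_stripes = 4;

            /*
             * Sort before any target-device stripes are appended, so the
             * sort never needs to know about them and appended entries
             * keep the order in which they were added.
             */
            sort_parity(raid_map, num_stripes);

            for (int i = 0; i < num_stripes; i++)
                    printf("slot %d -> %llu\n", i,
                           (unsigned long long)raid_map[i]);
            return 0;
    }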
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0d9bfebdaa4c..711ce38543a1 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4876,18 +4876,17 @@ static inline int parity_smaller(u64 a, u64 b) } /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ -static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) +static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, + int num_stripes) { struct btrfs_bio_stripe s; - int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; int i; u64 l; int again = 1; - int m; while (again) { again = 0; - for (i = 0; i < real_stripes - 1; i++) { + for (i = 0; i < num_stripes - 1; i++) { if (parity_smaller(raid_map[i], raid_map[i+1])) { s = bbio->stripes[i]; l = raid_map[i]; @@ -4896,13 +4895,6 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) bbio->stripes[i+1] = s; raid_map[i+1] = l; - if (bbio->tgtdev_map) { - m = bbio->tgtdev_map[i]; - bbio->tgtdev_map[i] = - bbio->tgtdev_map[i + 1]; - bbio->tgtdev_map[i + 1] = m; - } - again = 1; } } @@ -5340,6 +5332,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) max_errors = btrfs_chunk_max_errors(map); + if (raid_map) + sort_parity_stripes(bbio, raid_map, num_stripes); + tgtdev_indexes = 0; if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && dev_replace->tgtdev != NULL) { @@ -5443,10 +5438,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio->stripes[0].physical = physical_to_patch_in_first_stripe; bbio->mirror_num = map->num_stripes + 1; } - if (raid_map) { - sort_parity_stripes(bbio, raid_map); + + if (raid_map_ret) *raid_map_ret = raid_map; - } out: if (dev_replace_is_ongoing) btrfs_dev_replace_unlock(dev_replace); -- cgit v1.2.3 From 8e5cfb55d3f7dc764cd7f4c966d4c2687eaf7569 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:33 +0800 Subject: Btrfs: Make raid_map array be inlined in btrfs_bio structure It can make code more simple and clear, we need not care about free bbio and raid_map together. Signed-off-by: Miao Xie Signed-off-by: Zhao Lei Signed-off-by: Chris Mason --- fs/btrfs/raid56.c | 77 ++++++++++++++++++----------------------- fs/btrfs/raid56.h | 11 +++--- fs/btrfs/scrub.c | 31 ++++++----------- fs/btrfs/volumes.c | 100 +++++++++++++++++++++++++---------------------------- fs/btrfs/volumes.h | 11 ++++-- 5 files changed, 105 insertions(+), 125 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 8ab2a17bbba8..e301d3302edf 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -79,13 +79,6 @@ struct btrfs_raid_bio { struct btrfs_fs_info *fs_info; struct btrfs_bio *bbio; - /* - * logical block numbers for the start of each stripe - * The last one or two are p/q. These are sorted, - * so raid_map[0] is the start of our full stripe - */ - u64 *raid_map; - /* while we're doing rmw on a stripe * we put it into a hash table so we can * lock the stripe and merge more rbios @@ -303,7 +296,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) */ static int rbio_bucket(struct btrfs_raid_bio *rbio) { - u64 num = rbio->raid_map[0]; + u64 num = rbio->bbio->raid_map[0]; /* * we shift down quite a bit. 
We're using byte @@ -606,8 +599,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last, test_bit(RBIO_CACHE_BIT, &cur->flags)) return 0; - if (last->raid_map[0] != - cur->raid_map[0]) + if (last->bbio->raid_map[0] != + cur->bbio->raid_map[0]) return 0; /* we can't merge with different operations */ @@ -689,7 +682,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) spin_lock_irqsave(&h->lock, flags); list_for_each_entry(cur, &h->hash_list, hash_list) { walk++; - if (cur->raid_map[0] == rbio->raid_map[0]) { + if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { spin_lock(&cur->bio_list_lock); /* can we steal this cached rbio's pages? */ @@ -842,18 +835,16 @@ done_nolock: } static inline void -__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need) +__free_bbio(struct btrfs_bio *bbio, int need) { - if (need) { - kfree(raid_map); + if (need) kfree(bbio); - } } -static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio) +static inline void free_bbio(struct btrfs_raid_bio *rbio) { - __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map, - !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); + __free_bbio(rbio->bbio, + !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); } static void __free_raid_bio(struct btrfs_raid_bio *rbio) @@ -875,7 +866,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio) } } - free_bbio_and_raid_map(rbio); + free_bbio(rbio); kfree(rbio); } @@ -985,8 +976,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) * this does not allocate any pages for rbio->pages. */ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len) + struct btrfs_bio *bbio, u64 stripe_len) { struct btrfs_raid_bio *rbio; int nr_data = 0; @@ -1007,7 +997,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, INIT_LIST_HEAD(&rbio->stripe_cache); INIT_LIST_HEAD(&rbio->hash_list); rbio->bbio = bbio; - rbio->raid_map = raid_map; rbio->fs_info = root->fs_info; rbio->stripe_len = stripe_len; rbio->nr_pages = num_pages; @@ -1028,7 +1017,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, rbio->bio_pages = p + sizeof(struct page *) * num_pages; rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; - if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) + if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) nr_data = real_stripes - 2; else nr_data = real_stripes - 1; @@ -1182,7 +1171,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) spin_lock_irq(&rbio->bio_list_lock); bio_list_for_each(bio, &rbio->bio_list) { start = (u64)bio->bi_iter.bi_sector << 9; - stripe_offset = start - rbio->raid_map[0]; + stripe_offset = start - rbio->bbio->raid_map[0]; page_index = stripe_offset >> PAGE_CACHE_SHIFT; for (i = 0; i < bio->bi_vcnt; i++) { @@ -1402,7 +1391,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, logical <<= 9; for (i = 0; i < rbio->nr_data; i++) { - stripe_start = rbio->raid_map[i]; + stripe_start = rbio->bbio->raid_map[i]; if (logical >= stripe_start && logical < stripe_start + rbio->stripe_len) { return i; @@ -1776,17 +1765,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) * our main entry point for writes from the rest of the FS. 
*/ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len) + struct btrfs_bio *bbio, u64 stripe_len) { struct btrfs_raid_bio *rbio; struct btrfs_plug_cb *plug = NULL; struct blk_plug_cb *cb; int ret; - rbio = alloc_rbio(root, bbio, raid_map, stripe_len); + rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio_and_raid_map(bbio, raid_map, 1); + __free_bbio(bbio, 1); return PTR_ERR(rbio); } bio_list_add(&rbio->bio_list, bio); @@ -1885,7 +1873,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) } /* all raid6 handling here */ - if (rbio->raid_map[rbio->real_stripes - 1] == + if (rbio->bbio->raid_map[rbio->real_stripes - 1] == RAID6_Q_STRIPE) { /* @@ -1922,8 +1910,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) * here due to a crc mismatch and we can't give them the * data they want */ - if (rbio->raid_map[failb] == RAID6_Q_STRIPE) { - if (rbio->raid_map[faila] == RAID5_P_STRIPE) { + if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { + if (rbio->bbio->raid_map[faila] == + RAID5_P_STRIPE) { err = -EIO; goto cleanup; } @@ -1934,7 +1923,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) goto pstripe; } - if (rbio->raid_map[failb] == RAID5_P_STRIPE) { + if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { raid6_datap_recov(rbio->real_stripes, PAGE_SIZE, faila, pointers); } else { @@ -2156,15 +2145,15 @@ cleanup: * of the drive. */ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, int mirror_num, int generic_io) + struct btrfs_bio *bbio, u64 stripe_len, + int mirror_num, int generic_io) { struct btrfs_raid_bio *rbio; int ret; - rbio = alloc_rbio(root, bbio, raid_map, stripe_len); + rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio_and_raid_map(bbio, raid_map, generic_io); + __free_bbio(bbio, generic_io); return PTR_ERR(rbio); } @@ -2175,7 +2164,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { BUG(); - __free_bbio_and_raid_map(bbio, raid_map, generic_io); + __free_bbio(bbio, generic_io); kfree(rbio); return -EIO; } @@ -2240,14 +2229,14 @@ static void read_rebuild_work(struct btrfs_work *work) struct btrfs_raid_bio * raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, struct btrfs_device *scrub_dev, + struct btrfs_bio *bbio, u64 stripe_len, + struct btrfs_device *scrub_dev, unsigned long *dbitmap, int stripe_nsectors) { struct btrfs_raid_bio *rbio; int i; - rbio = alloc_rbio(root, bbio, raid_map, stripe_len); + rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) return NULL; bio_list_add(&rbio->bio_list, bio); @@ -2279,10 +2268,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, int stripe_offset; int index; - ASSERT(logical >= rbio->raid_map[0]); - ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] + + ASSERT(logical >= rbio->bbio->raid_map[0]); + ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + rbio->stripe_len * rbio->nr_data); - stripe_offset = (int)(logical - rbio->raid_map[0]); + stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); index = stripe_offset >> PAGE_CACHE_SHIFT; rbio->bio_pages[index] = page; } diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h index 31d4a157b5e3..2b5d7977d83b 100644 --- a/fs/btrfs/raid56.h +++ 
b/fs/btrfs/raid56.h @@ -43,16 +43,15 @@ struct btrfs_raid_bio; struct btrfs_device; int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, int mirror_num, int generic_io); + struct btrfs_bio *bbio, u64 stripe_len, + int mirror_num, int generic_io); int raid56_parity_write(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len); + struct btrfs_bio *bbio, u64 stripe_len); struct btrfs_raid_bio * raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, struct btrfs_device *scrub_dev, + struct btrfs_bio *bbio, u64 stripe_len, + struct btrfs_device *scrub_dev, unsigned long *dbitmap, int stripe_nsectors); void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, u64 logical); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 673e32be88fa..9d07c981ec82 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -66,7 +66,6 @@ struct scrub_ctx; struct scrub_recover { atomic_t refs; struct btrfs_bio *bbio; - u64 *raid_map; u64 map_length; }; @@ -857,7 +856,6 @@ static inline void scrub_put_recover(struct scrub_recover *recover) { if (atomic_dec_and_test(&recover->refs)) { kfree(recover->bbio); - kfree(recover->raid_map); kfree(recover); } } @@ -1296,12 +1294,12 @@ out: return 0; } -static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) +static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { - if (raid_map) { + if (bbio->raid_map) { int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; - if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) + if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) return 3; else return 2; @@ -1347,7 +1345,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, { struct scrub_recover *recover; struct btrfs_bio *bbio; - u64 *raid_map; u64 sublen; u64 mapped_length; u64 stripe_offset; @@ -1368,35 +1365,31 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; bbio = NULL; - raid_map = NULL; /* * with a length of PAGE_SIZE, each returned stripe * represents one mirror */ ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, - &mapped_length, &bbio, 0, &raid_map); + &mapped_length, &bbio, 0, 1); if (ret || !bbio || mapped_length < sublen) { kfree(bbio); - kfree(raid_map); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { kfree(bbio); - kfree(raid_map); return -ENOMEM; } atomic_set(&recover->refs, 1); recover->bbio = bbio; - recover->raid_map = raid_map; recover->map_length = mapped_length; BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); - nmirrors = scrub_nr_raid_mirrors(bbio, raid_map); + nmirrors = scrub_nr_raid_mirrors(bbio); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; @@ -1420,7 +1413,7 @@ leave_nomem: sblock->pagev[page_index] = page; page->logical = logical; - scrub_stripe_index_and_offset(logical, raid_map, + scrub_stripe_index_and_offset(logical, bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, @@ -1469,7 +1462,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) static inline int scrub_is_page_on_raid56(struct scrub_page *page) { - return page->recover && page->recover->raid_map; + return page->recover && page->recover->bbio->raid_map; } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info 
*fs_info, @@ -1486,7 +1479,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, bio->bi_end_io = scrub_bio_wait_endio; ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, - page->recover->raid_map, page->recover->map_length, page->mirror_num, 0); if (ret) @@ -2716,7 +2708,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) struct btrfs_raid_bio *rbio; struct scrub_page *spage; struct btrfs_bio *bbio = NULL; - u64 *raid_map = NULL; u64 length; int ret; @@ -2727,8 +2718,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) length = sparity->logic_end - sparity->logic_start + 1; ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, sparity->logic_start, - &length, &bbio, 0, &raid_map); - if (ret || !bbio || !raid_map) + &length, &bbio, 0, 1); + if (ret || !bbio || !bbio->raid_map) goto bbio_out; bio = btrfs_io_bio_alloc(GFP_NOFS, 0); @@ -2740,8 +2731,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) bio->bi_end_io = scrub_parity_bio_endio; rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, - raid_map, length, - sparity->scrub_dev, + length, sparity->scrub_dev, sparity->dbitmap, sparity->nsectors); if (!rbio) @@ -2759,7 +2749,6 @@ rbio_out: bio_put(bio); bbio_out: kfree(bbio); - kfree(raid_map); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); spin_lock(&sctx->stat_lock); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 711ce38543a1..c0f1d524c371 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4876,8 +4876,7 @@ static inline int parity_smaller(u64 a, u64 b) } /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ -static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, - int num_stripes) +static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) { struct btrfs_bio_stripe s; int i; @@ -4887,13 +4886,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, while (again) { again = 0; for (i = 0; i < num_stripes - 1; i++) { - if (parity_smaller(raid_map[i], raid_map[i+1])) { + if (parity_smaller(bbio->raid_map[i], + bbio->raid_map[i+1])) { s = bbio->stripes[i]; - l = raid_map[i]; + l = bbio->raid_map[i]; bbio->stripes[i] = bbio->stripes[i+1]; - raid_map[i] = raid_map[i+1]; + bbio->raid_map[i] = bbio->raid_map[i+1]; bbio->stripes[i+1] = s; - raid_map[i+1] = l; + bbio->raid_map[i+1] = l; again = 1; } @@ -4904,7 +4904,7 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, - int mirror_num, u64 **raid_map_ret) + int mirror_num, int need_raid_map) { struct extent_map *em; struct map_lookup *map; @@ -4917,7 +4917,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 stripe_nr_orig; u64 stripe_nr_end; u64 stripe_len; - u64 *raid_map = NULL; int stripe_index; int i; int ret = 0; @@ -5039,7 +5038,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 physical_of_found = 0; ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, - logical, &tmp_length, &tmp_bbio, 0, NULL); + logical, &tmp_length, &tmp_bbio, 0, 0); if (ret) { WARN_ON(tmp_bbio != NULL); goto out; @@ -5160,13 +5159,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { - u64 tmp; - - if (raid_map_ret && + if 
(need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || mirror_num > 1)) { - int i, rot; - /* push stripe_nr back to the start of the full stripe */ stripe_nr = raid56_full_stripe_start; do_div(stripe_nr, stripe_len * nr_data_stripes(map)); @@ -5175,32 +5170,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, num_stripes = map->num_stripes; max_errors = nr_parity_stripes(map); - raid_map = kmalloc_array(num_stripes, sizeof(u64), - GFP_NOFS); - if (!raid_map) { - ret = -ENOMEM; - goto out; - } - - /* Work out the disk rotation on this stripe-set */ - tmp = stripe_nr; - rot = do_div(tmp, num_stripes); - - /* Fill in the logical address of each stripe */ - tmp = stripe_nr * nr_data_stripes(map); - for (i = 0; i < nr_data_stripes(map); i++) - raid_map[(i+rot) % num_stripes] = - em->start + (tmp + i) * map->stripe_len; - - raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; - if (map->type & BTRFS_BLOCK_GROUP_RAID6) - raid_map[(i+rot+1) % num_stripes] = - RAID6_Q_STRIPE; - *length = map->stripe_len; stripe_index = 0; stripe_offset = 0; } else { + u64 tmp; + /* * Mirror #0 or #1 means the original data block. * Mirror #2 is RAID5 parity block. @@ -5241,7 +5216,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), GFP_NOFS); if (!bbio) { - kfree(raid_map); ret = -ENOMEM; goto out; } @@ -5249,6 +5223,34 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (dev_replace_is_ongoing) bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); + /* build raid_map */ + if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && + need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || + mirror_num > 1)) { + u64 tmp; + int i, rot; + + bbio->raid_map = (u64 *)((void *)bbio->stripes + + sizeof(struct btrfs_bio_stripe) * + num_alloc_stripes + + sizeof(int) * tgtdev_indexes); + + /* Work out the disk rotation on this stripe-set */ + tmp = stripe_nr; + rot = do_div(tmp, num_stripes); + + /* Fill in the logical address of each stripe */ + tmp = stripe_nr * nr_data_stripes(map); + for (i = 0; i < nr_data_stripes(map); i++) + bbio->raid_map[(i+rot) % num_stripes] = + em->start + (tmp + i) * map->stripe_len; + + bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; + if (map->type & BTRFS_BLOCK_GROUP_RAID6) + bbio->raid_map[(i+rot+1) % num_stripes] = + RAID6_Q_STRIPE; + } + if (rw & REQ_DISCARD) { int factor = 0; int sub_stripes = 0; @@ -5332,8 +5334,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) max_errors = btrfs_chunk_max_errors(map); - if (raid_map) - sort_parity_stripes(bbio, raid_map, num_stripes); + if (bbio->raid_map) + sort_parity_stripes(bbio, num_stripes); tgtdev_indexes = 0; if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && @@ -5438,9 +5440,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio->stripes[0].physical = physical_to_patch_in_first_stripe; bbio->mirror_num = map->num_stripes + 1; } - - if (raid_map_ret) - *raid_map_ret = raid_map; out: if (dev_replace_is_ongoing) btrfs_dev_replace_unlock(dev_replace); @@ -5453,17 +5452,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, struct btrfs_bio **bbio_ret, int mirror_num) { return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, - mirror_num, NULL); + mirror_num, 0); } /* For Scrub/replace */ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, 
u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num, - u64 **raid_map_ret) + int need_raid_map) { return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, - mirror_num, raid_map_ret); + mirror_num, need_raid_map); } int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, @@ -5802,7 +5801,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, u64 logical = (u64)bio->bi_iter.bi_sector << 9; u64 length = 0; u64 map_length; - u64 *raid_map = NULL; int ret; int dev_nr = 0; int total_devs = 1; @@ -5813,7 +5811,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, btrfs_bio_counter_inc_blocked(root->fs_info); ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, - mirror_num, &raid_map); + mirror_num, 1); if (ret) { btrfs_bio_counter_dec(root->fs_info); return ret; @@ -5826,15 +5824,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, bbio->fs_info = root->fs_info; atomic_set(&bbio->stripes_pending, bbio->num_stripes); - if (raid_map) { + if (bbio->raid_map) { /* In this case, map_length has been set to the length of a single stripe; not the whole write */ if (rw & WRITE) { - ret = raid56_parity_write(root, bio, bbio, - raid_map, map_length); + ret = raid56_parity_write(root, bio, bbio, map_length); } else { - ret = raid56_parity_recover(root, bio, bbio, - raid_map, map_length, + ret = raid56_parity_recover(root, bio, bbio, map_length, mirror_num, 1); } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index d6fe73c0f4a2..fb0e8c3f296e 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -307,6 +307,12 @@ struct btrfs_bio { int mirror_num; int num_tgtdevs; int *tgtdev_map; + /* + * logical block numbers for the start of each stripe + * The last one or two are p/q. These are sorted, + * so raid_map[0] is the start of our full stripe + */ + u64 *raid_map; struct btrfs_bio_stripe stripes[]; }; @@ -392,7 +398,8 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, #define btrfs_bio_size(total_stripes, real_stripes) \ (sizeof(struct btrfs_bio) + \ (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ - (sizeof(int) * (real_stripes))) + (sizeof(int) * (real_stripes)) + \ + (sizeof(u64) * (real_stripes))) int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, @@ -400,7 +407,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num, - u64 **raid_map_ret); + int need_raid_map); int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, u64 chunk_start, u64 physical, u64 devid, u64 **logical, int *naddrs, int *stripe_len); -- cgit v1.2.3 From 6e9606d2a2dce098c1739fb3cd82a1c34fd73d3a Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:34 +0800 Subject: Btrfs: add ref_count and free function for btrfs_bio 1: ref_count is simple than current RBIO_HOLD_BBIO_MAP_BIT flag to keep btrfs_bio's memory in raid56 recovery implement. 2: free function for bbio will make code clean and flexible, plus forced data type checking in compile. 
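As a loose userspace model of the lifetime rule this introduces (the names, the atomic type and the allocator below are illustrative stand-ins, not the kernel API):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    /* Loose model of a refcounted bbio. */
    struct bbio {
            atomic_int refs;
            /* ... stripes, raid_map, tgtdev_map would follow ... */
    };

    static struct bbio *bbio_alloc(void)
    {
            struct bbio *b = calloc(1, sizeof(*b));

            if (b)
                    atomic_store(&b->refs, 1);      /* caller owns one ref */
            return b;
    }

    static void bbio_get(struct bbio *b)
    {
            assert(atomic_load(&b->refs) > 0);
            atomic_fetch_add(&b->refs, 1);
    }

    static void bbio_put(struct bbio *b)
    {
            if (!b)
                    return;                         /* tolerate NULL like kfree() */
            if (atomic_fetch_sub(&b->refs, 1) == 1)
                    free(b);                        /* last reference frees it */
    }

    int main(void)
    {
            struct bbio *b = bbio_alloc();

            if (!b)
                    return 1;
            bbio_get(b);    /* e.g. raid56 recovery keeping the bbio alive */
            bbio_put(b);    /* submitter drops its reference */
            bbio_put(b);    /* last user frees the structure */
            return 0;
    }

With this, a caller that needs to keep the bbio around can simply take an extra reference instead of flagging the rbio with RBIO_HOLD_BBIO_MAP_BIT, which is exactly what the raid56.c hunks below switch to.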
Changelog v1->v2: Rename following by David Sterba's suggestion: put_btrfs_bio() -> btrfs_put_bio() get_btrfs_bio() -> btrfs_get_bio() bbio->ref_count -> bbio->refs Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/extent_io.c | 2 +- fs/btrfs/raid56.c | 38 ++++++++------------------------------ fs/btrfs/reada.c | 4 ++-- fs/btrfs/scrub.c | 12 ++++++------ fs/btrfs/volumes.c | 43 ++++++++++++++++++++++++++++++++++++------- fs/btrfs/volumes.h | 10 +++------- 7 files changed, 57 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3af53f8313af..1d361ac9abc9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1926,7 +1926,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, */ ret = 0; } - kfree(bbio); + btrfs_put_bbio(bbio); } if (actual_bytes) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 65bd285ef361..dab8af4450e1 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2058,7 +2058,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, sector = bbio->stripes[mirror_num-1].physical >> 9; bio->bi_iter.bi_sector = sector; dev = bbio->stripes[mirror_num-1].dev; - kfree(bbio); + btrfs_put_bbio(bbio); if (!dev || !dev->bdev || !dev->writeable) { bio_put(bio); return -EIO; diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index e301d3302edf..cbc416204452 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -58,15 +58,6 @@ */ #define RBIO_CACHE_READY_BIT 3 -/* - * bbio and raid_map is managed by the caller, so we shouldn't free - * them here. And besides that, all rbios with this flag should not - * be cached, because we need raid_map to check the rbios' stripe - * is the same or not, but it is very likely that the caller has - * free raid_map, so don't cache those rbios. 
- */ -#define RBIO_HOLD_BBIO_MAP_BIT 4 - #define RBIO_CACHE_SIZE 1024 enum btrfs_rbio_ops { @@ -834,19 +825,6 @@ done_nolock: remove_rbio_from_cache(rbio); } -static inline void -__free_bbio(struct btrfs_bio *bbio, int need) -{ - if (need) - kfree(bbio); -} - -static inline void free_bbio(struct btrfs_raid_bio *rbio) -{ - __free_bbio(rbio->bbio, - !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); -} - static void __free_raid_bio(struct btrfs_raid_bio *rbio) { int i; @@ -866,8 +844,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio) } } - free_bbio(rbio); - + btrfs_put_bbio(rbio->bbio); kfree(rbio); } @@ -1774,7 +1751,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio(bbio, 1); + btrfs_put_bbio(bbio); return PTR_ERR(rbio); } bio_list_add(&rbio->bio_list, bio); @@ -1990,8 +1967,7 @@ cleanup: cleanup_io: if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { - if (err == 0 && - !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)) + if (err == 0) cache_rbio_pages(rbio); else clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); @@ -2153,7 +2129,8 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio(bbio, generic_io); + if (generic_io) + btrfs_put_bbio(bbio); return PTR_ERR(rbio); } @@ -2164,7 +2141,8 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { BUG(); - __free_bbio(bbio, generic_io); + if (generic_io) + btrfs_put_bbio(bbio); kfree(rbio); return -EIO; } @@ -2173,7 +2151,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, btrfs_bio_counter_inc_noblocked(root->fs_info); rbio->generic_bio_cnt = 1; } else { - set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags); + btrfs_get_bbio(bbio); } /* diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 4d3d4e5287c5..0e7beea92b4c 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -461,7 +461,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, spin_unlock(&fs_info->reada_lock); btrfs_dev_replace_unlock(&fs_info->dev_replace); - kfree(bbio); + btrfs_put_bbio(bbio); return re; error: @@ -486,7 +486,7 @@ error: kref_put(&zone->refcnt, reada_zone_release); spin_unlock(&fs_info->reada_lock); } - kfree(bbio); + btrfs_put_bbio(bbio); kfree(re); return re_exist; } diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9d07c981ec82..1388e7127eea 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -855,7 +855,7 @@ static inline void scrub_get_recover(struct scrub_recover *recover) static inline void scrub_put_recover(struct scrub_recover *recover) { if (atomic_dec_and_test(&recover->refs)) { - kfree(recover->bbio); + btrfs_put_bbio(recover->bbio); kfree(recover); } } @@ -1373,13 +1373,13 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &mapped_length, &bbio, 0, 1); if (ret || !bbio || mapped_length < sublen) { - kfree(bbio); + btrfs_put_bbio(bbio); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { - kfree(bbio); + btrfs_put_bbio(bbio); return -ENOMEM; } @@ -2748,7 +2748,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) rbio_out: bio_put(bio); bbio_out: - kfree(bbio); + btrfs_put_bbio(bbio); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); 
spin_lock(&sctx->stat_lock); @@ -3879,14 +3879,14 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info, &mapped_length, &bbio, 0); if (ret || !bbio || mapped_length < extent_len || !bbio->stripes[0].dev->bdev) { - kfree(bbio); + btrfs_put_bbio(bbio); return; } *extent_physical = bbio->stripes[0].physical; *extent_mirror_num = bbio->mirror_num; *extent_dev = bbio->stripes[0].dev; - kfree(bbio); + btrfs_put_bbio(bbio); } static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c0f1d524c371..0843bcd6304c 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4901,6 +4901,37 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) } } +static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) +{ + struct btrfs_bio *bbio = kzalloc( + sizeof(struct btrfs_bio) + + sizeof(struct btrfs_bio_stripe) * (total_stripes) + + sizeof(int) * (real_stripes) + + sizeof(u64) * (real_stripes), + GFP_NOFS); + if (!bbio) + return NULL; + + atomic_set(&bbio->error, 0); + atomic_set(&bbio->refs, 1); + + return bbio; +} + +void btrfs_get_bbio(struct btrfs_bio *bbio) +{ + WARN_ON(!atomic_read(&bbio->refs)); + atomic_inc(&bbio->refs); +} + +void btrfs_put_bbio(struct btrfs_bio *bbio) +{ + if (!bbio) + return; + if (atomic_dec_and_test(&bbio->refs)) + kfree(bbio); +} + static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, @@ -5052,7 +5083,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, * is not left of the left cursor */ ret = -EIO; - kfree(tmp_bbio); + btrfs_put_bbio(tmp_bbio); goto out; } @@ -5087,11 +5118,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, } else { WARN_ON(1); ret = -EIO; - kfree(tmp_bbio); + btrfs_put_bbio(tmp_bbio); goto out; } - kfree(tmp_bbio); + btrfs_put_bbio(tmp_bbio); } else if (mirror_num > map->num_stripes) { mirror_num = 0; } @@ -5213,13 +5244,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, tgtdev_indexes = num_stripes; } - bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), - GFP_NOFS); + bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); if (!bbio) { ret = -ENOMEM; goto out; } - atomic_set(&bbio->error, 0); if (dev_replace_is_ongoing) bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); @@ -5558,7 +5587,7 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e bio_endio_nodec(bio, err); else bio_endio(bio, err); - kfree(bbio); + btrfs_put_bbio(bbio); } static void btrfs_end_bio(struct bio *bio, int err) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index fb0e8c3f296e..4313e8ff91e5 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -295,6 +295,7 @@ typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err); #define BTRFS_BIO_ORIG_BIO_SUBMITTED (1 << 0) struct btrfs_bio { + atomic_t refs; atomic_t stripes_pending; struct btrfs_fs_info *fs_info; bio_end_io_t *end_io; @@ -394,13 +395,8 @@ struct btrfs_balance_control { int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, u64 end, u64 *length); - -#define btrfs_bio_size(total_stripes, real_stripes) \ - (sizeof(struct btrfs_bio) + \ - (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ - (sizeof(int) * (real_stripes)) + \ - (sizeof(u64) * (real_stripes))) - +void btrfs_get_bbio(struct btrfs_bio *bbio); +void btrfs_put_bbio(struct btrfs_bio *bbio); int btrfs_map_block(struct 
btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num); -- cgit v1.2.3 From b25c94c580c27bb6cff1e2f478746e77119215ad Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:35 +0800 Subject: Btrfs: Fix a jump typo of nodatasum_case to avoid wrong WARN_ON() if (sctx->is_dev_replace && !is_metadata && !have_csum) { ... goto nodatasum_case; } ... nodatasum_case: WARN_ON(sctx->is_dev_replace); In above code, nodatasum_case marker should be moved after WARN_ON(). Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 1388e7127eea..016512c590ea 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1036,9 +1036,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) if (!is_metadata && !have_csum) { struct scrub_fixup_nodatasum *fixup_nodatasum; -nodatasum_case: WARN_ON(sctx->is_dev_replace); +nodatasum_case: + /* * !is_metadata and !have_csum, this means that the data * might not be COW'ed, that it might be modified -- cgit v1.2.3 From 114ab50d823c2cc7cf60658fdbdaaba413009921 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:36 +0800 Subject: Btrfs: Remove noneed force_write in scrub_write_block_to_dev_replace It is always 1 in this place, because !1 case was already jumped out in previous code. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 016512c590ea..f9e108bdfc45 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -250,8 +250,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, const u8 *csum, u64 generation, u16 csum_size); static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, - struct scrub_block *sblock_good, - int force_write); + struct scrub_block *sblock_good); static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write); @@ -1098,15 +1097,13 @@ nodatasum_case: sblock_other->no_io_error_seen) { if (sctx->is_dev_replace) { scrub_write_block_to_dev_replace(sblock_other); + goto corrected_error; } else { - int force_write = is_metadata || have_csum; - ret = scrub_repair_block_from_good_copy( - sblock_bad, sblock_other, - force_write); + sblock_bad, sblock_other); + if (!ret) + goto corrected_error; } - if (0 == ret) - goto corrected_error; } } @@ -1619,8 +1616,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, } static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, - struct scrub_block *sblock_good, - int force_write) + struct scrub_block *sblock_good) { int page_num; int ret = 0; @@ -1630,8 +1626,7 @@ static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, ret_sub = scrub_repair_page_from_good_copy(sblock_bad, sblock_good, - page_num, - force_write); + page_num, 1); if (ret_sub) ret = ret_sub; } -- cgit v1.2.3 From 09dd7a01c3436344fb4c3974b275e016264e0a27 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:37 +0800 Subject: Btrfs: Cleanup btrfs_bio_counter_inc_blocked() 1: Remove no-need DEFINE_WAIT(wait) 2: Add likely() for BTRFS_FS_STATE_DEV_REPLACING condition 3: Use while loop instead of goto 
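The same retry shape outside the kernel, as a sketch only (the counter, the flag and the wait helper are trivial stand-ins for the bio counter, the DEV_REPLACING bit and replace_wait):

    #include <stdio.h>

    static long bio_counter;
    static int dev_replacing = 1;   /* pretend a replace is running at first */

    static void wait_for_replace_to_finish(void)
    {
            /* stand-in for wait_event(); here the "event" just happens */
            dev_replacing = 0;
    }

    /* goto-based form the patch removes ... */
    static void counter_inc_blocked_goto(void)
    {
    again:
            bio_counter++;
            if (dev_replacing) {
                    bio_counter--;
                    wait_for_replace_to_finish();
                    goto again;
            }
    }

    /* ... and the equivalent while (1) form it switches to */
    static void counter_inc_blocked_loop(void)
    {
            while (1) {
                    bio_counter++;
                    if (!dev_replacing)     /* the common, likely() case */
                            break;
                    bio_counter--;
                    wait_for_replace_to_finish();
            }
    }

    int main(void)
    {
            counter_inc_blocked_loop();
            printf("counter = %ld, replacing = %d\n", bio_counter, dev_replacing);

            dev_replacing = 1;
            counter_inc_blocked_goto();
            printf("counter = %ld, replacing = %d\n", bio_counter, dev_replacing);
            return 0;
    }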
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/dev-replace.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index ca6a3a3b6b6c..92109b7c865c 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -932,15 +932,15 @@ void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount) void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info) { - DEFINE_WAIT(wait); -again: - percpu_counter_inc(&fs_info->bio_counter); - if (test_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state)) { + while (1) { + percpu_counter_inc(&fs_info->bio_counter); + if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING, + &fs_info->fs_state))) + break; + btrfs_bio_counter_dec(fs_info); wait_event(fs_info->replace_wait, !test_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state)); - goto again; } - } -- cgit v1.2.3
From 7653947fe6d59db72fbc26c4fc9ef842febc79e3 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:38 +0800 Subject: Btrfs: btrfs_rm_dev_replace_blocked(): Use wait_event()
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/dev-replace.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 92109b7c865c..5ec03d999c37 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -440,18 +440,9 @@ leave: */ static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info) { - s64 writers; - DEFINE_WAIT(wait); - set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state); - do { - prepare_to_wait(&fs_info->replace_wait, &wait, - TASK_UNINTERRUPTIBLE); - writers = percpu_counter_sum(&fs_info->bio_counter); - if (writers) - schedule(); - finish_wait(&fs_info->replace_wait, &wait); - } while (writers); + wait_event(fs_info->replace_wait, !percpu_counter_sum( + &fs_info->bio_counter)); } /* -- cgit v1.2.3
From dc5f7a3bd820883793bb0d9789e938a34aa4da5f Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:39 +0800 Subject: Btrfs: Break loop when reaching BTRFS_MAX_MIRRORS in scrub_setup_recheck_block()
Using break instead of letting a useless loop keep running is more suitable in this case.
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f9e108bdfc45..68867b2e4620 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1394,7 +1394,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, struct scrub_page *page; if (mirror_index >= BTRFS_MAX_MIRRORS) - continue; + break; sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; -- cgit v1.2.3
From 8d6738c1bd74a27ff6a5043c5211c7bff7745420 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:40 +0800 Subject: Btrfs: Separate the finding-right-mirror and writing-to-target steps in scrub_handle_errored_block()
In the current code, the logic for finding the right mirror and for writing to the target is mixed together: if we find a right mirror but fail to write it to the target, this is treated as "hadn't found the right block" and the target is filled from sblock_bad. A failed write to the target does not mean that the source block is wrong, so this patch separates the two conditions in the logic and does some cleanup to keep the code clean.
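A simplified sketch of the separated logic, using plain arrays instead of scrub blocks (the structures, the error table and the write helper are assumptions for illustration, not the scrub code itself):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_PAGES   4
    #define NR_MIRRORS 3

    /* io_error[mirror][page]: true if that copy of the page failed to read;
     * row 0 is the "bad" block being repaired */
    static bool io_error[NR_MIRRORS][NR_PAGES] = {
            { false, true,  false, true  },
            { false, true,  false, false },
            { false, false, true,  false },
    };

    /* stand-in for the page write to the replace target; pretend it works */
    static int write_page_to_target(int mirror, int page)
    {
            printf("write page %d from copy %d\n", page, mirror);
            return 0;
    }

    int main(void)
    {
            bool success = true;

            for (int page = 0; page < NR_PAGES; page++) {
                    int src = 0;    /* default: the block being checked */

                    /* step 1: if this copy is bad, look for a good mirror */
                    if (io_error[0][page]) {
                            src = -1;
                            for (int m = 1; m < NR_MIRRORS; m++) {
                                    if (!io_error[m][page]) {
                                            src = m;
                                            break;
                                    }
                            }
                            if (src < 0) {
                                    success = false; /* no readable copy */
                                    src = 0;         /* target gets bad data */
                            }
                    }

                    /* step 2: write to the replace target; a write failure
                     * is counted on its own and does not claim the source
                     * block was wrong */
                    if (write_page_to_target(src, page))
                            success = false;
            }

            printf("scrub %s\n", success ? "corrected the block" : "left errors");
            return 0;
    }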
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 44 +++++++++++++++++--------------------------- 1 file changed, 17 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 68867b2e4620..c40ffbc9581f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1114,35 +1114,21 @@ nodatasum_case: success = 1; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { - int sub_success; + struct scrub_block *sblock_other = NULL; - sub_success = 0; for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS && sblocks_for_recheck[mirror_index].page_count > 0; mirror_index++) { - struct scrub_block *sblock_other = - sblocks_for_recheck + mirror_index; - struct scrub_page *page_other = - sblock_other->pagev[page_num]; - - if (!page_other->io_error) { - ret = scrub_write_page_to_dev_replace( - sblock_other, page_num); - if (ret == 0) { - /* succeeded for this page */ - sub_success = 1; - break; - } else { - btrfs_dev_replace_stats_inc( - &sctx->dev_root-> - fs_info->dev_replace. - num_write_errors); - } + if (!sblocks_for_recheck[mirror_index]. + pagev[page_num]->io_error) { + sblock_other = sblocks_for_recheck + + mirror_index; + break; } } - if (!sub_success) { + if (!sblock_other) { /* * did not find a mirror to fetch the page * from. scrub_write_page_to_dev_replace() @@ -1150,13 +1136,17 @@ nodatasum_case: * filling the block with zeros before * submitting the write request */ + sblock_other = sblock_bad; + success = 0; + } + + if (scrub_write_page_to_dev_replace(sblock_other, + page_num) != 0) { + btrfs_dev_replace_stats_inc( + &sctx->dev_root-> + fs_info->dev_replace. + num_write_errors); success = 0; - ret = scrub_write_page_to_dev_replace( - sblock_bad, page_num); - if (ret) - btrfs_dev_replace_stats_inc( - &sctx->dev_root->fs_info-> - dev_replace.num_write_errors); } } -- cgit v1.2.3 From b968fed1c3810a0a0d575cab8a72431fbc807615 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:41 +0800 Subject: Btrfs: Combine per-page recover in dev-replace and scrub The code are similar, combine them to make code clean and easy to maintenance. Some lost condition are also completed with benefit of this combination. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 120 ++++++++++++++++++++++--------------------------------- 1 file changed, 48 insertions(+), 72 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index c40ffbc9581f..aac40aefa552 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1107,54 +1107,10 @@ nodatasum_case: } } - /* - * for dev_replace, pick good pages and write to the target device. - */ - if (sctx->is_dev_replace) { - success = 1; - for (page_num = 0; page_num < sblock_bad->page_count; - page_num++) { - struct scrub_block *sblock_other = NULL; - - for (mirror_index = 0; - mirror_index < BTRFS_MAX_MIRRORS && - sblocks_for_recheck[mirror_index].page_count > 0; - mirror_index++) { - if (!sblocks_for_recheck[mirror_index]. - pagev[page_num]->io_error) { - sblock_other = sblocks_for_recheck + - mirror_index; - break; - } - } - - if (!sblock_other) { - /* - * did not find a mirror to fetch the page - * from. 
scrub_write_page_to_dev_replace() - * handles this case (page->io_error), by - * filling the block with zeros before - * submitting the write request - */ - sblock_other = sblock_bad; - success = 0; - } - - if (scrub_write_page_to_dev_replace(sblock_other, - page_num) != 0) { - btrfs_dev_replace_stats_inc( - &sctx->dev_root-> - fs_info->dev_replace. - num_write_errors); - success = 0; - } - } - - goto out; - } + if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) + goto did_not_correct_error; /* - * for regular scrub, repair those pages that are errored. * In case of I/O errors in the area that is supposed to be * repaired, continue by picking good copies of those pages. * Select the good pages from mirrors to rewrite bad pages from @@ -1178,44 +1134,64 @@ nodatasum_case: * mirror, even if other 512 byte sectors in the same PAGE_SIZE * area are unreadable. */ - - /* can only fix I/O errors from here on */ - if (sblock_bad->no_io_error_seen) - goto did_not_correct_error; - success = 1; - for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { + for (page_num = 0; page_num < sblock_bad->page_count; + page_num++) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; + struct scrub_block *sblock_other = NULL; - if (!page_bad->io_error) + /* skip no-io-error page in scrub */ + if (!page_bad->io_error && !sctx->is_dev_replace) continue; - for (mirror_index = 0; - mirror_index < BTRFS_MAX_MIRRORS && - sblocks_for_recheck[mirror_index].page_count > 0; - mirror_index++) { - struct scrub_block *sblock_other = sblocks_for_recheck + - mirror_index; - struct scrub_page *page_other = sblock_other->pagev[ - page_num]; - - if (!page_other->io_error) { - ret = scrub_repair_page_from_good_copy( - sblock_bad, sblock_other, page_num, 0); - if (0 == ret) { - page_bad->io_error = 0; - break; /* succeeded for this page */ + /* try to find no-io-error page in mirrors */ + if (page_bad->io_error) { + for (mirror_index = 0; + mirror_index < BTRFS_MAX_MIRRORS && + sblocks_for_recheck[mirror_index].page_count > 0; + mirror_index++) { + if (!sblocks_for_recheck[mirror_index]. + pagev[page_num]->io_error) { + sblock_other = sblocks_for_recheck + + mirror_index; + break; } } + if (!sblock_other) + success = 0; } - if (page_bad->io_error) { - /* did not find a mirror to copy the page from */ - success = 0; + if (sctx->is_dev_replace) { + /* + * did not find a mirror to fetch the page + * from. scrub_write_page_to_dev_replace() + * handles this case (page->io_error), by + * filling the block with zeros before + * submitting the write request + */ + if (!sblock_other) + sblock_other = sblock_bad; + + if (scrub_write_page_to_dev_replace(sblock_other, + page_num) != 0) { + btrfs_dev_replace_stats_inc( + &sctx->dev_root-> + fs_info->dev_replace. + num_write_errors); + success = 0; + } + } else if (sblock_other) { + ret = scrub_repair_page_from_good_copy(sblock_bad, + sblock_other, + page_num, 0); + if (0 == ret) + page_bad->io_error = 0; + else + success = 0; } } - if (success) { + if (success && !sctx->is_dev_replace) { if (is_metadata || have_csum) { /* * need to verify the checksum now that all -- cgit v1.2.3 From be50a8ddaae1d07135fd7e1c7017c1611075a560 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:42 +0800 Subject: Btrfs: Simplify scrub_setup_recheck_block()'s argument scrub_setup_recheck_block() have many arguments but most of them can be get from one of them, we can remove them to make code clean. Some other cleanup for that function also included in this patch. 
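A generic sketch of the simplification (the structures below are invented for illustration and 4096 stands in for PAGE_SIZE): instead of passing values the callee can derive, hand it the one object they all hang off.

    #include <stdio.h>

    struct scrub_ctx {
            const char *dev_name;
    };

    struct scrub_page {
            unsigned long long logical;
    };

    struct scrub_block {
            struct scrub_ctx *sctx;
            struct scrub_page *pagev[16];
            int page_count;
    };

    /* old shape: setup_recheck(sctx, fs_info, block, length, logical, ...)
     * new shape: one argument, the rest is derived inside */
    static int setup_recheck(struct scrub_block *original)
    {
            struct scrub_ctx *sctx = original->sctx;
            unsigned long long length = original->page_count * 4096ULL;
            unsigned long long logical = original->pagev[0]->logical;

            printf("dev %s: recheck %llu bytes at %llu\n",
                   sctx->dev_name, length, logical);
            return 0;
    }

    int main(void)
    {
            struct scrub_ctx ctx = { .dev_name = "sdb" };
            struct scrub_page page = { .logical = 1048576 };
            struct scrub_block block = {
                    .sctx = &ctx, .pagev = { &page }, .page_count = 1,
            };

            return setup_recheck(&block);
    }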
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index aac40aefa552..c3a98931980f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -235,10 +235,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx); static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx); static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx); static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); -static int scrub_setup_recheck_block(struct scrub_ctx *sctx, - struct btrfs_fs_info *fs_info, - struct scrub_block *original_sblock, - u64 length, u64 logical, +static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck); static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int is_metadata, @@ -960,8 +957,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) } /* setup the context, map the logical blocks and alloc the pages */ - ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length, - logical, sblocks_for_recheck); + ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); if (ret) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; @@ -1301,19 +1297,20 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, } } -static int scrub_setup_recheck_block(struct scrub_ctx *sctx, - struct btrfs_fs_info *fs_info, - struct scrub_block *original_sblock, - u64 length, u64 logical, +static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck) { + struct scrub_ctx *sctx = original_sblock->sctx; + struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; + u64 length = original_sblock->page_count * PAGE_SIZE; + u64 logical = original_sblock->pagev[0]->logical; struct scrub_recover *recover; struct btrfs_bio *bbio; u64 sublen; u64 mapped_length; u64 stripe_offset; int stripe_index; - int page_index; + int page_index = 0; int mirror_index; int nmirrors; int ret; @@ -1324,7 +1321,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, * the recheck procedure */ - page_index = 0; while (length > 0) { sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; @@ -1353,15 +1349,12 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); - nmirrors = scrub_nr_raid_mirrors(bbio); + nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; struct scrub_page *page; - if (mirror_index >= BTRFS_MAX_MIRRORS) - break; - sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; page = kzalloc(sizeof(*page), GFP_NOFS); -- cgit v1.2.3 From 10f11900167a83e0c229c4c27e73e720ebd55b5c Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:43 +0800 Subject: Btrfs: Include map_type in raid_bio Corrent code use many kinds of "clever" way to determine operation target's raid type, as: raid_map != NULL or raid_map[MAX_NR] == RAID[56]_Q_STRIPE To make code easy to maintenance, this patch put raid type into bbio, and we can always get raid type from bbio with a "stupid" way. 
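A small sketch of the direct check this enables, with the block-group flags reduced to two illustrative bits (the mirror counts of 2 and 3 follow the scrub_nr_raid_mirrors() change in the diff below):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative flag bits standing in for the block group type */
    #define GROUP_RAID5 (1ULL << 0)
    #define GROUP_RAID6 (1ULL << 1)

    struct bbio {
            uint64_t map_type;      /* copied from the chunk mapping */
            int num_stripes;
    };

    /*
     * With the raid profile carried in the bbio, the number of "mirrors" a
     * recheck can try follows directly from the type, instead of being
     * inferred from whether raid_map exists or what its last entry holds.
     */
    static int nr_raid_mirrors(const struct bbio *bbio)
    {
            if (bbio->map_type & GROUP_RAID5)
                    return 2;       /* data plus one parity reconstruction */
            if (bbio->map_type & GROUP_RAID6)
                    return 3;       /* data plus P or Q reconstruction */
            return bbio->num_stripes;
    }

    int main(void)
    {
            struct bbio raid6 = { .map_type = GROUP_RAID6, .num_stripes = 6 };
            struct bbio plain = { .map_type = 0, .num_stripes = 2 };

            printf("raid6: %d mirrors, plain: %d mirrors\n",
                   nr_raid_mirrors(&raid6), nr_raid_mirrors(&plain));
            return 0;
    }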
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/raid56.c | 10 +++++----- fs/btrfs/scrub.c | 28 +++++++++++++++------------- fs/btrfs/volumes.c | 1 + fs/btrfs/volumes.h | 1 + 4 files changed, 22 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index cbc416204452..5264858ed768 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -994,10 +994,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, rbio->bio_pages = p + sizeof(struct page *) * num_pages; rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; - if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) + if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) + nr_data = real_stripes - 1; + else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) nr_data = real_stripes - 2; else - nr_data = real_stripes - 1; + BUG(); rbio->nr_data = nr_data; return rbio; @@ -1850,9 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) } /* all raid6 handling here */ - if (rbio->bbio->raid_map[rbio->real_stripes - 1] == - RAID6_Q_STRIPE) { - + if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { /* * single failure, rebuild from parity raid5 * style diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index c3a98931980f..19781e924443 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1256,19 +1256,16 @@ out: static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { - if (bbio->raid_map) { - int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; - - if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) - return 3; - else - return 2; - } else { + if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) + return 2; + else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) + return 3; + else return (int)bbio->num_stripes; - } } -static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, +static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, + u64 *raid_map, u64 mapped_length, int nstripes, int mirror, int *stripe_index, @@ -1276,7 +1273,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, { int i; - if (raid_map) { + if (map_type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || @@ -1350,6 +1347,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock, BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); + for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; @@ -1370,7 +1368,9 @@ leave_nomem: sblock->pagev[page_index] = page; page->logical = logical; - scrub_stripe_index_and_offset(logical, bbio->raid_map, + scrub_stripe_index_and_offset(logical, + bbio->map_type, + bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, @@ -1419,7 +1419,9 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) static inline int scrub_is_page_on_raid56(struct scrub_page *page) { - return page->recover && page->recover->bbio->raid_map; + return page->recover && + (page->recover->bbio->map_type & (BTRFS_BLOCK_GROUP_RAID5 | + BTRFS_BLOCK_GROUP_RAID6)); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0843bcd6304c..8933d70c3d53 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5453,6 +5453,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, } 
*bbio_ret = bbio; + bbio->map_type = map->type; bbio->num_stripes = num_stripes; bbio->max_errors = max_errors; bbio->mirror_num = mirror_num; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 4313e8ff91e5..83069dec6898 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -298,6 +298,7 @@ struct btrfs_bio { atomic_t refs; atomic_t stripes_pending; struct btrfs_fs_info *fs_info; + u64 map_type; /* get from map_lookup->type */ bio_end_io_t *end_io; struct bio *orig_bio; unsigned long flags; -- cgit v1.2.3 From ffe2d2034bbb34f49f76c808550fdfbea2ea1659 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:44 +0800 Subject: Btrfs: Introduce BTRFS_BLOCK_GROUP_RAID56_MASK to check raid56 simply So we can check raid56 with: (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) instead of long: (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 3 +++ fs/btrfs/inode.c | 3 +-- fs/btrfs/scrub.c | 17 ++++++----------- fs/btrfs/volumes.c | 24 +++++++++--------------- 4 files changed, 19 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0b4683f560c8..2ecdac0bd850 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1020,6 +1020,9 @@ enum btrfs_raid_types { BTRFS_BLOCK_GROUP_RAID6 | \ BTRFS_BLOCK_GROUP_DUP | \ BTRFS_BLOCK_GROUP_RAID10) +#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \ + BTRFS_BLOCK_GROUP_RAID6) + /* * We need a bit for restriper to be able to tell when chunks of type * SINGLE are available. This "extended" profile format is used in diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 220f0c3f2df6..2d14acbdae47 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7812,8 +7812,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, } /* async crcs make it difficult to collect full stripe writes. 
*/ - if (btrfs_get_alloc_profile(root, 1) & - (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) + if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK) async_submit = 0; else async_submit = 1; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 19781e924443..1ae527c9f8f4 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1273,7 +1273,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, { int i; - if (map_type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { + if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || @@ -1420,8 +1420,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) static inline int scrub_is_page_on_raid56(struct scrub_page *page) { return page->recover && - (page->recover->bbio->map_type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)); + (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, @@ -2994,8 +2993,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; - } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical, num, map, &offset, NULL); increment = map->stripe_len * nr_data_stripes(map); mirror_num = 1; @@ -3029,8 +3027,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, */ logical = base + offset; physical_end = physical + nstripes * map->stripe_len; - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical_end, num, map, &logic_end, NULL); logic_end += base; @@ -3076,8 +3073,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ret = 0; while (physical < physical_end) { /* for raid56, we skip parity stripe */ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; @@ -3235,8 +3231,7 @@ again: scrub_free_csums(sctx); if (extent_logical + extent_len < key.objectid + bytes) { - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* * loop until we find next data stripe * or we have finished all stripes. 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8933d70c3d53..da7e0e1107d2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4196,7 +4196,7 @@ static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target) static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) { - if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))) + if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) return; btrfs_set_fs_incompat(info, RAID56); @@ -4803,10 +4803,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root, BUG_ON(em->start > logical || em->start + em->len < logical); map = (struct map_lookup *)em->bdev; - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) len = map->stripe_len * nr_data_stripes(map); - } free_extent_map(em); return len; } @@ -4826,8 +4824,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, BUG_ON(em->start > logical || em->start + em->len < logical); map = (struct map_lookup *)em->bdev; - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) ret = 1; free_extent_map(em); return ret; @@ -4998,7 +4995,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, stripe_offset = offset - stripe_offset; /* if we're here for raid56, we need to know the stripe aligned start */ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); raid56_full_stripe_start = offset; @@ -5011,8 +5008,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (rw & REQ_DISCARD) { /* we don't discard raid56 yet */ - if (map->type & - (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = -EOPNOTSUPP; goto out; } @@ -5022,7 +5018,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, /* For writes to RAID[56], allow a full stripeset across all disks. For other RAID types and for RAID[56] reads, just allow a single stripe (on a single disk). 
*/ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && + if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && (rw & REQ_WRITE)) { max_len = stripe_len * nr_data_stripes(map) - (offset - raid56_full_stripe_start); @@ -5188,8 +5184,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, mirror_num = stripe_index - old_stripe_index + 1; } - } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { if (need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || mirror_num > 1)) { @@ -5253,7 +5248,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); /* build raid_map */ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || mirror_num > 1)) { u64 tmp; @@ -5534,8 +5529,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, do_div(length, map->num_stripes / map->sub_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID0) do_div(length, map->num_stripes); - else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { do_div(length, nr_data_stripes(map)); rmap_len = map->stripe_len * nr_data_stripes(map); } -- cgit v1.2.3 From 570193454a5841a9fa477ce1bcc6c30ca6fcf552 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:45 +0800 Subject: Rename all ref_count to refs in struct refs is better than ref_count to record a struct's ref count. Signed-off-by: Zhao Lei Suggested-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 1ae527c9f8f4..8af7372238fc 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -79,7 +79,7 @@ struct scrub_page { u64 logical; u64 physical; u64 physical_for_dev_replace; - atomic_t ref_count; + atomic_t refs; struct { unsigned int mirror_num:8; unsigned int have_csum:1; @@ -112,7 +112,7 @@ struct scrub_block { struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK]; int page_count; atomic_t outstanding_pages; - atomic_t ref_count; /* free mem on transition to zero */ + atomic_t refs; /* free mem on transition to zero */ struct scrub_ctx *sctx; struct scrub_parity *sparity; struct { @@ -141,7 +141,7 @@ struct scrub_parity { int stripe_len; - atomic_t ref_count; + atomic_t refs; struct list_head spages; @@ -1313,7 +1313,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock, int ret; /* - * note: the two members ref_count and outstanding_pages + * note: the two members refs and outstanding_pages * are not used (and not set) in the blocks that are used for * the recheck procedure */ @@ -2026,12 +2026,12 @@ static int scrub_checksum_super(struct scrub_block *sblock) static void scrub_block_get(struct scrub_block *sblock) { - atomic_inc(&sblock->ref_count); + atomic_inc(&sblock->refs); } static void scrub_block_put(struct scrub_block *sblock) { - if (atomic_dec_and_test(&sblock->ref_count)) { + if (atomic_dec_and_test(&sblock->refs)) { int i; if (sblock->sparity) @@ -2045,12 +2045,12 @@ static void scrub_block_put(struct scrub_block *sblock) static void scrub_page_get(struct scrub_page *spage) { - atomic_inc(&spage->ref_count); + atomic_inc(&spage->refs); } static void 
scrub_page_put(struct scrub_page *spage) { - if (atomic_dec_and_test(&spage->ref_count)) { + if (atomic_dec_and_test(&spage->refs)) { if (spage->page) __free_page(spage->page); kfree(spage); @@ -2176,7 +2176,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, /* one ref inside this function, plus one for each page added to * a bio later on */ - atomic_set(&sblock->ref_count, 1); + atomic_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; @@ -2469,7 +2469,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity, /* one ref inside this function, plus one for each page added to * a bio later on */ - atomic_set(&sblock->ref_count, 1); + atomic_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; sblock->sparity = sparity; @@ -2721,12 +2721,12 @@ static inline int scrub_calc_parity_bitmap_len(int nsectors) static void scrub_parity_get(struct scrub_parity *sparity) { - atomic_inc(&sparity->ref_count); + atomic_inc(&sparity->refs); } static void scrub_parity_put(struct scrub_parity *sparity) { - if (!atomic_dec_and_test(&sparity->ref_count)) + if (!atomic_dec_and_test(&sparity->refs)) return; scrub_parity_check_and_repair(sparity); @@ -2776,7 +2776,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, sparity->scrub_dev = sdev; sparity->logic_start = logic_start; sparity->logic_end = logic_end; - atomic_set(&sparity->ref_count, 1); + atomic_set(&sparity->refs, 1); INIT_LIST_HEAD(&sparity->spages); sparity->dbitmap = sparity->bitmap; sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; -- cgit v1.2.3 From 26455d3318a1e2a38f783db07981e3ed67de40ed Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Wed, 17 Dec 2014 16:14:09 +0800 Subject: Btrfs: cleanup unused run_most "run_most" is not used anymore. Signed-off-by: Liu Bo Reviewed-by: Satoru Takeuchi Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1d361ac9abc9..53294da0749d 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2769,7 +2769,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head; int ret; int run_all = count == (unsigned long)-1; - int run_most = 0; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2779,10 +2778,8 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, root = root->fs_info->tree_root; delayed_refs = &trans->transaction->delayed_refs; - if (count == 0) { + if (count == 0) count = atomic_read(&delayed_refs->num_entries) * 2; - run_most = 1; - } again: #ifdef SCRAMBLE_DELAYED_REFS -- cgit v1.2.3 From 0ee13fe28ce387864c0d2b1e8c52b64abe2fcd02 Mon Sep 17 00:00:00 2001 From: Yang Dongsheng Date: Tue, 6 Jan 2015 20:54:42 +0800 Subject: btrfs: qgroup: move WARN_ON() to the correct location. In function qgroup_excl_accounting(), we need to WARN when qg->excl is less than what we want to free, same to child and parents. But currently, for parent qgroup, the WARN_ON() is located after freeing qg->excl. It will WARN out even we free it normally. This patch move this WARN_ON() before freeing qg->excl. 
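To make the ordering issue concrete, here is a minimal userspace sketch (invented names and values, not btrfs code) of why the underflow check has to run before the counter is updated: once the subtraction has happened, the condition "excl < num_bytes" is also true for a perfectly normal free that releases everything, which is exactly the spurious warning this patch removes.

	#include <stdio.h>

	/* stand-in for the qgroup->excl accounting; purely illustrative */
	static void release_excl(unsigned long long *excl, unsigned long long bytes)
	{
		/* check BEFORE the update: only a genuine underflow trips it */
		if (*excl < bytes)
			fprintf(stderr, "WARN: excl=%llu < bytes=%llu\n", *excl, bytes);
		*excl -= bytes;
		/* a check placed HERE would also fire after a full, legitimate free */
	}

	int main(void)
	{
		unsigned long long excl = 16384;

		release_excl(&excl, 16384);	/* normal free of everything: silent */
		release_excl(&excl, 4096);	/* real underflow: warns */
		return 0;
	}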
Signed-off-by: Dongsheng Yang Reviewed-by: Satoru Takeuchi Signed-off-by: Chris Mason --- fs/btrfs/qgroup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 48b60dbf807f..97159a8e91d4 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1431,9 +1431,8 @@ static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info, qgroup = u64_to_ptr(unode->aux); qgroup->rfer += sign * oper->num_bytes; qgroup->rfer_cmpr += sign * oper->num_bytes; + WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes); qgroup->excl += sign * oper->num_bytes; - if (sign < 0) - WARN_ON(qgroup->excl < oper->num_bytes); qgroup->excl_cmpr += sign * oper->num_bytes; qgroup_dirty(fs_info, qgroup); -- cgit v1.2.3 From 78f55e5e1fa9f684705325667d455a957f43ad66 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Tue, 13 Jan 2015 00:55:10 +0800 Subject: Btrfs: fix unused members in struct btrfs_root There isn't any real use of following members of struct btrfs_root so delete them. struct kobject root_kobj; struct completion kobj_unregister; Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 -- fs/btrfs/disk-io.c | 2 -- 2 files changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2ecdac0bd850..85c697d482c2 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1798,8 +1798,6 @@ struct btrfs_root { struct btrfs_fs_info *fs_info; struct extent_io_tree dirty_log_pages; - struct kobject root_kobj; - struct completion kobj_unregister; struct mutex objectid_mutex; spinlock_t accounting_lock; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0696a103751e..612d46eece62 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1275,12 +1275,10 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize, memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); - memset(&root->root_kobj, 0, sizeof(root->root_kobj)); if (fs_info) root->defrag_trans_start = fs_info->generation; else root->defrag_trans_start = 0; - init_completion(&root->kobj_unregister); root->root_key.objectid = objectid; root->anon_dev = 0; -- cgit v1.2.3 From 95449a1626fb6b643b576b9fbafed02793627577 Mon Sep 17 00:00:00 2001 From: chandan Date: Thu, 15 Jan 2015 12:22:03 +0530 Subject: Btrfs: insert_new_root: Fix lock type of the extent buffer. btrfs_alloc_tree_block() returns an extent buffer on which a blocked lock has been taken. Hence assign the appropriate value to path->locks[level]. 
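As a rough illustration of the invariant being restored (simplified, invented names rather than the real btrfs locking API): the value stored in path->locks[level] is what later unlock code consults, so it must describe the lock actually held on path->nodes[level], which in this case is the blocking write lock returned by btrfs_alloc_tree_block().

	/* illustrative stand-ins only, not the real lock bookkeeping */
	enum fake_lock { FAKE_UNLOCKED = 0, FAKE_WRITE_LOCK, FAKE_WRITE_LOCK_BLOCKING };

	struct fake_path {
		int locks[8];	/* what we claim to hold at each level */
	};

	/* the unlock side trusts the recorded type; a mismatch would release a
	 * blocking lock as if it were a spinning one (or vice versa) */
	static void fake_unlock_level(struct fake_path *p, int level)
	{
		switch (p->locks[level]) {
		case FAKE_WRITE_LOCK:
			/* plain unlock */
			break;
		case FAKE_WRITE_LOCK_BLOCKING:
			/* clear the blocking state first, then unlock */
			break;
		default:
			break;
		}
		p->locks[level] = FAKE_UNLOCKED;
	}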
Signed-off-by: Chandan Rajendra Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index fdd8fb4c6075..993642199326 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3380,7 +3380,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, add_root_to_dirty_list(root); extent_buffer_get(c); path->nodes[level] = c; - path->locks[level] = BTRFS_WRITE_LOCK; + path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; path->slots[level] = 0; return 0; } -- cgit v1.2.3 From b520252aa287b14e1f39a51e20051775b273b82a Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 22 Jan 2015 08:13:17 -0700 Subject: fs: make inode_to_bdi() handle NULL inode Running a heavy fs workload, I ran into a situation where we pass down a page for writeback/swap that doesn't have an inode mapping: BUG: unable to handle kernel NULL pointer dereference at 0000000000000028 IP: [] inode_to_bdi+0xf/0x50 PGD 0 Oops: 0000 [#1] PREEMPT SMP Modules linked in: wl(O) tun cfg80211 btusb joydev hid_apple hid_generic usbhid hid bcm5974 usb_storage nouveau snd_hda_codec_hdmi snd_hda_codec_cirrus snd_hda_codec_generic x86_pkg_temp_thermal snd_hda_intel kvm_intel snd_hda_controller snd_hda_codec kvm snd_hwdep snd_pcm applesmc input_polldev snd_seq_midi snd_seq_midi_event snd_rawmidi snd_seq snd_timer snd_seq_device snd xhci_pci xhci_hcd ttm thunderbolt soundcore apple_gmux apple_bl bluetooth binfmt_misc fuse nls_iso8859_1 nls_cp437 vfat fat [last unloaded: wl] CPU: 4 PID: 50 Comm: kswapd0 Tainted: G U O 3.19.0-rc5+ #60 Hardware name: Apple Inc. MacBookPro11,3/Mac-2BD1B31983FE1663, BIOS MBP112.88Z.0138.B02.1310181745 10/18/2013 task: ffff880462e917f0 ti: ffff880462edc000 task.ti: ffff880462edc000 RIP: 0010:[] [] inode_to_bdi+0xf/0x50 RSP: 0000:ffff880462edf8e8 EFLAGS: 00010282 RAX: ffffffff81c4cd80 RBX: ffffea0001b3abc0 RCX: 0000000000000000 RDX: 0000000000000001 RSI: 0000000000000000 RDI: 0000000000000000 RBP: ffff880462edf8f8 R08: 00000000001e8500 R09: ffff880460f7cb68 R10: ffff880462edfa00 R11: 0000000000000101 R12: 0000000000000000 R13: ffffffff81c4cd98 R14: 0000000000000000 R15: ffff880460f7c9c0 FS: 0000000000000000(0000) GS:ffff88047f300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000028 CR3: 00000002b6341000 CR4: 00000000001407e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Stack: ffffea0001b3abc0 ffffffff81c4cd80 ffff880462edf948 ffffffff811244aa ffffffff811565b0 ffff880460f7c9c0 ffff880462edf948 ffffea0001b3abc0 0000000000000001 ffff880462edfb40 ffff880008b999c0 ffff880460f7c9c0 Call Trace: [] __test_set_page_writeback+0x3a/0x170 [] ? SyS_madvise+0x790/0x790 [] __swap_writepage+0x216/0x280 [] ? radix_tree_insert+0x32/0xe0 [] ? swap_info_get+0x61/0xf0 [] ? page_swapcount+0x4c/0x60 [] swap_writepage+0x2d/0x50 [] shmem_writepage+0x198/0x2c0 [] shrink_page_list+0x464/0xa00 [] shrink_inactive_list+0x266/0x500 [] shrink_lruvec+0x5d5/0x720 [] shrink_zone+0x5b/0x190 [] kswapd+0x48f/0x8d0 [] ? try_to_free_pages+0x4c0/0x4c0 [] kthread+0xd2/0xf0 [] ? workqueue_congested+0x30/0x80 [] ? kthread_create_on_node+0x180/0x180 [] ret_from_fork+0x7c/0xb0 [] ? 
kthread_create_on_node+0x180/0x180 Code: 00 48 c7 c7 8d 8d a4 81 e8 3f 62 eb ff e9 fc fe ff ff 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 55 48 89 e5 41 54 49 89 fc 53 <48> 8b 5f 28 48 89 df e8 15 f8 00 00 85 c0 75 11 48 8b 83 d8 00 RIP [] inode_to_bdi+0xf/0x50 RSP CR2: 0000000000000028 ---[ end trace eb0e21aa7dad3ddf ]--- Handle this in inode_to_bdi() by punting it to noop_backing_dev_info, if mapping->host is NULL. Signed-off-by: Jens Axboe --- fs/fs-writeback.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index a20b1145f4d5..c399152de397 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -68,7 +68,12 @@ EXPORT_SYMBOL(writeback_in_progress); struct backing_dev_info *inode_to_bdi(struct inode *inode) { - struct super_block *sb = inode->i_sb; + struct super_block *sb; + + if (!inode) + return &noop_backing_dev_info; + + sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) return blk_get_backing_dev_info(I_BDEV(inode)); -- cgit v1.2.3 From fa14a0b8d2bca1d2226afaa04bdf80200d8e9b03 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 22 Jan 2015 02:16:49 -0500 Subject: cut down the number of do_path_lookup() callers ... and don't bother with new struct filename when we already have one Signed-off-by: Al Viro --- fs/namei.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index bc35b02883bb..73fcf4280d6e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2046,7 +2046,8 @@ struct dentry *kern_path_locked(const char *name, struct path *path) { struct nameidata nd; struct dentry *d; - int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd); + struct filename filename = {.name = name}; + int err = filename_lookup(AT_FDCWD, &filename, LOOKUP_PARENT, &nd); if (err) return ERR_PTR(err); if (nd.last_type != LAST_NORM) { @@ -3290,7 +3291,7 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, return file; } -struct dentry *kern_path_create(int dfd, const char *pathname, +static struct dentry *filename_create(int dfd, struct filename *name, struct path *path, unsigned int lookup_flags) { struct dentry *dentry = ERR_PTR(-EEXIST); @@ -3305,7 +3306,7 @@ struct dentry *kern_path_create(int dfd, const char *pathname, */ lookup_flags &= LOOKUP_REVAL; - error = do_path_lookup(dfd, pathname, LOOKUP_PARENT|lookup_flags, &nd); + error = filename_lookup(dfd, name, LOOKUP_PARENT|lookup_flags, &nd); if (error) return ERR_PTR(error); @@ -3359,6 +3360,13 @@ out: path_put(&nd.path); return dentry; } + +struct dentry *kern_path_create(int dfd, const char *pathname, + struct path *path, unsigned int lookup_flags) +{ + struct filename filename = {.name = pathname}; + return filename_create(dfd, &filename, path, lookup_flags); +} EXPORT_SYMBOL(kern_path_create); void done_path_create(struct path *path, struct dentry *dentry) @@ -3377,7 +3385,7 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct dentry *res; if (IS_ERR(tmp)) return ERR_CAST(tmp); - res = kern_path_create(dfd, tmp->name, path, lookup_flags); + res = filename_create(dfd, tmp, path, lookup_flags); putname(tmp); return res; } -- cgit v1.2.3 From 08518549722f0c992a9e4be71a0777f37147e9d2 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Wed, 21 Jan 2015 23:59:56 -0500 Subject: fs: rework getname_kernel to handle up to PATH_MAX sized filenames In preparation for expanded use in the kernel, make getname_kernel() more useful by allowing it to 
handle any legal filename length. Thanks to Guenter Roeck for his suggestion to substitute memcpy() for strlcpy(). CC: linux@roeck-us.net CC: viro@zeniv.linux.org.uk CC: linux-fsdevel@vger.kernel.org Signed-off-by: Paul Moore Signed-off-by: Al Viro --- fs/namei.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 73fcf4280d6e..71dc1cc23b1a 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -212,32 +212,38 @@ getname(const char __user * filename) return getname_flags(filename, 0, NULL); } -/* - * The "getname_kernel()" interface doesn't do pathnames longer - * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user. - */ struct filename * getname_kernel(const char * filename) { struct filename *result; - char *kname; - int len; - - len = strlen(filename); - if (len >= EMBEDDED_NAME_MAX) - return ERR_PTR(-ENAMETOOLONG); + int len = strlen(filename) + 1; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); - kname = (char *)result + sizeof(*result); - result->name = kname; + if (len <= EMBEDDED_NAME_MAX) { + result->name = (char *)(result) + sizeof(*result); + result->separate = false; + } else if (len <= PATH_MAX) { + struct filename *tmp; + + tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); + if (unlikely(!tmp)) { + __putname(result); + return ERR_PTR(-ENOMEM); + } + tmp->name = (char *)result; + tmp->separate = true; + result = tmp; + } else { + __putname(result); + return ERR_PTR(-ENAMETOOLONG); + } + memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; - result->separate = false; - strlcpy(kname, filename, EMBEDDED_NAME_MAX); return result; } -- cgit v1.2.3 From 5168910413830435fa3f0a593933a83721ec8bad Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 22 Jan 2015 00:00:03 -0500 Subject: fs: create proper filename objects using getname_kernel() There are several areas in the kernel that create temporary filename objects using the following pattern: int func(const char *name) { struct filename *file = { .name = name }; ... return 0; } ... which for the most part works okay, but it causes havoc within the audit subsystem as the filename object does not persist beyond the lifetime of the function. This patch converts all of these temporary filename objects into proper filename objects using getname_kernel() and putname() which ensure that the filename object persists until the audit subsystem is finished with it. Also, a special thanks to Al Viro, Guenter Roeck, and Sabrina Dubroca for helping resolve a difficult kernel panic on boot related to a use-after-free problem in kern_path_create(); the thread can be seen at the link below: * https://lkml.org/lkml/2015/1/20/710 This patch includes code that was either based on, or directly written by Al in the above thread. 
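The conversion pattern described above, reduced to a minimal sketch. Error handling is trimmed, and do_work_on() is a placeholder standing in for the real consumer (do_open_execat(), file_open_name(), and so on), not an actual kernel helper:

	#include <linux/err.h>
	#include <linux/fs.h>

	/* placeholder consumer, not a real kernel function */
	static int do_work_on(struct filename *file)
	{
		return 0;
	}

	static int func(const char *name)
	{
		struct filename *file = getname_kernel(name);
		int ret;

		if (IS_ERR(file))
			return PTR_ERR(file);
		ret = do_work_on(file);
		putname(file);		/* balance getname_kernel() */
		return ret;
	}

The point of the conversion is that the struct filename is now a real, separately allocated object, so the audit subsystem can keep referring to it for as long as it needs instead of pointing at a caller's stack frame.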
CC: viro@zeniv.linux.org.uk CC: linux@roeck-us.net CC: sd@queasysnail.net CC: linux-fsdevel@vger.kernel.org Signed-off-by: Paul Moore Signed-off-by: Al Viro --- fs/exec.c | 10 ++++++++-- fs/namei.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++---------------- fs/open.c | 10 ++++++++-- 3 files changed, 64 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index ad8798e26be9..c7f9b733406d 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -794,8 +794,14 @@ exit: struct file *open_exec(const char *name) { - struct filename tmp = { .name = name }; - return do_open_execat(AT_FDCWD, &tmp, 0); + struct filename *filename = getname_kernel(name); + struct file *f = ERR_CAST(filename); + + if (!IS_ERR(filename)) { + f = do_open_execat(AT_FDCWD, filename, 0); + putname(filename); + } + return f; } EXPORT_SYMBOL(open_exec); diff --git a/fs/namei.c b/fs/namei.c index 71dc1cc23b1a..0bc7742a591d 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2042,32 +2042,47 @@ static int filename_lookup(int dfd, struct filename *name, static int do_path_lookup(int dfd, const char *name, unsigned int flags, struct nameidata *nd) { - struct filename filename = { .name = name }; + struct filename *filename = getname_kernel(name); + int retval = PTR_ERR(filename); - return filename_lookup(dfd, &filename, flags, nd); + if (!IS_ERR(filename)) { + retval = filename_lookup(dfd, filename, flags, nd); + putname(filename); + } + return retval; } /* does lookup, returns the object with parent locked */ struct dentry *kern_path_locked(const char *name, struct path *path) { + struct filename *filename = getname_kernel(name); struct nameidata nd; struct dentry *d; - struct filename filename = {.name = name}; - int err = filename_lookup(AT_FDCWD, &filename, LOOKUP_PARENT, &nd); - if (err) - return ERR_PTR(err); + int err; + + if (IS_ERR(filename)) + return ERR_CAST(filename); + + err = filename_lookup(AT_FDCWD, filename, LOOKUP_PARENT, &nd); + if (err) { + d = ERR_PTR(err); + goto out; + } if (nd.last_type != LAST_NORM) { path_put(&nd.path); - return ERR_PTR(-EINVAL); + d = ERR_PTR(-EINVAL); + goto out; } mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); d = __lookup_hash(&nd.last, nd.path.dentry, 0); if (IS_ERR(d)) { mutex_unlock(&nd.path.dentry->d_inode->i_mutex); path_put(&nd.path); - return d; + goto out; } *path = nd.path; +out: + putname(filename); return d; } @@ -2399,8 +2414,14 @@ int kern_path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags) { - struct filename s = {.name = name}; - return filename_mountpoint(dfd, &s, path, flags); + struct filename *s = getname_kernel(name); + int retval = PTR_ERR(s); + + if (!IS_ERR(s)) { + retval = filename_mountpoint(dfd, s, path, flags); + putname(s); + } + return retval; } EXPORT_SYMBOL(kern_path_mountpoint); @@ -3280,7 +3301,7 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, { struct nameidata nd; struct file *file; - struct filename filename = { .name = name }; + struct filename *filename; int flags = op->lookup_flags | LOOKUP_ROOT; nd.root.mnt = mnt; @@ -3289,11 +3310,16 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN) return ERR_PTR(-ELOOP); - file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_RCU); + filename = getname_kernel(name); + if (unlikely(IS_ERR(filename))) + return ERR_CAST(filename); + + file = path_openat(-1, filename, &nd, op, flags | LOOKUP_RCU); if (unlikely(file == 
ERR_PTR(-ECHILD))) - file = path_openat(-1, &filename, &nd, op, flags); + file = path_openat(-1, filename, &nd, op, flags); if (unlikely(file == ERR_PTR(-ESTALE))) - file = path_openat(-1, &filename, &nd, op, flags | LOOKUP_REVAL); + file = path_openat(-1, filename, &nd, op, flags | LOOKUP_REVAL); + putname(filename); return file; } @@ -3370,8 +3396,14 @@ out: struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, unsigned int lookup_flags) { - struct filename filename = {.name = pathname}; - return filename_create(dfd, &filename, path, lookup_flags); + struct filename *filename = getname_kernel(pathname); + struct dentry *res; + + if (IS_ERR(filename)) + return ERR_CAST(filename); + res = filename_create(dfd, filename, path, lookup_flags); + putname(filename); + return res; } EXPORT_SYMBOL(kern_path_create); diff --git a/fs/open.c b/fs/open.c index 813be037b412..d36c42ff019d 100644 --- a/fs/open.c +++ b/fs/open.c @@ -971,8 +971,14 @@ struct file *file_open_name(struct filename *name, int flags, umode_t mode) */ struct file *filp_open(const char *filename, int flags, umode_t mode) { - struct filename name = {.name = filename}; - return file_open_name(&name, flags, mode); + struct filename *name = getname_kernel(filename); + struct file *file = ERR_CAST(name); + + if (!IS_ERR(name)) { + file = file_open_name(name, flags, mode); + putname(name); + } + return file; } EXPORT_SYMBOL(filp_open); -- cgit v1.2.3 From cbaab2db9103cc6727c7166d2fda9f64038c828c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 22 Jan 2015 02:49:00 -0500 Subject: simpler calling conventions for filename_mountpoint() a) make it accept ERR_PTR() as filename (and return its PTR_ERR() in that case) b) make it putname() the sucker in the end otherwise simplifies life for callers... 
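The resulting calling convention, as a sketch rather than the real helper: the callee accepts either a valid struct filename or an ERR_PTR, returns the error in the latter case, and always consumes the name on both the success and failure paths, so callers can feed a getname()/getname_kernel() result straight in with no local cleanup.

	#include <linux/err.h>
	#include <linux/fs.h>

	/* sketch of the "consume ERR_PTR or object" shape used here */
	static int consume_name(struct filename *name)
	{
		int error;

		if (IS_ERR(name))
			return PTR_ERR(name);
		error = 0;	/* ... the actual lookup work would go here ... */
		putname(name);
		return error;
	}

	/* which lets a caller collapse to a single expression */
	static int lookup_example(const char *name)
	{
		return consume_name(getname_kernel(name));
	}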
Signed-off-by: Al Viro --- fs/namei.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 0bc7742a591d..5ec3515162e6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2373,13 +2373,17 @@ static int filename_mountpoint(int dfd, struct filename *s, struct path *path, unsigned int flags) { - int error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU); + int error; + if (IS_ERR(s)) + return PTR_ERR(s); + error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_RCU); if (unlikely(error == -ECHILD)) error = path_mountpoint(dfd, s->name, path, flags); if (unlikely(error == -ESTALE)) error = path_mountpoint(dfd, s->name, path, flags | LOOKUP_REVAL); if (likely(!error)) audit_inode(s, path->dentry, 0); + putname(s); return error; } @@ -2401,27 +2405,14 @@ int user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags, struct path *path) { - struct filename *s = getname(name); - int error; - if (IS_ERR(s)) - return PTR_ERR(s); - error = filename_mountpoint(dfd, s, path, flags); - putname(s); - return error; + return filename_mountpoint(dfd, getname(name), path, flags); } int kern_path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags) { - struct filename *s = getname_kernel(name); - int retval = PTR_ERR(s); - - if (!IS_ERR(s)) { - retval = filename_mountpoint(dfd, s, path, flags); - putname(s); - } - return retval; + return filename_mountpoint(dfd, getname_kernel(name), path, flags); } EXPORT_SYMBOL(kern_path_mountpoint); -- cgit v1.2.3 From fd3522fdc84023b050bb40318d9fc71a9adc22bc Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 22 Jan 2015 00:00:10 -0500 Subject: audit: enable filename recording via getname_kernel() Enable recording of filenames in getname_kernel() and remove the kludgy workaround in __audit_inode() now that we have proper filename logging for kernel users. 
CC: viro@zeniv.linux.org.uk CC: linux-fsdevel@vger.kernel.org Signed-off-by: Paul Moore Reviewed-by: Richard Guy Briggs Signed-off-by: Al Viro --- fs/namei.c | 1 + kernel/auditsc.c | 40 +++------------------------------------- 2 files changed, 4 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 5ec3515162e6..a3fde77d4abf 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -243,6 +243,7 @@ getname_kernel(const char * filename) memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; + audit_getname(result); return result; } diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 072566dd0caf..132dbcdef6ec 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1882,44 +1882,10 @@ out_alloc: n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; - /* unfortunately, while we may have a path name to record with the - * inode, we can't always rely on the string lasting until the end of - * the syscall so we need to create our own copy, it may fail due to - * memory allocation issues, but we do our best */ - if (name) { - /* we can't use getname_kernel() due to size limits */ - size_t len = strlen(name->name) + 1; - struct filename *new = __getname(); - - if (unlikely(!new)) - goto out; + if (name) + /* no need to set ->name_put as the original will cleanup */ + n->name = name; - if (len <= (PATH_MAX - sizeof(*new))) { - new->name = (char *)(new) + sizeof(*new); - new->separate = false; - } else if (len <= PATH_MAX) { - /* this looks odd, but is due to final_putname() */ - struct filename *new2; - - new2 = kmalloc(sizeof(*new2), GFP_KERNEL); - if (unlikely(!new2)) { - __putname(new); - goto out; - } - new2->name = (char *)new; - new2->separate = true; - new = new2; - } else { - /* we should never get here, but let's be safe */ - __putname(new); - goto out; - } - strlcpy((char *)new->name, name->name, len); - new->uptr = NULL; - new->aname = n; - n->name = new; - n->name_put = true; - } out: if (parent) { n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL; -- cgit v1.2.3 From 55422d0bd292f5ad143cc32cb8bb8505257274c4 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 22 Jan 2015 00:00:23 -0500 Subject: audit: replace getname()/putname() hacks with reference counters In order to ensure that filenames are not released before the audit subsystem is done with the strings there are a number of hacks built into the fs and audit subsystems around getname() and putname(). To say these hacks are "ugly" would be kind. This patch removes the filename hackery in favor of a more conventional reference count based approach. The diffstat below tells most of the story; lots of audit/fs specific code is replaced with a traditional reference count based approach that is easily understood, even by those not familiar with the audit and/or fs subsystems. CC: viro@zeniv.linux.org.uk CC: linux-fsdevel@vger.kernel.org Signed-off-by: Paul Moore Signed-off-by: Al Viro --- fs/namei.c | 29 +++++++------- include/linux/audit.h | 3 -- include/linux/fs.h | 9 +---- kernel/audit.h | 17 +------- kernel/auditsc.c | 105 ++++++-------------------------------------------- 5 files changed, 29 insertions(+), 134 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index a3fde77d4abf..96ca11dea4a2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -118,15 +118,6 @@ * POSIX.1 2.4: an empty pathname is invalid (ENOENT). * PATH_MAX includes the nul terminator --RR. 
*/ -void final_putname(struct filename *name) -{ - if (name->separate) { - __putname(name->name); - kfree(name); - } else { - __putname(name); - } -} #define EMBEDDED_NAME_MAX (PATH_MAX - sizeof(struct filename)) @@ -145,6 +136,7 @@ getname_flags(const char __user *filename, int flags, int *empty) result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); + result->refcnt = 1; /* * First, try to embed the struct filename inside the names_cache @@ -179,6 +171,7 @@ recopy: } result->name = kname; result->separate = true; + result->refcnt = 1; max = PATH_MAX; goto recopy; } @@ -202,7 +195,7 @@ recopy: return result; error: - final_putname(result); + putname(result); return err; } @@ -243,19 +236,25 @@ getname_kernel(const char * filename) memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; + result->refcnt = 1; audit_getname(result); return result; } -#ifdef CONFIG_AUDITSYSCALL void putname(struct filename *name) { - if (unlikely(!audit_dummy_context())) - return audit_putname(name); - final_putname(name); + BUG_ON(name->refcnt <= 0); + + if (--name->refcnt > 0) + return; + + if (name->separate) { + __putname(name->name); + kfree(name); + } else + __putname(name); } -#endif static int check_acl(struct inode *inode, int mask) { diff --git a/include/linux/audit.h b/include/linux/audit.h index af84234e1f6e..87c2d347d255 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -128,7 +128,6 @@ extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); -extern void audit_putname(struct filename *name); #define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ @@ -353,8 +352,6 @@ static inline struct filename *audit_reusename(const __user char *name) } static inline void audit_getname(struct filename *name) { } -static inline void audit_putname(struct filename *name) -{ } static inline void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags) diff --git a/include/linux/fs.h b/include/linux/fs.h index f90c0282c114..b49842fe203f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2080,6 +2080,7 @@ struct filename { const char *name; /* pointer to actual string */ const __user char *uptr; /* original userland pointer */ struct audit_names *aname; + int refcnt; bool separate; /* should "name" be freed? 
*/ }; @@ -2101,6 +2102,7 @@ extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname_flags(const char __user *, int, int *); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); +extern void putname(struct filename *name); enum { FILE_CREATED = 1, @@ -2121,15 +2123,8 @@ extern void __init vfs_caches_init(unsigned long); extern struct kmem_cache *names_cachep; -extern void final_putname(struct filename *name); - #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) -#ifndef CONFIG_AUDITSYSCALL -#define putname(name) final_putname(name) -#else -extern void putname(struct filename *name); -#endif #ifdef CONFIG_BLOCK extern int register_blkdev(unsigned int, const char *); diff --git a/kernel/audit.h b/kernel/audit.h index 3cdffad5a1d9..1caa0d345d90 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -24,12 +24,6 @@ #include #include -/* 0 = no checking - 1 = put_count checking - 2 = verbose put_count checking -*/ -#define AUDIT_DEBUG 0 - /* AUDIT_NAMES is the number of slots we reserve in the audit_context * for saving names from getname(). If we get more names we will allocate * a name dynamically and also add those to the list anchored by names_list. */ @@ -74,9 +68,8 @@ struct audit_cap_data { }; }; -/* When fs/namei.c:getname() is called, we store the pointer in name and - * we don't let putname() free it (instead we free all of the saved - * pointers at syscall exit time). +/* When fs/namei.c:getname() is called, we store the pointer in name and bump + * the refcnt in the associated filename struct. * * Further, in fs/namei.c:path_lookup() we store the inode and device. */ @@ -86,7 +79,6 @@ struct audit_names { struct filename *name; int name_len; /* number of chars to log */ bool hidden; /* don't log this record */ - bool name_put; /* call __putname()? 
*/ unsigned long ino; dev_t dev; @@ -208,11 +200,6 @@ struct audit_context { }; int fds[2]; struct audit_proctitle proctitle; - -#if AUDIT_DEBUG - int put_count; - int ino_count; -#endif }; extern u32 audit_ever_enabled; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 4f521964ccaa..0c38604a630c 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -866,33 +866,10 @@ static inline void audit_free_names(struct audit_context *context) { struct audit_names *n, *next; -#if AUDIT_DEBUG == 2 - if (context->put_count + context->ino_count != context->name_count) { - int i = 0; - - pr_err("%s:%d(:%d): major=%d in_syscall=%d" - " name_count=%d put_count=%d ino_count=%d" - " [NOT freeing]\n", __FILE__, __LINE__, - context->serial, context->major, context->in_syscall, - context->name_count, context->put_count, - context->ino_count); - list_for_each_entry(n, &context->names_list, list) { - pr_err("names[%d] = %p = %s\n", i++, n->name, - n->name->name ?: "(null)"); - } - dump_stack(); - return; - } -#endif -#if AUDIT_DEBUG - context->put_count = 0; - context->ino_count = 0; -#endif - list_for_each_entry_safe(n, next, &context->names_list, list) { list_del(&n->list); - if (n->name && n->name_put) - final_putname(n->name); + if (n->name) + putname(n->name); if (n->should_free) kfree(n); } @@ -1711,9 +1688,6 @@ static struct audit_names *audit_alloc_name(struct audit_context *context, list_add_tail(&aname->list, &context->names_list); context->name_count++; -#if AUDIT_DEBUG - context->ino_count++; -#endif return aname; } @@ -1734,8 +1708,10 @@ __audit_reusename(const __user char *uptr) list_for_each_entry(n, &context->names_list, list) { if (!n->name) continue; - if (n->name->uptr == uptr) + if (n->name->uptr == uptr) { + n->name->refcnt++; return n->name; + } } return NULL; } @@ -1752,19 +1728,8 @@ void __audit_getname(struct filename *name) struct audit_context *context = current->audit_context; struct audit_names *n; - if (!context->in_syscall) { -#if AUDIT_DEBUG == 2 - pr_err("%s:%d(:%d): ignoring getname(%p)\n", - __FILE__, __LINE__, context->serial, name); - dump_stack(); -#endif + if (!context->in_syscall) return; - } - -#if AUDIT_DEBUG - /* The filename _must_ have a populated ->name */ - BUG_ON(!name->name); -#endif n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) @@ -1772,56 +1737,13 @@ void __audit_getname(struct filename *name) n->name = name; n->name_len = AUDIT_NAME_FULL; - n->name_put = true; name->aname = n; + name->refcnt++; if (!context->pwd.dentry) get_fs_pwd(current->fs, &context->pwd); } -/* audit_putname - intercept a putname request - * @name: name to intercept and delay for putname - * - * If we have stored the name from getname in the audit context, - * then we delay the putname until syscall exit. - * Called from include/linux/fs.h:putname(). 
- */ -void audit_putname(struct filename *name) -{ - struct audit_context *context = current->audit_context; - - BUG_ON(!context); - if (!name->aname || !context->in_syscall) { -#if AUDIT_DEBUG == 2 - pr_err("%s:%d(:%d): final_putname(%p)\n", - __FILE__, __LINE__, context->serial, name); - if (context->name_count) { - struct audit_names *n; - int i = 0; - - list_for_each_entry(n, &context->names_list, list) - pr_err("name[%d] = %p = %s\n", i++, n->name, - n->name->name ?: "(null)"); - } -#endif - final_putname(name); - } -#if AUDIT_DEBUG - else { - ++context->put_count; - if (context->put_count > context->name_count) { - pr_err("%s:%d(:%d): major=%d in_syscall=%d putname(%p)" - " name_count=%d put_count=%d\n", - __FILE__, __LINE__, - context->serial, context->major, - context->in_syscall, name->name, - context->name_count, context->put_count); - dump_stack(); - } - } -#endif -} - /** * __audit_inode - store the inode and device from a lookup * @name: name being audited @@ -1842,11 +1764,6 @@ void __audit_inode(struct filename *name, const struct dentry *dentry, if (!name) goto out_alloc; -#if AUDIT_DEBUG - /* The struct filename _must_ have a populated ->name */ - BUG_ON(!name->name); -#endif - /* * If we have a pointer to an audit_names entry already, then we can * just use it directly if the type is correct. @@ -1893,9 +1810,10 @@ out_alloc: n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN); if (!n) return; - if (name) - /* no need to set ->name_put as the original will cleanup */ + if (name) { n->name = name; + name->refcnt++; + } out: if (parent) { @@ -2000,8 +1918,7 @@ void __audit_inode_child(const struct inode *parent, if (found_parent) { found_child->name = found_parent->name; found_child->name_len = AUDIT_NAME_FULL; - /* don't call __putname() */ - found_child->name_put = false; + found_child->name->refcnt++; } } -- cgit v1.2.3 From bbc7f33ac6ff6f48709ce892aa906ddb68b34517 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 20 Jan 2015 11:51:26 -0500 Subject: nfsd: fix year-2038 nfs4 state problem Someone with a weird time_t happened to notice this, it shouldn't really manifest till 2038. It may not be our ownly year-2038 problem. Reported-by: Aaron Pace Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f924f0618cb5..1f4b85b15125 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1508,7 +1508,12 @@ unhash_session(struct nfsd4_session *ses) static int STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) { - if (clid->cl_boot == nn->boot_time) + /* + * We're assuming the clid was not given out from a boot + * precisely 2^32 (about 136 years) before this one. That seems + * a safe assumption: + */ + if (clid->cl_boot == (u32)nn->boot_time) return 0; dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n", clid->cl_boot, clid->cl_id, nn->boot_time); -- cgit v1.2.3 From 3c5199143bc4b35f472c5c2534026d74821e2044 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 22 Jan 2015 08:19:32 -0500 Subject: sunrpc/lockd: fix references to the BKL The BKL is completely out of the picture in the lockd and sunrpc code these days. Update the antiquated comments that refer to it. Signed-off-by: Jeff Layton Signed-off-by: J. 
Bruce Fields --- fs/lockd/svclock.c | 4 ++-- include/linux/sunrpc/svc.h | 2 +- net/sunrpc/svc.c | 4 ++-- net/sunrpc/svc_xprt.c | 3 +-- 4 files changed, 6 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 56598742dde4..5581e020644b 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -57,8 +57,8 @@ static DEFINE_SPINLOCK(nlm_blocked_lock); static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) { /* - * We can get away with a static buffer because we're only - * called with BKL held. + * We can get away with a static buffer because this is only called + * from lockd, which is single-threaded. */ static char buf[2*NLM_MAXCOOKIELEN+1]; unsigned int i, len = sizeof(buf); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 6f22cfeef5e3..fae6fb947fc8 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -110,7 +110,7 @@ struct svc_serv { * We use sv_nrthreads as a reference count. svc_destroy() drops * this refcount, so we need to bump it up around operations that * change the number of threads. Horrible, but there it is. - * Should be called with the BKL held. + * Should be called with the "service mutex" held. */ static inline void svc_get(struct svc_serv *serv) { diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 91eaef1844c8..78974e4d9ad2 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -768,8 +768,8 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) EXPORT_SYMBOL_GPL(svc_set_num_threads); /* - * Called from a server thread as it's exiting. Caller must hold the BKL or - * the "service mutex", whichever is appropriate for the service. + * Called from a server thread as it's exiting. Caller must hold the "service + * mutex" for the service. */ void svc_exit_thread(struct svc_rqst *rqstp) diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c69358b3cf7f..163ac45c3639 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -42,7 +42,7 @@ static LIST_HEAD(svc_xprt_class_list); * svc_pool->sp_lock protects most of the fields of that pool. * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. * when both need to be taken (rare), svc_serv->sv_lock is first. - * BKL protects svc_serv->sv_nrthread. + * The "service mutex" protects svc_serv->sv_nrthread. * svc_sock->sk_lock protects the svc_sock->sk_deferred list * and the ->sk_info_authunix cache. * @@ -67,7 +67,6 @@ static LIST_HEAD(svc_xprt_class_list); * that no other thread will be using the transport or will * try to set XPT_DEAD. */ - int svc_reg_xprt_class(struct svc_xprt_class *xcl) { struct svc_xprt_class *cl; -- cgit v1.2.3 From 4c94e13e9caed09103419c087f436d79f9d2faba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 22 Jan 2015 12:09:50 +0100 Subject: nfsd: factor out a helper to decode nfstime4 values Signed-off-by: Christoph Hellwig Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4xdr.c | 43 ++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 91f7a3644ffb..974533e5a427 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -234,6 +234,26 @@ static char *savemem(struct nfsd4_compoundargs *argp, __be32 *p, int nbytes) return ret; } +/* + * We require the high 32 bits of 'seconds' to be 0, and + * we ignore all 32 bits of 'nseconds'. 
+ */ +static __be32 +nfsd4_decode_time(struct nfsd4_compoundargs *argp, struct timespec *tv) +{ + DECODE_HEAD; + u64 sec; + + READ_BUF(12); + p = xdr_decode_hyper(p, &sec); + tv->tv_sec = sec; + tv->tv_nsec = be32_to_cpup(p++); + if (tv->tv_nsec >= (u32)1000000000) + return nfserr_inval; + + DECODE_TAIL; +} + static __be32 nfsd4_decode_bitmap(struct nfsd4_compoundargs *argp, u32 *bmval) { @@ -267,7 +287,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, { int expected_len, len = 0; u32 dummy32; - u64 sec; char *buf; DECODE_HEAD; @@ -358,15 +377,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, dummy32 = be32_to_cpup(p++); switch (dummy32) { case NFS4_SET_TO_CLIENT_TIME: - /* We require the high 32 bits of 'seconds' to be 0, and we ignore - all 32 bits of 'nseconds'. */ - READ_BUF(12); len += 12; - p = xdr_decode_hyper(p, &sec); - iattr->ia_atime.tv_sec = (time_t)sec; - iattr->ia_atime.tv_nsec = be32_to_cpup(p++); - if (iattr->ia_atime.tv_nsec >= (u32)1000000000) - return nfserr_inval; + status = nfsd4_decode_time(argp, &iattr->ia_atime); + if (status) + return status; iattr->ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET); break; case NFS4_SET_TO_SERVER_TIME: @@ -382,15 +396,10 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, dummy32 = be32_to_cpup(p++); switch (dummy32) { case NFS4_SET_TO_CLIENT_TIME: - /* We require the high 32 bits of 'seconds' to be 0, and we ignore - all 32 bits of 'nseconds'. */ - READ_BUF(12); len += 12; - p = xdr_decode_hyper(p, &sec); - iattr->ia_mtime.tv_sec = sec; - iattr->ia_mtime.tv_nsec = be32_to_cpup(p++); - if (iattr->ia_mtime.tv_nsec >= (u32)1000000000) - return nfserr_inval; + status = nfsd4_decode_time(argp, &iattr->ia_mtime); + if (status) + return status; iattr->ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET); break; case NFS4_SET_TO_SERVER_TIME: -- cgit v1.2.3 From 566fcec60b7458784d4ed9bca974c5a56dacf214 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 23 Jan 2015 15:32:46 -0500 Subject: NFSv4: Fix an atomicity problem in CLOSE If we are to remove the serialisation of OPEN/CLOSE, then we need to ensure that the stateid sent as part of a CLOSE operation does not change after we test the state in nfs4_close_prepare. 
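The race and its fix in outline, as a sketch with simplified signatures (the real callbacks are nfs4_close_prepare()/nfs4_close_done() taking rpc_task/void* arguments): the stateid to be sent is snapshotted into the call data while the open mode is computed, and the completion path restarts the RPC if the state's stateid has moved on in the meantime.

	/* sketch: snapshot-then-revalidate flow, signatures simplified */
	static void close_prepare(struct nfs4_closedata *calldata)
	{
		/* private copy: a racing OPEN can no longer change what CLOSE sends */
		nfs4_stateid_copy(&calldata->arg.stateid, &calldata->state->stateid);
	}

	static void close_done(struct rpc_task *task, struct nfs4_closedata *calldata,
			       int status)
	{
		switch (status) {
		case -NFS4ERR_OLD_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_EXPIRED:
			/* our snapshot is stale; retry CLOSE with the current stateid */
			if (!nfs4_stateid_match(&calldata->arg.stateid,
						&calldata->state->stateid))
				rpc_restart_call_prepare(task);
			break;
		}
	}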
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 7 ++++++- fs/nfs/nfs4xdr.c | 4 ++-- include/linux/nfs_xdr.h | 2 +- 3 files changed, 9 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c347705b0161..4863dec10865 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2587,6 +2587,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data) case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: + if (!nfs4_stateid_match(&calldata->arg.stateid, + &state->stateid)) { + rpc_restart_call_prepare(task); + goto out_release; + } if (calldata->arg.fmode == 0) break; default: @@ -2619,6 +2624,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); + nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid); /* Calculate the change in open mode */ calldata->arg.fmode = 0; if (state->n_rdwr == 0) { @@ -2757,7 +2763,6 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) calldata->inode = state->inode; calldata->state = state; calldata->arg.fh = NFS_FH(state->inode); - calldata->arg.stateid = &state->open_stateid; /* Serialization for the sequence id */ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); if (calldata->arg.seqid == NULL) diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index cb4376b78ed9..7e7be5ab70bb 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1125,7 +1125,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg { encode_op_hdr(xdr, OP_CLOSE, decode_close_maxsz, hdr); encode_nfs4_seqid(xdr, arg->seqid); - encode_nfs4_stateid(xdr, arg->stateid); + encode_nfs4_stateid(xdr, &arg->stateid); } static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr) @@ -1530,7 +1530,7 @@ static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_co static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr) { encode_op_hdr(xdr, OP_OPEN_DOWNGRADE, decode_open_downgrade_maxsz, hdr); - encode_nfs4_stateid(xdr, arg->stateid); + encode_nfs4_stateid(xdr, &arg->stateid); encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->fmode); } diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 467c84efb596..7e38d641236e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -389,7 +389,7 @@ struct nfs_open_confirmres { struct nfs_closeargs { struct nfs4_sequence_args seq_args; struct nfs_fh * fh; - nfs4_stateid * stateid; + nfs4_stateid stateid; struct nfs_seqid * seqid; fmode_t fmode; const u32 * bitmask; -- cgit v1.2.3 From f95549cf24660255c880b3ea7ee2d7d08de1f5c5 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 23 Jan 2015 18:06:09 -0500 Subject: NFSv4: More CLOSE/OPEN races If an OPEN RPC call races with a CLOSE or OPEN_DOWNGRADE so that it updates the nfs_state structure before the CLOSE/OPEN_DOWNGRADE has a chance to do so, then we know that the state->flags need to be recalculated from scratch. 
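The "is the reply's stateid newer" test that this change hinges on is, in effect, serial-number arithmetic on the 32-bit seqid field; a minimal sketch under that assumption (standard C types used for self-containment, names invented):

	#include <stdint.h>

	/* serial-number comparison of 32-bit stateid seqids; the signed
	 * difference handles wraparound of the counter */
	static int seqid_is_newer(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

When the reply's stateid is not newer (or belongs to a different open), the patch rebuilds the open-mode flags from the n_rdonly/n_wronly/n_rdwr share counters rather than clearing them, as the hunk above shows.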
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4863dec10865..a6c04a5812d0 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1167,6 +1167,16 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state, return false; } +static void nfs_resync_open_stateid_locked(struct nfs4_state *state) +{ + if (state->n_wronly) + set_bit(NFS_O_WRONLY_STATE, &state->flags); + if (state->n_rdonly) + set_bit(NFS_O_RDONLY_STATE, &state->flags); + if (state->n_rdwr) + set_bit(NFS_O_RDWR_STATE, &state->flags); +} + static void nfs_clear_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { @@ -1185,8 +1195,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, } if (stateid == NULL) return; - if (!nfs_need_update_open_stateid(state, stateid)) + /* Handle races with OPEN */ + if (!nfs4_stateid_match_other(stateid, &state->open_stateid) || + !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { + nfs_resync_open_stateid_locked(state); return; + } if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) nfs4_stateid_copy(&state->stateid, stateid); nfs4_stateid_copy(&state->open_stateid, stateid); -- cgit v1.2.3 From badc76dd0dc6d55a86c79e952f19d3af24708058 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 23 Jan 2015 18:48:00 -0500 Subject: NFSv4: Convert nfs_alloc_seqid() to return an ERR_PTR() if allocation fails When we relax the sequencing on the NFSv4.1 OPEN/CLOSE code, we will want to use the value NULL to indicate that no sequencing is needed. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 10 +++++----- fs/nfs/nfs4state.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a6c04a5812d0..c9c38077075c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -988,7 +988,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, goto err_free_p; p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); - if (p->o_arg.seqid == NULL) + if (IS_ERR(p->o_arg.seqid)) goto err_free_label; nfs_sb_active(dentry->d_sb); p->dentry = dget(dentry); @@ -2779,7 +2779,7 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) calldata->arg.fh = NFS_FH(state->inode); /* Serialization for the sequence id */ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); - if (calldata->arg.seqid == NULL) + if (IS_ERR(calldata->arg.seqid)) goto out_free_calldata; calldata->arg.fmode = 0; calldata->arg.bitmask = server->cache_consistency_bitmask; @@ -5517,7 +5517,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * goto out; seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); status = -ENOMEM; - if (seqid == NULL) + if (IS_ERR(seqid)) goto out; task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid); status = PTR_ERR(task); @@ -5558,10 +5558,10 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->arg.fh = NFS_FH(inode); p->arg.fl = &p->fl; p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); - if (p->arg.open_seqid == NULL) + if (IS_ERR(p->arg.open_seqid)) goto out_free; p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); - if (p->arg.lock_seqid == NULL) + if (IS_ERR(p->arg.lock_seqid)) goto out_free_seqid; p->arg.lock_stateid = 
&lsp->ls_stateid; p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5194933ed419..b922e43d69b8 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1003,11 +1003,11 @@ struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_m struct nfs_seqid *new; new = kmalloc(sizeof(*new), gfp_mask); - if (new != NULL) { - new->sequence = counter; - INIT_LIST_HEAD(&new->list); - new->task = NULL; - } + if (new == NULL) + return ERR_PTR(-ENOMEM); + new->sequence = counter; + INIT_LIST_HEAD(&new->list); + new->task = NULL; return new; } -- cgit v1.2.3 From a67964197c946adbb14d91c3d878af22de47091c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 23 Jan 2015 19:04:44 -0500 Subject: NFSv4: Check for NULL argument in nfs_*_seqid() functions Signed-off-by: Trond Myklebust --- fs/nfs/nfs4state.c | 21 ++++++++++++++------- fs/nfs/nfs4xdr.c | 5 ++++- 2 files changed, 18 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index b922e43d69b8..590f096fd011 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1015,7 +1015,7 @@ void nfs_release_seqid(struct nfs_seqid *seqid) { struct nfs_seqid_counter *sequence; - if (list_empty(&seqid->list)) + if (seqid == NULL || list_empty(&seqid->list)) return; sequence = seqid->sequence; spin_lock(&sequence->lock); @@ -1071,13 +1071,15 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) { - struct nfs4_state_owner *sp = container_of(seqid->sequence, - struct nfs4_state_owner, so_seqid); - struct nfs_server *server = sp->so_server; + struct nfs4_state_owner *sp; + + if (seqid == NULL) + return; + sp = container_of(seqid->sequence, struct nfs4_state_owner, so_seqid); if (status == -NFS4ERR_BAD_SEQID) nfs4_drop_state_owner(sp); - if (!nfs4_has_session(server->nfs_client)) + if (!nfs4_has_session(sp->so_server->nfs_client)) nfs_increment_seqid(status, seqid); } @@ -1088,14 +1090,18 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) */ void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) { - nfs_increment_seqid(status, seqid); + if (seqid != NULL) + nfs_increment_seqid(status, seqid); } int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) { - struct nfs_seqid_counter *sequence = seqid->sequence; + struct nfs_seqid_counter *sequence; int status = 0; + if (seqid == NULL) + goto out; + sequence = seqid->sequence; spin_lock(&sequence->lock); seqid->task = task; if (list_empty(&seqid->list)) @@ -1106,6 +1112,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) status = -EAGAIN; unlock: spin_unlock(&sequence->lock); +out: return status; } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 7e7be5ab70bb..d05fada4929c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -946,7 +946,10 @@ static void encode_uint64(struct xdr_stream *xdr, u64 n) static void encode_nfs4_seqid(struct xdr_stream *xdr, const struct nfs_seqid *seqid) { - encode_uint32(xdr, seqid->sequence->counter); + if (seqid != NULL) + encode_uint32(xdr, seqid->sequence->counter); + else + encode_uint32(xdr, 0); } static void encode_compound_hdr(struct xdr_stream *xdr, -- cgit v1.2.3 From 63f5f796af613898669b23ccfc091ec77de7591c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 23 Jan 2015 19:19:25 -0500 Subject: NFSv4.1: Allow parallel OPEN/OPEN_DOWNGRADE/CLOSE Remove the serialisation 
of OPEN/OPEN_DOWNGRADE and CLOSE calls for the case of NFSv4.1 and newer. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 3 +++ fs/nfs/nfs4proc.c | 17 +++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index a08178764cf9..e57290a66b76 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -44,6 +44,7 @@ enum nfs4_client_state { #define NFS4_RENEW_TIMEOUT 0x01 #define NFS4_RENEW_DELEGATION_CB 0x02 +struct nfs_seqid_counter; struct nfs4_minor_version_ops { u32 minor_version; unsigned init_caps; @@ -56,6 +57,8 @@ struct nfs4_minor_version_ops { struct nfs_fsinfo *); void (*free_lock_state)(struct nfs_server *, struct nfs4_lock_state *); + struct nfs_seqid * + (*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); const struct rpc_call_ops *call_sync_ops; const struct nfs4_state_recovery_ops *reboot_recovery_ops; const struct nfs4_state_recovery_ops *nograce_recovery_ops; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c9c38077075c..0a279ad5421f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -977,6 +977,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, struct dentry *parent = dget_parent(dentry); struct inode *dir = parent->d_inode; struct nfs_server *server = NFS_SERVER(dir); + struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); struct nfs4_opendata *p; p = kzalloc(sizeof(*p), gfp_mask); @@ -987,7 +988,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, if (IS_ERR(p->f_label)) goto err_free_p; - p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask); + alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; + p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); if (IS_ERR(p->o_arg.seqid)) goto err_free_label; nfs_sb_active(dentry->d_sb); @@ -2751,6 +2753,7 @@ static bool nfs4_roc(struct inode *inode) int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) { struct nfs_server *server = NFS_SERVER(state->inode); + struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); struct nfs4_closedata *calldata; struct nfs4_state_owner *sp = state->owner; struct rpc_task *task; @@ -2778,7 +2781,8 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait) calldata->state = state; calldata->arg.fh = NFS_FH(state->inode); /* Serialization for the sequence id */ - calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask); + alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; + calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask); if (IS_ERR(calldata->arg.seqid)) goto out_free_calldata; calldata->arg.fmode = 0; @@ -8414,6 +8418,7 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { .match_stateid = nfs4_match_stateid, .find_root_sec = nfs4_find_root_sec, .free_lock_state = nfs4_release_lockowner, + .alloc_seqid = nfs_alloc_seqid, .call_sync_ops = &nfs40_call_sync_ops, .reboot_recovery_ops = &nfs40_reboot_recovery_ops, .nograce_recovery_ops = &nfs40_nograce_recovery_ops, @@ -8422,6 +8427,12 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { }; #if defined(CONFIG_NFS_V4_1) +static struct nfs_seqid * +nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2) +{ + return NULL; +} + static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .init_caps = NFS_CAP_READDIRPLUS @@ -8435,6 +8446,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .match_stateid = nfs41_match_stateid, 
.find_root_sec = nfs41_find_root_sec, .free_lock_state = nfs41_free_lock_state, + .alloc_seqid = nfs_alloc_no_seqid, .call_sync_ops = &nfs41_call_sync_ops, .reboot_recovery_ops = &nfs41_reboot_recovery_ops, .nograce_recovery_ops = &nfs41_nograce_recovery_ops, @@ -8461,6 +8473,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { .find_root_sec = nfs41_find_root_sec, .free_lock_state = nfs41_free_lock_state, .call_sync_ops = &nfs41_call_sync_ops, + .alloc_seqid = nfs_alloc_no_seqid, .reboot_recovery_ops = &nfs41_reboot_recovery_ops, .nograce_recovery_ops = &nfs41_nograce_recovery_ops, .state_renewal_ops = &nfs41_state_renewal_ops, -- cgit v1.2.3 From 39071e6fff7d7e11a5993afd67240ef04a4d05a0 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 24 Jan 2015 15:07:56 -0500 Subject: NFSv4: Fix atomicity problems with lock stateid updates When we update the lock stateid, we really do need to ensure that this is done under the state->state_lock, and that we are indeed only updating confirmed locks with a newer version of the same stateid. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 42 +++++++++++++++++++++++++++++------------- 1 file changed, 29 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 0a279ad5421f..db9d98eda07b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1297,6 +1297,23 @@ no_delegation: return ret; } +static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp, + const nfs4_stateid *stateid) +{ + struct nfs4_state *state = lsp->ls_state; + bool ret = false; + + spin_lock(&state->state_lock); + if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid)) + goto out_noupdate; + if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid)) + goto out_noupdate; + nfs4_stateid_copy(&lsp->ls_stateid, stateid); + ret = true; +out_noupdate: + spin_unlock(&state->state_lock); + return ret; +} static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode) { @@ -5403,9 +5420,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) return; switch (task->tk_status) { case 0: - nfs4_stateid_copy(&calldata->lsp->ls_stateid, - &calldata->res.stateid); renew_lease(calldata->server, calldata->timestamp); + nfs4_update_lock_stateid(calldata->lsp, + &calldata->res.stateid); break; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: @@ -5626,6 +5643,7 @@ out_wait: static void nfs4_lock_done(struct rpc_task *task, void *calldata) { struct nfs4_lockdata *data = calldata; + struct nfs4_lock_state *lsp = data->lsp; dprintk("%s: begin!\n", __func__); @@ -5633,18 +5651,16 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) return; data->rpc_status = task->tk_status; - if (data->arg.new_lock_owner != 0) { - if (data->rpc_status == 0) - nfs_confirm_seqid(&data->lsp->ls_seqid, 0); - else - goto out; - } - if (data->rpc_status == 0) { - nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid); - set_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags); - renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); + if (task->tk_status == 0) { + renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), + data->timestamp); + if (data->arg.new_lock_owner != 0) { + nfs_confirm_seqid(&lsp->ls_seqid, 0); + nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); + set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); + } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) + rpc_restart_call_prepare(task); } -out: dprintk("%s: done, ret = %d!\n", __func__, 
data->rpc_status); } -- cgit v1.2.3 From 6b447539aa9aaac0a0215f3e28a0839553210e7e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 24 Jan 2015 18:38:15 -0500 Subject: NFSv4: Always do open_to_lock_owner if the lock stateid is uninitialised The original text in RFC3530 was terribly confusing since it conflated lockowners and lock stateids. RFC3530bis clarifies that you must use open_to_lock_owner when there is no lock state for that file+lockowner combination. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index db9d98eda07b..f12ded041a42 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5611,7 +5611,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0) goto out_wait; /* Do we need to do an open_to_lock_owner? */ - if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) { + if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) { if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { goto out_release_lock_seqid; } -- cgit v1.2.3 From 425c1d4e5b6d4bd700eb94ad8318bdb05431fdc7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 24 Jan 2015 14:57:53 -0500 Subject: NFSv4: Fix lock on-wire reordering issues This patch ensures that the server cannot reorder our LOCK/LOCKU requests if they are sent in parallel on the wire. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 29 ++++++++++++++++++++++++----- fs/nfs/nfs4xdr.c | 6 +++--- include/linux/nfs_xdr.h | 6 +++--- 3 files changed, 30 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f12ded041a42..41e7c2fc046e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5393,7 +5393,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, p->arg.fl = &p->fl; p->arg.seqid = seqid; p->res.seqid = seqid; - p->arg.stateid = &lsp->ls_stateid; p->lsp = lsp; atomic_inc(&lsp->ls_count); /* Ensure we don't close file until we're done freeing locks! 
*/ @@ -5428,6 +5427,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) case -NFS4ERR_OLD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: + if (!nfs4_stateid_match(&calldata->arg.stateid, + &calldata->lsp->ls_stateid)) + rpc_restart_call_prepare(task); break; default: if (nfs4_async_handle_error(task, calldata->server, @@ -5443,6 +5445,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) goto out_wait; + nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid); if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { /* Note: exit _without_ running nfs4_locku_done */ goto out_no_action; @@ -5584,7 +5587,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); if (IS_ERR(p->arg.lock_seqid)) goto out_free_seqid; - p->arg.lock_stateid = &lsp->ls_stateid; p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; p->arg.lock_owner.id = lsp->ls_seqid.owner_id; p->arg.lock_owner.s_dev = server->s_dev; @@ -5615,11 +5617,15 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { goto out_release_lock_seqid; } - data->arg.open_stateid = &state->open_stateid; + nfs4_stateid_copy(&data->arg.open_stateid, + &state->open_stateid); data->arg.new_lock_owner = 1; data->res.open_seqid = data->arg.open_seqid; - } else + } else { data->arg.new_lock_owner = 0; + nfs4_stateid_copy(&data->arg.lock_stateid, + &data->lsp->ls_stateid); + } if (!nfs4_valid_open_stateid(state)) { data->rpc_status = -EBADF; task->tk_action = NULL; @@ -5651,7 +5657,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) return; data->rpc_status = task->tk_status; - if (task->tk_status == 0) { + switch (task->tk_status) { + case 0: renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); if (data->arg.new_lock_owner != 0) { @@ -5660,6 +5667,18 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) rpc_restart_call_prepare(task); + break; + case -NFS4ERR_BAD_STATEID: + case -NFS4ERR_OLD_STATEID: + case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_EXPIRED: + if (data->arg.new_lock_owner != 0) { + if (!nfs4_stateid_match(&data->arg.open_stateid, + &lsp->ls_state->open_stateid)) + rpc_restart_call_prepare(task); + } else if (!nfs4_stateid_match(&data->arg.lock_stateid, + &lsp->ls_stateid)) + rpc_restart_call_prepare(task); } dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index d05fada4929c..e3018e7a316c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1304,12 +1304,12 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args *p = cpu_to_be32(args->new_lock_owner); if (args->new_lock_owner){ encode_nfs4_seqid(xdr, args->open_seqid); - encode_nfs4_stateid(xdr, args->open_stateid); + encode_nfs4_stateid(xdr, &args->open_stateid); encode_nfs4_seqid(xdr, args->lock_seqid); encode_lockowner(xdr, &args->lock_owner); } else { - encode_nfs4_stateid(xdr, args->lock_stateid); + encode_nfs4_stateid(xdr, &args->lock_stateid); encode_nfs4_seqid(xdr, args->lock_seqid); } } @@ -1333,7 +1333,7 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar encode_op_hdr(xdr, OP_LOCKU, decode_locku_maxsz, 
hdr); encode_uint32(xdr, nfs4_lock_type(args->fl, 0)); encode_nfs4_seqid(xdr, args->seqid); - encode_nfs4_stateid(xdr, args->stateid); + encode_nfs4_stateid(xdr, &args->stateid); p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, args->fl->fl_start); xdr_encode_hyper(p, nfs4_lock_length(args->fl)); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 7e38d641236e..b6a6953c0f09 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -416,9 +416,9 @@ struct nfs_lock_args { struct nfs_fh * fh; struct file_lock * fl; struct nfs_seqid * lock_seqid; - nfs4_stateid * lock_stateid; + nfs4_stateid lock_stateid; struct nfs_seqid * open_seqid; - nfs4_stateid * open_stateid; + nfs4_stateid open_stateid; struct nfs_lowner lock_owner; unsigned char block : 1; unsigned char reclaim : 1; @@ -437,7 +437,7 @@ struct nfs_locku_args { struct nfs_fh * fh; struct file_lock * fl; struct nfs_seqid * seqid; - nfs4_stateid * stateid; + nfs4_stateid stateid; }; struct nfs_locku_res { -- cgit v1.2.3 From c69899a17ca4836230720e65493942d9582a0424 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 24 Jan 2015 16:03:52 -0500 Subject: NFSv4: Update of VFS byte range lock must be atomic with the stateid update Ensure that we test the lock stateid remained unchanged while we were updating the VFS tracking of the byte range lock. Have the process replay the lock to the server if we detect that was not the case. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 37 +++++++++++++++---------------------- include/linux/nfs_xdr.h | 1 + 2 files changed, 16 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 41e7c2fc046e..9f6baf98942c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5420,9 +5420,10 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) switch (task->tk_status) { case 0: renew_lease(calldata->server, calldata->timestamp); - nfs4_update_lock_stateid(calldata->lsp, - &calldata->res.stateid); - break; + do_vfs_lock(calldata->fl.fl_file, &calldata->fl); + if (nfs4_update_lock_stateid(calldata->lsp, + &calldata->res.stateid)) + break; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: case -NFS4ERR_STALE_STATEID: @@ -5661,6 +5662,13 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) case 0: renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp); + if (data->arg.new_lock) { + data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS); + if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) { + rpc_restart_call_prepare(task); + break; + } + } if (data->arg.new_lock_owner != 0) { nfs_confirm_seqid(&lsp->ls_seqid, 0); nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid); @@ -5760,7 +5768,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f if (recovery_type == NFS_LOCK_RECLAIM) data->arg.reclaim = NFS_LOCK_RECLAIM; nfs4_set_sequence_privileged(&data->arg.seq_args); - } + } else + data->arg.new_lock = 1; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -5884,10 +5893,8 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) { - struct nfs4_state_owner *sp = state->owner; struct nfs_inode *nfsi = NFS_I(state->inode); unsigned char fl_flags = request->fl_flags; - unsigned int seq; int status = -ENOLCK; if ((fl_flags & FL_POSIX) && @@ -5907,25 +5914,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int 
cmd, struct file_lock /* ...but avoid races with delegation recall... */ request->fl_flags = fl_flags & ~FL_SLEEP; status = do_vfs_lock(request->fl_file, request); - goto out_unlock; - } - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); - up_read(&nfsi->rwsem); - status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); - if (status != 0) + up_read(&nfsi->rwsem); goto out; - down_read(&nfsi->rwsem); - if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) { - status = -NFS4ERR_DELAY; - goto out_unlock; } - /* Note: we always want to sleep here! */ - request->fl_flags = fl_flags | FL_SLEEP; - if (do_vfs_lock(request->fl_file, request) < 0) - printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock " - "manager!\n", __func__); -out_unlock: up_read(&nfsi->rwsem); + status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW); out: request->fl_flags = fl_flags; return status; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index b6a6953c0f09..e5c3b620a609 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -422,6 +422,7 @@ struct nfs_lock_args { struct nfs_lowner lock_owner; unsigned char block : 1; unsigned char reclaim : 1; + unsigned char new_lock : 1; unsigned char new_lock_owner : 1; }; -- cgit v1.2.3 From b4019c0e219bb1301865f8b2efedb4773526ed91 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 24 Jan 2015 14:19:19 -0500 Subject: NFSv4.1: Allow parallel LOCK/LOCKU calls Note, however, that we still serialise on the open stateid if the lock stateid is unconfirmed. Hopefully that will not prove too much of a burden for first time locks; it should leave the ability to parallelise OPENs unchanged, since they no longer call the serialisation primitives. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9f6baf98942c..66befb0dd241 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5517,6 +5517,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * struct nfs_seqid *seqid; struct nfs4_lock_state *lsp; struct rpc_task *task; + struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); int status = 0; unsigned char fl_flags = request->fl_flags; @@ -5540,7 +5541,8 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * lsp = request->fl_u.nfs4_fl.owner; if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) goto out; - seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); + alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid; + seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); status = -ENOMEM; if (IS_ERR(seqid)) goto out; @@ -5575,6 +5577,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, struct nfs4_lockdata *p; struct inode *inode = lsp->ls_state->inode; struct nfs_server *server = NFS_SERVER(inode); + struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t); p = kzalloc(sizeof(*p), gfp_mask); if (p == NULL) @@ -5585,7 +5588,8 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask); if (IS_ERR(p->arg.open_seqid)) goto out_free; - p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); + alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; + p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask); if (IS_ERR(p->arg.lock_seqid)) goto out_free_seqid; p->arg.lock_owner.clientid = 
server->nfs_client->cl_clientid; -- cgit v1.2.3 From 40dd4b7aee1a8c3b8dac7b67ba710692d7691b77 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 24 Jan 2015 13:54:37 -0500 Subject: NFSv4.1: Optimise layout return-on-close Optimise the layout return on close code by ensuring that 1) Add a check for whether we hold a layout before taking any spinlocks 2) Only take the spin lock once 3) Use nfs_state->state to speed up open file checks Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 37 +------------------------------------ fs/nfs/pnfs.c | 24 ++++++++++++++++++++---- fs/nfs/pnfs.h | 10 ++++++++++ 3 files changed, 31 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 66befb0dd241..0f75b9276726 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2714,45 +2714,10 @@ static const struct rpc_call_ops nfs4_close_ops = { .rpc_release = nfs4_free_closedata, }; -static bool nfs4_state_has_opener(struct nfs4_state *state) -{ - /* first check existing openers */ - if (test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0 && - state->n_rdonly != 0) - return true; - - if (test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0 && - state->n_wronly != 0) - return true; - - if (test_bit(NFS_O_RDWR_STATE, &state->flags) != 0 && - state->n_rdwr != 0) - return true; - - return false; -} - static bool nfs4_roc(struct inode *inode) { - struct nfs_inode *nfsi = NFS_I(inode); - struct nfs_open_context *ctx; - struct nfs4_state *state; - - spin_lock(&inode->i_lock); - list_for_each_entry(ctx, &nfsi->open_files, list) { - state = ctx->state; - if (state == NULL) - continue; - if (nfs4_state_has_opener(state)) { - spin_unlock(&inode->i_lock); - return false; - } - } - spin_unlock(&inode->i_lock); - - if (nfs4_check_delegation(inode, FMODE_READ)) + if (!nfs_have_layout(inode)) return false; - return pnfs_roc(inode); } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0a5dda4d85c2..4d69076a6028 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -34,6 +34,7 @@ #include "pnfs.h" #include "iostat.h" #include "nfs4trace.h" +#include "delegation.h" #define NFSDBG_FACILITY NFSDBG_PNFS #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ) @@ -954,30 +955,45 @@ pnfs_commit_and_return_layout(struct inode *inode) bool pnfs_roc(struct inode *ino) { + struct nfs_inode *nfsi = NFS_I(ino); + struct nfs_open_context *ctx; + struct nfs4_state *state; struct pnfs_layout_hdr *lo; struct pnfs_layout_segment *lseg, *tmp; LIST_HEAD(tmp_list); bool found = false; spin_lock(&ino->i_lock); - lo = NFS_I(ino)->layout; + lo = nfsi->layout; if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) - goto out_nolayout; + goto out_noroc; + + /* Don't return layout if we hold a delegation */ + if (nfs4_check_delegation(ino, FMODE_READ)) + goto out_noroc; + + list_for_each_entry(ctx, &nfsi->open_files, list) { + state = ctx->state; + /* Don't return layout if there is open file state */ + if (state != NULL && state->state != 0) + goto out_noroc; + } + list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list) if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { mark_lseg_invalid(lseg, &tmp_list); found = true; } if (!found) - goto out_nolayout; + goto out_noroc; lo->plh_block_lgets++; pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */ spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); return true; -out_nolayout: +out_noroc: spin_unlock(&ino->i_lock); return false; } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 
9ae5b765b073..a98d8fd9637f 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -275,6 +275,11 @@ void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node); bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node); void nfs4_deviceid_purge_client(const struct nfs_client *); +static inline bool nfs_have_layout(struct inode *inode) +{ + return NFS_I(inode)->layout != NULL; +} + static inline struct nfs4_deviceid_node * nfs4_get_deviceid(struct nfs4_deviceid_node *d) { @@ -427,6 +432,11 @@ static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id) #endif /* NFS_DEBUG */ #else /* CONFIG_NFS_V4_1 */ +static inline bool nfs_have_layout(struct inode *inode) +{ + return false; +} + static inline void pnfs_destroy_all_layouts(struct nfs_client *clp) { } -- cgit v1.2.3 From 89f0ff386cb1ebca0da7940d05bf609bc86f3972 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 3 Jan 2015 14:47:43 -0500 Subject: NFSv4.1: Replace usage of nfs_client->cl_addr in encode_create_session Replace the current code with something that is a little closer to what net/sunrpc/auth_unix.c uses. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e3018e7a316c..41253393171f 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1804,9 +1804,8 @@ static void encode_create_session(struct xdr_stream *xdr, struct compound_hdr *hdr) { __be32 *p; - char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; - uint32_t len; struct nfs_client *clp = args->client; + struct rpc_clnt *clnt = clp->cl_rpcclient; struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); u32 max_resp_sz_cached; @@ -1817,11 +1816,8 @@ static void encode_create_session(struct xdr_stream *xdr, max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; - len = scnprintf(machine_name, sizeof(machine_name), "%s", - clp->cl_ipaddr); - encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); - p = reserve_space(xdr, 16 + 2*28 + 20 + len + 12); + p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); p = xdr_encode_hyper(p, clp->cl_clientid); *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ *p++ = cpu_to_be32(args->flags); /*flags */ @@ -1850,7 +1846,7 @@ static void encode_create_session(struct xdr_stream *xdr, /* authsys_parms rfc1831 */ *p++ = cpu_to_be32(nn->boot_time.tv_nsec); /* stamp */ - p = xdr_encode_opaque(p, machine_name, len); + p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen); *p++ = cpu_to_be32(0); /* UID */ *p++ = cpu_to_be32(0); /* GID */ *p = cpu_to_be32(0); /* No more gids */ -- cgit v1.2.3 From cf6726e2ee387b0eff303628eaa0beaf36a1aeb4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 19 Dec 2014 11:22:28 -0500 Subject: NFSv4: Deal with atomic upgrades of an existing delegation Ensure that we deal correctly with the case where the server sends us a newer instance of the same delegation. If the stateids match, but the sequence numbers differ, then treat the new delegation as if it were an atomic upgrade. 
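Aside: the check described above distinguishes "same delegation, newer instance" from "different delegation". A condensed sketch of the helper the diff below adds (same names as the patch, nothing new): the stateid's "other" field identifies the delegation, the seqid orders instances of it, and the write barrier ensures a reader that observes the upgraded type also observes the new seqid.

/*
 * Sketch only -- mirrors nfs_update_inplace_delegation() added below.
 * The caller (nfs_inode_set_delegation(), under clp->cl_lock) only gets
 * here after nfs4_stateid_match_other() has confirmed it is the same
 * delegation, i.e. the "other" field matches and only seqid/type differ.
 */
static void
nfs_update_inplace_delegation(struct nfs_delegation *delegation,
		const struct nfs_delegation *update)
{
	/* Only move forward: accept strictly newer instances. */
	if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
		delegation->stateid.seqid = update->stateid.seqid;
		smp_wmb();	/* publish the new seqid before the upgraded type */
		delegation->type = update->type;
	}
}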
Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 7f3f60641344..16b754ee0d09 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -301,6 +301,17 @@ nfs_inode_detach_delegation(struct inode *inode) return nfs_detach_delegation(nfsi, delegation, server); } +static void +nfs_update_inplace_delegation(struct nfs_delegation *delegation, + const struct nfs_delegation *update) +{ + if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) { + delegation->stateid.seqid = update->stateid.seqid; + smp_wmb(); + delegation->type = update->type; + } +} + /** * nfs_inode_set_delegation - set up a delegation on an inode * @inode: inode to which delegation applies @@ -334,9 +345,12 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct old_delegation = rcu_dereference_protected(nfsi->delegation, lockdep_is_held(&clp->cl_lock)); if (old_delegation != NULL) { - if (nfs4_stateid_match(&delegation->stateid, - &old_delegation->stateid) && - delegation->type == old_delegation->type) { + /* Is this an update of the existing delegation? */ + if (nfs4_stateid_match_other(&old_delegation->stateid, + &delegation->stateid)) { + nfs_update_inplace_delegation(old_delegation, + delegation); + nfsi->delegation_state = old_delegation->type; goto out; } /* -- cgit v1.2.3 From e09ddf36dd986e91edd69a0355bdd44cb738b45e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 13:50:23 -0500 Subject: debugfs_{mkdir,create,link}(): get rid of redundant argument Signed-off-by: Al Viro --- fs/debugfs/inode.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 05f2960ed7c3..adaaa04448b3 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -89,8 +89,9 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry, return error; } -static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +static int debugfs_mkdir(struct dentry *dentry, umode_t mode) { + struct inode *dir = dentry->d_parent->d_inode; int res; mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; @@ -102,16 +103,18 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) return res; } -static int debugfs_link(struct inode *dir, struct dentry *dentry, umode_t mode, +static int debugfs_link(struct dentry *dentry, umode_t mode, void *data) { + struct inode *dir = dentry->d_parent->d_inode; mode = (mode & S_IALLUGO) | S_IFLNK; return debugfs_mknod(dir, dentry, mode, 0, data, NULL); } -static int debugfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, +static int debugfs_create(struct dentry *dentry, umode_t mode, void *data, const struct file_operations *fops) { + struct inode *dir = dentry->d_parent->d_inode; int res; mode = (mode & S_IALLUGO) | S_IFREG; @@ -329,16 +332,14 @@ static struct dentry *__create_file(const char *name, umode_t mode, if (!IS_ERR(dentry)) { switch (mode & S_IFMT) { case S_IFDIR: - error = debugfs_mkdir(parent->d_inode, dentry, mode); + error = debugfs_mkdir(dentry, mode); break; case S_IFLNK: - error = debugfs_link(parent->d_inode, dentry, mode, - data); + error = debugfs_link(dentry, mode, data); break; default: - error = debugfs_create(parent->d_inode, dentry, mode, - data, fops); + error = debugfs_create(dentry, mode, data, fops); break; } dput(dentry); -- cgit v1.2.3 
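Aside: the debugfs patches that follow only rearrange the internal creation path (start_creating()/end_creating(), folding debugfs_mknod(), debugfs_mkdir(), debugfs_create() and debugfs_link() into their callers) and add debugfs_create_automount(); the existing exported helpers keep their prototypes. For orientation, a hypothetical caller of that unchanged API looks like the sketch below -- the module, the file names and example_fops are made up for illustration.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical consumer of the debugfs API reworked internally below. */
static struct dentry *example_dir;

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
};

static int __init example_init(void)
{
	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir)
		return -ENOMEM;
	debugfs_create_file("state", 0444, example_dir, NULL, &example_fops);
	debugfs_create_symlink("latest", example_dir, "state");
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");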
From 190afd81e4a5f24d1db6c774ca7f18acde2fca30 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 13:55:55 -0500 Subject: debugfs: split the beginning and the end of __create_file() off Signed-off-by: Al Viro --- fs/debugfs/inode.c | 61 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index adaaa04448b3..100186c2bf0a 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -305,11 +305,9 @@ static struct file_system_type debug_fs_type = { }; MODULE_ALIAS_FS("debugfs"); -static struct dentry *__create_file(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops) +static struct dentry *start_creating(const char *name, struct dentry *parent) { - struct dentry *dentry = NULL; + struct dentry *dentry; int error; pr_debug("debugfs: creating file '%s'\n",name); @@ -317,7 +315,7 @@ static struct dentry *__create_file(const char *name, umode_t mode, error = simple_pin_fs(&debug_fs_type, &debugfs_mount, &debugfs_mount_count); if (error) - goto exit; + return ERR_PTR(error); /* If the parent is not specified, we create it in the root. * We need the root dentry to do this, which is in the super @@ -329,32 +327,51 @@ static struct dentry *__create_file(const char *name, umode_t mode, mutex_lock(&parent->d_inode->i_mutex); dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(dentry)) { - switch (mode & S_IFMT) { - case S_IFDIR: - error = debugfs_mkdir(dentry, mode); - - break; - case S_IFLNK: - error = debugfs_link(dentry, mode, data); - break; - default: - error = debugfs_create(dentry, mode, data, fops); - break; - } + if (!IS_ERR(dentry) && dentry->d_inode) { dput(dentry); - } else - error = PTR_ERR(dentry); - mutex_unlock(&parent->d_inode->i_mutex); + dentry = ERR_PTR(-EEXIST); + } + if (IS_ERR(dentry)) + mutex_unlock(&parent->d_inode->i_mutex); + return dentry; +} + +static struct dentry *end_creating(struct dentry *dentry, int error) +{ + mutex_unlock(&dentry->d_parent->d_inode->i_mutex); + dput(dentry); if (error) { dentry = NULL; simple_release_fs(&debugfs_mount, &debugfs_mount_count); } -exit: return dentry; } +static struct dentry *__create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops) +{ + struct dentry *dentry = start_creating(name, parent); + int error; + + if (IS_ERR(dentry)) + return NULL; + + switch (mode & S_IFMT) { + case S_IFDIR: + error = debugfs_mkdir(dentry, mode); + break; + case S_IFLNK: + error = debugfs_link(dentry, mode, data); + break; + default: + error = debugfs_create(dentry, mode, data, fops); + break; + } + return end_creating(dentry, error); +} + /** * debugfs_create_file - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. 
-- cgit v1.2.3 From ad5abd5ba8c66ea44e24d7b6996e85ab83b412fc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:02:31 -0500 Subject: debugfs: kill __create_file() Signed-off-by: Al Viro --- fs/debugfs/inode.c | 71 +++++++++++++++++++++++++----------------------------- 1 file changed, 33 insertions(+), 38 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 100186c2bf0a..18564b08884c 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -348,30 +348,6 @@ static struct dentry *end_creating(struct dentry *dentry, int error) return dentry; } -static struct dentry *__create_file(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops) -{ - struct dentry *dentry = start_creating(name, parent); - int error; - - if (IS_ERR(dentry)) - return NULL; - - switch (mode & S_IFMT) { - case S_IFDIR: - error = debugfs_mkdir(dentry, mode); - break; - case S_IFLNK: - error = debugfs_link(dentry, mode, data); - break; - default: - error = debugfs_create(dentry, mode, data, fops); - break; - } - return end_creating(dentry, error); -} - /** * debugfs_create_file - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. @@ -402,15 +378,19 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { - switch (mode & S_IFMT) { - case S_IFREG: - case 0: - break; - default: - BUG(); - } + struct dentry *dentry; + int error; + + if (!(mode & S_IFMT)) + mode |= S_IFREG; + BUG_ON(!S_ISREG(mode)); + dentry = start_creating(name, parent); + + if (IS_ERR(dentry)) + return NULL; - return __create_file(name, mode, parent, data, fops); + error = debugfs_create(dentry, mode, data, fops); + return end_creating(dentry, error); } EXPORT_SYMBOL_GPL(debugfs_create_file); @@ -434,8 +414,14 @@ EXPORT_SYMBOL_GPL(debugfs_create_file); */ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { - return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, - parent, NULL, NULL); + struct dentry *dentry = start_creating(name, parent); + int error; + + if (IS_ERR(dentry)) + return NULL; + + error = debugfs_mkdir(dentry, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); + return end_creating(dentry, error); } EXPORT_SYMBOL_GPL(debugfs_create_dir); @@ -465,17 +451,26 @@ EXPORT_SYMBOL_GPL(debugfs_create_dir); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *target) { - struct dentry *result; + struct dentry *dentry; char *link; + int error; link = kstrdup(target, GFP_KERNEL); if (!link) return NULL; - result = __create_file(name, S_IFLNK | S_IRWXUGO, parent, link, NULL); - if (!result) + dentry = start_creating(name, parent); + + if (IS_ERR(dentry)) { kfree(link); - return result; + return NULL; + } + + error = debugfs_link(dentry, S_IFLNK | S_IRWXUGO, link); + if (error) + kfree(link); + + return end_creating(dentry, error); } EXPORT_SYMBOL_GPL(debugfs_create_symlink); -- cgit v1.2.3 From 9b73fab01bcd62530e8c9a5da44d3ed8a753b3eb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:05:55 -0500 Subject: fold debugfs_link() into caller Signed-off-by: Al Viro --- fs/debugfs/inode.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 18564b08884c..c69e00d69ff1 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -103,14 +103,6 @@ static int 
debugfs_mkdir(struct dentry *dentry, umode_t mode) return res; } -static int debugfs_link(struct dentry *dentry, umode_t mode, - void *data) -{ - struct inode *dir = dentry->d_parent->d_inode; - mode = (mode & S_IALLUGO) | S_IFLNK; - return debugfs_mknod(dir, dentry, mode, 0, data, NULL); -} - static int debugfs_create(struct dentry *dentry, umode_t mode, void *data, const struct file_operations *fops) { @@ -466,7 +458,8 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, return NULL; } - error = debugfs_link(dentry, S_IFLNK | S_IRWXUGO, link); + error = debugfs_mknod(dentry->d_parent->d_inode, dentry, + S_IFLNK | S_IRWXUGO, 0, link, NULL); if (error) kfree(link); -- cgit v1.2.3 From 160f7592f2364b47c615cff96117f0877d58e427 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:09:49 -0500 Subject: debugfs_mknod(): get rid useless arguments dev is always zero, dir was only used to get its ->i_sb, which is equal to ->d_sb of dentry... Signed-off-by: Al Viro --- fs/debugfs/inode.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index c69e00d69ff1..bc02e2096977 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -70,23 +70,18 @@ static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev } /* SMP-safe */ -static int debugfs_mknod(struct inode *dir, struct dentry *dentry, - umode_t mode, dev_t dev, void *data, +static int debugfs_mknod(struct dentry *dentry, + umode_t mode, void *data, const struct file_operations *fops) { struct inode *inode; - int error = -EPERM; - if (dentry->d_inode) - return -EEXIST; - - inode = debugfs_get_inode(dir->i_sb, mode, dev, data, fops); - if (inode) { - d_instantiate(dentry, inode); - dget(dentry); - error = 0; - } - return error; + inode = debugfs_get_inode(dentry->d_sb, mode, 0, data, fops); + if (unlikely(!inode)) + return -EPERM; + d_instantiate(dentry, inode); + dget(dentry); + return 0; } static int debugfs_mkdir(struct dentry *dentry, umode_t mode) @@ -95,7 +90,7 @@ static int debugfs_mkdir(struct dentry *dentry, umode_t mode) int res; mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; - res = debugfs_mknod(dir, dentry, mode, 0, NULL, NULL); + res = debugfs_mknod(dentry, mode, NULL, NULL); if (!res) { inc_nlink(dir); fsnotify_mkdir(dir, dentry); @@ -110,7 +105,7 @@ static int debugfs_create(struct dentry *dentry, umode_t mode, int res; mode = (mode & S_IALLUGO) | S_IFREG; - res = debugfs_mknod(dir, dentry, mode, 0, data, fops); + res = debugfs_mknod(dentry, mode, data, fops); if (!res) fsnotify_create(dir, dentry); return res; @@ -458,8 +453,7 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, return NULL; } - error = debugfs_mknod(dentry->d_parent->d_inode, dentry, - S_IFLNK | S_IRWXUGO, 0, link, NULL); + error = debugfs_mknod(dentry, S_IFLNK | S_IRWXUGO, link, NULL); if (error) kfree(link); -- cgit v1.2.3 From 02538a75ba176d286a421d1e41a66061f407c3b3 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:15:18 -0500 Subject: fold debugfs_mkdir() into caller Signed-off-by: Al Viro --- fs/debugfs/inode.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index bc02e2096977..8d12935c87f1 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -84,20 +84,6 @@ static int debugfs_mknod(struct dentry *dentry, return 0; } -static int 
debugfs_mkdir(struct dentry *dentry, umode_t mode) -{ - struct inode *dir = dentry->d_parent->d_inode; - int res; - - mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; - res = debugfs_mknod(dentry, mode, NULL, NULL); - if (!res) { - inc_nlink(dir); - fsnotify_mkdir(dir, dentry); - } - return res; -} - static int debugfs_create(struct dentry *dentry, umode_t mode, void *data, const struct file_operations *fops) { @@ -407,7 +393,12 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) if (IS_ERR(dentry)) return NULL; - error = debugfs_mkdir(dentry, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO); + error = debugfs_mknod(dentry, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, + NULL, NULL); + if (!error) { + inc_nlink(dentry->d_parent->d_inode); + fsnotify_mkdir(dentry->d_parent->d_inode, dentry); + } return end_creating(dentry, error); } EXPORT_SYMBOL_GPL(debugfs_create_dir); -- cgit v1.2.3 From 3473cde5655cf4501cadb8ed11ab8aad1484b377 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:17:11 -0500 Subject: fold debugfs_create() into caller Signed-off-by: Al Viro --- fs/debugfs/inode.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 8d12935c87f1..778c0e32eb51 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -84,19 +84,6 @@ static int debugfs_mknod(struct dentry *dentry, return 0; } -static int debugfs_create(struct dentry *dentry, umode_t mode, - void *data, const struct file_operations *fops) -{ - struct inode *dir = dentry->d_parent->d_inode; - int res; - - mode = (mode & S_IALLUGO) | S_IFREG; - res = debugfs_mknod(dentry, mode, data, fops); - if (!res) - fsnotify_create(dir, dentry); - return res; -} - static inline int debugfs_positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); @@ -362,7 +349,9 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, if (IS_ERR(dentry)) return NULL; - error = debugfs_create(dentry, mode, data, fops); + error = debugfs_mknod(dentry, mode, data, fops); + if (!error) + fsnotify_create(dentry->d_parent->d_inode, dentry); return end_creating(dentry, error); } EXPORT_SYMBOL_GPL(debugfs_create_file); -- cgit v1.2.3 From 680b302409cdc87c2000a31f2ceb2951bd642260 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:31:32 -0500 Subject: fold debugfs_mknod() into callers Signed-off-by: Al Viro --- fs/debugfs/inode.c | 68 +++++++++++++++++++++++++----------------------------- 1 file changed, 31 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 778c0e32eb51..b765c04eba20 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -69,21 +69,6 @@ static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev return inode; } -/* SMP-safe */ -static int debugfs_mknod(struct dentry *dentry, - umode_t mode, void *data, - const struct file_operations *fops) -{ - struct inode *inode; - - inode = debugfs_get_inode(dentry->d_sb, mode, 0, data, fops); - if (unlikely(!inode)) - return -EPERM; - d_instantiate(dentry, inode); - dget(dentry); - return 0; -} - static inline int debugfs_positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); @@ -339,7 +324,7 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, const struct file_operations *fops) { struct dentry *dentry; - int error; + struct inode *inode; if (!(mode & S_IFMT)) mode |= S_IFREG; @@ -349,10 +334,14 @@ struct dentry 
*debugfs_create_file(const char *name, umode_t mode, if (IS_ERR(dentry)) return NULL; - error = debugfs_mknod(dentry, mode, data, fops); - if (!error) - fsnotify_create(dentry->d_parent->d_inode, dentry); - return end_creating(dentry, error); + inode = debugfs_get_inode(dentry->d_sb, mode, 0, data, fops); + if (unlikely(!inode)) + return end_creating(dentry, -ENOMEM); + + d_instantiate(dentry, inode); + dget(dentry); + fsnotify_create(dentry->d_parent->d_inode, dentry); + return end_creating(dentry, 0); } EXPORT_SYMBOL_GPL(debugfs_create_file); @@ -377,18 +366,22 @@ EXPORT_SYMBOL_GPL(debugfs_create_file); struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { struct dentry *dentry = start_creating(name, parent); - int error; + struct inode *inode; if (IS_ERR(dentry)) return NULL; - error = debugfs_mknod(dentry, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, - NULL, NULL); - if (!error) { - inc_nlink(dentry->d_parent->d_inode); - fsnotify_mkdir(dentry->d_parent->d_inode, dentry); - } - return end_creating(dentry, error); + inode = debugfs_get_inode(dentry->d_sb, + S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, + 0, NULL, NULL); + if (unlikely(!inode)) + return end_creating(dentry, -ENOMEM); + + d_instantiate(dentry, inode); + dget(dentry); + inc_nlink(dentry->d_parent->d_inode); + fsnotify_mkdir(dentry->d_parent->d_inode, dentry); + return end_creating(dentry, 0); } EXPORT_SYMBOL_GPL(debugfs_create_dir); @@ -419,25 +412,26 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *target) { struct dentry *dentry; - char *link; - int error; - - link = kstrdup(target, GFP_KERNEL); + struct inode *inode; + char *link = kstrdup(target, GFP_KERNEL); if (!link) return NULL; dentry = start_creating(name, parent); - if (IS_ERR(dentry)) { kfree(link); return NULL; } - error = debugfs_mknod(dentry, S_IFLNK | S_IRWXUGO, link, NULL); - if (error) + inode = debugfs_get_inode(dentry->d_sb, S_IFLNK | S_IRWXUGO, 0, + link, NULL); + if (unlikely(!inode)) { kfree(link); - - return end_creating(dentry, error); + return end_creating(dentry, -ENOMEM); + } + d_instantiate(dentry, inode); + dget(dentry); + return end_creating(dentry, 0); } EXPORT_SYMBOL_GPL(debugfs_create_symlink); -- cgit v1.2.3 From edac65eaf8d5c00b1d6004a44e76b9de6b038dc6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:36:18 -0500 Subject: debugfs: take mode-dependent parts of debugfs_get_inode() into callers ... and trim the arguments list Signed-off-by: Al Viro --- fs/debugfs/inode.c | 48 ++++++++++++++++-------------------------------- 1 file changed, 16 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index b765c04eba20..61e9a6815a19 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -34,37 +34,12 @@ static struct vfsmount *debugfs_mount; static int debugfs_mount_count; static bool debugfs_registered; -static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev_t dev, - void *data, const struct file_operations *fops) - +static struct inode *debugfs_get_inode(struct super_block *sb) { struct inode *inode = new_inode(sb); - if (inode) { inode->i_ino = get_next_ino(); - inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - switch (mode & S_IFMT) { - default: - init_special_inode(inode, mode, dev); - break; - case S_IFREG: - inode->i_fop = fops ? 
fops : &debugfs_file_operations; - inode->i_private = data; - break; - case S_IFLNK: - inode->i_op = &debugfs_link_operations; - inode->i_private = data; - break; - case S_IFDIR: - inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - - /* directory inodes start off with i_nlink == 2 - * (for "." entry) */ - inc_nlink(inode); - break; - } } return inode; } @@ -334,10 +309,13 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, if (IS_ERR(dentry)) return NULL; - inode = debugfs_get_inode(dentry->d_sb, mode, 0, data, fops); + inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return end_creating(dentry, -ENOMEM); + inode->i_mode = mode; + inode->i_fop = fops ? fops : &debugfs_file_operations; + inode->i_private = data; d_instantiate(dentry, inode); dget(dentry); fsnotify_create(dentry->d_parent->d_inode, dentry); @@ -371,12 +349,16 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) if (IS_ERR(dentry)) return NULL; - inode = debugfs_get_inode(dentry->d_sb, - S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, - 0, NULL, NULL); + inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) return end_creating(dentry, -ENOMEM); + inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + + /* directory inodes start off with i_nlink == 2 (for "." entry) */ + inc_nlink(inode); d_instantiate(dentry, inode); dget(dentry); inc_nlink(dentry->d_parent->d_inode); @@ -423,12 +405,14 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, return NULL; } - inode = debugfs_get_inode(dentry->d_sb, S_IFLNK | S_IRWXUGO, 0, - link, NULL); + inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) { kfree(link); return end_creating(dentry, -ENOMEM); } + inode->i_mode = S_IFLNK | S_IRWXUGO; + inode->i_op = &debugfs_link_operations; + inode->i_private = link; d_instantiate(dentry, inode); dget(dentry); return end_creating(dentry, 0); -- cgit v1.2.3 From 5233e31191af661389a4f5b060873bfcb155c828 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 14:39:49 -0500 Subject: debugfs: split end_creating() into success and failure cases ... and don't bother with dput(dentry) in the former and with dget(dentry) preceding all its calls. Signed-off-by: Al Viro --- fs/debugfs/inode.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 61e9a6815a19..1219dff8e18f 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -256,15 +256,17 @@ static struct dentry *start_creating(const char *name, struct dentry *parent) return dentry; } -static struct dentry *end_creating(struct dentry *dentry, int error) +static struct dentry *failed_creating(struct dentry *dentry) { mutex_unlock(&dentry->d_parent->d_inode->i_mutex); dput(dentry); + simple_release_fs(&debugfs_mount, &debugfs_mount_count); + return NULL; +} - if (error) { - dentry = NULL; - simple_release_fs(&debugfs_mount, &debugfs_mount_count); - } +static struct dentry *end_creating(struct dentry *dentry) +{ + mutex_unlock(&dentry->d_parent->d_inode->i_mutex); return dentry; } @@ -311,15 +313,14 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) - return end_creating(dentry, -ENOMEM); + return failed_creating(dentry); inode->i_mode = mode; inode->i_fop = fops ? 
fops : &debugfs_file_operations; inode->i_private = data; d_instantiate(dentry, inode); - dget(dentry); fsnotify_create(dentry->d_parent->d_inode, dentry); - return end_creating(dentry, 0); + return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_file); @@ -351,7 +352,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) - return end_creating(dentry, -ENOMEM); + return failed_creating(dentry); inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode->i_op = &simple_dir_inode_operations; @@ -360,10 +361,9 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); - dget(dentry); inc_nlink(dentry->d_parent->d_inode); fsnotify_mkdir(dentry->d_parent->d_inode, dentry); - return end_creating(dentry, 0); + return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_dir); @@ -408,14 +408,13 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) { kfree(link); - return end_creating(dentry, -ENOMEM); + return failed_creating(dentry); } inode->i_mode = S_IFLNK | S_IRWXUGO; inode->i_op = &debugfs_link_operations; inode->i_private = link; d_instantiate(dentry, inode); - dget(dentry); - return end_creating(dentry, 0); + return end_creating(dentry); } EXPORT_SYMBOL_GPL(debugfs_create_symlink); -- cgit v1.2.3 From 77b3da6e3232d3b4d4b8addb4b05799fe98f3bf8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Jan 2015 15:10:32 -0500 Subject: new primitive: debugfs_create_automount() Signed-off-by: Al Viro --- fs/debugfs/inode.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/debugfs.h | 5 +++++ 2 files changed, 53 insertions(+) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 1219dff8e18f..957c40ce09f6 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -175,6 +175,18 @@ static const struct super_operations debugfs_super_operations = { .show_options = debugfs_show_options, }; +static struct vfsmount *debugfs_automount(struct path *path) +{ + struct vfsmount *(*f)(void *); + f = (struct vfsmount *(*)(void *))path->dentry->d_fsdata; + return f(path->dentry->d_inode->i_private); +} + +static const struct dentry_operations debugfs_dops = { + .d_delete = always_delete_dentry, + .d_automount = debugfs_automount, +}; + static int debug_fill_super(struct super_block *sb, void *data, int silent) { static struct tree_descr debug_files[] = {{""}}; @@ -199,6 +211,7 @@ static int debug_fill_super(struct super_block *sb, void *data, int silent) goto fail; sb->s_op = &debugfs_super_operations; + sb->s_d_op = &debugfs_dops; debugfs_apply_options(sb); @@ -367,6 +380,41 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) } EXPORT_SYMBOL_GPL(debugfs_create_dir); +/** + * debugfs_create_automount - create automount point in the debugfs filesystem + * @name: a pointer to a string containing the name of the file to create. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is NULL, then the + * file will be created in the root of the debugfs filesystem. + * @f: function to be called when pathname resolution steps on that one. + * @data: opaque argument to pass to f(). + * + * @f should return what ->d_automount() would. 
+ */ +struct dentry *debugfs_create_automount(const char *name, + struct dentry *parent, + struct vfsmount *(*f)(void *), + void *data) +{ + struct dentry *dentry = start_creating(name, parent); + struct inode *inode; + + if (IS_ERR(dentry)) + return NULL; + + inode = debugfs_get_inode(dentry->d_sb); + if (unlikely(!inode)) + return failed_creating(dentry); + + inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; + inode->i_flags |= S_AUTOMOUNT; + inode->i_private = data; + dentry->d_fsdata = (void *)f; + d_instantiate(dentry, inode); + return end_creating(dentry); +} +EXPORT_SYMBOL(debugfs_create_automount); + /** * debugfs_create_symlink- create a symbolic link in the debugfs filesystem * @name: a pointer to a string containing the name of the symbolic link to diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index da4c4983adbe..ea149a24a1f2 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -56,6 +56,11 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, const char *dest); +struct dentry *debugfs_create_automount(const char *name, + struct dentry *parent, + struct vfsmount *(*f)(void *), + void *data); + void debugfs_remove(struct dentry *dentry); void debugfs_remove_recursive(struct dentry *dentry); -- cgit v1.2.3 From 5e993e2534e29b62790f9a2908b551b7fb3a63f0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 24 Dec 2014 21:41:47 -0500 Subject: ncpfs: get rid of d_validate() nonsense What we want is to have non-counting references to children in pagecache of parent directory, and avoid picking them after a child has been freed. Fine, so let's just have ->d_prune() clear parent's inode "has directory contents in page cache" flag. That way we don't need ->d_fsdata for storing offsets, so we can use it as a quick and dirty "is it referenced from page cache" flag. Signed-off-by: Al Viro --- fs/ncpfs/dir.c | 98 +++++++++++++++++++++++++----------------------- fs/ncpfs/ncp_fs_i.h | 1 + fs/ncpfs/ncplib_kernel.h | 30 --------------- 3 files changed, 52 insertions(+), 77 deletions(-) (limited to 'fs') diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 008960101520..e7ca827d7694 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -77,6 +77,7 @@ static int ncp_hash_dentry(const struct dentry *, struct qstr *); static int ncp_compare_dentry(const struct dentry *, const struct dentry *, unsigned int, const char *, const struct qstr *); static int ncp_delete_dentry(const struct dentry *); +static void ncp_d_prune(struct dentry *dentry); const struct dentry_operations ncp_dentry_operations = { @@ -84,6 +85,7 @@ const struct dentry_operations ncp_dentry_operations = .d_hash = ncp_hash_dentry, .d_compare = ncp_compare_dentry, .d_delete = ncp_delete_dentry, + .d_prune = ncp_d_prune, }; #define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber]) @@ -384,42 +386,6 @@ finished: return val; } -static struct dentry * -ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) -{ - struct dentry *dent = dentry; - - if (d_validate(dent, parent)) { - if (dent->d_name.len <= NCP_MAXPATHLEN && - (unsigned long)dent->d_fsdata == fpos) { - if (!dent->d_inode) { - dput(dent); - dent = NULL; - } - return dent; - } - dput(dent); - } - - /* If a pointer is invalid, we search the dentry. 
*/ - spin_lock(&parent->d_lock); - list_for_each_entry(dent, &parent->d_subdirs, d_child) { - if ((unsigned long)dent->d_fsdata == fpos) { - if (dent->d_inode) - dget(dent); - else - dent = NULL; - spin_unlock(&parent->d_lock); - goto out; - } - } - spin_unlock(&parent->d_lock); - return NULL; - -out: - return dent; -} - static time_t ncp_obtain_mtime(struct dentry *dentry) { struct inode *inode = dentry->d_inode; @@ -435,6 +401,20 @@ static time_t ncp_obtain_mtime(struct dentry *dentry) return ncp_date_dos2unix(i.modifyTime, i.modifyDate); } +static inline void +ncp_invalidate_dircache_entries(struct dentry *parent) +{ + struct ncp_server *server = NCP_SERVER(parent->d_inode); + struct dentry *dentry; + + spin_lock(&parent->d_lock); + list_for_each_entry(dentry, &parent->d_subdirs, d_child) { + dentry->d_fsdata = NULL; + ncp_age_dentry(server, dentry); + } + spin_unlock(&parent->d_lock); +} + static int ncp_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dentry = file->f_path.dentry; @@ -500,10 +480,21 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) struct dentry *dent; bool over; - dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx], - dentry, ctx->pos); - if (!dent) + spin_lock(&dentry->d_lock); + if (!(NCP_FINFO(inode)->flags & NCPI_DIR_CACHE)) { + spin_unlock(&dentry->d_lock); + goto invalid_cache; + } + dent = ctl.cache->dentry[ctl.idx]; + if (unlikely(!lockref_get_not_dead(&dent->d_lockref))) { + spin_unlock(&dentry->d_lock); + goto invalid_cache; + } + spin_unlock(&dentry->d_lock); + if (!dent->d_inode) { + dput(dent); goto invalid_cache; + } over = !dir_emit(ctx, dent->d_name.name, dent->d_name.len, dent->d_inode->i_ino, DT_UNKNOWN); @@ -548,6 +539,9 @@ init_cache: ctl.filled = 0; ctl.valid = 1; read_really: + spin_lock(&dentry->d_lock); + NCP_FINFO(inode)->flags |= NCPI_DIR_CACHE; + spin_unlock(&dentry->d_lock); if (ncp_is_server_root(inode)) { ncp_read_volume_list(file, ctx, &ctl); } else { @@ -573,6 +567,13 @@ out: return result; } +static void ncp_d_prune(struct dentry *dentry) +{ + if (!dentry->d_fsdata) /* not referenced from page cache */ + return; + NCP_FINFO(dentry->d_parent->d_inode)->flags &= ~NCPI_DIR_CACHE; +} + static int ncp_fill_cache(struct file *file, struct dir_context *ctx, struct ncp_cache_control *ctrl, struct ncp_entry_info *entry, @@ -630,6 +631,10 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, d_instantiate(newdent, inode); if (!hashed) d_rehash(newdent); + } else { + spin_lock(&dentry->d_lock); + NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE; + spin_unlock(&dentry->d_lock); } } else { struct inode *inode = newdent->d_inode; @@ -639,12 +644,6 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, mutex_unlock(&inode->i_mutex); } - if (newdent->d_inode) { - ino = newdent->d_inode->i_ino; - newdent->d_fsdata = (void *) ctl.fpos; - ncp_new_dentry(newdent); - } - if (ctl.idx >= NCP_DIRCACHE_SIZE) { if (ctl.page) { kunmap(ctl.page); @@ -660,8 +659,13 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, ctl.cache = kmap(ctl.page); } if (ctl.cache) { - ctl.cache->dentry[ctl.idx] = newdent; - valid = 1; + if (newdent->d_inode) { + newdent->d_fsdata = newdent; + ctl.cache->dentry[ctl.idx] = newdent; + ino = newdent->d_inode->i_ino; + ncp_new_dentry(newdent); + } + valid = 1; } dput(newdent); end_advance: diff --git a/fs/ncpfs/ncp_fs_i.h b/fs/ncpfs/ncp_fs_i.h index 4b0bec477846..c4794504f843 100644 --- a/fs/ncpfs/ncp_fs_i.h +++ b/fs/ncpfs/ncp_fs_i.h @@ -22,6 +22,7 @@ struct ncp_inode_info { int 
access; int flags; #define NCPI_KLUDGE_SYMLINK 0x0001 +#define NCPI_DIR_CACHE 0x0002 __u8 file_handle[6]; struct inode vfs_inode; }; diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index b785f74bfe3c..250e443a07f3 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -184,36 +184,6 @@ ncp_new_dentry(struct dentry* dentry) dentry->d_time = jiffies; } -static inline void -ncp_renew_dentries(struct dentry *parent) -{ - struct ncp_server *server = NCP_SERVER(parent->d_inode); - struct dentry *dentry; - - spin_lock(&parent->d_lock); - list_for_each_entry(dentry, &parent->d_subdirs, d_child) { - if (dentry->d_fsdata == NULL) - ncp_age_dentry(server, dentry); - else - ncp_new_dentry(dentry); - } - spin_unlock(&parent->d_lock); -} - -static inline void -ncp_invalidate_dircache_entries(struct dentry *parent) -{ - struct ncp_server *server = NCP_SERVER(parent->d_inode); - struct dentry *dentry; - - spin_lock(&parent->d_lock); - list_for_each_entry(dentry, &parent->d_subdirs, d_child) { - dentry->d_fsdata = NULL; - ncp_age_dentry(server, dentry); - } - spin_unlock(&parent->d_lock); -} - struct ncp_cache_head { time_t mtime; unsigned long time; /* cache age */ -- cgit v1.2.3 From d6cb125b9983e1ea9444f794b2d3ed5e3ad737b7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 24 Dec 2014 22:47:00 -0500 Subject: kill d_validate() no users left Signed-off-by: Al Viro --- fs/dcache.c | 31 ------------------------------- include/linux/dcache.h | 3 --- 2 files changed, 34 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index e368d4f412f9..40432e59d72e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2187,37 +2187,6 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name) } EXPORT_SYMBOL(d_hash_and_lookup); -/** - * d_validate - verify dentry provided from insecure source (deprecated) - * @dentry: The dentry alleged to be valid child of @dparent - * @dparent: The parent dentry (known to be valid) - * - * An insecure source has sent us a dentry, here we verify it and dget() it. - * This is used by ncpfs in its readdir implementation. - * Zero is returned in the dentry is invalid. - * - * This function is slow for big directories, and deprecated, do not use it. - */ -int d_validate(struct dentry *dentry, struct dentry *dparent) -{ - struct dentry *child; - - spin_lock(&dparent->d_lock); - list_for_each_entry(child, &dparent->d_subdirs, d_child) { - if (dentry == child) { - spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); - __dget_dlock(dentry); - spin_unlock(&dentry->d_lock); - spin_unlock(&dparent->d_lock); - return 1; - } - } - spin_unlock(&dparent->d_lock); - - return 0; -} -EXPORT_SYMBOL(d_validate); - /* * When a file is deleted, we have two options: * - turn this dentry into a negative dentry diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 5a813988e6d4..92c08cf7670e 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -319,9 +319,6 @@ static inline unsigned d_count(const struct dentry *dentry) return dentry->d_lockref.count; } -/* validate "insecure" dentry pointer */ -extern int d_validate(struct dentry *, struct dentry *); - /* * helper function for dentry_operations.d_dname() members */ -- cgit v1.2.3 From d443b9fd56e85c0e58d10b75cf5eb38e0b2c4c02 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 25 Dec 2014 16:47:49 -0500 Subject: gut proc_register() a bit There are only 3 callers and quite a bit of that thing is executed exactly in one of those. Just lift it there... 
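The caller-facing API of those three helpers (proc_symlink(), proc_mkdir_data() and proc_create_data()) is unchanged by this; only the place where the mode-specific setup happens moves. As a point of reference, a minimal, hypothetical module using that unchanged interface might look like the sketch below (all demo_* names are invented for illustration and are not part of this patch):

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from /proc/demo\n");
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init demo_init(void)
{
        /*
         * Regular entries must always supply file_operations; after this
         * patch the check enforcing that sits in proc_create_data() itself.
         */
        if (!proc_create_data("demo", 0444, NULL, &demo_fops, NULL))
                return -ENOMEM;
        /* directories and symlinks keep their own helpers */
        proc_symlink("demo-link", NULL, "demo");
        return 0;
}

static void __exit demo_exit(void)
{
        remove_proc_entry("demo-link", NULL);
        remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
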
Signed-off-by: Al Viro --- fs/proc/generic.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 7fea13229f33..1766fe70233e 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -350,29 +350,12 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp if (ret) return ret; - if (S_ISDIR(dp->mode)) { - dp->proc_fops = &proc_dir_operations; - dp->proc_iops = &proc_dir_inode_operations; - dir->nlink++; - } else if (S_ISLNK(dp->mode)) { - dp->proc_iops = &proc_link_inode_operations; - } else if (S_ISREG(dp->mode)) { - BUG_ON(dp->proc_fops == NULL); - dp->proc_iops = &proc_file_inode_operations; - } else { - WARN_ON(1); - proc_free_inum(dp->low_ino); - return -EINVAL; - } - spin_lock(&proc_subdir_lock); dp->parent = dir; if (pde_subdir_insert(dir, dp) == false) { WARN(1, "proc_dir_entry '%s/%s' already registered\n", dir->name, dp->name); spin_unlock(&proc_subdir_lock); - if (S_ISDIR(dp->mode)) - dir->nlink--; proc_free_inum(dp->low_ino); return -EEXIST; } @@ -431,6 +414,7 @@ struct proc_dir_entry *proc_symlink(const char *name, ent->data = kmalloc((ent->size=strlen(dest))+1, GFP_KERNEL); if (ent->data) { strcpy((char*)ent->data,dest); + ent->proc_iops = &proc_link_inode_operations; if (proc_register(parent, ent) < 0) { kfree(ent->data); kfree(ent); @@ -456,8 +440,12 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode, ent = __proc_create(&parent, name, S_IFDIR | mode, 2); if (ent) { ent->data = data; + ent->proc_fops = &proc_dir_operations; + ent->proc_iops = &proc_dir_inode_operations; + parent->nlink++; if (proc_register(parent, ent) < 0) { kfree(ent); + parent->nlink--; ent = NULL; } } @@ -493,6 +481,8 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, return NULL; } + BUG_ON(proc_fops == NULL); + if ((mode & S_IALLUGO) == 0) mode |= S_IRUGO; pde = __proc_create(&parent, name, mode, 1); @@ -500,6 +490,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, goto out; pde->proc_fops = proc_fops; pde->data = data; + pde->proc_iops = &proc_file_inode_operations; if (proc_register(parent, pde) < 0) goto out_free; return pde; -- cgit v1.2.3 From 9e251d02041432487d89cb340e72490c4bbc198a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 9 Jan 2015 20:40:02 -0500 Subject: kill pin_put() Signed-off-by: Al Viro --- fs/fs_pin.c | 11 ----------- include/linux/fs_pin.h | 1 - kernel/acct.c | 14 ++++++++++---- 3 files changed, 10 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/fs_pin.c b/fs/fs_pin.c index 9368236ca100..f173313760b8 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c @@ -4,19 +4,8 @@ #include "internal.h" #include "mount.h" -static void pin_free_rcu(struct rcu_head *head) -{ - kfree(container_of(head, struct fs_pin, rcu)); -} - static DEFINE_SPINLOCK(pin_lock); -void pin_put(struct fs_pin *p) -{ - if (atomic_long_dec_and_test(&p->count)) - call_rcu(&p->rcu, pin_free_rcu); -} - void pin_remove(struct fs_pin *pin) { spin_lock(&pin_lock); diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h index f66525e72ccf..68a54b7741aa 100644 --- a/include/linux/fs_pin.h +++ b/include/linux/fs_pin.h @@ -12,6 +12,5 @@ struct fs_pin { void (*kill)(struct fs_pin *); }; -void pin_put(struct fs_pin *); void pin_remove(struct fs_pin *); void pin_insert(struct fs_pin *, struct vfsmount *); diff --git a/kernel/acct.c b/kernel/acct.c index 33738ef972f3..7bb9e659a7da 100644 --- a/kernel/acct.c +++ 
b/kernel/acct.c @@ -124,6 +124,12 @@ out: return acct->active; } +static void acct_put(struct bsd_acct_struct *p) +{ + if (atomic_long_dec_and_test(&p->pin.count)) + kfree_rcu(p, pin.rcu); +} + static struct bsd_acct_struct *acct_get(struct pid_namespace *ns) { struct bsd_acct_struct *res; @@ -144,7 +150,7 @@ again: mutex_lock(&res->lock); if (!res->ns) { mutex_unlock(&res->lock); - pin_put(&res->pin); + acct_put(res); goto again; } return res; @@ -175,7 +181,7 @@ static void acct_kill(struct bsd_acct_struct *acct, acct->ns = NULL; atomic_long_dec(&acct->pin.count); mutex_unlock(&acct->lock); - pin_put(&acct->pin); + acct_put(acct); } } @@ -186,7 +192,7 @@ static void acct_pin_kill(struct fs_pin *pin) mutex_lock(&acct->lock); if (!acct->ns) { mutex_unlock(&acct->lock); - pin_put(pin); + acct_put(acct); acct = NULL; } acct_kill(acct, NULL); @@ -576,7 +582,7 @@ static void slow_acct_process(struct pid_namespace *ns) if (acct) { do_acct_process(acct); mutex_unlock(&acct->lock); - pin_put(&acct->pin); + acct_put(acct); } } } -- cgit v1.2.3 From 32426f6653cbfde1ca16aff27a530ee36332f796 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 10 Jan 2015 00:07:35 -0500 Subject: pull bumping refcount into ->kill() there will be one more change of ->kill() calling conventions; this isn't final. Signed-off-by: Al Viro --- fs/fs_pin.c | 12 ------------ kernel/acct.c | 6 ++++++ 2 files changed, 6 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/fs_pin.c b/fs/fs_pin.c index f173313760b8..5eb39a93a560 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c @@ -34,12 +34,6 @@ void mnt_pin_kill(struct mount *m) break; } pin = hlist_entry(p, struct fs_pin, m_list); - if (!atomic_long_inc_not_zero(&pin->count)) { - rcu_read_unlock(); - cpu_relax(); - continue; - } - rcu_read_unlock(); pin->kill(pin); } } @@ -56,12 +50,6 @@ void sb_pin_kill(struct super_block *sb) break; } pin = hlist_entry(p, struct fs_pin, s_list); - if (!atomic_long_inc_not_zero(&pin->count)) { - rcu_read_unlock(); - cpu_relax(); - continue; - } - rcu_read_unlock(); pin->kill(pin); } } diff --git a/kernel/acct.c b/kernel/acct.c index 7bb9e659a7da..a74f3d1a0c26 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -189,6 +189,12 @@ static void acct_pin_kill(struct fs_pin *pin) { struct bsd_acct_struct *acct; acct = container_of(pin, struct bsd_acct_struct, pin); + if (!atomic_long_inc_not_zero(&pin->count)) { + rcu_read_unlock(); + cpu_relax(); + return; + } + rcu_read_unlock(); mutex_lock(&acct->lock); if (!acct->ns) { mutex_unlock(&acct->lock); -- cgit v1.2.3 From 360f54796ed65939093ae373b92ebd5ef3341776 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 9 Jan 2015 15:19:03 -0800 Subject: dcache: let the dentry count go down to zero without taking d_lock We can be more aggressive about this, if we are clever and careful. This is subtle. Signed-off-by: Linus Torvalds Signed-off-by: Al Viro --- fs/dcache.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++-- include/linux/lockref.h | 3 +- lib/lockref.c | 36 +++++++++++---- 3 files changed, 144 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 40432e59d72e..a14d00e9839e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -508,7 +508,7 @@ static void __dentry_kill(struct dentry *dentry) * dentry_iput drops the locks, at which point nobody (except * transient RCU lookups) can reach this dentry. 
*/ - BUG_ON((int)dentry->d_lockref.count > 0); + BUG_ON(dentry->d_lockref.count > 0); this_cpu_dec(nr_dentry); if (dentry->d_op && dentry->d_op->d_release) dentry->d_op->d_release(dentry); @@ -561,7 +561,7 @@ static inline struct dentry *lock_parent(struct dentry *dentry) struct dentry *parent = dentry->d_parent; if (IS_ROOT(dentry)) return NULL; - if (unlikely((int)dentry->d_lockref.count < 0)) + if (unlikely(dentry->d_lockref.count < 0)) return NULL; if (likely(spin_trylock(&parent->d_lock))) return parent; @@ -590,6 +590,110 @@ again: return parent; } +/* + * Try to do a lockless dput(), and return whether that was successful. + * + * If unsuccessful, we return false, having already taken the dentry lock. + * + * The caller needs to hold the RCU read lock, so that the dentry is + * guaranteed to stay around even if the refcount goes down to zero! + */ +static inline bool fast_dput(struct dentry *dentry) +{ + int ret; + unsigned int d_flags; + + /* + * If we have a d_op->d_delete() operation, we sould not + * let the dentry count go to zero, so use "put__or_lock". + */ + if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) + return lockref_put_or_lock(&dentry->d_lockref); + + /* + * .. otherwise, we can try to just decrement the + * lockref optimistically. + */ + ret = lockref_put_return(&dentry->d_lockref); + + /* + * If the lockref_put_return() failed due to the lock being held + * by somebody else, the fast path has failed. We will need to + * get the lock, and then check the count again. + */ + if (unlikely(ret < 0)) { + spin_lock(&dentry->d_lock); + if (dentry->d_lockref.count > 1) { + dentry->d_lockref.count--; + spin_unlock(&dentry->d_lock); + return 1; + } + return 0; + } + + /* + * If we weren't the last ref, we're done. + */ + if (ret) + return 1; + + /* + * Careful, careful. The reference count went down + * to zero, but we don't hold the dentry lock, so + * somebody else could get it again, and do another + * dput(), and we need to not race with that. + * + * However, there is a very special and common case + * where we don't care, because there is nothing to + * do: the dentry is still hashed, it does not have + * a 'delete' op, and it's referenced and already on + * the LRU list. + * + * NOTE! Since we aren't locked, these values are + * not "stable". However, it is sufficient that at + * some point after we dropped the reference the + * dentry was hashed and the flags had the proper + * value. Other dentry users may have re-gotten + * a reference to the dentry and change that, but + * our work is done - we can leave the dentry + * around with a zero refcount. + */ + smp_rmb(); + d_flags = ACCESS_ONCE(dentry->d_flags); + d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST; + + /* Nothing to do? Dropping the reference was all we needed? */ + if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry)) + return 1; + + /* + * Not the fast normal case? Get the lock. We've already decremented + * the refcount, but we'll need to re-check the situation after + * getting the lock. + */ + spin_lock(&dentry->d_lock); + + /* + * Did somebody else grab a reference to it in the meantime, and + * we're no longer the last user after all? Alternatively, somebody + * else could have killed it and marked it dead. Either way, we + * don't need to do anything else. + */ + if (dentry->d_lockref.count) { + spin_unlock(&dentry->d_lock); + return 1; + } + + /* + * Re-get the reference we optimistically dropped. 
We hold the + * lock, and we just tested that it was zero, so we can just + * set it to 1. + */ + dentry->d_lockref.count = 1; + return 0; +} + + /* * This is dput * @@ -622,8 +726,14 @@ void dput(struct dentry *dentry) return; repeat: - if (lockref_put_or_lock(&dentry->d_lockref)) + rcu_read_lock(); + if (likely(fast_dput(dentry))) { + rcu_read_unlock(); return; + } + + /* Slow case: now with the dentry lock held */ + rcu_read_unlock(); /* Unreachable? Get rid of it */ if (unlikely(d_unhashed(dentry))) @@ -810,7 +920,7 @@ static void shrink_dentry_list(struct list_head *list) * We found an inuse dentry which was not removed from * the LRU because of laziness during lookup. Do not free it. */ - if ((int)dentry->d_lockref.count > 0) { + if (dentry->d_lockref.count > 0) { spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); diff --git a/include/linux/lockref.h b/include/linux/lockref.h index 4bfde0e99ed5..b10b122dd099 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h @@ -28,12 +28,13 @@ struct lockref { #endif struct { spinlock_t lock; - unsigned int count; + int count; }; }; }; extern void lockref_get(struct lockref *); +extern int lockref_put_return(struct lockref *); extern int lockref_get_not_zero(struct lockref *); extern int lockref_get_or_lock(struct lockref *); extern int lockref_put_or_lock(struct lockref *); diff --git a/lib/lockref.c b/lib/lockref.c index d2233de9a86e..ecb9a665ec19 100644 --- a/lib/lockref.c +++ b/lib/lockref.c @@ -60,7 +60,7 @@ void lockref_get(struct lockref *lockref) EXPORT_SYMBOL(lockref_get); /** - * lockref_get_not_zero - Increments count unless the count is 0 + * lockref_get_not_zero - Increments count unless the count is 0 or dead * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if count was zero */ @@ -70,7 +70,7 @@ int lockref_get_not_zero(struct lockref *lockref) CMPXCHG_LOOP( new.count++; - if (!old.count) + if (old.count <= 0) return 0; , return 1; @@ -78,7 +78,7 @@ int lockref_get_not_zero(struct lockref *lockref) spin_lock(&lockref->lock); retval = 0; - if (lockref->count) { + if (lockref->count > 0) { lockref->count++; retval = 1; } @@ -88,7 +88,7 @@ int lockref_get_not_zero(struct lockref *lockref) EXPORT_SYMBOL(lockref_get_not_zero); /** - * lockref_get_or_lock - Increments count unless the count is 0 + * lockref_get_or_lock - Increments count unless the count is 0 or dead * @lockref: pointer to lockref structure * Return: 1 if count updated successfully or 0 if count was zero * and we got the lock instead. @@ -97,14 +97,14 @@ int lockref_get_or_lock(struct lockref *lockref) { CMPXCHG_LOOP( new.count++; - if (!old.count) + if (old.count <= 0) break; , return 1; ); spin_lock(&lockref->lock); - if (!lockref->count) + if (lockref->count <= 0) return 0; lockref->count++; spin_unlock(&lockref->lock); @@ -112,6 +112,26 @@ int lockref_get_or_lock(struct lockref *lockref) } EXPORT_SYMBOL(lockref_get_or_lock); +/** + * lockref_put_return - Decrement reference count if possible + * @lockref: pointer to lockref structure + * + * Decrement the reference count and return the new value. + * If the lockref was dead or locked, return an error. 
+ */ +int lockref_put_return(struct lockref *lockref) +{ + CMPXCHG_LOOP( + new.count--; + if (old.count <= 0) + return -1; + , + return new.count; + ); + return -1; +} +EXPORT_SYMBOL(lockref_put_return); + /** * lockref_put_or_lock - decrements count unless count <= 1 before decrement * @lockref: pointer to lockref structure @@ -158,7 +178,7 @@ int lockref_get_not_dead(struct lockref *lockref) CMPXCHG_LOOP( new.count++; - if ((int)old.count < 0) + if (old.count < 0) return 0; , return 1; @@ -166,7 +186,7 @@ int lockref_get_not_dead(struct lockref *lockref) spin_lock(&lockref->lock); retval = 0; - if ((int) lockref->count >= 0) { + if (lockref->count >= 0) { lockref->count++; retval = 1; } -- cgit v1.2.3 From fdab684d7202774bfd8762d4a656a553b787c8ec Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 11 Jan 2015 10:57:27 -0500 Subject: allow attaching fs_pin to a group not associated with some superblock Signed-off-by: Al Viro --- fs/fs_pin.c | 20 +++++++++++++------- fs/internal.h | 2 +- fs/super.c | 4 ++-- include/linux/fs_pin.h | 1 + 4 files changed, 17 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/fs_pin.c b/fs/fs_pin.c index 5eb39a93a560..50ef7d2ef03c 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c @@ -14,14 +14,20 @@ void pin_remove(struct fs_pin *pin) spin_unlock(&pin_lock); } -void pin_insert(struct fs_pin *pin, struct vfsmount *m) +void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p) { spin_lock(&pin_lock); - hlist_add_head(&pin->s_list, &m->mnt_sb->s_pins); + if (p) + hlist_add_head(&pin->s_list, p); hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins); spin_unlock(&pin_lock); } +void pin_insert(struct fs_pin *pin, struct vfsmount *m) +{ + pin_insert_group(pin, m, &m->mnt_sb->s_pins); +} + void mnt_pin_kill(struct mount *m) { while (1) { @@ -38,18 +44,18 @@ void mnt_pin_kill(struct mount *m) } } -void sb_pin_kill(struct super_block *sb) +void group_pin_kill(struct hlist_head *p) { while (1) { - struct hlist_node *p; + struct hlist_node *q; struct fs_pin *pin; rcu_read_lock(); - p = ACCESS_ONCE(sb->s_pins.first); - if (!p) { + q = ACCESS_ONCE(p->first); + if (!q) { rcu_read_unlock(); break; } - pin = hlist_entry(p, struct fs_pin, s_list); + pin = hlist_entry(q, struct fs_pin, s_list); pin->kill(pin); } } diff --git a/fs/internal.h b/fs/internal.h index e9a61fe67575..a6efd2f09ba3 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -145,7 +145,7 @@ extern const struct file_operations pipefifo_fops; /* * fs_pin.c */ -extern void sb_pin_kill(struct super_block *sb); +extern void group_pin_kill(struct hlist_head *p); extern void mnt_pin_kill(struct mount *m); /* diff --git a/fs/super.c b/fs/super.c index eae088f6aaae..2d822459bc3d 100644 --- a/fs/super.c +++ b/fs/super.c @@ -706,9 +706,9 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY); if (remount_ro) { - if (sb->s_pins.first) { + if (!hlist_empty(&sb->s_pins)) { up_write(&sb->s_umount); - sb_pin_kill(sb); + group_pin_kill(&sb->s_pins); down_write(&sb->s_umount); if (!sb->s_root) return 0; diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h index a2e71912e758..2be38d1464ae 100644 --- a/include/linux/fs_pin.h +++ b/include/linux/fs_pin.h @@ -7,4 +7,5 @@ struct fs_pin { }; void pin_remove(struct fs_pin *); +void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *); void pin_insert(struct fs_pin *, struct vfsmount *); -- cgit v1.2.3 From 
59eda0e07f43c950d31756213b607af673e551f0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 10 Jan 2015 17:53:21 -0500 Subject: new fs_pin killing logics Signed-off-by: Al Viro --- fs/fs_pin.c | 54 +++++++++++++++++++++++++---- include/linux/fs_pin.h | 13 ++++++- include/linux/pid_namespace.h | 4 +-- kernel/acct.c | 81 ++++++++++++++++++------------------------- 4 files changed, 96 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/fs_pin.c b/fs/fs_pin.c index 50ef7d2ef03c..0c77bdc238b2 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c @@ -1,4 +1,5 @@ #include +#include #include #include #include "internal.h" @@ -12,6 +13,10 @@ void pin_remove(struct fs_pin *pin) hlist_del(&pin->m_list); hlist_del(&pin->s_list); spin_unlock(&pin_lock); + spin_lock_irq(&pin->wait.lock); + pin->done = 1; + wake_up_locked(&pin->wait); + spin_unlock_irq(&pin->wait.lock); } void pin_insert_group(struct fs_pin *pin, struct vfsmount *m, struct hlist_head *p) @@ -28,19 +33,58 @@ void pin_insert(struct fs_pin *pin, struct vfsmount *m) pin_insert_group(pin, m, &m->mnt_sb->s_pins); } +void pin_kill(struct fs_pin *p) +{ + wait_queue_t wait; + + if (!p) { + rcu_read_unlock(); + return; + } + init_wait(&wait); + spin_lock_irq(&p->wait.lock); + if (likely(!p->done)) { + p->done = -1; + spin_unlock_irq(&p->wait.lock); + rcu_read_unlock(); + p->kill(p); + return; + } + if (p->done > 0) { + spin_unlock_irq(&p->wait.lock); + rcu_read_unlock(); + return; + } + __add_wait_queue(&p->wait, &wait); + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&p->wait.lock); + rcu_read_unlock(); + schedule(); + rcu_read_lock(); + if (likely(list_empty(&wait.task_list))) + break; + /* OK, we know p couldn't have been freed yet */ + spin_lock_irq(&p->wait.lock); + if (p->done > 0) { + spin_unlock_irq(&p->wait.lock); + break; + } + } + rcu_read_unlock(); +} + void mnt_pin_kill(struct mount *m) { while (1) { struct hlist_node *p; - struct fs_pin *pin; rcu_read_lock(); p = ACCESS_ONCE(m->mnt_pins.first); if (!p) { rcu_read_unlock(); break; } - pin = hlist_entry(p, struct fs_pin, m_list); - pin->kill(pin); + pin_kill(hlist_entry(p, struct fs_pin, m_list)); } } @@ -48,14 +92,12 @@ void group_pin_kill(struct hlist_head *p) { while (1) { struct hlist_node *q; - struct fs_pin *pin; rcu_read_lock(); q = ACCESS_ONCE(p->first); if (!q) { rcu_read_unlock(); break; } - pin = hlist_entry(q, struct fs_pin, s_list); - pin->kill(pin); + pin_kill(hlist_entry(q, struct fs_pin, s_list)); } } diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h index 2be38d1464ae..9dc4e0384bfb 100644 --- a/include/linux/fs_pin.h +++ b/include/linux/fs_pin.h @@ -1,11 +1,22 @@ -#include +#include struct fs_pin { + wait_queue_head_t wait; + int done; struct hlist_node s_list; struct hlist_node m_list; void (*kill)(struct fs_pin *); }; +struct vfsmount; + +static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *)) +{ + init_waitqueue_head(&p->wait); + p->kill = kill; +} + void pin_remove(struct fs_pin *); void pin_insert_group(struct fs_pin *, struct vfsmount *, struct hlist_head *); void pin_insert(struct fs_pin *, struct vfsmount *); +void pin_kill(struct fs_pin *); diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index b9cf6c51b181..918b117a7cd3 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -19,7 +19,7 @@ struct pidmap { #define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) #define PIDMAP_ENTRIES ((PID_MAX_LIMIT+BITS_PER_PAGE-1)/BITS_PER_PAGE) -struct bsd_acct_struct; 
+struct fs_pin; struct pid_namespace { struct kref kref; @@ -37,7 +37,7 @@ struct pid_namespace { struct dentry *proc_thread_self; #endif #ifdef CONFIG_BSD_PROCESS_ACCT - struct bsd_acct_struct *bacct; + struct fs_pin *bacct; #endif struct user_namespace *user_ns; struct work_struct proc_work; diff --git a/kernel/acct.c b/kernel/acct.c index cf6588ab517b..e6c10d1a4058 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -76,7 +76,6 @@ int acct_parm[3] = {4, 2, 30}; /* * External references and all of the globals. */ -static void do_acct_process(struct bsd_acct_struct *acct); struct bsd_acct_struct { struct fs_pin pin; @@ -91,6 +90,8 @@ struct bsd_acct_struct { struct completion done; }; +static void do_acct_process(struct bsd_acct_struct *acct); + /* * Check the amount of free space and suspend/resume accordingly. */ @@ -132,13 +133,18 @@ static void acct_put(struct bsd_acct_struct *p) kfree_rcu(p, rcu); } +static inline struct bsd_acct_struct *to_acct(struct fs_pin *p) +{ + return p ? container_of(p, struct bsd_acct_struct, pin) : NULL; +} + static struct bsd_acct_struct *acct_get(struct pid_namespace *ns) { struct bsd_acct_struct *res; again: smp_rmb(); rcu_read_lock(); - res = ACCESS_ONCE(ns->bacct); + res = to_acct(ACCESS_ONCE(ns->bacct)); if (!res) { rcu_read_unlock(); return NULL; @@ -150,7 +156,7 @@ again: } rcu_read_unlock(); mutex_lock(&res->lock); - if (!res->ns) { + if (res != to_acct(ACCESS_ONCE(ns->bacct))) { mutex_unlock(&res->lock); acct_put(res); goto again; @@ -158,6 +164,19 @@ again: return res; } +static void acct_pin_kill(struct fs_pin *pin) +{ + struct bsd_acct_struct *acct = to_acct(pin); + mutex_lock(&acct->lock); + do_acct_process(acct); + schedule_work(&acct->work); + wait_for_completion(&acct->done); + cmpxchg(&acct->ns->bacct, pin, NULL); + mutex_unlock(&acct->lock); + pin_remove(pin); + acct_put(acct); +} + static void close_work(struct work_struct *work) { struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work); @@ -168,49 +187,13 @@ static void close_work(struct work_struct *work) complete(&acct->done); } -static void acct_kill(struct bsd_acct_struct *acct) -{ - if (acct) { - struct pid_namespace *ns = acct->ns; - do_acct_process(acct); - INIT_WORK(&acct->work, close_work); - init_completion(&acct->done); - schedule_work(&acct->work); - wait_for_completion(&acct->done); - pin_remove(&acct->pin); - cmpxchg(&ns->bacct, acct, NULL); - acct->ns = NULL; - atomic_long_dec(&acct->count); - mutex_unlock(&acct->lock); - acct_put(acct); - } -} - -static void acct_pin_kill(struct fs_pin *pin) -{ - struct bsd_acct_struct *acct; - acct = container_of(pin, struct bsd_acct_struct, pin); - if (!atomic_long_inc_not_zero(&acct->count)) { - rcu_read_unlock(); - cpu_relax(); - return; - } - rcu_read_unlock(); - mutex_lock(&acct->lock); - if (!acct->ns) { - mutex_unlock(&acct->lock); - acct_put(acct); - acct = NULL; - } - acct_kill(acct); -} - static int acct_on(struct filename *pathname) { struct file *file; struct vfsmount *mnt, *internal; struct pid_namespace *ns = task_active_pid_ns(current); - struct bsd_acct_struct *acct, *old; + struct bsd_acct_struct *acct; + struct fs_pin *old; int err; acct = kzalloc(sizeof(struct bsd_acct_struct), GFP_KERNEL); @@ -252,18 +235,20 @@ static int acct_on(struct filename *pathname) file->f_path.mnt = internal; atomic_long_set(&acct->count, 1); - acct->pin.kill = acct_pin_kill; + init_fs_pin(&acct->pin, acct_pin_kill); acct->file = file; acct->needcheck = jiffies; acct->ns = ns; mutex_init(&acct->lock); + 
INIT_WORK(&acct->work, close_work); + init_completion(&acct->done); mutex_lock_nested(&acct->lock, 1); /* nobody has seen it yet */ pin_insert(&acct->pin, mnt); - old = acct_get(ns); - ns->bacct = acct; - acct_kill(old); + rcu_read_lock(); + old = xchg(&ns->bacct, &acct->pin); mutex_unlock(&acct->lock); + pin_kill(old); mnt_drop_write(mnt); mntput(mnt); return 0; @@ -299,7 +284,8 @@ SYSCALL_DEFINE1(acct, const char __user *, name) mutex_unlock(&acct_on_mutex); putname(tmp); } else { - acct_kill(acct_get(task_active_pid_ns(current))); + rcu_read_lock(); + pin_kill(task_active_pid_ns(current)->bacct); } return error; @@ -307,7 +293,8 @@ SYSCALL_DEFINE1(acct, const char __user *, name) void acct_exit_ns(struct pid_namespace *ns) { - acct_kill(acct_get(ns)); + rcu_read_lock(); + pin_kill(ns->bacct); } /* -- cgit v1.2.3 From 87b95ce0964c016ede92763be9c164e49f1019e9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 10 Jan 2015 19:01:08 -0500 Subject: switch the IO-triggering parts of umount to fs_pin Signed-off-by: Al Viro --- fs/fs_pin.c | 1 - fs/mount.h | 4 +++- fs/namespace.c | 44 +++++++++++++++++--------------------------- 3 files changed, 20 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/fs_pin.c b/fs/fs_pin.c index 0c77bdc238b2..b06c98796afb 100644 --- a/fs/fs_pin.c +++ b/fs/fs_pin.c @@ -1,7 +1,6 @@ #include #include #include -#include #include "internal.h" #include "mount.h" diff --git a/fs/mount.h b/fs/mount.h index 0ad6f760ce52..6a61c2b3e385 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -2,6 +2,7 @@ #include #include #include +#include struct mnt_namespace { atomic_t count; @@ -62,7 +63,8 @@ struct mount { int mnt_group_id; /* peer group identifier */ int mnt_expiry_mark; /* true if marked for expiry */ struct hlist_head mnt_pins; - struct path mnt_ex_mountpoint; + struct fs_pin mnt_umount; + struct dentry *mnt_ex_mountpoint; }; #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */ diff --git a/fs/namespace.c b/fs/namespace.c index cd1e9681a0cf..4a985ff0ddfc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -190,6 +190,14 @@ unsigned int mnt_get_count(struct mount *mnt) #endif } +static void drop_mountpoint(struct fs_pin *p) +{ + struct mount *m = container_of(p, struct mount, mnt_umount); + dput(m->mnt_ex_mountpoint); + pin_remove(p); + mntput(&m->mnt); +} + static struct mount *alloc_vfsmnt(const char *name) { struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); @@ -229,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name) #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif + init_fs_pin(&mnt->mnt_umount, drop_mountpoint); } return mnt; @@ -1289,7 +1298,6 @@ static HLIST_HEAD(unmounted); /* protected by namespace_sem */ static void namespace_unlock(void) { - struct mount *mnt; struct hlist_head head = unmounted; if (likely(hlist_empty(&head))) { @@ -1299,23 +1307,11 @@ static void namespace_unlock(void) head.first->pprev = &head.first; INIT_HLIST_HEAD(&unmounted); - - /* undo decrements we'd done in umount_tree() */ - hlist_for_each_entry(mnt, &head, mnt_hash) - if (mnt->mnt_ex_mountpoint.mnt) - mntget(mnt->mnt_ex_mountpoint.mnt); - up_write(&namespace_sem); synchronize_rcu(); - while (!hlist_empty(&head)) { - mnt = hlist_entry(head.first, struct mount, mnt_hash); - hlist_del_init(&mnt->mnt_hash); - if (mnt->mnt_ex_mountpoint.mnt) - path_put(&mnt->mnt_ex_mountpoint); - mntput(&mnt->mnt); - } + group_pin_kill(&head); } static inline void namespace_lock(void) @@ -1334,7 +1330,6 @@ void umount_tree(struct 
mount *mnt, int how) { HLIST_HEAD(tmp_list); struct mount *p; - struct mount *last = NULL; for (p = mnt; p; p = next_mnt(p, mnt)) { hlist_del_init_rcu(&p->mnt_hash); @@ -1347,33 +1342,28 @@ void umount_tree(struct mount *mnt, int how) if (how) propagate_umount(&tmp_list); - hlist_for_each_entry(p, &tmp_list, mnt_hash) { + while (!hlist_empty(&tmp_list)) { + p = hlist_entry(tmp_list.first, struct mount, mnt_hash); + hlist_del_init_rcu(&p->mnt_hash); list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; if (how < 2) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; + + pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted); if (mnt_has_parent(p)) { hlist_del_init(&p->mnt_mp_list); put_mountpoint(p->mnt_mp); mnt_add_count(p->mnt_parent, -1); - /* move the reference to mountpoint into ->mnt_ex_mountpoint */ - p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint; - p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt; + /* old mountpoint will be dropped when we can do that */ + p->mnt_ex_mountpoint = p->mnt_mountpoint; p->mnt_mountpoint = p->mnt.mnt_root; p->mnt_parent = p; p->mnt_mp = NULL; } change_mnt_propagation(p, MS_PRIVATE); - last = p; - } - if (last) { - last->mnt_hash.next = unmounted.first; - if (unmounted.first) - unmounted.first->pprev = &last->mnt_hash.next; - unmounted.first = tmp_list.first; - unmounted.first->pprev = &unmounted.first; } } -- cgit v1.2.3 From 04ecddb73dd83d1094a904b8d03c57880998daee Mon Sep 17 00:00:00 2001 From: Jan Mrazek Date: Mon, 26 Jan 2015 14:42:31 -0500 Subject: ext4: change to use setup_timer() instead of init_timer() Signed-off-by: Jan Mrazek Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 74c5f53595fb..95b388cae0ab 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3915,9 +3915,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); - init_timer(&sbi->s_err_report); - sbi->s_err_report.function = print_daily_error_info; - sbi->s_err_report.data = (unsigned long) sb; + setup_timer(&sbi->s_err_report, print_daily_error_info, + (unsigned long) sb); /* Register extent status tree shrinker */ if (ext4_es_register_shrinker(sbi)) -- cgit v1.2.3 From 895d9db253a0b0b1f8a6e635fb2460d80bf72d5a Mon Sep 17 00:00:00 2001 From: Subodh Nijsure Date: Fri, 31 Oct 2014 13:50:29 -0500 Subject: UBIFS: Add xattr support for symlinks Artem: rename the __ubifs_setxattr() functions to just 'setxattr()'. 
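With symlink inodes now wired up to the xattr entry points, a symlink's own attributes can be manipulated from user space through the l* syscalls. A hypothetical sketch (the path and attribute name are invented; trusted.* is used because the VFS refuses user.* attributes on symlinks, and it requires CAP_SYS_ADMIN):

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
        const char *link = "/mnt/ubifs/link";   /* hypothetical symlink on a UBIFS mount */
        const char *val = "tagged";
        char buf[64];
        ssize_t n;

        /* the l* variants act on the symlink itself, not on its target */
        if (lsetxattr(link, "trusted.demo", val, strlen(val), 0) < 0) {
                perror("lsetxattr");
                return 1;
        }

        n = lgetxattr(link, "trusted.demo", buf, sizeof(buf));
        if (n < 0) {
                perror("lgetxattr");
                return 1;
        }
        printf("trusted.demo = %.*s\n", (int)n, buf);
        return 0;
}

The getfattr/setfattr tools exercise the same paths when invoked with their -h (no-dereference) option.
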
Signed-off-by: Subodh Nijsure Signed-off-by: Marc Kleine-Budde Signed-off-by: Ben Shelton Acked-by: Terry Wilcox Acked-by: Gratian Crisan Signed-off-by: Artem Bityutskiy --- fs/ubifs/file.c | 4 ++++ fs/ubifs/xattr.c | 17 ++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 538519ee37d9..4855abcfe256 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1574,6 +1574,10 @@ const struct inode_operations ubifs_symlink_inode_operations = { .follow_link = ubifs_follow_link, .setattr = ubifs_setattr, .getattr = ubifs_getattr, + .setxattr = ubifs_setxattr, + .getxattr = ubifs_getxattr, + .listxattr = ubifs_listxattr, + .removexattr = ubifs_removexattr, }; const struct file_operations ubifs_file_operations = { diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 5e0a63b1b0d5..c3254a681a78 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -293,18 +293,16 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum) return ERR_PTR(-EINVAL); } -int ubifs_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags) +static int setxattr(struct inode *host, const char *name, const void *value, + size_t size, int flags) { - struct inode *inode, *host = dentry->d_inode; + struct inode *inode; struct ubifs_info *c = host->i_sb->s_fs_info; struct qstr nm = QSTR_INIT(name, strlen(name)); struct ubifs_dent_node *xent; union ubifs_key key; int err, type; - dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", name, - host->i_ino, dentry, size); ubifs_assert(mutex_is_locked(&host->i_mutex)); if (size > UBIFS_MAX_INO_DATA) @@ -356,6 +354,15 @@ out_free: return err; } +int ubifs_setxattr(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", + name, dentry->d_inode->i_ino, dentry, size); + + return setxattr(dentry->d_inode, name, value, size, flags); +} + ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, size_t size) { -- cgit v1.2.3 From d7f0b70d30ffb9bbe6b8a3e1035cf0b79965ef53 Mon Sep 17 00:00:00 2001 From: Subodh Nijsure Date: Fri, 31 Oct 2014 13:50:30 -0500 Subject: UBIFS: Add security.* XATTR support for the UBIFS Artem: rename static functions so that they do not use the "ubifs_" prefix - we only use this prefix for non-static functions. 
Artem: remove few junk white-space changes in file.c Signed-off-by: Subodh Nijsure Signed-off-by: Marc Kleine-Budde Signed-off-by: Ben Shelton Acked-by: Brad Mouring Acked-by: Terry Wilcox Acked-by: Gratian Crisan Signed-off-by: Artem Bityutskiy --- fs/ubifs/dir.c | 16 ++++++++++++ fs/ubifs/super.c | 1 + fs/ubifs/ubifs.h | 4 +++ fs/ubifs/xattr.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 99 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ea41649e4ca5..f7e8f765b36f 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -272,6 +272,10 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode, goto out_budg; } + err = ubifs_init_security(dir, inode, &dentry->d_name); + if (err) + goto out_cancel; + mutex_lock(&dir_ui->ui_mutex); dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; @@ -728,6 +732,10 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) goto out_budg; } + err = ubifs_init_security(dir, inode, &dentry->d_name); + if (err) + goto out_cancel; + mutex_lock(&dir_ui->ui_mutex); insert_inode_hash(inode); inc_nlink(inode); @@ -808,6 +816,10 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry, ui->data = dev; ui->data_len = devlen; + err = ubifs_init_security(dir, inode, &dentry->d_name); + if (err) + goto out_cancel; + mutex_lock(&dir_ui->ui_mutex); dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; @@ -884,6 +896,10 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry, ui->data_len = len; inode->i_size = ubifs_inode(inode)->ui_size = len; + err = ubifs_init_security(dir, inode, &dentry->d_name); + if (err) + goto out_cancel; + mutex_lock(&dir_ui->ui_mutex); dir->i_size += sz_change; dir_ui->ui_size = dir->i_size; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 106bf20629ce..e6420677cebc 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2039,6 +2039,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) if (c->max_inode_sz > MAX_LFS_FILESIZE) sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; sb->s_op = &ubifs_super_operations; + sb->s_xattr = ubifs_xattr_handlers; mutex_lock(&c->umount_mutex); err = mount_ubifs(c); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index c4fe900c67ab..bc04b9c69891 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -36,6 +36,7 @@ #include #include #include +#include #include "ubifs-media.h" /* Version of this UBIFS implementation */ @@ -1465,6 +1466,7 @@ extern spinlock_t ubifs_infos_lock; extern atomic_long_t ubifs_clean_zn_cnt; extern struct kmem_cache *ubifs_inode_slab; extern const struct super_operations ubifs_super_operations; +extern const struct xattr_handler *ubifs_xattr_handlers[]; extern const struct address_space_operations ubifs_file_address_operations; extern const struct file_operations ubifs_file_operations; extern const struct inode_operations ubifs_file_inode_operations; @@ -1754,6 +1756,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, size_t size); ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size); int ubifs_removexattr(struct dentry *dentry, const char *name); +int ubifs_init_security(struct inode *dentry, struct inode *inode, + const struct qstr *qstr); /* super.c */ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum); diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index c3254a681a78..2bdab8b11f3f 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ 
-575,3 +575,81 @@ out_free: kfree(xent); return err; } + +static size_t security_listxattr(struct dentry *d, char *list, size_t list_size, + const char *name, size_t name_len, int flags) +{ + const int prefix_len = XATTR_SECURITY_PREFIX_LEN; + const size_t total_len = prefix_len + name_len + 1; + + if (list && total_len <= list_size) { + memcpy(list, XATTR_SECURITY_PREFIX, prefix_len); + memcpy(list + prefix_len, name, name_len); + list[prefix_len + name_len] = '\0'; + } + + return total_len; +} + +static int security_getxattr(struct dentry *d, const char *name, void *buffer, + size_t size, int flags) +{ + return ubifs_getxattr(d, name, buffer, size); +} + +static int security_setxattr(struct dentry *d, const char *name, + const void *value, size_t size, int flags, + int handler_flags) +{ + return ubifs_setxattr(d, name, value, size, flags); +} + +static const struct xattr_handler ubifs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .list = security_listxattr, + .get = security_getxattr, + .set = security_setxattr, +}; + +const struct xattr_handler *ubifs_xattr_handlers[] = { + &ubifs_xattr_security_handler, + NULL, +}; + +static int init_xattrs(struct inode *inode, const struct xattr *xattr_array, + void *fs_info) +{ + const struct xattr *xattr; + char *name; + int err = 0; + + for (xattr = xattr_array; xattr->name != NULL; xattr++) { + name = kmalloc(XATTR_SECURITY_PREFIX_LEN + + strlen(xattr->name) + 1, GFP_NOFS); + if (!name) { + err = -ENOMEM; + break; + } + strcpy(name, XATTR_SECURITY_PREFIX); + strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); + err = setxattr(inode, name, xattr->value, xattr->value_len, 0); + kfree(name); + if (err < 0) + break; + } + + return err; +} + +int ubifs_init_security(struct inode *dentry, struct inode *inode, + const struct qstr *qstr) +{ + int err; + + mutex_lock(&inode->i_mutex); + err = security_inode_init_security(inode, dentry, qstr, + &init_xattrs, 0); + mutex_unlock(&inode->i_mutex); + + return err; +} -- cgit v1.2.3 From fee1756d80c24d5a3171cb9f76d612e512439dd2 Mon Sep 17 00:00:00 2001 From: Subodh Nijsure Date: Fri, 31 Oct 2014 13:50:31 -0500 Subject: UBIFS: add ubifs_err() to print error reason This patch adds ubifs_err() output to some error paths to tell the user what's going on. 
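One of the messages added below warns that the total length of an inode's xattr names would exceed XATTR_LIST_MAX. A user who hits it can check how much of that budget an inode already consumes with a size-only listxattr() query; a hypothetical user-space sketch, not part of this patch:

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
        ssize_t used;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <path>\n", argv[0]);
                return 1;
        }

        /* a NULL buffer with size 0 asks only how much space the names need */
        used = listxattr(argv[1], NULL, 0);
        if (used < 0) {
                perror("listxattr");
                return 1;
        }
        printf("%s: %zd bytes of xattr names in use\n", argv[1], used);
        return 0;
}
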
Artem: improve the messages, rename too long variable Signed-off-by: Subodh Nijsure Signed-off-by: Marc Kleine-Budde Signed-off-by: Ben Shelton Acked-by: Brad Mouring Acked-by: Terry Wilcox Acked-by: Gratian Crisan Signed-off-by: Artem Bityutskiy --- fs/ubifs/xattr.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index 2bdab8b11f3f..a92be244a6fb 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -100,24 +100,30 @@ static const struct file_operations empty_fops; static int create_xattr(struct ubifs_info *c, struct inode *host, const struct qstr *nm, const void *value, int size) { - int err; + int err, names_len; struct inode *inode; struct ubifs_inode *ui, *host_ui = ubifs_inode(host); struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, .new_ino_d = ALIGN(size, 8), .dirtied_ino = 1, .dirtied_ino_d = ALIGN(host_ui->data_len, 8) }; - if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE) + if (host_ui->xattr_cnt >= MAX_XATTRS_PER_INODE) { + ubifs_err("inode %lu already has too many xattrs (%d), cannot create more", + host->i_ino, host_ui->xattr_cnt); return -ENOSPC; + } /* * Linux limits the maximum size of the extended attribute names list * to %XATTR_LIST_MAX. This means we should not allow creating more * extended attributes if the name list becomes larger. This limitation * is artificial for UBIFS, though. */ - if (host_ui->xattr_names + host_ui->xattr_cnt + - nm->len + 1 > XATTR_LIST_MAX) + names_len = host_ui->xattr_names + host_ui->xattr_cnt + nm->len + 1; + if (names_len > XATTR_LIST_MAX) { + ubifs_err("cannot add one more xattr name to inode %lu, total names length would become %d, max. is %d", + host->i_ino, names_len, XATTR_LIST_MAX); return -ENOSPC; + } err = ubifs_budget_space(c, &req); if (err) @@ -651,5 +657,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, &init_xattrs, 0); mutex_unlock(&inode->i_mutex); + if (err) + ubifs_err("cannot initialize security for inode %lu, error %d", + inode->i_ino, err); return err; } -- cgit v1.2.3 From fb4325a3d9f983160f142b919880ccbe2304bc25 Mon Sep 17 00:00:00 2001 From: Artem Bityutskiy Date: Tue, 25 Nov 2014 16:41:26 +0200 Subject: UBIFS: add a couple of extra asserts ... to catch possible memory corruptions. Reported-by: Dan Carpenter Signed-off-by: Artem Bityutskiy --- fs/ubifs/debug.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 7ed13e1e216a..4cfb3e82c56f 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -2032,6 +2032,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, long long blk_offs; struct ubifs_data_node *dn = node; + ubifs_assert(zbr->len >= UBIFS_DATA_NODE_SZ); + /* * Search the inode node this data node belongs to and insert * it to the RB-tree of inodes. 
@@ -2060,6 +2062,8 @@ static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr, struct ubifs_dent_node *dent = node; struct fsck_inode *fscki1; + ubifs_assert(zbr->len >= UBIFS_DENT_NODE_SZ); + err = ubifs_validate_entry(c, dent); if (err) goto out_dump; -- cgit v1.2.3 From 05afcb77eb4713f46e7ebaa3cb54bc465c5d516e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 23 Jan 2015 01:08:07 -0500 Subject: new helper: iov_iter_bvec() similar to iov_iter_kvec(), for ITER_BVEC ones Signed-off-by: Al Viro --- fs/splice.c | 7 ++----- include/linux/uio.h | 4 +++- mm/iov_iter.c | 17 +++++++++++++++-- mm/page_io.c | 9 ++------- 4 files changed, 22 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/splice.c b/fs/splice.c index 75c6058eabf2..7c7176f2d1e7 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1006,11 +1006,8 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, } /* ... iov_iter */ - from.type = ITER_BVEC | WRITE; - from.bvec = array; - from.nr_segs = n; - from.count = sd.total_len - left; - from.iov_offset = 0; + iov_iter_bvec(&from, ITER_BVEC | WRITE, array, n, + sd.total_len - left); /* ... and iocb */ init_sync_kiocb(&kiocb, out); diff --git a/include/linux/uio.h b/include/linux/uio.h index 1c5e453f7ea9..b447402d9737 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -88,7 +88,9 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *); unsigned long iov_iter_alignment(const struct iov_iter *i); void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, unsigned long nr_segs, size_t count); -void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov, +void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec, + unsigned long nr_segs, size_t count); +void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count); ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); diff --git a/mm/iov_iter.c b/mm/iov_iter.c index a1599ca4ab0e..827732047da1 100644 --- a/mm/iov_iter.c +++ b/mm/iov_iter.c @@ -501,18 +501,31 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i) EXPORT_SYMBOL(iov_iter_single_seg_count); void iov_iter_kvec(struct iov_iter *i, int direction, - const struct kvec *iov, unsigned long nr_segs, + const struct kvec *kvec, unsigned long nr_segs, size_t count) { BUG_ON(!(direction & ITER_KVEC)); i->type = direction; - i->kvec = (struct kvec *)iov; + i->kvec = kvec; i->nr_segs = nr_segs; i->iov_offset = 0; i->count = count; } EXPORT_SYMBOL(iov_iter_kvec); +void iov_iter_bvec(struct iov_iter *i, int direction, + const struct bio_vec *bvec, unsigned long nr_segs, + size_t count) +{ + BUG_ON(!(direction & ITER_BVEC)); + i->type = direction; + i->bvec = bvec; + i->nr_segs = nr_segs; + i->iov_offset = 0; + i->count = count; +} +EXPORT_SYMBOL(iov_iter_bvec); + unsigned long iov_iter_alignment(const struct iov_iter *i) { unsigned long res = 0; diff --git a/mm/page_io.c b/mm/page_io.c index 955db8b0d497..e6045804c8d8 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -269,14 +269,9 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, .bv_len = PAGE_SIZE, .bv_offset = 0 }; - struct iov_iter from = { - .type = ITER_BVEC | WRITE, - .count = PAGE_SIZE, - .iov_offset = 0, - .nr_segs = 1, - }; - from.bvec = &bv; /* older gcc versions are broken */ + struct iov_iter from; + iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE); 
init_sync_kiocb(&kiocb, swap_file); kiocb.ki_pos = page_file_offset(page); kiocb.ki_nbytes = PAGE_SIZE; -- cgit v1.2.3 From dbe4e192a234cd6133d86fffb965d0f032c12ccc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 25 Jan 2015 21:11:59 +0100 Subject: fs: add vfs_iter_{read,write} helpers Simple helpers that pass an arbitrary iov_iter to filesystems. Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/read_write.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ fs/splice.c | 16 ++-------------- include/linux/fs.h | 3 +++ 3 files changed, 51 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/read_write.c b/fs/read_write.c index c0805c93b6fa..ab4f26a7d5cb 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -333,6 +333,52 @@ out_putf: } #endif +ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos) +{ + struct kiocb kiocb; + ssize_t ret; + + if (!file->f_op->read_iter) + return -EINVAL; + + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = *ppos; + kiocb.ki_nbytes = iov_iter_count(iter); + + iter->type |= READ; + ret = file->f_op->read_iter(&kiocb, iter); + if (ret == -EIOCBQUEUED) + ret = wait_on_sync_kiocb(&kiocb); + + if (ret > 0) + *ppos = kiocb.ki_pos; + return ret; +} +EXPORT_SYMBOL(vfs_iter_read); + +ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) +{ + struct kiocb kiocb; + ssize_t ret; + + if (!file->f_op->write_iter) + return -EINVAL; + + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = *ppos; + kiocb.ki_nbytes = iov_iter_count(iter); + + iter->type |= WRITE; + ret = file->f_op->write_iter(&kiocb, iter); + if (ret == -EIOCBQUEUED) + ret = wait_on_sync_kiocb(&kiocb); + + if (ret > 0) + *ppos = kiocb.ki_pos; + return ret; +} +EXPORT_SYMBOL(vfs_iter_write); + /* * rw_verify_area doesn't like huge counts. We limit * them to something that fits in "int" so that others diff --git a/fs/splice.c b/fs/splice.c index 7c7176f2d1e7..7968da96bebb 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -961,7 +961,6 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, splice_from_pipe_begin(&sd); while (sd.total_len) { struct iov_iter from; - struct kiocb kiocb; size_t left; int n, idx; @@ -1005,26 +1004,15 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, left -= this_len; } - /* ... iov_iter */ iov_iter_bvec(&from, ITER_BVEC | WRITE, array, n, sd.total_len - left); - - /* ... 
and iocb */ - init_sync_kiocb(&kiocb, out); - kiocb.ki_pos = sd.pos; - kiocb.ki_nbytes = sd.total_len - left; - - /* now, send it */ - ret = out->f_op->write_iter(&kiocb, &from); - if (-EIOCBQUEUED == ret) - ret = wait_on_sync_kiocb(&kiocb); - + ret = vfs_iter_write(out, &from, &sd.pos); if (ret <= 0) break; sd.num_spliced += ret; sd.total_len -= ret; - *ppos = sd.pos = kiocb.ki_pos; + *ppos = sd.pos; /* dismiss the fully eaten buffers, adjust the partial one */ while (ret) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 42efe13077b6..1f3c43952540 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2494,6 +2494,9 @@ extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t l extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos); extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); +ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos); +ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos); + /* fs/block_dev.c */ extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to); extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); -- cgit v1.2.3 From 6ffa30d3f734d4f6b478081dfc09592021028f90 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 14 Jan 2015 13:08:57 -0500 Subject: nfs: don't call blocking operations while !TASK_RUNNING Bruce reported seeing this warning pop when mounting using v4.1: ------------[ cut here ]------------ WARNING: CPU: 1 PID: 1121 at kernel/sched/core.c:7300 __might_sleep+0xbd/0xd0() do not call blocking ops when !TASK_RUNNING; state=1 set at [] prepare_to_wait+0x2f/0x90 Modules linked in: rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace sunrpc fscache ip6t_rpfilter ip6t_REJECT nf_reject_ipv6 xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw ip6table_filter ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw snd_hda_codec_generic snd_hda_intel snd_hda_controller snd_hda_codec snd_hwdep snd_pcm snd_timer ppdev joydev snd virtio_console virtio_balloon pcspkr serio_raw parport_pc parport pvpanic floppy soundcore i2c_piix4 virtio_blk virtio_net qxl drm_kms_helper ttm drm virtio_pci virtio_ring ata_generic virtio pata_acpi CPU: 1 PID: 1121 Comm: nfsv4.1-svc Not tainted 3.19.0-rc4+ #25 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.7.5-20140709_153950- 04/01/2014 0000000000000000 000000004e5e3f73 ffff8800b998fb48 ffffffff8186ac78 0000000000000000 ffff8800b998fba0 ffff8800b998fb88 ffffffff810ac9da ffff8800b998fb68 ffffffff81c923e7 00000000000004d9 0000000000000000 Call Trace: [] dump_stack+0x4c/0x65 [] warn_slowpath_common+0x8a/0xc0 [] warn_slowpath_fmt+0x55/0x70 [] ? prepare_to_wait+0x2f/0x90 [] ? prepare_to_wait+0x2f/0x90 [] __might_sleep+0xbd/0xd0 [] kmem_cache_alloc_trace+0x243/0x430 [] ? groups_alloc+0x3e/0x130 [] groups_alloc+0x3e/0x130 [] svcauth_unix_accept+0x16e/0x290 [sunrpc] [] svc_authenticate+0xe1/0xf0 [sunrpc] [] svc_process_common+0x244/0x6a0 [sunrpc] [] bc_svc_process+0x1c4/0x260 [sunrpc] [] nfs41_callback_svc+0x128/0x1f0 [nfsv4] [] ? wait_woken+0xc0/0xc0 [] ? nfs4_callback_svc+0x60/0x60 [nfsv4] [] kthread+0x11f/0x140 [] ? local_clock+0x15/0x30 [] ? 
kthread_create_on_node+0x250/0x250 [] ret_from_fork+0x7c/0xb0 [] ? kthread_create_on_node+0x250/0x250 ---[ end trace 675220a11e30f4f2 ]--- nfs41_callback_svc does most of its work while in TASK_INTERRUPTIBLE, which is just wrong. Fix that by finishing the wait immediately if we've found that the list has something on it. Also, we don't expect this kthread to accept signals, so we should be using a TASK_UNINTERRUPTIBLE sleep instead. That however, opens us up hung task warnings from the watchdog, so have the schedule_timeout wake up every 60s if there's no callback activity. Reported-by: "J. Bruce Fields" Signed-off-by: Jeff Layton Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index b8fb3a4ef649..351be9205bf8 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -128,22 +128,24 @@ nfs41_callback_svc(void *vrqstp) if (try_to_freeze()) continue; - prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); + prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE); spin_lock_bh(&serv->sv_cb_lock); if (!list_empty(&serv->sv_cb_list)) { req = list_first_entry(&serv->sv_cb_list, struct rpc_rqst, rq_bc_list); list_del(&req->rq_bc_list); spin_unlock_bh(&serv->sv_cb_lock); + finish_wait(&serv->sv_cb_waitq, &wq); dprintk("Invoking bc_svc_process()\n"); error = bc_svc_process(serv, req, rqstp); dprintk("bc_svc_process() returned w/ error code= %d\n", error); } else { spin_unlock_bh(&serv->sv_cb_lock); - schedule(); + /* schedule_timeout to game the hung task watchdog */ + schedule_timeout(60 * HZ); + finish_wait(&serv->sv_cb_waitq, &wq); } - finish_wait(&serv->sv_cb_waitq, &wq); } return 0; } -- cgit v1.2.3 From 3a7ed3fff3bb22828f7d7ba6b75c7d22ee54df38 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Thu, 8 Jan 2015 01:18:30 -0800 Subject: nfs: prevent truncate on active swapfile Most filesystems prevent truncation of an active swapfile by way of inode_newsize_ok, called from inode_change_ok. NFS doesn't call either from nfs_setattr, presumably because most of these checks are expected to be done server-side. However, the IS_SWAPFILE check can only be done client-side, and truncating a swapfile can't possibly be good. Signed-off-by: Omar Sandoval Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 2211f6ba8736..d2398c193bda 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -507,10 +507,15 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr) attr->ia_valid &= ~ATTR_MODE; if (attr->ia_valid & ATTR_SIZE) { + loff_t i_size; + BUG_ON(!S_ISREG(inode->i_mode)); - if (attr->ia_size == i_size_read(inode)) + i_size = i_size_read(inode); + if (attr->ia_size == i_size) attr->ia_valid &= ~ATTR_SIZE; + else if (attr->ia_size < i_size && IS_SWAPFILE(inode)) + return -ETXTBSY; } /* Optimization: if the end result is no change, don't RPC */ -- cgit v1.2.3 From c7c545d4a34872f4a3d710e22f21fb61f7258706 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 17 Dec 2014 02:52:26 +0300 Subject: NFS: a couple off by ones These tests are off by one because if len == sizeof(nfs_export_path) then we have truncated the name. 
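For illustration only, a minimal userspace sketch (not part of the patch) of why the check has to be ">=": snprintf() returns the length the formatted string would have had excluding the terminating NUL, so a return value equal to the buffer size already means the last character was dropped to make room for the NUL.

#include <stdio.h>

int main(void)
{
	char nfs_export_path[8];	/* stand-in for the real buffer */
	int len;

	/* "/export1" is 8 characters and needs 9 bytes including the NUL */
	len = snprintf(nfs_export_path, sizeof(nfs_export_path), "%s",
		       "/export1");

	/* len == 8 == sizeof(nfs_export_path): the stored name is the
	 * truncated "/export", so only "len >= sizeof(...)" catches it */
	printf("len=%d path=\"%s\"\n", len, nfs_export_path);
	return 0;
}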
Signed-off-by: Dan Carpenter Signed-off-by: Trond Myklebust --- fs/nfs/nfsroot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index cd3c910d2d12..9bc9f04fb7f6 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c @@ -261,11 +261,11 @@ static int __init root_nfs_data(char *cmdline) */ len = snprintf(nfs_export_path, sizeof(nfs_export_path), tmp, utsname()->nodename); - if (len > (int)sizeof(nfs_export_path)) + if (len >= (int)sizeof(nfs_export_path)) goto out_devnametoolong; len = snprintf(nfs_root_device, sizeof(nfs_root_device), "%pI4:%s", &servaddr, nfs_export_path); - if (len > (int)sizeof(nfs_root_device)) + if (len >= (int)sizeof(nfs_root_device)) goto out_devnametoolong; retval = 0; -- cgit v1.2.3 From 15d0f5ea348b9c4e6d41df294dde38a56a39c7bf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 2 Feb 2015 10:07:59 -0700 Subject: Make super_blocks and sb_lock static The only user outside of fs/super.c is gone now Signed-off-by: Al Viro Acked-by: Christoph Hellwig Signed-off-by: Jens Axboe --- fs/super.c | 4 ++-- include/linux/fs.h | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index 3b4dadafdd60..05a021638b11 100644 --- a/fs/super.c +++ b/fs/super.c @@ -36,8 +36,8 @@ #include "internal.h" -LIST_HEAD(super_blocks); -DEFINE_SPINLOCK(sb_lock); +static LIST_HEAD(super_blocks); +static DEFINE_SPINLOCK(sb_lock); static char *sb_writers_name[SB_FREEZE_LEVELS] = { "sb_writers", diff --git a/include/linux/fs.h b/include/linux/fs.h index 65d02de342e1..2f717baefdf8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1183,8 +1183,6 @@ struct mm_struct; #define UMOUNT_NOFOLLOW 0x00000008 /* Don't follow symlink on umount */ #define UMOUNT_UNUSED 0x80000000 /* Flag guaranteed to be unused */ -extern struct list_head super_blocks; -extern spinlock_t sb_lock; /* Possible states of 'frozen' field */ enum { -- cgit v1.2.3 From 2ab99ee12440e66ec1efd2a98599010471de785e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 21 Jan 2015 19:14:02 +0100 Subject: fs: track fl_owner for leases Just like for other lock types we should allow different owners to have a read lease on a file. Currently this can't happen, but with the addition of pNFS layout leases we'll need this feature. 
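As a rough sketch of the idea (illustration only, with hypothetical demo types rather than the kernel's struct file_lock): a lease is now identified by the (file, owner) pair instead of by the file alone, which is what allows two owners to each hold their own read lease on the same file.

struct demo_lease {
	const void *file;	/* which open file the lease covers */
	const void *owner;	/* fl_owner: who holds the lease */
	struct demo_lease *next;
};

/* The old behaviour matched on file only; matching on owner as well lets
 * several read leases coexist on one file, one per owner. */
static struct demo_lease *
demo_find_lease(struct demo_lease *head, const void *file, const void *owner)
{
	struct demo_lease *l;

	for (l = head; l; l = l->next)
		if (l->file == file && l->owner == owner)
			return l;
	return NULL;
}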
Signed-off-by: Christoph Hellwig --- fs/locks.c | 12 +++++++----- fs/nfsd/nfs4state.c | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 4d0d41163a50..22ac7694cc84 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1661,7 +1661,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr */ error = -EAGAIN; list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file == filp) { + if (fl->fl_file == filp && + fl->fl_owner == lease->fl_owner) { my_fl = fl; continue; } @@ -1721,7 +1722,7 @@ out: return error; } -static int generic_delete_lease(struct file *filp) +static int generic_delete_lease(struct file *filp, void *owner) { int error = -EAGAIN; struct file_lock *fl, *victim = NULL; @@ -1737,7 +1738,8 @@ static int generic_delete_lease(struct file *filp) spin_lock(&ctx->flc_lock); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file == filp) { + if (fl->fl_file == filp && + fl->fl_owner == owner) { victim = fl; break; } @@ -1778,7 +1780,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp, switch (arg) { case F_UNLCK: - return generic_delete_lease(filp); + return generic_delete_lease(filp, *priv); case F_RDLCK: case F_WRLCK: if (!(*flp)->fl_lmops->lm_break) { @@ -1857,7 +1859,7 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg) int fcntl_setlease(unsigned int fd, struct file *filp, long arg) { if (arg == F_UNLCK) - return vfs_setlease(filp, F_UNLCK, NULL, NULL); + return vfs_setlease(filp, F_UNLCK, NULL, (void **)&filp); return do_fcntl_add_lease(fd, filp, arg); } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 370a53a5da13..e6b354a0d89e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -683,7 +683,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp) spin_unlock(&fp->fi_lock); if (filp) { - vfs_setlease(filp, F_UNLCK, NULL, NULL); + vfs_setlease(filp, F_UNLCK, NULL, (void **)&fp); fput(filp); } } -- cgit v1.2.3 From 11afe9f76e121e960445deee5b7f26f0787a1990 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 21 Jan 2015 19:17:03 +0100 Subject: fs: add FL_LAYOUT lease type This (ab-)uses the file locking code to allow filesystems to recall outstanding pNFS layouts on a file. This new lease type is similar but not quite the same as FL_DELEG. An FL_LAYOUT lease can always be granted, and a per-filesystem lock (XFS iolock for the initial implementation) ensures no FL_LAYOUT leases are granted when we would need to recall them. Also included are changes that allow multiple outstanding read leases of different types on the same file as long as they have a different owner. This wasn't a problem until now as nfsd never set FL_LEASE leases, and no one else used FL_DELEG leases, but given that nfsd will also issue FL_LAYOUT leases we will have to handle it now.
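A minimal sketch of the intended use (the fs_truncate_demo() wrapper is hypothetical; break_layout() is the helper added further down in this patch): before an operation such as truncate invalidates mapped extents, the filesystem recalls any outstanding FL_LAYOUT leases while holding its per-filesystem lock.

static int fs_truncate_demo(struct inode *inode, loff_t newsize)
{
	int error;

	/* Recall outstanding pNFS layouts (FL_LAYOUT leases); with
	 * wait == true this blocks until the clients return them. */
	error = break_layout(inode, true);
	if (error)
		return error;

	/* Safe to change the size now; the per-filesystem lock held by
	 * the caller keeps new layouts from being granted meanwhile. */
	truncate_setsize(inode, newsize);
	return 0;
}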
Signed-off-by: Christoph Hellwig --- fs/locks.c | 14 ++++++++++---- include/linux/fs.h | 16 ++++++++++++++++ 2 files changed, 26 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 22ac7694cc84..4753218f308e 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -137,7 +137,7 @@ #define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) #define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) -#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG)) +#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) #define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK) static bool lease_breaking(struct file_lock *fl) @@ -1371,6 +1371,8 @@ static void time_out_leases(struct inode *inode, struct list_head *dispose) static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) { + if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) + return false; if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) return false; return locks_conflict(breaker, lease); @@ -1594,11 +1596,14 @@ int fcntl_getlease(struct file *filp) * conflict with the lease we're trying to set. */ static int -check_conflicting_open(const struct dentry *dentry, const long arg) +check_conflicting_open(const struct dentry *dentry, const long arg, int flags) { int ret = 0; struct inode *inode = dentry->d_inode; + if (flags & FL_LAYOUT) + return 0; + if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0)) return -EAGAIN; @@ -1647,7 +1652,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); - error = check_conflicting_open(dentry, arg); + error = check_conflicting_open(dentry, arg, lease->fl_flags); if (error) goto out; @@ -1703,7 +1708,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr * precedes these checks. */ smp_mb(); - error = check_conflicting_open(dentry, arg); + error = check_conflicting_open(dentry, arg, lease->fl_flags); if (error) { locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt); goto out; @@ -1787,6 +1792,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp, WARN_ON_ONCE(1); return -ENOLCK; } + return generic_add_lease(filp, arg, flp, priv); default: return -EINVAL; diff --git a/include/linux/fs.h b/include/linux/fs.h index ddd2fa7cefd3..84740145f835 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -875,6 +875,7 @@ static inline struct file *get_file(struct file *f) #define FL_DOWNGRADE_PENDING 256 /* Lease is being downgraded */ #define FL_UNLOCK_PENDING 512 /* Lease is being broken */ #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ +#define FL_LAYOUT 2048 /* outstanding pNFS layout */ /* * Special return value from posix_lock_file() and vfs_lock_file() for @@ -2037,6 +2038,16 @@ static inline int break_deleg_wait(struct inode **delegated_inode) return ret; } +static inline int break_layout(struct inode *inode, bool wait) +{ + smp_mb(); + if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + return __break_lease(inode, + wait ? 
O_WRONLY : O_WRONLY | O_NONBLOCK, + FL_LAYOUT); + return 0; +} + #else /* !CONFIG_FILE_LOCKING */ static inline int locks_mandatory_locked(struct file *file) { @@ -2092,6 +2103,11 @@ static inline int break_deleg_wait(struct inode **delegated_inode) return 0; } +static inline int break_layout(struct inode *inode, bool wait) +{ + return 0; +} + #endif /* CONFIG_FILE_LOCKING */ /* fs/open.c */ -- cgit v1.2.3 From 4d94c2ef2008a07fb1467e33da156de6fba9aad1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 14 Aug 2014 08:41:48 +0200 Subject: nfsd: move nfsd_fh_match to nfsfh.h The pnfs code will need it too. Also remove the nfsd_ prefix to match the other filehandle helpers in that file. Signed-off-by: Christoph Hellwig --- fs/nfsd/nfs4state.c | 12 ++---------- fs/nfsd/nfsfh.h | 9 +++++++++ 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index e6b354a0d89e..eb0336e526d2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -398,14 +398,6 @@ static unsigned int file_hashval(struct knfsd_fh *fh) return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1); } -static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2) -{ - return fh1->fh_size == fh2->fh_size && - !memcmp(fh1->fh_base.fh_pad, - fh2->fh_base.fh_pad, - fh1->fh_size); -} - static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; static void @@ -3295,7 +3287,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval) struct nfs4_file *fp; hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) { - if (nfsd_fh_match(&fp->fi_fhandle, fh)) { + if (fh_match(&fp->fi_fhandle, fh)) { if (atomic_inc_not_zero(&fp->fi_ref)) return fp; } @@ -4290,7 +4282,7 @@ laundromat_main(struct work_struct *laundry) static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) { - if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) + if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) return nfserr_bad_stateid; return nfs_ok; } diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index 08236d70c667..e24d95436db3 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h @@ -187,6 +187,15 @@ fh_init(struct svc_fh *fhp, int maxsize) return fhp; } +static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2) +{ + if (fh1->fh_size != fh2->fh_size) + return false; + if (memcmp(fh1->fh_base.fh_pad, fh2->fh_base.fh_pad, fh1->fh_size) != 0) + return false; + return true; +} + #ifdef CONFIG_NFSD_V3 /* * The wcc data stored in current_fh should be cleared -- cgit v1.2.3 From 9558f2500a2028ffc05cfd8fceaa0fe0a0a3804e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 13 Aug 2014 20:56:13 +0200 Subject: nfsd: add fh_fsid_match helper Add a helper to check that the fsid parts of two file handles match. 
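As a small sketch of the distinction (the file_on_same_fs() wrapper is hypothetical): fh_match() asks "same file?", while fh_fsid_match() only asks "same exported filesystem?", which is what the pNFS code later in this series relies on when returning every layout held on an fsid.

static bool file_on_same_fs(struct nfs4_file *fp, struct svc_fh *fhp)
{
	/* Compare only the fsid portion of the two handles, not the
	 * whole opaque filehandle. */
	return fh_fsid_match(&fp->fi_fhandle, &fhp->fh_handle);
}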
Signed-off-by: Christoph Hellwig --- fs/nfsd/nfsfh.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index e24d95436db3..84cae2079d21 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h @@ -196,6 +196,15 @@ static inline bool fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2) return true; } +static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2) +{ + if (fh1->fh_fsid_type != fh2->fh_fsid_type) + return false; + if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type) != 0)) + return false; + return true; +} + #ifdef CONFIG_NFSD_V3 /* * The wcc data stored in current_fh should be cleared -- cgit v1.2.3 From cd61c522318f2c30ce731bfdb14e7c34203e3d7c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 14 Aug 2014 08:44:57 +0200 Subject: nfsd: make lookup/alloc/unhash_stid available outside nfs4state.c Signed-off-by: Christoph Hellwig --- fs/nfsd/nfs4state.c | 8 ++++---- fs/nfsd/state.h | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index eb0336e526d2..75faacb03e8e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -476,7 +476,7 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) __nfs4_file_put_access(fp, O_RDONLY); } -static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, +struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab) { struct nfs4_stid *stid; @@ -680,7 +680,7 @@ static void nfs4_put_deleg_lease(struct nfs4_file *fp) } } -static void unhash_stid(struct nfs4_stid *s) +void nfs4_unhash_stid(struct nfs4_stid *s) { s->sc_type = 0; } @@ -988,7 +988,7 @@ static void unhash_lock_stateid(struct nfs4_ol_stateid *stp) list_del_init(&stp->st_locks); unhash_ol_stateid(stp); - unhash_stid(&stp->st_stid); + nfs4_unhash_stid(&stp->st_stid); } static void release_lock_stateid(struct nfs4_ol_stateid *stp) @@ -4433,7 +4433,7 @@ out_unlock: return status; } -static __be32 +__be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s, struct nfsd_net *nn) diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index dab6553ceea1..55a3ece5fe06 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -545,6 +545,12 @@ struct nfsd_net; extern __be32 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, stateid_t *stateid, int flags, struct file **filp); +__be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, + stateid_t *stateid, unsigned char typemask, + struct nfs4_stid **s, struct nfsd_net *nn); +struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, + struct kmem_cache *slab); +void nfs4_unhash_stid(struct nfs4_stid *s); void nfs4_put_stid(struct nfs4_stid *s); void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *); extern void nfs4_release_reclaim(struct nfsd_net *); -- cgit v1.2.3 From e6ba76e1944613f16dddcba4b5836954ed3981f3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 14 Aug 2014 08:50:16 +0200 Subject: nfsd: make find/get/put file available outside nfs4state.c Signed-off-by: Christoph Hellwig --- fs/nfsd/nfs4state.c | 10 ++-------- fs/nfsd/state.h | 7 +++++++ 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 75faacb03e8e..f5fc5d72c362 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -272,7 +272,7 @@ static void 
nfsd4_free_file_rcu(struct rcu_head *rcu) kmem_cache_free(file_slab, fp); } -static inline void +void put_nfs4_file(struct nfs4_file *fi) { might_lock(&state_lock); @@ -285,12 +285,6 @@ put_nfs4_file(struct nfs4_file *fi) } } -static inline void -get_nfs4_file(struct nfs4_file *fi) -{ - atomic_inc(&fi->fi_ref); -} - static struct file * __nfs4_get_fd(struct nfs4_file *f, int oflag) { @@ -3295,7 +3289,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval) return NULL; } -static struct nfs4_file * +struct nfs4_file * find_file(struct knfsd_fh *fh) { struct nfs4_file *fp; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 55a3ece5fe06..8bc961e192f2 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -573,6 +573,13 @@ extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn); extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn); +struct nfs4_file *find_file(struct knfsd_fh *fh); +void put_nfs4_file(struct nfs4_file *fi); +static inline void get_nfs4_file(struct nfs4_file *fi) +{ + atomic_inc(&fi->fi_ref); +} + /* grace period management */ void nfsd4_end_grace(struct nfsd_net *nn); -- cgit v1.2.3 From 4d227fca1b32f95f1246894ebef879efccb2ec15 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 17 Aug 2014 07:40:00 -0500 Subject: nfsd: make find_any_file available outside nfs4state.c Signed-off-by: Christoph Hellwig --- fs/nfsd/nfs4state.c | 2 +- fs/nfsd/state.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f5fc5d72c362..eefd29ec43f2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -342,7 +342,7 @@ find_readable_file(struct nfs4_file *f) return ret; } -static struct file * +struct file * find_any_file(struct nfs4_file *f) { struct file *ret; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 8bc961e192f2..38ebb1268b59 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -579,6 +579,7 @@ static inline void get_nfs4_file(struct nfs4_file *fi) { atomic_inc(&fi->fi_ref); } +struct file *find_any_file(struct nfs4_file *f); /* grace period management */ void nfsd4_end_grace(struct nfsd_net *nn); -- cgit v1.2.3 From 9cf514ccfacb301f3b1b4509a8ce25dffad55880 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 5 May 2014 13:11:59 +0200 Subject: nfsd: implement pNFS operations Add support for the GETDEVICEINFO, LAYOUTGET, LAYOUTCOMMIT and LAYOUTRETURN NFSv4.1 operations, as well as backing code to manage outstanding layouts and devices. Layout management is very straight forward, with a nfs4_layout_stateid structure that extends nfs4_stid to manage layout stateids as the top-level structure. It is linked into the nfs4_file and nfs4_client structures like the other stateids, and contains a linked list of layouts that hang of the stateid. The actual layout operations are implemented in layout drivers that are not part of this commit, but will be added later. The worst part of this commit is the management of the pNFS device IDs, which suffers from a specification that is not sanely implementable due to the fact that the device-IDs are global and not bound to an export, and have a small enough size so that we can't store the fsid portion of a file handle, and must never be reused. As we still do need perform all export authentication and validation checks on a device ID passed to GETDEVICEINFO we are caught between a rock and a hard place. 
To work around this issue we add a new hash that maps from a 64-bit integer to a fsid so that we can look up the export to authenticate against it, a 32-bit integer as a generation that we can bump when changing the device, and a currently unused 32-bit integer that could be used in the future to handle more than a single device per export. Entries in this hash table are never deleted as we can't reuse the ids anyway, and would have a severe lifetime problem anyway as Linux export structures are temporary structures that can go away under load. Parts of the XDR data, structures and marshaling/unmarshaling code, as well as many concepts are derived from the old pNFS server implementation from Andy Adamson, Benny Halevy, Dean Hildebrand, Marc Eshel, Fred Isaman, Mike Sager, Ricardo Labiaga and many others. Signed-off-by: Christoph Hellwig --- fs/nfsd/Kconfig | 10 + fs/nfsd/Makefile | 1 + fs/nfsd/export.c | 8 + fs/nfsd/export.h | 2 + fs/nfsd/nfs4layouts.c | 487 +++++++++++++++++++++++++++++++++++++++ fs/nfsd/nfs4proc.c | 302 ++++++++++++++++++++++++ fs/nfsd/nfs4state.c | 16 +- fs/nfsd/nfs4xdr.c | 312 +++++++++++++++++++++++++ fs/nfsd/nfsctl.c | 9 +- fs/nfsd/nfsd.h | 16 +- fs/nfsd/pnfs.h | 80 +++++++ fs/nfsd/state.h | 21 ++ fs/nfsd/xdr4.h | 59 +++++ include/linux/nfs4.h | 1 + include/uapi/linux/nfsd/debug.h | 1 + include/uapi/linux/nfsd/export.h | 4 +- 16 files changed, 1324 insertions(+), 5 deletions(-) create mode 100644 fs/nfsd/nfs4layouts.c create mode 100644 fs/nfsd/pnfs.h (limited to 'fs') diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 73395156bdb4..683bf718aead 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -82,6 +82,16 @@ config NFSD_V4 If unsure, say N. +config NFSD_PNFS + bool "NFSv4.1 server support for Parallel NFS (pNFS)" + depends on NFSD_V4 + help + This option enables support for the parallel NFS features of the + minor version 1 of the NFSv4 protocol (RFC5661) in the kernel's NFS + server. + + If unsure, say N. 
+ config NFSD_V4_SECURITY_LABEL bool "Provide Security Label support for NFSv4 server" depends on NFSD_V4 && SECURITY diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile index af32ef06b4fe..5806270a8567 100644 --- a/fs/nfsd/Makefile +++ b/fs/nfsd/Makefile @@ -12,3 +12,4 @@ nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \ nfs4acl.o nfs4callback.o nfs4recover.o +nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 30a739d896ff..c3e3b6e55ae2 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -20,6 +20,7 @@ #include "nfsd.h" #include "nfsfh.h" #include "netns.h" +#include "pnfs.h" #define NFSDDBG_FACILITY NFSDDBG_EXPORT @@ -545,6 +546,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) exp.ex_client = dom; exp.cd = cd; + exp.ex_devid_map = NULL; /* expiry */ err = -EINVAL; @@ -621,6 +623,8 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen) if (!gid_valid(exp.ex_anon_gid)) goto out4; err = 0; + + nfsd4_setup_layout_type(&exp); } expp = svc_export_lookup(&exp); @@ -703,6 +707,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem) new->ex_fslocs.locations = NULL; new->ex_fslocs.locations_count = 0; new->ex_fslocs.migrated = 0; + new->ex_layout_type = 0; new->ex_uuid = NULL; new->cd = item->cd; } @@ -717,6 +722,8 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem) new->ex_anon_uid = item->ex_anon_uid; new->ex_anon_gid = item->ex_anon_gid; new->ex_fsid = item->ex_fsid; + new->ex_devid_map = item->ex_devid_map; + item->ex_devid_map = NULL; new->ex_uuid = item->ex_uuid; item->ex_uuid = NULL; new->ex_fslocs.locations = item->ex_fslocs.locations; @@ -725,6 +732,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem) item->ex_fslocs.locations_count = 0; new->ex_fslocs.migrated = item->ex_fslocs.migrated; item->ex_fslocs.migrated = 0; + new->ex_layout_type = item->ex_layout_type; new->ex_nflavors = item->ex_nflavors; for (i = 0; i < MAX_SECINFO_LIST; i++) { new->ex_flavors[i] = item->ex_flavors[i]; diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h index 04dc8c167b0c..1f52bfcc436f 100644 --- a/fs/nfsd/export.h +++ b/fs/nfsd/export.h @@ -56,6 +56,8 @@ struct svc_export { struct nfsd4_fs_locations ex_fslocs; uint32_t ex_nflavors; struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST]; + enum pnfs_layouttype ex_layout_type; + struct nfsd4_deviceid_map *ex_devid_map; struct cache_detail *cd; }; diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c new file mode 100644 index 000000000000..8273270418b1 --- /dev/null +++ b/fs/nfsd/nfs4layouts.c @@ -0,0 +1,487 @@ +/* + * Copyright (c) 2014 Christoph Hellwig. 
+ */ +#include +#include + +#include "pnfs.h" +#include "netns.h" + +#define NFSDDBG_FACILITY NFSDDBG_PNFS + +struct nfs4_layout { + struct list_head lo_perstate; + struct nfs4_layout_stateid *lo_state; + struct nfsd4_layout_seg lo_seg; +}; + +static struct kmem_cache *nfs4_layout_cache; +static struct kmem_cache *nfs4_layout_stateid_cache; + +const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = { +}; + +/* pNFS device ID to export fsid mapping */ +#define DEVID_HASH_BITS 8 +#define DEVID_HASH_SIZE (1 << DEVID_HASH_BITS) +#define DEVID_HASH_MASK (DEVID_HASH_SIZE - 1) +static u64 nfsd_devid_seq = 1; +static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE]; +static DEFINE_SPINLOCK(nfsd_devid_lock); + +static inline u32 devid_hashfn(u64 idx) +{ + return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK; +} + +static void +nfsd4_alloc_devid_map(const struct svc_fh *fhp) +{ + const struct knfsd_fh *fh = &fhp->fh_handle; + size_t fsid_len = key_len(fh->fh_fsid_type); + struct nfsd4_deviceid_map *map, *old; + int i; + + map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL); + if (!map) + return; + + map->fsid_type = fh->fh_fsid_type; + memcpy(&map->fsid, fh->fh_fsid, fsid_len); + + spin_lock(&nfsd_devid_lock); + if (fhp->fh_export->ex_devid_map) + goto out_unlock; + + for (i = 0; i < DEVID_HASH_SIZE; i++) { + list_for_each_entry(old, &nfsd_devid_hash[i], hash) { + if (old->fsid_type != fh->fh_fsid_type) + continue; + if (memcmp(old->fsid, fh->fh_fsid, + key_len(old->fsid_type))) + continue; + + fhp->fh_export->ex_devid_map = old; + goto out_unlock; + } + } + + map->idx = nfsd_devid_seq++; + list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]); + fhp->fh_export->ex_devid_map = map; + map = NULL; + +out_unlock: + spin_unlock(&nfsd_devid_lock); + kfree(map); +} + +struct nfsd4_deviceid_map * +nfsd4_find_devid_map(int idx) +{ + struct nfsd4_deviceid_map *map, *ret = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash) + if (map->idx == idx) + ret = map; + rcu_read_unlock(); + + return ret; +} + +int +nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp, + u32 device_generation) +{ + if (!fhp->fh_export->ex_devid_map) { + nfsd4_alloc_devid_map(fhp); + if (!fhp->fh_export->ex_devid_map) + return -ENOMEM; + } + + id->fsid_idx = fhp->fh_export->ex_devid_map->idx; + id->generation = device_generation; + id->pad = 0; + return 0; +} + +void nfsd4_setup_layout_type(struct svc_export *exp) +{ + if (exp->ex_flags & NFSEXP_NOPNFS) + return; +} + +static void +nfsd4_free_layout_stateid(struct nfs4_stid *stid) +{ + struct nfs4_layout_stateid *ls = layoutstateid(stid); + struct nfs4_client *clp = ls->ls_stid.sc_client; + struct nfs4_file *fp = ls->ls_stid.sc_file; + + spin_lock(&clp->cl_lock); + list_del_init(&ls->ls_perclnt); + spin_unlock(&clp->cl_lock); + + spin_lock(&fp->fi_lock); + list_del_init(&ls->ls_perfile); + spin_unlock(&fp->fi_lock); + + kmem_cache_free(nfs4_layout_stateid_cache, ls); +} + +static struct nfs4_layout_stateid * +nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, + struct nfs4_stid *parent, u32 layout_type) +{ + struct nfs4_client *clp = cstate->clp; + struct nfs4_file *fp = parent->sc_file; + struct nfs4_layout_stateid *ls; + struct nfs4_stid *stp; + + stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache); + if (!stp) + return NULL; + stp->sc_free = nfsd4_free_layout_stateid; + get_nfs4_file(fp); + stp->sc_file = fp; + + ls = layoutstateid(stp); + 
INIT_LIST_HEAD(&ls->ls_perclnt); + INIT_LIST_HEAD(&ls->ls_perfile); + spin_lock_init(&ls->ls_lock); + INIT_LIST_HEAD(&ls->ls_layouts); + ls->ls_layout_type = layout_type; + + spin_lock(&clp->cl_lock); + stp->sc_type = NFS4_LAYOUT_STID; + list_add(&ls->ls_perclnt, &clp->cl_lo_states); + spin_unlock(&clp->cl_lock); + + spin_lock(&fp->fi_lock); + list_add(&ls->ls_perfile, &fp->fi_lo_states); + spin_unlock(&fp->fi_lock); + + return ls; +} + +__be32 +nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, stateid_t *stateid, + bool create, u32 layout_type, struct nfs4_layout_stateid **lsp) +{ + struct nfs4_layout_stateid *ls; + struct nfs4_stid *stid; + unsigned char typemask = NFS4_LAYOUT_STID; + __be32 status; + + if (create) + typemask |= (NFS4_OPEN_STID | NFS4_LOCK_STID | NFS4_DELEG_STID); + + status = nfsd4_lookup_stateid(cstate, stateid, typemask, &stid, + net_generic(SVC_NET(rqstp), nfsd_net_id)); + if (status) + goto out; + + if (!fh_match(&cstate->current_fh.fh_handle, + &stid->sc_file->fi_fhandle)) { + status = nfserr_bad_stateid; + goto out_put_stid; + } + + if (stid->sc_type != NFS4_LAYOUT_STID) { + ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type); + nfs4_put_stid(stid); + + status = nfserr_jukebox; + if (!ls) + goto out; + } else { + ls = container_of(stid, struct nfs4_layout_stateid, ls_stid); + + status = nfserr_bad_stateid; + if (stateid->si_generation > stid->sc_stateid.si_generation) + goto out_put_stid; + if (layout_type != ls->ls_layout_type) + goto out_put_stid; + } + + *lsp = ls; + return 0; + +out_put_stid: + nfs4_put_stid(stid); +out: + return status; +} + +static inline u64 +layout_end(struct nfsd4_layout_seg *seg) +{ + u64 end = seg->offset + seg->length; + return end >= seg->offset ? 
end : NFS4_MAX_UINT64; +} + +static void +layout_update_len(struct nfsd4_layout_seg *lo, u64 end) +{ + if (end == NFS4_MAX_UINT64) + lo->length = NFS4_MAX_UINT64; + else + lo->length = end - lo->offset; +} + +static bool +layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s) +{ + if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode) + return false; + if (layout_end(&lo->lo_seg) <= s->offset) + return false; + if (layout_end(s) <= lo->lo_seg.offset) + return false; + return true; +} + +static bool +layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new) +{ + if (lo->iomode != new->iomode) + return false; + if (layout_end(new) < lo->offset) + return false; + if (layout_end(lo) < new->offset) + return false; + + lo->offset = min(lo->offset, new->offset); + layout_update_len(lo, max(layout_end(lo), layout_end(new))); + return true; +} + +__be32 +nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls) +{ + struct nfsd4_layout_seg *seg = &lgp->lg_seg; + struct nfs4_layout *lp, *new = NULL; + + spin_lock(&ls->ls_lock); + list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) { + if (layouts_try_merge(&lp->lo_seg, seg)) + goto done; + } + spin_unlock(&ls->ls_lock); + + new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL); + if (!new) + return nfserr_jukebox; + memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg)); + new->lo_state = ls; + + spin_lock(&ls->ls_lock); + list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) { + if (layouts_try_merge(&lp->lo_seg, seg)) + goto done; + } + + atomic_inc(&ls->ls_stid.sc_count); + list_add_tail(&new->lo_perstate, &ls->ls_layouts); + new = NULL; +done: + update_stateid(&ls->ls_stid.sc_stateid); + memcpy(&lgp->lg_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t)); + spin_unlock(&ls->ls_lock); + if (new) + kmem_cache_free(nfs4_layout_cache, new); + return nfs_ok; +} + +static void +nfsd4_free_layouts(struct list_head *reaplist) +{ + while (!list_empty(reaplist)) { + struct nfs4_layout *lp = list_first_entry(reaplist, + struct nfs4_layout, lo_perstate); + + list_del(&lp->lo_perstate); + nfs4_put_stid(&lp->lo_state->ls_stid); + kmem_cache_free(nfs4_layout_cache, lp); + } +} + +static void +nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg, + struct list_head *reaplist) +{ + struct nfsd4_layout_seg *lo = &lp->lo_seg; + u64 end = layout_end(lo); + + if (seg->offset <= lo->offset) { + if (layout_end(seg) >= end) { + list_move_tail(&lp->lo_perstate, reaplist); + return; + } + end = seg->offset; + } else { + /* retain the whole layout segment on a split. 
*/ + if (layout_end(seg) < end) { + dprintk("%s: split not supported\n", __func__); + return; + } + + lo->offset = layout_end(seg); + } + + layout_update_len(lo, end); +} + +__be32 +nfsd4_return_file_layouts(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_layoutreturn *lrp) +{ + struct nfs4_layout_stateid *ls; + struct nfs4_layout *lp, *n; + LIST_HEAD(reaplist); + __be32 nfserr; + int found = 0; + + nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid, + false, lrp->lr_layout_type, + &ls); + if (nfserr) + return nfserr; + + spin_lock(&ls->ls_lock); + list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) { + if (layouts_overlapping(lp, &lrp->lr_seg)) { + nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist); + found++; + } + } + if (!list_empty(&ls->ls_layouts)) { + if (found) { + update_stateid(&ls->ls_stid.sc_stateid); + memcpy(&lrp->lr_sid, &ls->ls_stid.sc_stateid, + sizeof(stateid_t)); + } + lrp->lrs_present = 1; + } else { + nfs4_unhash_stid(&ls->ls_stid); + lrp->lrs_present = 0; + } + spin_unlock(&ls->ls_lock); + + nfs4_put_stid(&ls->ls_stid); + nfsd4_free_layouts(&reaplist); + return nfs_ok; +} + +__be32 +nfsd4_return_client_layouts(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_layoutreturn *lrp) +{ + struct nfs4_layout_stateid *ls, *n; + struct nfs4_client *clp = cstate->clp; + struct nfs4_layout *lp, *t; + LIST_HEAD(reaplist); + + lrp->lrs_present = 0; + + spin_lock(&clp->cl_lock); + list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) { + if (lrp->lr_return_type == RETURN_FSID && + !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle, + &cstate->current_fh.fh_handle)) + continue; + + spin_lock(&ls->ls_lock); + list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) { + if (lrp->lr_seg.iomode == IOMODE_ANY || + lrp->lr_seg.iomode == lp->lo_seg.iomode) + list_move_tail(&lp->lo_perstate, &reaplist); + } + spin_unlock(&ls->ls_lock); + } + spin_unlock(&clp->cl_lock); + + nfsd4_free_layouts(&reaplist); + return 0; +} + +static void +nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls, + struct list_head *reaplist) +{ + spin_lock(&ls->ls_lock); + list_splice_init(&ls->ls_layouts, reaplist); + spin_unlock(&ls->ls_lock); +} + +void +nfsd4_return_all_client_layouts(struct nfs4_client *clp) +{ + struct nfs4_layout_stateid *ls, *n; + LIST_HEAD(reaplist); + + spin_lock(&clp->cl_lock); + list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) + nfsd4_return_all_layouts(ls, &reaplist); + spin_unlock(&clp->cl_lock); + + nfsd4_free_layouts(&reaplist); +} + +void +nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp) +{ + struct nfs4_layout_stateid *ls, *n; + LIST_HEAD(reaplist); + + spin_lock(&fp->fi_lock); + list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) { + if (ls->ls_stid.sc_client == clp) + nfsd4_return_all_layouts(ls, &reaplist); + } + spin_unlock(&fp->fi_lock); + + nfsd4_free_layouts(&reaplist); +} + +int +nfsd4_init_pnfs(void) +{ + int i; + + for (i = 0; i < DEVID_HASH_SIZE; i++) + INIT_LIST_HEAD(&nfsd_devid_hash[i]); + + nfs4_layout_cache = kmem_cache_create("nfs4_layout", + sizeof(struct nfs4_layout), 0, 0, NULL); + if (!nfs4_layout_cache) + return -ENOMEM; + + nfs4_layout_stateid_cache = kmem_cache_create("nfs4_layout_stateid", + sizeof(struct nfs4_layout_stateid), 0, 0, NULL); + if (!nfs4_layout_stateid_cache) { + kmem_cache_destroy(nfs4_layout_cache); + return -ENOMEM; + } + return 0; +} + +void +nfsd4_exit_pnfs(void) +{ + int i; + + 
kmem_cache_destroy(nfs4_layout_cache); + kmem_cache_destroy(nfs4_layout_stateid_cache); + + for (i = 0; i < DEVID_HASH_SIZE; i++) { + struct nfsd4_deviceid_map *map, *n; + + list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash) + kfree(map); + } +} diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index ac71d13c69ef..2b91443497cc 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -43,6 +43,7 @@ #include "current_stateid.h" #include "netns.h" #include "acl.h" +#include "pnfs.h" #ifdef CONFIG_NFSD_V4_SECURITY_LABEL #include @@ -1178,6 +1179,252 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, return status == nfserr_same ? nfs_ok : status; } +#ifdef CONFIG_NFSD_PNFS +static const struct nfsd4_layout_ops * +nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type) +{ + if (!exp->ex_layout_type) { + dprintk("%s: export does not support pNFS\n", __func__); + return NULL; + } + + if (exp->ex_layout_type != layout_type) { + dprintk("%s: layout type %d not supported\n", + __func__, layout_type); + return NULL; + } + + return nfsd4_layout_ops[layout_type]; +} + +static __be32 +nfsd4_getdeviceinfo(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_getdeviceinfo *gdp) +{ + const struct nfsd4_layout_ops *ops; + struct nfsd4_deviceid_map *map; + struct svc_export *exp; + __be32 nfserr; + + dprintk("%s: layout_type %u dev_id [0x%llx:0x%x] maxcnt %u\n", + __func__, + gdp->gd_layout_type, + gdp->gd_devid.fsid_idx, gdp->gd_devid.generation, + gdp->gd_maxcount); + + map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx); + if (!map) { + dprintk("%s: couldn't find device ID to export mapping!\n", + __func__); + return nfserr_noent; + } + + exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid); + if (IS_ERR(exp)) { + dprintk("%s: could not find device id\n", __func__); + return nfserr_noent; + } + + nfserr = nfserr_layoutunavailable; + ops = nfsd4_layout_verify(exp, gdp->gd_layout_type); + if (!ops) + goto out; + + nfserr = nfs_ok; + if (gdp->gd_maxcount != 0) + nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb, gdp); + + gdp->gd_notify_types &= ops->notify_types; + exp_put(exp); +out: + return nfserr; +} + +static __be32 +nfsd4_layoutget(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_layoutget *lgp) +{ + struct svc_fh *current_fh = &cstate->current_fh; + const struct nfsd4_layout_ops *ops; + struct nfs4_layout_stateid *ls; + __be32 nfserr; + int accmode; + + switch (lgp->lg_seg.iomode) { + case IOMODE_READ: + accmode = NFSD_MAY_READ; + break; + case IOMODE_RW: + accmode = NFSD_MAY_READ | NFSD_MAY_WRITE; + break; + default: + dprintk("%s: invalid iomode %d\n", + __func__, lgp->lg_seg.iomode); + nfserr = nfserr_badiomode; + goto out; + } + + nfserr = fh_verify(rqstp, current_fh, 0, accmode); + if (nfserr) + goto out; + + nfserr = nfserr_layoutunavailable; + ops = nfsd4_layout_verify(current_fh->fh_export, lgp->lg_layout_type); + if (!ops) + goto out; + + /* + * Verify minlength and range as per RFC5661: + * o If loga_length is less than loga_minlength, + * the metadata server MUST return NFS4ERR_INVAL. + * o If the sum of loga_offset and loga_minlength exceeds + * NFS4_UINT64_MAX, and loga_minlength is not + * NFS4_UINT64_MAX, the error NFS4ERR_INVAL MUST result. + * o If the sum of loga_offset and loga_length exceeds + * NFS4_UINT64_MAX, and loga_length is not NFS4_UINT64_MAX, + * the error NFS4ERR_INVAL MUST result. 
+ */ + nfserr = nfserr_inval; + if (lgp->lg_seg.length < lgp->lg_minlength || + (lgp->lg_minlength != NFS4_MAX_UINT64 && + lgp->lg_minlength > NFS4_MAX_UINT64 - lgp->lg_seg.offset) || + (lgp->lg_seg.length != NFS4_MAX_UINT64 && + lgp->lg_seg.length > NFS4_MAX_UINT64 - lgp->lg_seg.offset)) + goto out; + if (lgp->lg_seg.length == 0) + goto out; + + nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lgp->lg_sid, + true, lgp->lg_layout_type, &ls); + if (nfserr) + goto out; + + nfserr = ops->proc_layoutget(current_fh->fh_dentry->d_inode, + current_fh, lgp); + if (nfserr) + goto out_put_stid; + + nfserr = nfsd4_insert_layout(lgp, ls); + +out_put_stid: + nfs4_put_stid(&ls->ls_stid); +out: + return nfserr; +} + +static __be32 +nfsd4_layoutcommit(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_layoutcommit *lcp) +{ + const struct nfsd4_layout_seg *seg = &lcp->lc_seg; + struct svc_fh *current_fh = &cstate->current_fh; + const struct nfsd4_layout_ops *ops; + loff_t new_size = lcp->lc_last_wr + 1; + struct inode *inode; + struct nfs4_layout_stateid *ls; + __be32 nfserr; + + nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_WRITE); + if (nfserr) + goto out; + + nfserr = nfserr_layoutunavailable; + ops = nfsd4_layout_verify(current_fh->fh_export, lcp->lc_layout_type); + if (!ops) + goto out; + inode = current_fh->fh_dentry->d_inode; + + nfserr = nfserr_inval; + if (new_size <= seg->offset) { + dprintk("pnfsd: last write before layout segment\n"); + goto out; + } + if (new_size > seg->offset + seg->length) { + dprintk("pnfsd: last write beyond layout segment\n"); + goto out; + } + if (!lcp->lc_newoffset && new_size > i_size_read(inode)) { + dprintk("pnfsd: layoutcommit beyond EOF\n"); + goto out; + } + + nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid, + false, lcp->lc_layout_type, + &ls); + if (nfserr) { + /* fixup error code as per RFC5661 */ + if (nfserr == nfserr_bad_stateid) + nfserr = nfserr_badlayout; + goto out; + } + + nfserr = ops->proc_layoutcommit(inode, lcp); + if (nfserr) + goto out_put_stid; + + if (new_size > i_size_read(inode)) { + lcp->lc_size_chg = 1; + lcp->lc_newsize = new_size; + } else { + lcp->lc_size_chg = 0; + } + +out_put_stid: + nfs4_put_stid(&ls->ls_stid); +out: + return nfserr; +} + +static __be32 +nfsd4_layoutreturn(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_layoutreturn *lrp) +{ + struct svc_fh *current_fh = &cstate->current_fh; + __be32 nfserr; + + nfserr = fh_verify(rqstp, current_fh, 0, NFSD_MAY_NOP); + if (nfserr) + goto out; + + nfserr = nfserr_layoutunavailable; + if (!nfsd4_layout_verify(current_fh->fh_export, lrp->lr_layout_type)) + goto out; + + switch (lrp->lr_seg.iomode) { + case IOMODE_READ: + case IOMODE_RW: + case IOMODE_ANY: + break; + default: + dprintk("%s: invalid iomode %d\n", __func__, + lrp->lr_seg.iomode); + nfserr = nfserr_inval; + goto out; + } + + switch (lrp->lr_return_type) { + case RETURN_FILE: + nfserr = nfsd4_return_file_layouts(rqstp, cstate, lrp); + break; + case RETURN_FSID: + case RETURN_ALL: + nfserr = nfsd4_return_client_layouts(rqstp, cstate, lrp); + break; + default: + dprintk("%s: invalid return_type %d\n", __func__, + lrp->lr_return_type); + nfserr = nfserr_inval; + break; + } +out: + return nfserr; +} +#endif /* CONFIG_NFSD_PNFS */ + /* * NULL call. 
*/ @@ -1679,6 +1926,36 @@ static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd op_encode_channel_attrs_maxsz) * sizeof(__be32); } +#ifdef CONFIG_NFSD_PNFS +/* + * At this stage we don't really know what layout driver will handle the request, + * so we need to define an arbitrary upper bound here. + */ +#define MAX_LAYOUT_SIZE 128 +static inline u32 nfsd4_layoutget_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + + 1 /* logr_return_on_close */ + + op_encode_stateid_maxsz + + 1 /* nr of layouts */ + + MAX_LAYOUT_SIZE) * sizeof(__be32); +} + +static inline u32 nfsd4_layoutcommit_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + + 1 /* locr_newsize */ + + 2 /* ns_size */) * sizeof(__be32); +} + +static inline u32 nfsd4_layoutreturn_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) +{ + return (op_encode_hdr_size + + 1 /* lrs_stateid */ + + op_encode_stateid_maxsz) * sizeof(__be32); +} +#endif /* CONFIG_NFSD_PNFS */ + static struct nfsd4_operation nfsd4_ops[] = { [OP_ACCESS] = { .op_func = (nfsd4op_func)nfsd4_access, @@ -1966,6 +2243,31 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_get_currentstateid = (stateid_getter)nfsd4_get_freestateid, .op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize, }, +#ifdef CONFIG_NFSD_PNFS + [OP_GETDEVICEINFO] = { + .op_func = (nfsd4op_func)nfsd4_getdeviceinfo, + .op_flags = ALLOWED_WITHOUT_FH, + .op_name = "OP_GETDEVICEINFO", + }, + [OP_LAYOUTGET] = { + .op_func = (nfsd4op_func)nfsd4_layoutget, + .op_flags = OP_MODIFIES_SOMETHING, + .op_name = "OP_LAYOUTGET", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutget_rsize, + }, + [OP_LAYOUTCOMMIT] = { + .op_func = (nfsd4op_func)nfsd4_layoutcommit, + .op_flags = OP_MODIFIES_SOMETHING, + .op_name = "OP_LAYOUTCOMMIT", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutcommit_rsize, + }, + [OP_LAYOUTRETURN] = { + .op_func = (nfsd4op_func)nfsd4_layoutreturn, + .op_flags = OP_MODIFIES_SOMETHING, + .op_name = "OP_LAYOUTRETURN", + .op_rsize_bop = (nfsd4op_rsize)nfsd4_layoutreturn_rsize, + }, +#endif /* CONFIG_NFSD_PNFS */ /* NFSv4.2 operations */ [OP_ALLOCATE] = { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index eefd29ec43f2..c89f79dc69e2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -48,6 +48,7 @@ #include "current_stateid.h" #include "netns.h" +#include "pnfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC @@ -1539,6 +1540,9 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) INIT_LIST_HEAD(&clp->cl_lru); INIT_LIST_HEAD(&clp->cl_callbacks); INIT_LIST_HEAD(&clp->cl_revoked); +#ifdef CONFIG_NFSD_PNFS + INIT_LIST_HEAD(&clp->cl_lo_states); +#endif spin_lock_init(&clp->cl_lock); rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); return clp; @@ -1643,6 +1647,7 @@ __destroy_client(struct nfs4_client *clp) nfs4_get_stateowner(&oo->oo_owner); release_openowner(oo); } + nfsd4_return_all_client_layouts(clp); nfsd4_shutdown_callback(clp); if (clp->cl_cb_conn.cb_xprt) svc_xprt_put(clp->cl_cb_conn.cb_xprt); @@ -2126,8 +2131,11 @@ nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, static void nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) { - /* pNFS is not supported */ +#ifdef CONFIG_NFSD_PNFS + new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; +#else new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; +#endif /* Referrals are supported, Migration is not. 
*/ new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; @@ -3055,6 +3063,9 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, fp->fi_share_deny = 0; memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); memset(fp->fi_access, 0, sizeof(fp->fi_access)); +#ifdef CONFIG_NFSD_PNFS + INIT_LIST_HEAD(&fp->fi_lo_states); +#endif hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]); } @@ -4841,6 +4852,9 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, update_stateid(&stp->st_stid.sc_stateid); memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); + nfsd4_return_all_file_layouts(stp->st_stateowner->so_client, + stp->st_stid.sc_file); + nfsd4_close_open_stateid(stp); /* put reference from nfs4_preprocess_seqid_op */ diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 974533e5a427..df5e66caf100 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -47,6 +47,7 @@ #include "state.h" #include "cache.h" #include "netns.h" +#include "pnfs.h" #ifdef CONFIG_NFSD_V4_SECURITY_LABEL #include @@ -1522,6 +1523,127 @@ static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, str DECODE_TAIL; } +#ifdef CONFIG_NFSD_PNFS +static __be32 +nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp, + struct nfsd4_getdeviceinfo *gdev) +{ + DECODE_HEAD; + u32 num, i; + + READ_BUF(sizeof(struct nfsd4_deviceid) + 3 * 4); + COPYMEM(&gdev->gd_devid, sizeof(struct nfsd4_deviceid)); + gdev->gd_layout_type = be32_to_cpup(p++); + gdev->gd_maxcount = be32_to_cpup(p++); + num = be32_to_cpup(p++); + if (num) { + READ_BUF(4 * num); + gdev->gd_notify_types = be32_to_cpup(p++); + for (i = 1; i < num; i++) { + if (be32_to_cpup(p++)) { + status = nfserr_inval; + goto out; + } + } + } + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_layoutget(struct nfsd4_compoundargs *argp, + struct nfsd4_layoutget *lgp) +{ + DECODE_HEAD; + + READ_BUF(36); + lgp->lg_signal = be32_to_cpup(p++); + lgp->lg_layout_type = be32_to_cpup(p++); + lgp->lg_seg.iomode = be32_to_cpup(p++); + p = xdr_decode_hyper(p, &lgp->lg_seg.offset); + p = xdr_decode_hyper(p, &lgp->lg_seg.length); + p = xdr_decode_hyper(p, &lgp->lg_minlength); + nfsd4_decode_stateid(argp, &lgp->lg_sid); + READ_BUF(4); + lgp->lg_maxcount = be32_to_cpup(p++); + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp, + struct nfsd4_layoutcommit *lcp) +{ + DECODE_HEAD; + u32 timechange; + + READ_BUF(20); + p = xdr_decode_hyper(p, &lcp->lc_seg.offset); + p = xdr_decode_hyper(p, &lcp->lc_seg.length); + lcp->lc_reclaim = be32_to_cpup(p++); + nfsd4_decode_stateid(argp, &lcp->lc_sid); + READ_BUF(4); + lcp->lc_newoffset = be32_to_cpup(p++); + if (lcp->lc_newoffset) { + READ_BUF(8); + p = xdr_decode_hyper(p, &lcp->lc_last_wr); + } else + lcp->lc_last_wr = 0; + READ_BUF(4); + timechange = be32_to_cpup(p++); + if (timechange) { + status = nfsd4_decode_time(argp, &lcp->lc_mtime); + if (status) + return status; + } else { + lcp->lc_mtime.tv_nsec = UTIME_NOW; + } + READ_BUF(8); + lcp->lc_layout_type = be32_to_cpup(p++); + + /* + * Save the layout update in XDR format and let the layout driver deal + * with it later. 
+ */ + lcp->lc_up_len = be32_to_cpup(p++); + if (lcp->lc_up_len > 0) { + READ_BUF(lcp->lc_up_len); + READMEM(lcp->lc_up_layout, lcp->lc_up_len); + } + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_layoutreturn(struct nfsd4_compoundargs *argp, + struct nfsd4_layoutreturn *lrp) +{ + DECODE_HEAD; + + READ_BUF(16); + lrp->lr_reclaim = be32_to_cpup(p++); + lrp->lr_layout_type = be32_to_cpup(p++); + lrp->lr_seg.iomode = be32_to_cpup(p++); + lrp->lr_return_type = be32_to_cpup(p++); + if (lrp->lr_return_type == RETURN_FILE) { + READ_BUF(16); + p = xdr_decode_hyper(p, &lrp->lr_seg.offset); + p = xdr_decode_hyper(p, &lrp->lr_seg.length); + nfsd4_decode_stateid(argp, &lrp->lr_sid); + READ_BUF(4); + lrp->lrf_body_len = be32_to_cpup(p++); + if (lrp->lrf_body_len > 0) { + READ_BUF(lrp->lrf_body_len); + READMEM(lrp->lrf_body, lrp->lrf_body_len); + } + } else { + lrp->lr_seg.offset = 0; + lrp->lr_seg.length = NFS4_MAX_UINT64; + } + + DECODE_TAIL; +} +#endif /* CONFIG_NFSD_PNFS */ + static __be32 nfsd4_decode_fallocate(struct nfsd4_compoundargs *argp, struct nfsd4_fallocate *fallocate) @@ -1616,11 +1738,19 @@ static nfsd4_dec nfsd4_dec_ops[] = { [OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session, [OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid, [OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp, +#ifdef CONFIG_NFSD_PNFS + [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_getdeviceinfo, + [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp, + [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_layoutcommit, + [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_layoutget, + [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_layoutreturn, +#else [OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp, +#endif [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name, [OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence, [OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp, @@ -2548,6 +2678,30 @@ out_acl: get_parent_attributes(exp, &stat); p = xdr_encode_hyper(p, stat.ino); } +#ifdef CONFIG_NFSD_PNFS + if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) || + (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) { + if (exp->ex_layout_type) { + p = xdr_reserve_space(xdr, 8); + if (!p) + goto out_resource; + *p++ = cpu_to_be32(1); + *p++ = cpu_to_be32(exp->ex_layout_type); + } else { + p = xdr_reserve_space(xdr, 4); + if (!p) + goto out_resource; + *p++ = cpu_to_be32(0); + } + } + + if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) { + p = xdr_reserve_space(xdr, 4); + if (!p) + goto out_resource; + *p++ = cpu_to_be32(stat.blksize); + } +#endif /* CONFIG_NFSD_PNFS */ if (bmval2 & FATTR4_WORD2_SECURITY_LABEL) { status = nfsd4_encode_security_label(xdr, rqstp, context, contextlen); @@ -3824,6 +3978,156 @@ nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr, return nfserr; } +#ifdef CONFIG_NFSD_PNFS +static __be32 +nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_getdeviceinfo *gdev) +{ + struct xdr_stream *xdr = &resp->xdr; + const struct nfsd4_layout_ops *ops = + nfsd4_layout_ops[gdev->gd_layout_type]; + u32 starting_len = xdr->buf->len, needed_len; + __be32 *p; + + dprintk("%s: err %d\n", __func__, nfserr); + if (nfserr) + goto out; + + nfserr = nfserr_resource; + p = xdr_reserve_space(xdr, 4); + if (!p) + goto out; + + *p++ = cpu_to_be32(gdev->gd_layout_type); + + /* If maxcount is 0 
then just update notifications */ + if (gdev->gd_maxcount != 0) { + nfserr = ops->encode_getdeviceinfo(xdr, gdev); + if (nfserr) { + /* + * We don't bother to burden the layout drivers with + * enforcing gd_maxcount, just tell the client to + * come back with a bigger buffer if it's not enough. + */ + if (xdr->buf->len + 4 > gdev->gd_maxcount) + goto toosmall; + goto out; + } + } + + nfserr = nfserr_resource; + if (gdev->gd_notify_types) { + p = xdr_reserve_space(xdr, 4 + 4); + if (!p) + goto out; + *p++ = cpu_to_be32(1); /* bitmap length */ + *p++ = cpu_to_be32(gdev->gd_notify_types); + } else { + p = xdr_reserve_space(xdr, 4); + if (!p) + goto out; + *p++ = 0; + } + + nfserr = 0; +out: + kfree(gdev->gd_device); + dprintk("%s: done: %d\n", __func__, be32_to_cpu(nfserr)); + return nfserr; + +toosmall: + dprintk("%s: maxcount too small\n", __func__); + needed_len = xdr->buf->len + 4 /* notifications */; + xdr_truncate_encode(xdr, starting_len); + p = xdr_reserve_space(xdr, 4); + if (!p) { + nfserr = nfserr_resource; + } else { + *p++ = cpu_to_be32(needed_len); + nfserr = nfserr_toosmall; + } + goto out; +} + +static __be32 +nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_layoutget *lgp) +{ + struct xdr_stream *xdr = &resp->xdr; + const struct nfsd4_layout_ops *ops = + nfsd4_layout_ops[lgp->lg_layout_type]; + __be32 *p; + + dprintk("%s: err %d\n", __func__, nfserr); + if (nfserr) + goto out; + + nfserr = nfserr_resource; + p = xdr_reserve_space(xdr, 36 + sizeof(stateid_opaque_t)); + if (!p) + goto out; + + *p++ = cpu_to_be32(1); /* we always set return-on-close */ + *p++ = cpu_to_be32(lgp->lg_sid.si_generation); + p = xdr_encode_opaque_fixed(p, &lgp->lg_sid.si_opaque, + sizeof(stateid_opaque_t)); + + *p++ = cpu_to_be32(1); /* we always return a single layout */ + p = xdr_encode_hyper(p, lgp->lg_seg.offset); + p = xdr_encode_hyper(p, lgp->lg_seg.length); + *p++ = cpu_to_be32(lgp->lg_seg.iomode); + *p++ = cpu_to_be32(lgp->lg_layout_type); + + nfserr = ops->encode_layoutget(xdr, lgp); +out: + kfree(lgp->lg_content); + return nfserr; +} + +static __be32 +nfsd4_encode_layoutcommit(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_layoutcommit *lcp) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + if (nfserr) + return nfserr; + + p = xdr_reserve_space(xdr, 4); + if (!p) + return nfserr_resource; + *p++ = cpu_to_be32(lcp->lc_size_chg); + if (lcp->lc_size_chg) { + p = xdr_reserve_space(xdr, 8); + if (!p) + return nfserr_resource; + p = xdr_encode_hyper(p, lcp->lc_newsize); + } + + return nfs_ok; +} + +static __be32 +nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_layoutreturn *lrp) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + if (nfserr) + return nfserr; + + p = xdr_reserve_space(xdr, 4); + if (!p) + return nfserr_resource; + *p++ = cpu_to_be32(lrp->lrs_present); + if (lrp->lrs_present) + nfsd4_encode_stateid(xdr, &lrp->lr_sid); + return nfs_ok; +} +#endif /* CONFIG_NFSD_PNFS */ + static __be32 nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_seek *seek) @@ -3900,11 +4204,19 @@ static nfsd4_enc nfsd4_enc_ops[] = { [OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_noop, [OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_noop, [OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop, +#ifdef CONFIG_NFSD_PNFS + [OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_getdeviceinfo, + [OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop, + [OP_LAYOUTCOMMIT] = 
(nfsd4_enc)nfsd4_encode_layoutcommit, + [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_layoutget, + [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_layoutreturn, +#else [OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop, [OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop, +#endif [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name, [OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence, [OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop, diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 19ace74d35f6..aa47d75ddb26 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -21,6 +21,7 @@ #include "cache.h" #include "state.h" #include "netns.h" +#include "pnfs.h" /* * We have a single directory with several nodes in it. @@ -1258,9 +1259,12 @@ static int __init init_nfsd(void) retval = nfsd4_init_slabs(); if (retval) goto out_unregister_pernet; - retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */ + retval = nfsd4_init_pnfs(); if (retval) goto out_free_slabs; + retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */ + if (retval) + goto out_exit_pnfs; nfsd_stat_init(); /* Statistics */ retval = nfsd_reply_cache_init(); if (retval) @@ -1282,6 +1286,8 @@ out_free_lockd: out_free_stat: nfsd_stat_shutdown(); nfsd_fault_inject_cleanup(); +out_exit_pnfs: + nfsd4_exit_pnfs(); out_free_slabs: nfsd4_free_slabs(); out_unregister_pernet: @@ -1299,6 +1305,7 @@ static void __exit exit_nfsd(void) nfsd_stat_shutdown(); nfsd_lockd_shutdown(); nfsd4_free_slabs(); + nfsd4_exit_pnfs(); nfsd_fault_inject_cleanup(); unregister_filesystem(&nfsd_fs_type); unregister_pernet_subsys(&nfsd_net_ops); diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 33a46a8dfaf7..565c4da1a9eb 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -325,15 +325,27 @@ void nfsd_lockd_shutdown(void); #define NFSD4_SUPPORTED_ATTRS_WORD2 0 +/* 4.1 */ +#ifdef CONFIG_NFSD_PNFS +#define PNFSD_SUPPORTED_ATTRS_WORD1 FATTR4_WORD1_FS_LAYOUT_TYPES +#define PNFSD_SUPPORTED_ATTRS_WORD2 \ +(FATTR4_WORD2_LAYOUT_BLKSIZE | FATTR4_WORD2_LAYOUT_TYPES) +#else +#define PNFSD_SUPPORTED_ATTRS_WORD1 0 +#define PNFSD_SUPPORTED_ATTRS_WORD2 0 +#endif /* CONFIG_NFSD_PNFS */ + #define NFSD4_1_SUPPORTED_ATTRS_WORD0 \ NFSD4_SUPPORTED_ATTRS_WORD0 #define NFSD4_1_SUPPORTED_ATTRS_WORD1 \ - NFSD4_SUPPORTED_ATTRS_WORD1 + (NFSD4_SUPPORTED_ATTRS_WORD1 | PNFSD_SUPPORTED_ATTRS_WORD1) #define NFSD4_1_SUPPORTED_ATTRS_WORD2 \ - (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) + (NFSD4_SUPPORTED_ATTRS_WORD2 | PNFSD_SUPPORTED_ATTRS_WORD2 | \ + FATTR4_WORD2_SUPPATTR_EXCLCREAT) +/* 4.2 */ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL #define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL #else diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h new file mode 100644 index 000000000000..a9616a4e13cd --- /dev/null +++ b/fs/nfsd/pnfs.h @@ -0,0 +1,80 @@ +#ifndef _FS_NFSD_PNFS_H +#define _FS_NFSD_PNFS_H 1 + +#include +#include + +#include "state.h" +#include "xdr4.h" + +struct xdr_stream; + +struct nfsd4_deviceid_map { + struct list_head hash; + u64 idx; + int fsid_type; + u32 fsid[]; +}; + +struct nfsd4_layout_ops { + u32 notify_types; + + __be32 (*proc_getdeviceinfo)(struct super_block *sb, + struct nfsd4_getdeviceinfo *gdevp); + __be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr, + struct nfsd4_getdeviceinfo *gdevp); + + __be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp, + struct nfsd4_layoutget *lgp); + 
__be32 (*encode_layoutget)(struct xdr_stream *, + struct nfsd4_layoutget *lgp); + + __be32 (*proc_layoutcommit)(struct inode *inode, + struct nfsd4_layoutcommit *lcp); +}; + +extern const struct nfsd4_layout_ops *nfsd4_layout_ops[]; + +__be32 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, stateid_t *stateid, + bool create, u32 layout_type, struct nfs4_layout_stateid **lsp); +__be32 nfsd4_insert_layout(struct nfsd4_layoutget *lgp, + struct nfs4_layout_stateid *ls); +__be32 nfsd4_return_file_layouts(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_layoutreturn *lrp); +__be32 nfsd4_return_client_layouts(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_layoutreturn *lrp); +int nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp, + u32 device_generation); +struct nfsd4_deviceid_map *nfsd4_find_devid_map(int idx); + +#ifdef CONFIG_NFSD_PNFS +void nfsd4_setup_layout_type(struct svc_export *exp); +void nfsd4_return_all_client_layouts(struct nfs4_client *); +void nfsd4_return_all_file_layouts(struct nfs4_client *clp, + struct nfs4_file *fp); +int nfsd4_init_pnfs(void); +void nfsd4_exit_pnfs(void); +#else +static inline void nfsd4_setup_layout_type(struct svc_export *exp) +{ +} + +static inline void nfsd4_return_all_client_layouts(struct nfs4_client *clp) +{ +} +static inline void nfsd4_return_all_file_layouts(struct nfs4_client *clp, + struct nfs4_file *fp) +{ +} +static inline void nfsd4_exit_pnfs(void) +{ +} +static inline int nfsd4_init_pnfs(void) +{ + return 0; +} +#endif /* CONFIG_NFSD_PNFS */ +#endif /* _FS_NFSD_PNFS_H */ diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 38ebb1268b59..5f66b7fd0297 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -92,6 +92,7 @@ struct nfs4_stid { /* For a deleg stateid kept around only to process free_stateid's: */ #define NFS4_REVOKED_DELEG_STID 16 #define NFS4_CLOSED_DELEG_STID 32 +#define NFS4_LAYOUT_STID 64 unsigned char sc_type; stateid_t sc_stateid; struct nfs4_client *sc_client; @@ -297,6 +298,9 @@ struct nfs4_client { struct list_head cl_delegations; struct list_head cl_revoked; /* unacknowledged, revoked 4.1 state */ struct list_head cl_lru; /* tail queue */ +#ifdef CONFIG_NFSD_PNFS + struct list_head cl_lo_states; /* outstanding layout states */ +#endif struct xdr_netobj cl_name; /* id generated by client */ nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ @@ -496,6 +500,9 @@ struct nfs4_file { int fi_delegees; struct knfsd_fh fi_fhandle; bool fi_had_conflict; +#ifdef CONFIG_NFSD_PNFS + struct list_head fi_lo_states; +#endif }; /* @@ -528,6 +535,20 @@ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s) return container_of(s, struct nfs4_ol_stateid, st_stid); } +struct nfs4_layout_stateid { + struct nfs4_stid ls_stid; + struct list_head ls_perclnt; + struct list_head ls_perfile; + spinlock_t ls_lock; + struct list_head ls_layouts; + u32 ls_layout_type; +}; + +static inline struct nfs4_layout_stateid *layoutstateid(struct nfs4_stid *s) +{ + return container_of(s, struct nfs4_layout_stateid, ls_stid); +} + /* flags for preprocess_seqid_op() */ #define RD_STATE 0x00000010 #define WR_STATE 0x00000020 diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 90a5925bd6ab..0bda93e58e1b 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -428,6 +428,61 @@ struct nfsd4_reclaim_complete { u32 rca_one_fs; }; +struct nfsd4_deviceid { + u64 fsid_idx; + u32 
generation; + u32 pad; +}; + +struct nfsd4_layout_seg { + u32 iomode; + u64 offset; + u64 length; +}; + +struct nfsd4_getdeviceinfo { + struct nfsd4_deviceid gd_devid; /* request */ + u32 gd_layout_type; /* request */ + u32 gd_maxcount; /* request */ + u32 gd_notify_types;/* request - response */ + void *gd_device; /* response */ +}; + +struct nfsd4_layoutget { + u64 lg_minlength; /* request */ + u32 lg_signal; /* request */ + u32 lg_layout_type; /* request */ + u32 lg_maxcount; /* request */ + stateid_t lg_sid; /* request/response */ + struct nfsd4_layout_seg lg_seg; /* request/response */ + void *lg_content; /* response */ +}; + +struct nfsd4_layoutcommit { + stateid_t lc_sid; /* request */ + struct nfsd4_layout_seg lc_seg; /* request */ + u32 lc_reclaim; /* request */ + u32 lc_newoffset; /* request */ + u64 lc_last_wr; /* request */ + struct timespec lc_mtime; /* request */ + u32 lc_layout_type; /* request */ + u32 lc_up_len; /* layout length */ + void *lc_up_layout; /* decoded by callback */ + u32 lc_size_chg; /* boolean for response */ + u64 lc_newsize; /* response */ +}; + +struct nfsd4_layoutreturn { + u32 lr_return_type; /* request */ + u32 lr_layout_type; /* request */ + struct nfsd4_layout_seg lr_seg; /* request */ + u32 lr_reclaim; /* request */ + u32 lrf_body_len; /* request */ + void *lrf_body; /* request */ + stateid_t lr_sid; /* request/response */ + u32 lrs_present; /* response */ +}; + struct nfsd4_fallocate { /* request */ stateid_t falloc_stateid; @@ -491,6 +546,10 @@ struct nfsd4_op { struct nfsd4_reclaim_complete reclaim_complete; struct nfsd4_test_stateid test_stateid; struct nfsd4_free_stateid free_stateid; + struct nfsd4_getdeviceinfo getdeviceinfo; + struct nfsd4_layoutget layoutget; + struct nfsd4_layoutcommit layoutcommit; + struct nfsd4_layoutreturn layoutreturn; /* NFSv4.2 */ struct nfsd4_fallocate allocate; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 8a3589c2542c..bc10d687f2ce 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -411,6 +411,7 @@ enum lock_type4 { #define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22) #define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) #define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) +#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0) #define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) #define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h index 1fdc95bb2375..0bf130a1c58d 100644 --- a/include/uapi/linux/nfsd/debug.h +++ b/include/uapi/linux/nfsd/debug.h @@ -32,6 +32,7 @@ #define NFSDDBG_REPCACHE 0x0080 #define NFSDDBG_XDR 0x0100 #define NFSDDBG_LOCKD 0x0200 +#define NFSDDBG_PNFS 0x0400 #define NFSDDBG_ALL 0x7FFF #define NFSDDBG_NOCHANGE 0xFFFF diff --git a/include/uapi/linux/nfsd/export.h b/include/uapi/linux/nfsd/export.h index 584b6ef3a5e8..4742f2cb42f2 100644 --- a/include/uapi/linux/nfsd/export.h +++ b/include/uapi/linux/nfsd/export.h @@ -47,8 +47,10 @@ * exported filesystem. */ #define NFSEXP_V4ROOT 0x10000 +#define NFSEXP_NOPNFS 0x20000 + /* All flags that we claim to support. (Note we don't support NOACL.) 
*/ -#define NFSEXP_ALLFLAGS 0x1FE7F +#define NFSEXP_ALLFLAGS 0x3FE7F /* The flags that may vary depending on security flavor: */ #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ -- cgit v1.2.3 From c5c707f96fc9a6e5a57ca5baac892673270abe3d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 23 Sep 2014 12:38:48 +0200 Subject: nfsd: implement pNFS layout recalls Add support to issue layout recalls to clients. For now we only support full-file recalls to get a simple and stable implementation. This allows embedding a nfsd4_callback structure in the layout_state and thus avoiding any memory allocations under spinlocks during a recall. For normal use cases that do not intend to share a single file between multiple clients this implementation is fully sufficient. To ensure layouts are recalled on local filesystem access, each layout state registers a new FL_LAYOUT lease with the kernel file locking code, which filesystems that support pNFS exports requiring recalls need to break on conflicting access patterns. The XDR code is based on the old pNFS server implementation by Andy Adamson, Benny Halevy, Boaz Harrosh, Dean Hildebrand, Fred Isaman, Marc Eshel, Mike Sager and Ricardo Labiaga. Signed-off-by: Christoph Hellwig --- fs/nfsd/nfs4callback.c | 99 +++++++++++++++++++++++ fs/nfsd/nfs4layouts.c | 214 ++++++++++++++++++++++++++++++++++++++++++++++++- fs/nfsd/nfs4proc.c | 4 + fs/nfsd/nfs4state.c | 1 + fs/nfsd/state.h | 6 ++ fs/nfsd/xdr4cb.h | 7 ++ 6 files changed, 330 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 7cbdf1b2e4ab..58277859a467 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -546,6 +546,102 @@ out: return status; } +#ifdef CONFIG_NFSD_PNFS +/* + * CB_LAYOUTRECALL4args + * + * struct layoutrecall_file4 { + * nfs_fh4 lor_fh; + * offset4 lor_offset; + * length4 lor_length; + * stateid4 lor_stateid; + * }; + * + * union layoutrecall4 switch(layoutrecall_type4 lor_recalltype) { + * case LAYOUTRECALL4_FILE: + * layoutrecall_file4 lor_layout; + * case LAYOUTRECALL4_FSID: + * fsid4 lor_fsid; + * case LAYOUTRECALL4_ALL: + * void; + * }; + * + * struct CB_LAYOUTRECALL4args { + * layouttype4 clora_type; + * layoutiomode4 clora_iomode; + * bool clora_changed; + * layoutrecall4 clora_recall; + * }; + */ +static void encode_cb_layout4args(struct xdr_stream *xdr, + const struct nfs4_layout_stateid *ls, + struct nfs4_cb_compound_hdr *hdr) +{ + __be32 *p; + + BUG_ON(hdr->minorversion == 0); + + p = xdr_reserve_space(xdr, 5 * 4); + *p++ = cpu_to_be32(OP_CB_LAYOUTRECALL); + *p++ = cpu_to_be32(ls->ls_layout_type); + *p++ = cpu_to_be32(IOMODE_ANY); + *p++ = cpu_to_be32(1); + *p = cpu_to_be32(RETURN_FILE); + + encode_nfs_fh4(xdr, &ls->ls_stid.sc_file->fi_fhandle); + + p = xdr_reserve_space(xdr, 2 * 8); + p = xdr_encode_hyper(p, 0); + xdr_encode_hyper(p, NFS4_MAX_UINT64); + + encode_stateid4(xdr, &ls->ls_recall_sid); + + hdr->nops++; +} + +static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfsd4_callback *cb) +{ + const struct nfs4_layout_stateid *ls = + container_of(cb, struct nfs4_layout_stateid, ls_recall); + struct nfs4_cb_compound_hdr hdr = { + .ident = 0, + .minorversion = cb->cb_minorversion, + }; + + encode_cb_compound4args(xdr, &hdr); + encode_cb_sequence4args(xdr, cb, &hdr); + encode_cb_layout4args(xdr, ls, &hdr); + encode_cb_nops(&hdr); +} + +static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct
nfsd4_callback *cb) +{ + struct nfs4_cb_compound_hdr hdr; + enum nfsstat4 nfserr; + int status; + + status = decode_cb_compound4res(xdr, &hdr); + if (unlikely(status)) + goto out; + if (cb) { + status = decode_cb_sequence4res(xdr, cb); + if (unlikely(status)) + goto out; + } + status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr); + if (unlikely(status)) + goto out; + if (unlikely(nfserr != NFS4_OK)) + status = nfs_cb_stat_to_errno(nfserr); +out: + return status; +} +#endif /* CONFIG_NFSD_PNFS */ + /* * RPC procedure tables */ @@ -563,6 +659,9 @@ out: static struct rpc_procinfo nfs4_cb_procedures[] = { PROC(CB_NULL, NULL, cb_null, cb_null), PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall), +#ifdef CONFIG_NFSD_PNFS + PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout), +#endif }; static struct rpc_version nfs_cb_version4 = { diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 8273270418b1..d926865df94f 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -1,8 +1,11 @@ /* * Copyright (c) 2014 Christoph Hellwig. */ +#include +#include #include #include +#include #include "pnfs.h" #include "netns.h" @@ -18,6 +21,9 @@ struct nfs4_layout { static struct kmem_cache *nfs4_layout_cache; static struct kmem_cache *nfs4_layout_stateid_cache; +static struct nfsd4_callback_ops nfsd4_cb_layout_ops; +static const struct lock_manager_operations nfsd4_layouts_lm_ops; + const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = { }; @@ -127,9 +133,42 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid) list_del_init(&ls->ls_perfile); spin_unlock(&fp->fi_lock); + vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls); + fput(ls->ls_file); + + if (ls->ls_recalled) + atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls); + kmem_cache_free(nfs4_layout_stateid_cache, ls); } +static int +nfsd4_layout_setlease(struct nfs4_layout_stateid *ls) +{ + struct file_lock *fl; + int status; + + fl = locks_alloc_lock(); + if (!fl) + return -ENOMEM; + locks_init_lock(fl); + fl->fl_lmops = &nfsd4_layouts_lm_ops; + fl->fl_flags = FL_LAYOUT; + fl->fl_type = F_RDLCK; + fl->fl_end = OFFSET_MAX; + fl->fl_owner = ls; + fl->fl_pid = current->tgid; + fl->fl_file = ls->ls_file; + + status = vfs_setlease(fl->fl_file, fl->fl_type, &fl, NULL); + if (status) { + locks_free_lock(fl); + return status; + } + BUG_ON(fl != NULL); + return 0; +} + static struct nfs4_layout_stateid * nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, struct nfs4_stid *parent, u32 layout_type) @@ -152,6 +191,20 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, spin_lock_init(&ls->ls_lock); INIT_LIST_HEAD(&ls->ls_layouts); ls->ls_layout_type = layout_type; + nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops, + NFSPROC4_CLNT_CB_LAYOUT); + + if (parent->sc_type == NFS4_DELEG_STID) + ls->ls_file = get_file(fp->fi_deleg_file); + else + ls->ls_file = find_any_file(fp); + BUG_ON(!ls->ls_file); + + if (nfsd4_layout_setlease(ls)) { + put_nfs4_file(fp); + kmem_cache_free(nfs4_layout_stateid_cache, ls); + return NULL; + } spin_lock(&clp->cl_lock); stp->sc_type = NFS4_LAYOUT_STID; @@ -215,6 +268,27 @@ out: return status; } +static void +nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls) +{ + spin_lock(&ls->ls_lock); + if (ls->ls_recalled) + goto out_unlock; + + ls->ls_recalled = true; + atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls); + if (list_empty(&ls->ls_layouts)) + goto out_unlock; + + atomic_inc(&ls->ls_stid.sc_count); + update_stateid(&ls->ls_stid.sc_stateid); + memcpy(&ls->ls_recall_sid, 
&ls->ls_stid.sc_stateid, sizeof(stateid_t)); + nfsd4_run_cb(&ls->ls_recall); + +out_unlock: + spin_unlock(&ls->ls_lock); +} + static inline u64 layout_end(struct nfsd4_layout_seg *seg) { @@ -258,18 +332,44 @@ layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new) return true; } +static __be32 +nfsd4_recall_conflict(struct nfs4_layout_stateid *ls) +{ + struct nfs4_file *fp = ls->ls_stid.sc_file; + struct nfs4_layout_stateid *l, *n; + __be32 nfserr = nfs_ok; + + assert_spin_locked(&fp->fi_lock); + + list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) { + if (l != ls) { + nfsd4_recall_file_layout(l); + nfserr = nfserr_recallconflict; + } + } + + return nfserr; +} + __be32 nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls) { struct nfsd4_layout_seg *seg = &lgp->lg_seg; + struct nfs4_file *fp = ls->ls_stid.sc_file; struct nfs4_layout *lp, *new = NULL; + __be32 nfserr; + spin_lock(&fp->fi_lock); + nfserr = nfsd4_recall_conflict(ls); + if (nfserr) + goto out; spin_lock(&ls->ls_lock); list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) { if (layouts_try_merge(&lp->lo_seg, seg)) goto done; } spin_unlock(&ls->ls_lock); + spin_unlock(&fp->fi_lock); new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL); if (!new) @@ -277,6 +377,10 @@ nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls) memcpy(&new->lo_seg, seg, sizeof(lp->lo_seg)); new->lo_state = ls; + spin_lock(&fp->fi_lock); + nfserr = nfsd4_recall_conflict(ls); + if (nfserr) + goto out; spin_lock(&ls->ls_lock); list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) { if (layouts_try_merge(&lp->lo_seg, seg)) @@ -290,9 +394,11 @@ done: update_stateid(&ls->ls_stid.sc_stateid); memcpy(&lgp->lg_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t)); spin_unlock(&ls->ls_lock); +out: + spin_unlock(&fp->fi_lock); if (new) kmem_cache_free(nfs4_layout_cache, new); - return nfs_ok; + return nfserr; } static void @@ -448,6 +554,112 @@ nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp) nfsd4_free_layouts(&reaplist); } +static void +nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) +{ + struct nfs4_client *clp = ls->ls_stid.sc_client; + char addr_str[INET6_ADDRSTRLEN]; + static char *envp[] = { + "HOME=/", + "TERM=linux", + "PATH=/sbin:/usr/sbin:/bin:/usr/bin", + NULL + }; + char *argv[8]; + int error; + + rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); + + printk(KERN_WARNING + "nfsd: client %s failed to respond to layout recall. " + " Fencing..\n", addr_str); + + argv[0] = "/sbin/nfsd-recall-failed"; + argv[1] = addr_str; + argv[2] = ls->ls_file->f_path.mnt->mnt_sb->s_id; + argv[3] = NULL; + + error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); + if (error) { + printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n", + addr_str, error); + } +} + +static int +nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) +{ + struct nfs4_layout_stateid *ls = + container_of(cb, struct nfs4_layout_stateid, ls_recall); + LIST_HEAD(reaplist); + + switch (task->tk_status) { + case 0: + return 1; + case -NFS4ERR_NOMATCHING_LAYOUT: + task->tk_status = 0; + return 1; + case -NFS4ERR_DELAY: + /* Poll the client until it's done with the layout */ + /* FIXME: cap number of retries. 
+ * The pnfs standard states that we need to only expire + * the client after at-least "lease time" .eg lease-time * 2 + * when failing to communicate a recall + */ + rpc_delay(task, HZ/100); /* 10 mili-seconds */ + return 0; + default: + /* + * Unknown error or non-responding client, we'll need to fence. + */ + nfsd4_cb_layout_fail(ls); + return -1; + } +} + +static void +nfsd4_cb_layout_release(struct nfsd4_callback *cb) +{ + struct nfs4_layout_stateid *ls = + container_of(cb, struct nfs4_layout_stateid, ls_recall); + LIST_HEAD(reaplist); + + nfsd4_return_all_layouts(ls, &reaplist); + nfsd4_free_layouts(&reaplist); + nfs4_put_stid(&ls->ls_stid); +} + +static struct nfsd4_callback_ops nfsd4_cb_layout_ops = { + .done = nfsd4_cb_layout_done, + .release = nfsd4_cb_layout_release, +}; + +static bool +nfsd4_layout_lm_break(struct file_lock *fl) +{ + /* + * We don't want the locks code to timeout the lease for us; + * we'll remove it ourself if a layout isn't returned + * in time: + */ + fl->fl_break_time = 0; + nfsd4_recall_file_layout(fl->fl_owner); + return false; +} + +static int +nfsd4_layout_lm_change(struct file_lock *onlist, int arg, + struct list_head *dispose) +{ + BUG_ON(!(arg & F_UNLCK)); + return lease_modify(onlist, arg, dispose); +} + +static const struct lock_manager_operations nfsd4_layouts_lm_ops = { + .lm_break = nfsd4_layout_lm_break, + .lm_change = nfsd4_layout_lm_change, +}; + int nfsd4_init_pnfs(void) { diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 2b91443497cc..fa14359eb956 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1301,6 +1301,10 @@ nfsd4_layoutget(struct svc_rqst *rqstp, if (nfserr) goto out; + nfserr = nfserr_recallconflict; + if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls)) + goto out_put_stid; + nfserr = ops->proc_layoutget(current_fh->fh_dentry->d_inode, current_fh, lgp); if (nfserr) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c89f79dc69e2..f6b2a09f793f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3065,6 +3065,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, memset(fp->fi_access, 0, sizeof(fp->fi_access)); #ifdef CONFIG_NFSD_PNFS INIT_LIST_HEAD(&fp->fi_lo_states); + atomic_set(&fp->fi_lo_recalls, 0); #endif hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]); } diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 5f66b7fd0297..4f3bfeb11766 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -502,6 +502,7 @@ struct nfs4_file { bool fi_had_conflict; #ifdef CONFIG_NFSD_PNFS struct list_head fi_lo_states; + atomic_t fi_lo_recalls; #endif }; @@ -542,6 +543,10 @@ struct nfs4_layout_stateid { spinlock_t ls_lock; struct list_head ls_layouts; u32 ls_layout_type; + struct file *ls_file; + struct nfsd4_callback ls_recall; + stateid_t ls_recall_sid; + bool ls_recalled; }; static inline struct nfs4_layout_stateid *layoutstateid(struct nfs4_stid *s) @@ -556,6 +561,7 @@ static inline struct nfs4_layout_stateid *layoutstateid(struct nfs4_stid *s) enum nfsd4_cb_op { NFSPROC4_CLNT_CB_NULL = 0, NFSPROC4_CLNT_CB_RECALL, + NFSPROC4_CLNT_CB_LAYOUT, NFSPROC4_CLNT_CB_SEQUENCE, }; diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h index c5c55dfb91a9..c47f6fdb111a 100644 --- a/fs/nfsd/xdr4cb.h +++ b/fs/nfsd/xdr4cb.h @@ -21,3 +21,10 @@ #define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \ cb_sequence_dec_sz + \ op_dec_sz) +#define NFS4_enc_cb_layout_sz (cb_compound_enc_hdr_sz + \ + cb_sequence_enc_sz + \ + 1 + 3 + \ + enc_nfs4_fh_sz + 4) +#define NFS4_dec_cb_layout_sz 
(cb_compound_dec_hdr_sz + \ + cb_sequence_dec_sz + \ + op_dec_sz) -- cgit v1.2.3 From 31ef83dc053835fc14741426e20c60dbbba8c13d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 16 Aug 2014 19:02:22 -0500 Subject: nfsd: add trace events For now just a few simple events to trace the layout stateid lifetime, but these already were enough to find several bugs in the Linux client layout stateid handling. Signed-off-by: Christoph Hellwig --- fs/nfsd/Makefile | 7 ++++++- fs/nfsd/nfs4layouts.c | 16 ++++++++++++++- fs/nfsd/nfs4proc.c | 6 +++++- fs/nfsd/trace.c | 5 +++++ fs/nfsd/trace.h | 54 +++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 85 insertions(+), 3 deletions(-) create mode 100644 fs/nfsd/trace.c create mode 100644 fs/nfsd/trace.h (limited to 'fs') diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile index 5806270a8567..6cba933880c5 100644 --- a/fs/nfsd/Makefile +++ b/fs/nfsd/Makefile @@ -2,9 +2,14 @@ # Makefile for the Linux nfs server # +ccflags-y += -I$(src) # needed for trace events + obj-$(CONFIG_NFSD) += nfsd.o -nfsd-y := nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \ +# this one should be compiled first, as the tracing macros can easily blow up +nfsd-y += trace.o + +nfsd-y += nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \ export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o nfsd-$(CONFIG_NFSD_FAULT_INJECTION) += fault_inject.o nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index d926865df94f..60137c54b2f7 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -9,6 +9,7 @@ #include "pnfs.h" #include "netns.h" +#include "trace.h" #define NFSDDBG_FACILITY NFSDDBG_PNFS @@ -125,6 +126,8 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid) struct nfs4_client *clp = ls->ls_stid.sc_client; struct nfs4_file *fp = ls->ls_stid.sc_file; + trace_layoutstate_free(&ls->ls_stid.sc_stateid); + spin_lock(&clp->cl_lock); list_del_init(&ls->ls_perclnt); spin_unlock(&clp->cl_lock); @@ -215,6 +218,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate, list_add(&ls->ls_perfile, &fp->fi_lo_states); spin_unlock(&fp->fi_lock); + trace_layoutstate_alloc(&ls->ls_stid.sc_stateid); return ls; } @@ -280,6 +284,8 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls) if (list_empty(&ls->ls_layouts)) goto out_unlock; + trace_layout_recall(&ls->ls_stid.sc_stateid); + atomic_inc(&ls->ls_stid.sc_count); update_stateid(&ls->ls_stid.sc_stateid); memcpy(&ls->ls_recall_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t)); @@ -454,8 +460,10 @@ nfsd4_return_file_layouts(struct svc_rqst *rqstp, nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid, false, lrp->lr_layout_type, &ls); - if (nfserr) + if (nfserr) { + trace_layout_return_lookup_fail(&lrp->lr_sid); return nfserr; + } spin_lock(&ls->ls_lock); list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) { @@ -472,6 +480,7 @@ nfsd4_return_file_layouts(struct svc_rqst *rqstp, } lrp->lrs_present = 1; } else { + trace_layoutstate_unhash(&ls->ls_stid.sc_stateid); nfs4_unhash_stid(&ls->ls_stid); lrp->lrs_present = 0; } @@ -570,6 +579,8 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls) rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str)); + nfsd4_cb_layout_fail(ls); + printk(KERN_WARNING "nfsd: client %s failed to respond to layout recall. 
" " Fencing..\n", addr_str); @@ -597,6 +608,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) case 0: return 1; case -NFS4ERR_NOMATCHING_LAYOUT: + trace_layout_recall_done(&ls->ls_stid.sc_stateid); task->tk_status = 0; return 1; case -NFS4ERR_DELAY: @@ -624,6 +636,8 @@ nfsd4_cb_layout_release(struct nfsd4_callback *cb) container_of(cb, struct nfs4_layout_stateid, ls_recall); LIST_HEAD(reaplist); + trace_layout_recall_release(&ls->ls_stid.sc_stateid); + nfsd4_return_all_layouts(ls, &reaplist); nfsd4_free_layouts(&reaplist); nfs4_put_stid(&ls->ls_stid); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index fa14359eb956..d30bea8d0277 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -44,6 +44,7 @@ #include "netns.h" #include "acl.h" #include "pnfs.h" +#include "trace.h" #ifdef CONFIG_NFSD_V4_SECURITY_LABEL #include @@ -1298,8 +1299,10 @@ nfsd4_layoutget(struct svc_rqst *rqstp, nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lgp->lg_sid, true, lgp->lg_layout_type, &ls); - if (nfserr) + if (nfserr) { + trace_layout_get_lookup_fail(&lgp->lg_sid); goto out; + } nfserr = nfserr_recallconflict; if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls)) @@ -1359,6 +1362,7 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp, false, lcp->lc_layout_type, &ls); if (nfserr) { + trace_layout_commit_lookup_fail(&lcp->lc_sid); /* fixup error code as per RFC5661 */ if (nfserr == nfserr_bad_stateid) nfserr = nfserr_badlayout; diff --git a/fs/nfsd/trace.c b/fs/nfsd/trace.c new file mode 100644 index 000000000000..82f89070594c --- /dev/null +++ b/fs/nfsd/trace.c @@ -0,0 +1,5 @@ + +#include "state.h" + +#define CREATE_TRACE_POINTS +#include "trace.h" diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h new file mode 100644 index 000000000000..c668520c344b --- /dev/null +++ b/fs/nfsd/trace.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2014 Christoph Hellwig. + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM nfsd + +#if !defined(_NFSD_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _NFSD_TRACE_H + +#include + +DECLARE_EVENT_CLASS(nfsd_stateid_class, + TP_PROTO(stateid_t *stp), + TP_ARGS(stp), + TP_STRUCT__entry( + __field(u32, cl_boot) + __field(u32, cl_id) + __field(u32, si_id) + __field(u32, si_generation) + ), + TP_fast_assign( + __entry->cl_boot = stp->si_opaque.so_clid.cl_boot; + __entry->cl_id = stp->si_opaque.so_clid.cl_id; + __entry->si_id = stp->si_opaque.so_id; + __entry->si_generation = stp->si_generation; + ), + TP_printk("client %08x:%08x stateid %08x:%08x", + __entry->cl_boot, + __entry->cl_id, + __entry->si_id, + __entry->si_generation) +) + +#define DEFINE_STATEID_EVENT(name) \ +DEFINE_EVENT(nfsd_stateid_class, name, \ + TP_PROTO(stateid_t *stp), \ + TP_ARGS(stp)) +DEFINE_STATEID_EVENT(layoutstate_alloc); +DEFINE_STATEID_EVENT(layoutstate_unhash); +DEFINE_STATEID_EVENT(layoutstate_free); +DEFINE_STATEID_EVENT(layout_get_lookup_fail); +DEFINE_STATEID_EVENT(layout_commit_lookup_fail); +DEFINE_STATEID_EVENT(layout_return_lookup_fail); +DEFINE_STATEID_EVENT(layout_recall); +DEFINE_STATEID_EVENT(layout_recall_done); +DEFINE_STATEID_EVENT(layout_recall_fail); +DEFINE_STATEID_EVENT(layout_recall_release); + +#endif /* _NFSD_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . 
+#define TRACE_INCLUDE_FILE trace +#include -- cgit v1.2.3 From 648695c74811f09a8ad80d7c3be72b8169589a64 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Sun, 1 Feb 2015 17:00:24 +0100 Subject: jfs: Deletion of an unnecessary check before the function call "unload_nls" The unload_nls() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Dave Kleikamp --- fs/jfs/super.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 16c3a9556634..5d30c56ae075 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -619,8 +619,7 @@ out_mount_failed: iput(sbi->direct_inode); sbi->direct_inode = NULL; out_unload: - if (sbi->nls_tab) - unload_nls(sbi->nls_tab); + unload_nls(sbi->nls_tab); out_kfree: kfree(sbi); return ret; -- cgit v1.2.3 From a937b9791ec2ee71d5e303d2c02c8c1ad8abff35 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 12 Dec 2014 17:39:12 +0100 Subject: btrfs: kill btrfs_inode_*time helpers They just opencode taking address of the timespec member. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 25 ------------------------- fs/btrfs/delayed-inode.c | 28 ++++++++++++---------------- fs/btrfs/inode.c | 28 ++++++++++++---------------- fs/btrfs/send.c | 9 +++------ fs/btrfs/tree-log.c | 12 ++++++------ 5 files changed, 33 insertions(+), 69 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 85c697d482c2..54a66fa6beb1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2467,31 +2467,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); - -static inline struct btrfs_timespec * -btrfs_inode_atime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, atime); - return (struct btrfs_timespec *)ptr; -} - -static inline struct btrfs_timespec * -btrfs_inode_mtime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, mtime); - return (struct btrfs_timespec *)ptr; -} - -static inline struct btrfs_timespec * -btrfs_inode_ctime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, ctime); - return (struct btrfs_timespec *)ptr; -} - BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index de4e70fb3cbb..116eb4bed8d3 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1755,19 +1755,19 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); btrfs_set_stack_inode_block_group(inode_item, 0); - btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), + btrfs_set_stack_timespec_sec(&inode_item->atime, inode->i_atime.tv_sec); - btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), + 
btrfs_set_stack_timespec_nsec(&inode_item->atime, inode->i_atime.tv_nsec); - btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), + btrfs_set_stack_timespec_sec(&inode_item->mtime, inode->i_mtime.tv_sec); - btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), + btrfs_set_stack_timespec_nsec(&inode_item->mtime, inode->i_mtime.tv_nsec); - btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), + btrfs_set_stack_timespec_sec(&inode_item->ctime, inode->i_ctime.tv_sec); - btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), + btrfs_set_stack_timespec_nsec(&inode_item->ctime, inode->i_ctime.tv_nsec); } @@ -1775,7 +1775,6 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) { struct btrfs_delayed_node *delayed_node; struct btrfs_inode_item *inode_item; - struct btrfs_timespec *tspec; delayed_node = btrfs_get_delayed_node(inode); if (!delayed_node) @@ -1802,17 +1801,14 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) *rdev = btrfs_stack_inode_rdev(inode_item); BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); - tspec = btrfs_inode_atime(inode_item); - inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec); - inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime); + inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime); - tspec = btrfs_inode_mtime(inode_item); - inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec); - inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime); + inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime); - tspec = btrfs_inode_ctime(inode_item); - inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec); - inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime); + inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime); inode->i_generation = BTRFS_I(inode)->generation; BTRFS_I(inode)->index_cnt = (u64)-1; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2d14acbdae47..575164d802af 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3490,7 +3490,6 @@ static void btrfs_read_locked_inode(struct inode *inode) struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_inode_item *inode_item; - struct btrfs_timespec *tspec; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key location; unsigned long ptr; @@ -3527,17 +3526,14 @@ static void btrfs_read_locked_inode(struct inode *inode) i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); - tspec = btrfs_inode_atime(inode_item); - inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); - inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); + inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); + inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); - tspec = btrfs_inode_mtime(inode_item); - inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); - inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); + inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); + inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); - tspec = btrfs_inode_ctime(inode_item); - inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); - inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); + inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); + 
inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); @@ -3658,19 +3654,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_sec(leaf, &item->atime, inode->i_atime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_nsec(leaf, &item->atime, inode->i_atime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_sec(leaf, &item->mtime, inode->i_mtime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_nsec(leaf, &item->mtime, inode->i_mtime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_sec(leaf, &item->ctime, inode->i_ctime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_nsec(leaf, &item->ctime, inode->i_ctime.tv_nsec, &token); btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 804432dbc351..fe5857223515 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -2471,12 +2471,9 @@ verbose_printk("btrfs: send_utimes %llu\n", ino); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); - TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, - btrfs_inode_atime(ii)); - TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, - btrfs_inode_mtime(ii)); - TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, - btrfs_inode_ctime(ii)); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); /* TODO Add otime support when the otime patches get into upstream */ ret = send_cmd(sctx); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a26658756537..79f05bc9d6ec 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3276,19 +3276,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_sec(leaf, &item->atime, inode->i_atime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_nsec(leaf, &item->atime, inode->i_atime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_sec(leaf, &item->mtime, inode->i_mtime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_nsec(leaf, &item->mtime, inode->i_mtime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_sec(leaf, &item->ctime, inode->i_ctime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_nsec(leaf, &item->ctime, inode->i_ctime.tv_nsec, &token); btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), -- cgit v1.2.3 From 9cc97d646216b6f2473fa4ab9f103514b86c6814 Mon Sep 17 
00:00:00 2001 From: chandan r Date: Wed, 4 Jul 2012 12:48:07 +0530 Subject: Btrfs: Add code to support file creation time This patch adds a new member to the 'struct btrfs_inode' structure to hold the file creation time. Signed-off-by: chandan [refreshed, removed btrfs_inode_otime] Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/btrfs_inode.h | 3 +++ fs/btrfs/delayed-inode.c | 10 ++++++++++ fs/btrfs/inode.c | 25 +++++++++++++++++++++++-- 3 files changed, 36 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 4aadadcfab20..de5e4f2adfea 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -185,6 +185,9 @@ struct btrfs_inode { struct btrfs_delayed_node *delayed_node; + /* File creation time. */ + struct timespec i_otime; + struct inode vfs_inode; }; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 116eb4bed8d3..82f0c7c95474 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1769,6 +1769,11 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, inode->i_ctime.tv_sec); btrfs_set_stack_timespec_nsec(&inode_item->ctime, inode->i_ctime.tv_nsec); + + btrfs_set_stack_timespec_sec(&inode_item->otime, + BTRFS_I(inode)->i_otime.tv_sec); + btrfs_set_stack_timespec_nsec(&inode_item->otime, + BTRFS_I(inode)->i_otime.tv_nsec); } int btrfs_fill_inode(struct inode *inode, u32 *rdev) @@ -1810,6 +1815,11 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime); inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime); + BTRFS_I(inode)->i_otime.tv_sec = + btrfs_stack_timespec_sec(&inode_item->otime); + BTRFS_I(inode)->i_otime.tv_nsec = + btrfs_stack_timespec_nsec(&inode_item->otime); + inode->i_generation = BTRFS_I(inode)->generation; BTRFS_I(inode)->index_cnt = (u64)-1; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 575164d802af..b0292333fd84 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3535,6 +3535,11 @@ static void btrfs_read_locked_inode(struct inode *inode) inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); + BTRFS_I(inode)->i_otime.tv_sec = + btrfs_timespec_sec(leaf, &inode_item->otime); + BTRFS_I(inode)->i_otime.tv_nsec = + btrfs_timespec_nsec(leaf, &inode_item->otime); + inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); @@ -3669,6 +3674,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_timespec_nsec(leaf, &item->ctime, inode->i_ctime.tv_nsec, &token); + btrfs_set_token_timespec_sec(leaf, &item->otime, + BTRFS_I(inode)->i_otime.tv_sec, &token); + btrfs_set_token_timespec_nsec(leaf, &item->otime, + BTRFS_I(inode)->i_otime.tv_nsec, &token); + btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), &token); btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, @@ -5260,7 +5270,10 @@ static struct inode *new_simple_dir(struct super_block *s, inode->i_op = &btrfs_dir_ro_inode_operations; inode->i_fop = &simple_dir_operations; inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; - inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode->i_mtime = CURRENT_TIME; + inode->i_atime = inode->i_mtime; + inode->i_ctime = inode->i_mtime; + 
BTRFS_I(inode)->i_otime = inode->i_mtime; return inode; } @@ -5828,7 +5841,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode_init_owner(inode, dir, mode); inode_set_bytes(inode, 0); - inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + + inode->i_mtime = CURRENT_TIME; + inode->i_atime = inode->i_mtime; + inode->i_ctime = inode->i_mtime; + BTRFS_I(inode)->i_otime = inode->i_mtime; + inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, @@ -8577,6 +8595,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->delayed_node = NULL; + ei->i_otime.tv_sec = 0; + ei->i_otime.tv_nsec = 0; + inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree); extent_io_tree_init(&ei->io_tree, &inode->i_data); -- cgit v1.2.3 From 75d6ad382bb91f363452119d34238e156589ca2d Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 31 Oct 2014 17:18:08 +0100 Subject: btrfs: more superblock checks, lower bounds on devices and sectorsize/nodesize I received a few crafted images from Jiri, all got through the recently added superblock checks. The lower bounds checks for num_devices and sector/node -sizes were missing and caused a crash during mount. Tools for symbolic code execution were used to prepare the images contents. Reported-by: Jiri Slaby Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 612d46eece62..11171362bd33 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3871,6 +3871,21 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n", btrfs_super_log_root(sb)); + /* + * Check the lower bound, the alignment and other constraints are + * checked later. + */ + if (btrfs_super_nodesize(sb) < 4096) { + printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n", + btrfs_super_nodesize(sb)); + ret = -EINVAL; + } + if (btrfs_super_sectorsize(sb) < 4096) { + printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n", + btrfs_super_sectorsize(sb)); + ret = -EINVAL; + } + if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) { printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n", fs_info->fsid, sb->dev_item.fsid); @@ -3884,6 +3899,10 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, if (btrfs_super_num_devices(sb) > (1UL << 31)) printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", btrfs_super_num_devices(sb)); + if (btrfs_super_num_devices(sb) == 0) { + printk(KERN_ERR "BTRFS: number of devices is 0\n"); + ret = -EINVAL; + } if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) { printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n", -- cgit v1.2.3 From ce7fca5f57ed0fcd7e7b3d7b1a3e1791f8e56fa3 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 31 Oct 2014 18:42:05 +0100 Subject: btrfs: add checks for sys_chunk_array sizes Verify that possible minimum and maximum size is set, validity of contents is checked in btrfs_read_sys_array. 
Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 11171362bd33..263d1471d01a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3910,6 +3910,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, ret = -EINVAL; } + /* + * Obvious sys_chunk_array corruptions, it must hold at least one key + * and one chunk + */ + if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { + printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n", + btrfs_super_sys_array_size(sb), + BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); + ret = -EINVAL; + } + if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) + + sizeof(struct btrfs_chunk)) { + printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", + btrfs_super_sys_array_size(sb), + sizeof(struct btrfs_disk_key) + + sizeof(struct btrfs_chunk)); + ret = -EINVAL; + } + /* * The generation is a global counter, we'll trust it more than the others * but it's still possible that it's the one that's wrong. -- cgit v1.2.3 From 1ffb22cf8c322bbfea6b35fe23d025841b49fede Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 31 Oct 2014 19:02:42 +0100 Subject: btrfs: cleanup, rename a few variables in btrfs_read_sys_array There's a pointer to buffer, integer offset and offset passed as pointer, try to find matching names for them. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index da7e0e1107d2..b33c83be2a97 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6252,13 +6252,13 @@ int btrfs_read_sys_array(struct btrfs_root *root) struct extent_buffer *sb; struct btrfs_disk_key *disk_key; struct btrfs_chunk *chunk; - u8 *ptr; - unsigned long sb_ptr; + u8 *array_ptr; + unsigned long sb_array_offset; int ret = 0; u32 num_stripes; u32 array_size; u32 len = 0; - u32 cur; + u32 cur_offset; struct btrfs_key key; ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); @@ -6290,20 +6290,21 @@ int btrfs_read_sys_array(struct btrfs_root *root) write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); array_size = btrfs_super_sys_array_size(super_copy); - ptr = super_copy->sys_chunk_array; - sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); - cur = 0; + array_ptr = super_copy->sys_chunk_array; + sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); + cur_offset = 0; - while (cur < array_size) { - disk_key = (struct btrfs_disk_key *)ptr; + while (cur_offset < array_size) { + disk_key = (struct btrfs_disk_key *)array_ptr; btrfs_disk_key_to_cpu(&key, disk_key); - len = sizeof(*disk_key); ptr += len; - sb_ptr += len; - cur += len; + len = sizeof(*disk_key); + array_ptr += len; + sb_array_offset += len; + cur_offset += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { - chunk = (struct btrfs_chunk *)sb_ptr; + chunk = (struct btrfs_chunk *)sb_array_offset; ret = read_one_chunk(root, &key, sb, chunk); if (ret) break; @@ -6313,9 +6314,9 @@ int btrfs_read_sys_array(struct btrfs_root *root) ret = -EIO; break; } - ptr += len; - sb_ptr += len; - cur += len; + array_ptr += len; + sb_array_offset += len; + cur_offset += len; } free_extent_buffer(sb); return ret; -- cgit v1.2.3 From e3540eab29e1b2260bc4b9b3979a49a00e3e3af8 Mon Sep 17 00:00:00 2001 From: David Sterba Date: 
Wed, 5 Nov 2014 15:24:51 +0100 Subject: btrfs: add more checks to btrfs_read_sys_array Verify that the sys_array has enough bytes to read the next item. Signed-off-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b33c83be2a97..2c4cab2dbd1a 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6296,20 +6296,34 @@ int btrfs_read_sys_array(struct btrfs_root *root) while (cur_offset < array_size) { disk_key = (struct btrfs_disk_key *)array_ptr; + len = sizeof(*disk_key); + if (cur_offset + len > array_size) + goto out_short_read; + btrfs_disk_key_to_cpu(&key, disk_key); - len = sizeof(*disk_key); array_ptr += len; sb_array_offset += len; cur_offset += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { chunk = (struct btrfs_chunk *)sb_array_offset; + /* + * At least one btrfs_chunk with one stripe must be + * present, exact stripe count check comes afterwards + */ + len = btrfs_chunk_item_size(1); + if (cur_offset + len > array_size) + goto out_short_read; + + num_stripes = btrfs_chunk_num_stripes(sb, chunk); + len = btrfs_chunk_item_size(num_stripes); + if (cur_offset + len > array_size) + goto out_short_read; + ret = read_one_chunk(root, &key, sb, chunk); if (ret) break; - num_stripes = btrfs_chunk_num_stripes(sb, chunk); - len = btrfs_chunk_item_size(num_stripes); } else { ret = -EIO; break; @@ -6320,6 +6334,12 @@ int btrfs_read_sys_array(struct btrfs_root *root) } free_extent_buffer(sb); return ret; + +out_short_read: + printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", + len, cur_offset); + free_extent_buffer(sb); + return -EIO; } int btrfs_read_chunk_tree(struct btrfs_root *root) -- cgit v1.2.3 From d4b450cd4b33ce7c572e7fdccf33b59c4cdf361c Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 29 Jan 2015 19:18:25 +0000 Subject: Btrfs: fix race between transaction commit and empty block group removal Committing a transaction can race with automatic removal of empty block groups (cleaner kthread), leading to a BUG_ON() in the transaction commit code while running btrfs_finish_extent_commit(). The following sequence diagram shows how it can happen: CPU 1 CPU 2 btrfs_commit_transaction() fs_info->running_transaction = NULL btrfs_finish_extent_commit() find_first_extent_bit() -> found range for block group X in fs_info->freed_extents[] btrfs_delete_unused_bgs() -> found block group X Removed block group X's range from fs_info->freed_extents[] btrfs_remove_chunk() btrfs_remove_block_group(bg X) unpin_extent_range(bg X range) btrfs_lookup_block_group(bg X) -> returns NULL -> BUG_ON() The trace that results from the BUG_ON() is: [48665.187808] ------------[ cut here ]------------ [48665.188032] kernel BUG at fs/btrfs/extent-tree.c:5675! 
[48665.188032] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC [48665.188032] Modules linked in: dm_flakey dm_mod crc32c_generic btrfs xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop parport_pc evdev microcode [48665.197388] CPU: 2 PID: 31211 Comm: kworker/u32:16 Tainted: G W 3.19.0-rc5-btrfs-next-4+ #1 [48665.197388] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 [48665.197388] Workqueue: events_unbound btrfs_async_reclaim_metadata_space [btrfs] [48665.197388] task: ffff880222011810 ti: ffff8801b56a4000 task.ti: ffff8801b56a4000 [48665.197388] RIP: 0010:[] [] unpin_extent_range+0x6a/0x1ba [btrfs] [48665.197388] RSP: 0018:ffff8801b56a7b88 EFLAGS: 00010246 [48665.197388] RAX: 0000000000000000 RBX: ffff8802143a6000 RCX: ffff8802220120c8 [48665.197388] RDX: 0000000000000001 RSI: 0000000000000001 RDI: ffff8800a3c140b0 [48665.197388] RBP: ffff8801b56a7bd8 R08: 0000000000000003 R09: 0000000000000000 [48665.197388] R10: 0000000000000000 R11: 000000000000bbac R12: 0000000012e8e000 [48665.197388] R13: ffff8800a3c14000 R14: 0000000000000000 R15: 0000000000000000 [48665.197388] FS: 0000000000000000(0000) GS:ffff88023ec40000(0000) knlGS:0000000000000000 [48665.197388] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [48665.197388] CR2: 00007f065e42f270 CR3: 0000000206f70000 CR4: 00000000000006e0 [48665.197388] Stack: [48665.197388] ffff8801b56a7bd8 0000000012ea0000 01ff8800a3c14138 0000000012e9ffff [48665.197388] ffff880141df3dd8 ffff8802143a6000 ffff8800a3c14138 ffff880141df3df0 [48665.197388] ffff880141df3dd8 0000000000000000 ffff8801b56a7c08 ffffffffa0354227 [48665.197388] Call Trace: [48665.197388] [] btrfs_finish_extent_commit+0xb0/0xd9 [btrfs] [48665.197388] [] btrfs_commit_transaction+0x791/0x92c [btrfs] [48665.197388] [] flush_space+0x43d/0x452 [btrfs] [48665.197388] [] ? _raw_spin_unlock+0x28/0x33 [48665.197388] [] btrfs_async_reclaim_metadata_space+0x118/0x164 [btrfs] [48665.197388] [] ? process_one_work+0x14b/0x3ab [48665.197388] [] process_one_work+0x1e0/0x3ab [48665.197388] [] ? trace_hardirqs_off+0xd/0xf [48665.197388] [] worker_thread+0x210/0x2d0 [48665.197388] [] ? rescuer_thread+0x2c3/0x2c3 [48665.197388] [] kthread+0xef/0xf7 [48665.197388] [] ? _raw_spin_unlock_irq+0x2d/0x39 [48665.197388] [] ? __kthread_parkme+0xad/0xad [48665.197388] [] ret_from_fork+0x7c/0xb0 [48665.197388] [] ? 
__kthread_parkme+0xad/0xad [48665.197388] Code: 85 f6 74 14 49 8b 06 49 03 46 09 49 39 c4 72 1d 4c 89 f7 e8 83 ec ff ff 4c 89 e6 4c 89 ef e8 1e f1 ff ff 48 85 c0 49 89 c6 75 02 <0f> 0b 49 8b 1e 49 03 5e 09 48 8b [48665.197388] RIP [] unpin_extent_range+0x6a/0x1ba [btrfs] [48665.197388] RSP [48665.272246] ---[ end trace b9c6ab9957521376 ]--- Fix this by ensuring that unpining the block group's range in btrfs_finish_extent_commit() is done in a synchronized fashion with removing the block group's range from freed_extents[] in btrfs_delete_unused_bgs() This race got introduced with the change: Btrfs: remove empty block groups automatically commit 47ab2a6c689913db23ccae38349714edf8365e0a Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/disk-io.c | 1 + fs/btrfs/extent-tree.c | 21 ++++++++++++++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 54a66fa6beb1..d3562dd43c66 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1744,6 +1744,7 @@ struct btrfs_fs_info { spinlock_t unused_bgs_lock; struct list_head unused_bgs; + struct mutex unused_bg_unpin_mutex; /* For btrfs to record security options */ struct security_mnt_opts security_opts; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 263d1471d01a..41b320e235d7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2242,6 +2242,7 @@ int open_ctree(struct super_block *sb, spin_lock_init(&fs_info->qgroup_op_lock); spin_lock_init(&fs_info->buffer_lock); spin_lock_init(&fs_info->unused_bgs_lock); + mutex_init(&fs_info->unused_bg_unpin_mutex); rwlock_init(&fs_info->tree_mod_log_lock); mutex_init(&fs_info->reloc_mutex); mutex_init(&fs_info->delalloc_root_mutex); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 53294da0749d..857a859948a3 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5735,10 +5735,13 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, unpin = &fs_info->freed_extents[0]; while (1) { + mutex_lock(&fs_info->unused_bg_unpin_mutex); ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, NULL); - if (ret) + if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); break; + } if (btrfs_test_opt(root, DISCARD)) ret = btrfs_discard_extent(root, start, @@ -5746,6 +5749,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, clear_extent_dirty(unpin, start, end, GFP_NOFS); unpin_extent_range(root, start, end, true); + mutex_unlock(&fs_info->unused_bg_unpin_mutex); cond_resched(); } @@ -9561,18 +9565,33 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) */ start = block_group->key.objectid; end = start + block_group->key.offset - 1; + /* + * Hold the unused_bg_unpin_mutex lock to avoid racing with + * btrfs_finish_extent_commit(). If we are at transaction N, + * another task might be running finish_extent_commit() for the + * previous transaction N - 1, and have seen a range belonging + * to the block group in freed_extents[] before we were able to + * clear the whole block group range from freed_extents[]. This + * means that task can lookup for the block group after we + * unpinned it from freed_extents[] and removed it, leading to + * a BUG_ON() at btrfs_unpin_extent_range(). 
+ */ + mutex_lock(&fs_info->unused_bg_unpin_mutex); ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, EXTENT_DIRTY, GFP_NOFS); if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); btrfs_set_block_group_rw(root, block_group); goto end_trans; } ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, EXTENT_DIRTY, GFP_NOFS); if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); btrfs_set_block_group_rw(root, block_group); goto end_trans; } + mutex_unlock(&fs_info->unused_bg_unpin_mutex); /* Reset pinned so btrfs_put_block_group doesn't complain */ block_group->pinned = 0; -- cgit v1.2.3 From 001a648df40ce6e7906773587f5fff48f61d0d73 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 23 Jan 2015 18:27:00 +0000 Subject: Btrfs: add missing cleanup on sysfs init failure If we failed during initialization of sysfs, we weren't unregistering the top level btrfs sysfs entry nor the debugfs stuff. Not unregistering the top level sysfs entry makes future attempts to reload the btrfs module impossible and the following is reported in dmesg: [ 2246.451296] WARNING: CPU: 3 PID: 10999 at fs/sysfs/dir.c:486 sysfs_warn_dup+0x91/0xb0() [ 2246.451298] sysfs: cannot create duplicate filename '/fs/btrfs' [ 2246.451298] Modules linked in: btrfs(+) raid6_pq xor bnep rfcomm bluetooth binfmt_misc nfsd auth_rpcgss oid_registry nfs_acl nfs lockd fscache sunrpc parport_pc parport psmouse serio_raw pcspkr evbug i2c_piix4 e1000 floppy [last unloaded: btrfs] [ 2246.451310] CPU: 3 PID: 10999 Comm: modprobe Tainted: G W 3.13.0-fdm-btrfs-next-24+ #7 [ 2246.451311] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 [ 2246.451312] 0000000000000009 ffff8800d353fa08 ffffffff816f1da6 0000000000000410 [ 2246.451314] ffff8800d353fa58 ffff8800d353fa48 ffffffff8104a32c ffff88020821a290 [ 2246.451316] ffff88020821a290 ffff88020821a290 ffff8802148f0000 ffff8800d353fb80 [ 2246.451318] Call Trace: [ 2246.451322] [] dump_stack+0x4e/0x68 [ 2246.451324] [] warn_slowpath_common+0x8c/0xc0 [ 2246.451325] [] warn_slowpath_fmt+0x46/0x50 [ 2246.451328] [] ? strlcat+0x65/0x90 (....) 
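Restoring the module to a loadable state means unwinding everything registered before the failure, in reverse order of setup. As a rough sketch of how btrfs_init_sysfs() reads once the error labels in the diff below are applied (the kset_create_and_add() call at the top is surrounding context not visible in this hunk, so treat that part as an assumption):

int btrfs_init_sysfs(void)
{
	int ret;

	btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);	/* assumed context */
	if (!btrfs_kset)
		return -ENOMEM;

	ret = btrfs_init_debugfs();
	if (ret)
		goto out1;

	init_feature_attrs();
	ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
	if (ret)
		goto out2;

	return 0;

out2:
	debugfs_remove_recursive(btrfs_debugfs_root_dentry);	/* undo btrfs_init_debugfs() */
out1:
	kset_unregister(btrfs_kset);				/* undo kset_create_and_add() */
	return ret;
}

With the stale /sys/fs/btrfs kset gone on failure, a later modprobe can register it again.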
This fixes the following change: btrfs: add simple debugfs interface commit 1bae30982bc86ab66d61ccb6e22792593b45d44d Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/sysfs.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 92db3f648df4..94edb0a2a026 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -733,10 +733,18 @@ int btrfs_init_sysfs(void) ret = btrfs_init_debugfs(); if (ret) - return ret; + goto out1; init_feature_attrs(); ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); + if (ret) + goto out2; + + return 0; +out2: + debugfs_remove_recursive(btrfs_debugfs_root_dentry); +out1: + kset_unregister(btrfs_kset); return ret; } -- cgit v1.2.3 From de554a4fa61d77df2704be5b6b47472b2dbd1875 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 27 Jan 2015 23:06:42 +0000 Subject: Btrfs: fix scrub race leading to use-after-free While running a scrub on a kernel with CONFIG_DEBUG_PAGEALLOC=y, I got the following trace: [68127.807663] BUG: unable to handle kernel paging request at ffff8803f8947a50 [68127.807663] IP: [] do_raw_spin_lock+0x94/0x122 [68127.807663] PGD 3003067 PUD 43e1f5067 PMD 43e030067 PTE 80000003f8947060 [68127.807663] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC [68127.807663] Modules linked in: dm_flakey dm_mod crc32c_generic btrfs xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop parport_pc processor parpo [68127.807663] CPU: 2 PID: 3081 Comm: kworker/u8:5 Not tainted 3.18.0-rc6-btrfs-next-3+ #4 [68127.807663] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 [68127.807663] Workqueue: btrfs-btrfs-scrub btrfs_scrub_helper [btrfs] [68127.807663] task: ffff880101fc5250 ti: ffff8803f097c000 task.ti: ffff8803f097c000 [68127.807663] RIP: 0010:[] [] do_raw_spin_lock+0x94/0x122 [68127.807663] RSP: 0018:ffff8803f097fbb8 EFLAGS: 00010093 [68127.807663] RAX: 0000000028dd386c RBX: ffff8803f8947a50 RCX: 0000000028dd3854 [68127.807663] RDX: 0000000000000018 RSI: 0000000000000002 RDI: 0000000000000001 [68127.807663] RBP: ffff8803f097fbd8 R08: 0000000000000004 R09: 0000000000000001 [68127.807663] R10: ffff880102620980 R11: ffff8801f3e8c900 R12: 000000000001d390 [68127.807663] R13: 00000000cabd13c8 R14: ffff8803f8947800 R15: ffff88037c574f00 [68127.807663] FS: 0000000000000000(0000) GS:ffff88043dd00000(0000) knlGS:0000000000000000 [68127.807663] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [68127.807663] CR2: ffff8803f8947a50 CR3: 00000000b6481000 CR4: 00000000000006e0 [68127.807663] Stack: [68127.807663] ffffffff823942a8 ffff8803f8947a50 ffff8802a3416f80 0000000000000000 [68127.807663] ffff8803f097fc18 ffffffff8141e7c0 ffffffff81072948 000000000034f314 [68127.807663] ffff8803f097fc08 0000000000000292 ffff8803f097fc48 ffff8803f8947a50 [68127.807663] Call Trace: [68127.807663] [] _raw_spin_lock_irqsave+0x4b/0x55 [68127.807663] [] ? __wake_up+0x22/0x4b [68127.807663] [] __wake_up+0x22/0x4b [68127.807663] [] scrub_pending_bio_dec+0x32/0x36 [btrfs] [68127.807663] [] scrub_bio_end_io_worker+0x5a3/0x5c9 [btrfs] [68127.807663] [] ? time_hardirqs_off+0x15/0x28 [68127.807663] [] ? trace_hardirqs_off_caller+0x4c/0xb9 [68127.807663] [] normal_work_helper+0xf1/0x238 [btrfs] [68127.807663] [] btrfs_scrub_helper+0x12/0x14 [btrfs] [68127.807663] [] process_one_work+0x1e4/0x3b6 [68127.807663] [] ? 
trace_hardirqs_off+0xd/0xf [68127.807663] [] worker_thread+0x1fb/0x2a8 [68127.807663] [] ? rescuer_thread+0x219/0x219 [68127.807663] [] kthread+0xdb/0xe3 [68127.807663] [] ? __kthread_parkme+0x67/0x67 [68127.807663] [] ret_from_fork+0x7c/0xb0 [68127.807663] [] ? __kthread_parkme+0x67/0x67 [68127.807663] Code: 39 c2 75 14 8d 8a 00 00 01 00 89 d0 f0 0f b1 0b 39 d0 0f 84 81 00 00 00 4c 69 2d 27 86 99 00 fa 00 00 00 45 31 e4 4d 39 ec 74 2b <8b> 13 89 d0 c1 e8 10 66 39 c2 75 [68127.807663] RIP [] do_raw_spin_lock+0x94/0x122 [68127.807663] RSP [68127.807663] CR2: ffff8803f8947a50 [68127.807663] ---[ end trace d7045aac00a66cd8 ]--- This is due to a race that can happen in a very tiny time window and is illustrated by the following sequence diagram: CPU 1 CPU 2 btrfs_scrub_dev() scrub_bio_end_io_worker() scrub_pending_bio_dec() atomic_dec(&sctx->bios_in_flight) wait sctx->bios_in_flight == 0 wait sctx->workers_pending == 0 mutex_lock(&fs_info->scrub_lock) (...) mutex_lock(&fs_info->scrub_lock) scrub_free_ctx(sctx) kfree(sctx) wake_up(&sctx->list_wait) __wake_up() spin_lock_irqsave(&sctx->list_wait->lock, flags) Another variation of this scenario that results in the same use-after-free issue is: CPU 1 CPU 2 btrfs_scrub_dev() wait sctx->bios_in_flight == 0 scrub_bio_end_io_worker() scrub_pending_bio_dec() __wake_up(&sctx->list_wait) spin_lock_irqsave(&sctx->list_wait->lock, flags) default_wake_function() wake up task at CPU 2 wait sctx->workers_pending == 0 mutex_lock(&fs_info->scrub_lock) (...) mutex_lock(&fs_info->scrub_lock) scrub_free_ctx(sctx) kfree(sctx) spin_unlock_irqrestore(&sctx->list_wait->lock, flags) Fix this by holding the scrub lock while doing the wakeup. This isn't a recent regression, the issue as been around since the scrub feature was added (2011, commit a2de733c78fa7af51ba9670482fa7d392aa67c57). Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 8af7372238fc..6d6e155c8c8b 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -306,8 +306,17 @@ static void scrub_pending_bio_inc(struct scrub_ctx *sctx) static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { + struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; + + /* + * Hold the scrub_lock while doing the wakeup to ensure the + * sctx (and its wait queue list_wait) isn't destroyed/freed + * during the wakeup. + */ + mutex_lock(&fs_info->scrub_lock); atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); + mutex_unlock(&fs_info->scrub_lock); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) @@ -379,10 +388,15 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) mutex_lock(&fs_info->scrub_lock); atomic_dec(&fs_info->scrubs_running); atomic_dec(&fs_info->scrubs_paused); - mutex_unlock(&fs_info->scrub_lock); atomic_dec(&sctx->workers_pending); wake_up(&fs_info->scrub_pause_wait); + /* + * Hold the scrub_lock while doing the wakeup to ensure the + * sctx (and its wait queue list_wait) isn't destroyed/freed + * during the wakeup. + */ wake_up(&sctx->list_wait); + mutex_unlock(&fs_info->scrub_lock); } static void scrub_free_csums(struct scrub_ctx *sctx) -- cgit v1.2.3 From 289454ad26a2d752e04b07234a175feda9ec0f4e Mon Sep 17 00:00:00 2001 From: Naohiro Aota Date: Tue, 6 Jan 2015 01:01:03 +0900 Subject: btrfs: clear bio reference after submit_one_bio() After submit_one_bio(), `bio' can go away. 
However, submit_extent_page() leaves `bio' referable if submit_one_bio() failed (e.g. -ENOMEM on OOM). This causes an invalid paging request the next time submit_extent_page() is called. I reproduced the ENOMEM case with the following script (requires CONFIG_FAIL_PAGE_ALLOC and CONFIG_FAULT_INJECTION_DEBUG_FS). #!/bin/bash dmesgout=dmesg.txt start=100000 end=300000 step=1000 # btrfs options device=/dev/vdb1 directory=/mnt/btrfs # fault-injection options percent=100 times=3 mkdir -p $directory || exit 1 mount -o compress $device $directory || exit 1 rm -f $directory/file || exit 1 dd if=/dev/zero of=$directory/file bs=1M count=512 || exit 1 for interval in `seq $start $step $end`; do dmesg -C echo 1 > /proc/sys/vm/drop_caches sync export FAILCMD_TYPE=fail_page_alloc ./failcmd.sh -p $percent -t $times -i $interval \ --ignore-gfp-highmem=N --ignore-gfp-wait=N --min-order=0 \ -- \ cat $directory/file > /dev/null dmesg > ${dmesgout} if grep -q BUG: ${dmesgout}; then cat ${dmesgout} exit 1 fi done umount $directory exit 0 Signed-off-by: Naohiro Aota Tested-by: Satoru Takeuchi Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index dab8af4450e1..a7f66009519a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2817,8 +2817,10 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, bio_add_page(bio, page, page_size, offset) < page_size) { ret = submit_one_bio(rw, bio, mirror_num, prev_bio_flags); - if (ret < 0) + if (ret < 0) { + *bio_ret = NULL; return ret; + } bio = NULL; } else { return 0; -- cgit v1.2.3 From 2f0810880f082fa8ba66ab2c33b02e4ff9770a5e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 9 Jan 2015 10:40:15 -0800 Subject: btrfs: delete chunk allocation attempt when setting block group ro The test below currently fails: mkfs.ext4 -F /dev/sda btrfs-convert /dev/sda mount /dev/sda /mnt btrfs device add -f /dev/sdb /mnt btrfs balance start -v -dconvert=raid1 -mconvert=raid1 /mnt The reason is that there are some block groups with usage 0, but the whole disk has no free space to allocate a new chunk, so we can't even set such a block group read-only. This patch deletes the chunk allocation attempt when setting a block group read-only. For META we already have a reserve, but for SYSTEM we don't, so the check_system_chunk call is still required.
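In outline, the patch reorders btrfs_set_block_group_ro() so that the read-only attempt comes first and a chunk is only force-allocated on retry. The sketch below is reconstructed from the two hunks in the diff that follows; the retry path in the middle is surrounding context that is not visible here and should be read as an assumption rather than the literal kernel code:

int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/* Try to flip the block group read-only without allocating anything. */
	ret = set_block_group_ro(cache, 0);
	if (!ret)
		goto out;

	/* Only if that failed, force a new chunk and retry (assumed context). */
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, alloc_flags, CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache, 0);
out:
	/* SYSTEM has no reservation, so make sure a system chunk is available. */
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = update_block_group_flags(root, cache->flags);
		check_system_chunk(trans, root, alloc_flags);
	}
	btrfs_end_transaction(trans, root);
	return ret;
}

Keeping the check_system_chunk() call for SYSTEM block groups matters because, unlike metadata, the system chunk has no reserve to fall back on.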
Signed-off-by: Shaohua Li Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 857a859948a3..50de1fa6fc9e 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8482,14 +8482,6 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, if (IS_ERR(trans)) return PTR_ERR(trans); - alloc_flags = update_block_group_flags(root, cache->flags); - if (alloc_flags != cache->flags) { - ret = do_chunk_alloc(trans, root, alloc_flags, - CHUNK_ALLOC_FORCE); - if (ret < 0) - goto out; - } - ret = set_block_group_ro(cache, 0); if (!ret) goto out; @@ -8500,6 +8492,11 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, goto out; ret = set_block_group_ro(cache, 0); out: + if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { + alloc_flags = update_block_group_flags(root, cache->flags); + check_system_chunk(trans, root, alloc_flags); + } + btrfs_end_transaction(trans, root); return ret; } -- cgit v1.2.3 From b76808fc26a6ffab6ff0f85689d3051df9293af5 Mon Sep 17 00:00:00 2001 From: Gui Hecheng Date: Wed, 31 Dec 2014 09:51:35 +0800 Subject: btrfs: cleanup init for list in free-space-cache o removed an unnecessary INIT_LIST_HEAD after LIST_HEAD o merged a declare & INIT_LIST_HEAD pair into one LIST_HEAD Signed-off-by: Gui Hecheng Reviewed-by: David Sterba Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 80a3141463e7..a71978578fa7 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -651,15 +651,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, struct io_ctl io_ctl; struct btrfs_key key; struct btrfs_free_space *e, *n; - struct list_head bitmaps; + LIST_HEAD(bitmaps); u64 num_entries; u64 num_bitmaps; u64 generation; u8 type; int ret = 0; - INIT_LIST_HEAD(&bitmaps); - /* Nothing in the space cache, goodbye */ if (!i_size_read(inode)) return 0; @@ -2905,7 +2903,6 @@ int btrfs_find_space_cluster(struct btrfs_root *root, trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, min_bytes); - INIT_LIST_HEAD(&bitmaps); ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, bytes + empty_size, cont1_bytes, min_bytes); -- cgit v1.2.3 From f54bcf2ecee982da47c2baf8bd87fd9ad9984651 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Thu, 11 Dec 2014 15:34:59 -0500 Subject: pnfs: Prepare for flexfiles by pulling out common code The flexfilelayout driver will share some common code with the filelayout driver. This set of changes refactors that common code out to avoid any module dependencies.
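For reference, a driver that wants to reuse the newly generic commit path only needs to point its pnfs_layoutdriver_type at the exported pnfs_generic_* helpers and hand pnfs_generic_commit_pagelist() its own initiate_commit callback. The sketch below uses a hypothetical "examplelayout" driver; the names and the trivial initiate_commit stub are assumptions for illustration, not part of this patch:

/*
 * Minimal sketch of how a file-based layout driver can plug into the new
 * generic commit helpers. A real driver (see the filelayout conversion in
 * the diff below) sends the COMMIT to the right data server here.
 */
static int examplelayout_initiate_commit(struct nfs_commit_data *data, int how)
{
	/* Fall back to resending the writes through the MDS. */
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

static int examplelayout_commit_pagelist(struct inode *inode,
					 struct list_head *mds_pages,
					 int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    examplelayout_initiate_commit);
}

static struct pnfs_layoutdriver_type examplelayout_type = {
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.commit_pagelist	= examplelayout_commit_pagelist,
	/* read/write paths, get_ds_info, etc. omitted */
};

The filelayout conversion in the diff below follows exactly this shape.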
Signed-off-by: Tom Haynes --- fs/nfs/Makefile | 2 +- fs/nfs/filelayout/filelayout.c | 291 ++------------------------------------ fs/nfs/filelayout/filelayout.h | 11 -- fs/nfs/filelayout/filelayoutdev.c | 2 +- fs/nfs/pnfs.h | 23 +++ fs/nfs/pnfs_nfs.c | 291 ++++++++++++++++++++++++++++++++++++++ 6 files changed, 330 insertions(+), 290 deletions(-) create mode 100644 fs/nfs/pnfs_nfs.c (limited to 'fs') diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile index 04cb830fa09f..23abffa8a4ce 100644 --- a/fs/nfs/Makefile +++ b/fs/nfs/Makefile @@ -27,7 +27,7 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o dns_resolve.o nfs4trace.o nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o -nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o +nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o pnfs_nfs.o nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/ diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 7afb52f6a25a..bc36ed350a68 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -118,13 +118,6 @@ static void filelayout_reset_read(struct nfs_pgio_header *hdr) } } -static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo) -{ - if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) - return; - pnfs_return_layout(inode); -} - static int filelayout_async_handle_error(struct rpc_task *task, struct nfs4_state *state, struct nfs_client *clp, @@ -339,16 +332,6 @@ static void filelayout_read_count_stats(struct rpc_task *task, void *data) rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); } -static void filelayout_read_release(void *data) -{ - struct nfs_pgio_header *hdr = data; - struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout; - - filelayout_fenceme(lo->plh_inode, lo); - nfs_put_client(hdr->ds_clp); - hdr->mds_ops->rpc_release(data); -} - static int filelayout_write_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) { @@ -371,17 +354,6 @@ static int filelayout_write_done_cb(struct rpc_task *task, return 0; } -/* Fake up some data that will cause nfs_commit_release to retry the writes. 
*/ -static void prepare_to_resend_writes(struct nfs_commit_data *data) -{ - struct nfs_page *first = nfs_list_entry(data->pages.next); - - data->task.tk_status = 0; - memcpy(&data->verf.verifier, &first->wb_verf, - sizeof(data->verf.verifier)); - data->verf.verifier.data[0]++; /* ensure verifier mismatch */ -} - static int filelayout_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data) { @@ -393,7 +365,7 @@ static int filelayout_commit_done_cb(struct rpc_task *task, switch (err) { case -NFS4ERR_RESET_TO_MDS: - prepare_to_resend_writes(data); + pnfs_generic_prepare_to_resend_writes(data); return -EAGAIN; case -EAGAIN: rpc_restart_call_prepare(task); @@ -451,16 +423,6 @@ static void filelayout_write_count_stats(struct rpc_task *task, void *data) rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics); } -static void filelayout_write_release(void *data) -{ - struct nfs_pgio_header *hdr = data; - struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout; - - filelayout_fenceme(lo->plh_inode, lo); - nfs_put_client(hdr->ds_clp); - hdr->mds_ops->rpc_release(data); -} - static void filelayout_commit_prepare(struct rpc_task *task, void *data) { struct nfs_commit_data *wdata = data; @@ -471,14 +433,6 @@ static void filelayout_commit_prepare(struct rpc_task *task, void *data) task); } -static void filelayout_write_commit_done(struct rpc_task *task, void *data) -{ - struct nfs_commit_data *wdata = data; - - /* Note this may cause RPC to be resent */ - wdata->mds_ops->rpc_call_done(task, data); -} - static void filelayout_commit_count_stats(struct rpc_task *task, void *data) { struct nfs_commit_data *cdata = data; @@ -486,35 +440,25 @@ static void filelayout_commit_count_stats(struct rpc_task *task, void *data) rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics); } -static void filelayout_commit_release(void *calldata) -{ - struct nfs_commit_data *data = calldata; - - data->completion_ops->completion(data); - pnfs_put_lseg(data->lseg); - nfs_put_client(data->ds_clp); - nfs_commitdata_release(data); -} - static const struct rpc_call_ops filelayout_read_call_ops = { .rpc_call_prepare = filelayout_read_prepare, .rpc_call_done = filelayout_read_call_done, .rpc_count_stats = filelayout_read_count_stats, - .rpc_release = filelayout_read_release, + .rpc_release = pnfs_generic_rw_release, }; static const struct rpc_call_ops filelayout_write_call_ops = { .rpc_call_prepare = filelayout_write_prepare, .rpc_call_done = filelayout_write_call_done, .rpc_count_stats = filelayout_write_count_stats, - .rpc_release = filelayout_write_release, + .rpc_release = pnfs_generic_rw_release, }; static const struct rpc_call_ops filelayout_commit_call_ops = { .rpc_call_prepare = filelayout_commit_prepare, - .rpc_call_done = filelayout_write_commit_done, + .rpc_call_done = pnfs_generic_write_commit_done, .rpc_count_stats = filelayout_commit_count_stats, - .rpc_release = filelayout_commit_release, + .rpc_release = pnfs_generic_commit_release, }; static enum pnfs_try_status @@ -1004,33 +948,6 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) return j; } -/* The generic layer is about to remove the req from the commit list. - * If this will make the bucket empty, it will need to put the lseg reference. 
- * Note this is must be called holding the inode (/cinfo) lock - */ -static void -filelayout_clear_request_commit(struct nfs_page *req, - struct nfs_commit_info *cinfo) -{ - struct pnfs_layout_segment *freeme = NULL; - - if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags)) - goto out; - cinfo->ds->nwritten--; - if (list_is_singular(&req->wb_list)) { - struct pnfs_commit_bucket *bucket; - - bucket = list_first_entry(&req->wb_list, - struct pnfs_commit_bucket, - written); - freeme = bucket->wlseg; - bucket->wlseg = NULL; - } -out: - nfs_request_remove_commit_list(req, cinfo); - pnfs_put_lseg_locked(freeme); -} - static void filelayout_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, @@ -1064,7 +981,7 @@ filelayout_mark_request_commit(struct nfs_page *req, * is normally transferred to the COMMIT call and released * there. It could also be released if the last req is pulled * off due to a rewrite, in which case it will be done in - * filelayout_clear_request_commit + * pnfs_generic_clear_request_commit */ buckets[i].wlseg = pnfs_get_lseg(lseg); } @@ -1142,97 +1059,11 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how) &filelayout_commit_call_ops, how, RPC_TASK_SOFTCONN); out_err: - prepare_to_resend_writes(data); - filelayout_commit_release(data); + pnfs_generic_prepare_to_resend_writes(data); + pnfs_generic_commit_release(data); return -EAGAIN; } -static int -transfer_commit_list(struct list_head *src, struct list_head *dst, - struct nfs_commit_info *cinfo, int max) -{ - struct nfs_page *req, *tmp; - int ret = 0; - - list_for_each_entry_safe(req, tmp, src, wb_list) { - if (!nfs_lock_request(req)) - continue; - kref_get(&req->wb_kref); - if (cond_resched_lock(cinfo->lock)) - list_safe_reset_next(req, tmp, wb_list); - nfs_request_remove_commit_list(req, cinfo); - clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); - nfs_list_add_request(req, dst); - ret++; - if ((ret == max) && !cinfo->dreq) - break; - } - return ret; -} - -/* Note called with cinfo->lock held. */ -static int -filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, - struct nfs_commit_info *cinfo, - int max) -{ - struct list_head *src = &bucket->written; - struct list_head *dst = &bucket->committing; - int ret; - - ret = transfer_commit_list(src, dst, cinfo, max); - if (ret) { - cinfo->ds->nwritten -= ret; - cinfo->ds->ncommitting += ret; - bucket->clseg = bucket->wlseg; - if (list_empty(src)) - bucket->wlseg = NULL; - else - pnfs_get_lseg(bucket->clseg); - } - return ret; -} - -/* Move reqs from written to committing lists, returning count of number moved. - * Note called with cinfo->lock held. 
- */ -static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo, - int max) -{ - int i, rv = 0, cnt; - - for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) { - cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i], - cinfo, max); - max -= cnt; - rv += cnt; - } - return rv; -} - -/* Pull everything off the committing lists and dump into @dst */ -static void filelayout_recover_commit_reqs(struct list_head *dst, - struct nfs_commit_info *cinfo) -{ - struct pnfs_commit_bucket *b; - struct pnfs_layout_segment *freeme; - int i; - -restart: - spin_lock(cinfo->lock); - for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { - if (transfer_commit_list(&b->written, dst, cinfo, 0)) { - freeme = b->wlseg; - b->wlseg = NULL; - spin_unlock(cinfo->lock); - pnfs_put_lseg(freeme); - goto restart; - } - } - cinfo->ds->nwritten = 0; - spin_unlock(cinfo->lock); -} - /* filelayout_search_commit_reqs - Search lists in @cinfo for the head reqest * for @page * @cinfo - commit info for current inode @@ -1263,108 +1094,14 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page) return NULL; } -static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx) -{ - struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; - struct pnfs_commit_bucket *bucket; - struct pnfs_layout_segment *freeme; - int i; - - for (i = idx; i < fl_cinfo->nbuckets; i++) { - bucket = &fl_cinfo->buckets[i]; - if (list_empty(&bucket->committing)) - continue; - nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo); - spin_lock(cinfo->lock); - freeme = bucket->clseg; - bucket->clseg = NULL; - spin_unlock(cinfo->lock); - pnfs_put_lseg(freeme); - } -} - -static unsigned int -alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list) -{ - struct pnfs_ds_commit_info *fl_cinfo; - struct pnfs_commit_bucket *bucket; - struct nfs_commit_data *data; - int i; - unsigned int nreq = 0; - - fl_cinfo = cinfo->ds; - bucket = fl_cinfo->buckets; - for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) { - if (list_empty(&bucket->committing)) - continue; - data = nfs_commitdata_alloc(); - if (!data) - break; - data->ds_commit_index = i; - spin_lock(cinfo->lock); - data->lseg = bucket->clseg; - bucket->clseg = NULL; - spin_unlock(cinfo->lock); - list_add(&data->pages, list); - nreq++; - } - - /* Clean up on error */ - filelayout_retry_commit(cinfo, i); - /* Caller will clean up entries put on list */ - return nreq; -} - -/* This follows nfs_commit_list pretty closely */ static int filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, int how, struct nfs_commit_info *cinfo) { - struct nfs_commit_data *data, *tmp; - LIST_HEAD(list); - unsigned int nreq = 0; - - if (!list_empty(mds_pages)) { - data = nfs_commitdata_alloc(); - if (data != NULL) { - data->lseg = NULL; - list_add(&data->pages, &list); - nreq++; - } else { - nfs_retry_commit(mds_pages, NULL, cinfo); - filelayout_retry_commit(cinfo, 0); - cinfo->completion_ops->error_cleanup(NFS_I(inode)); - return -ENOMEM; - } - } - - nreq += alloc_ds_commits(cinfo, &list); - - if (nreq == 0) { - cinfo->completion_ops->error_cleanup(NFS_I(inode)); - goto out; - } - - atomic_add(nreq, &cinfo->mds->rpcs_out); - - list_for_each_entry_safe(data, tmp, &list, pages) { - list_del_init(&data->pages); - if (!data->lseg) { - nfs_init_commit(data, mds_pages, NULL, cinfo); - nfs_initiate_commit(NFS_CLIENT(inode), data, - data->mds_ops, how, 0); - } else { - struct pnfs_commit_bucket *buckets; - - buckets = cinfo->ds->buckets; 
- nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo); - filelayout_initiate_commit(data, how); - } - } -out: - cinfo->ds->ncommitting = 0; - return PNFS_ATTEMPTED; + return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo, + filelayout_initiate_commit); } + static struct nfs4_deviceid_node * filelayout_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, gfp_t gfp_flags) @@ -1421,9 +1158,9 @@ static struct pnfs_layoutdriver_type filelayout_type = { .pg_write_ops = &filelayout_pg_write_ops, .get_ds_info = &filelayout_get_ds_info, .mark_request_commit = filelayout_mark_request_commit, - .clear_request_commit = filelayout_clear_request_commit, - .scan_commit_lists = filelayout_scan_commit_lists, - .recover_commit_reqs = filelayout_recover_commit_reqs, + .clear_request_commit = pnfs_generic_clear_request_commit, + .scan_commit_lists = pnfs_generic_scan_commit_lists, + .recover_commit_reqs = pnfs_generic_recover_commit_reqs, .search_commit_reqs = filelayout_search_commit_reqs, .commit_pagelist = filelayout_commit_pagelist, .read_pagelist = filelayout_read_pagelist, diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h index 7c9f800c49d7..a5ce9b4bf2f8 100644 --- a/fs/nfs/filelayout/filelayout.h +++ b/fs/nfs/filelayout/filelayout.h @@ -119,17 +119,6 @@ FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg) return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node; } -static inline void -filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node) -{ - u32 *p = (u32 *)&node->deviceid; - - printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n", - p[0], p[1], p[2], p[3]); - - set_bit(NFS_DEVICEID_INVALID, &node->flags); -} - static inline bool filelayout_test_devid_invalid(struct nfs4_deviceid_node *node) { diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index bfecac781f19..d21080aed9b2 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -708,7 +708,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) if (ds == NULL) { printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", __func__, ds_idx); - filelayout_mark_devid_invalid(devid); + pnfs_generic_mark_devid_invalid(devid); goto out; } smp_rmb(); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 9ae5b765b073..f17663446acc 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -275,6 +275,23 @@ void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node); bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node); void nfs4_deviceid_purge_client(const struct nfs_client *); +/* pnfs_nfs.c */ +void pnfs_generic_clear_request_commit(struct nfs_page *req, + struct nfs_commit_info *cinfo); +void pnfs_generic_commit_release(void *calldata); +void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data); +void pnfs_generic_rw_release(void *data); +void pnfs_generic_recover_commit_reqs(struct list_head *dst, + struct nfs_commit_info *cinfo); +int pnfs_generic_commit_pagelist(struct inode *inode, + struct list_head *mds_pages, + int how, + struct nfs_commit_info *cinfo, + int (*initiate_commit)(struct nfs_commit_data *data, + int how)); +int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max); +void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); + static inline struct nfs4_deviceid_node * nfs4_get_deviceid(struct nfs4_deviceid_node *d) { @@ -317,6 +334,12 @@ pnfs_get_ds_info(struct inode *inode) return 
ld->get_ds_info(inode); } +static inline void +pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node) +{ + set_bit(NFS_DEVICEID_INVALID, &node->flags); +} + static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo) diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c new file mode 100644 index 000000000000..e5f841cb6227 --- /dev/null +++ b/fs/nfs/pnfs_nfs.c @@ -0,0 +1,291 @@ +/* + * Common NFS I/O operations for the pnfs file based + * layout drivers. + * + * Copyright (c) 2014, Primary Data, Inc. All rights reserved. + * + * Tom Haynes + */ + +#include +#include + +#include "internal.h" +#include "pnfs.h" + +static void pnfs_generic_fenceme(struct inode *inode, + struct pnfs_layout_hdr *lo) +{ + if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) + return; + pnfs_return_layout(inode); +} + +void pnfs_generic_rw_release(void *data) +{ + struct nfs_pgio_header *hdr = data; + struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout; + + pnfs_generic_fenceme(lo->plh_inode, lo); + nfs_put_client(hdr->ds_clp); + hdr->mds_ops->rpc_release(data); +} +EXPORT_SYMBOL_GPL(pnfs_generic_rw_release); + +/* Fake up some data that will cause nfs_commit_release to retry the writes. */ +void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data) +{ + struct nfs_page *first = nfs_list_entry(data->pages.next); + + data->task.tk_status = 0; + memcpy(&data->verf.verifier, &first->wb_verf, + sizeof(data->verf.verifier)); + data->verf.verifier.data[0]++; /* ensure verifier mismatch */ +} +EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes); + +void pnfs_generic_write_commit_done(struct rpc_task *task, void *data) +{ + struct nfs_commit_data *wdata = data; + + /* Note this may cause RPC to be resent */ + wdata->mds_ops->rpc_call_done(task, data); +} +EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done); + +void pnfs_generic_commit_release(void *calldata) +{ + struct nfs_commit_data *data = calldata; + + data->completion_ops->completion(data); + pnfs_put_lseg(data->lseg); + nfs_put_client(data->ds_clp); + nfs_commitdata_release(data); +} +EXPORT_SYMBOL_GPL(pnfs_generic_commit_release); + +/* The generic layer is about to remove the req from the commit list. + * If this will make the bucket empty, it will need to put the lseg reference. 
+ * Note this is must be called holding the inode (/cinfo) lock + */ +void +pnfs_generic_clear_request_commit(struct nfs_page *req, + struct nfs_commit_info *cinfo) +{ + struct pnfs_layout_segment *freeme = NULL; + + if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags)) + goto out; + cinfo->ds->nwritten--; + if (list_is_singular(&req->wb_list)) { + struct pnfs_commit_bucket *bucket; + + bucket = list_first_entry(&req->wb_list, + struct pnfs_commit_bucket, + written); + freeme = bucket->wlseg; + bucket->wlseg = NULL; + } +out: + nfs_request_remove_commit_list(req, cinfo); + pnfs_put_lseg_locked(freeme); +} +EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit); + +static int +pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst, + struct nfs_commit_info *cinfo, int max) +{ + struct nfs_page *req, *tmp; + int ret = 0; + + list_for_each_entry_safe(req, tmp, src, wb_list) { + if (!nfs_lock_request(req)) + continue; + kref_get(&req->wb_kref); + if (cond_resched_lock(cinfo->lock)) + list_safe_reset_next(req, tmp, wb_list); + nfs_request_remove_commit_list(req, cinfo); + clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); + nfs_list_add_request(req, dst); + ret++; + if ((ret == max) && !cinfo->dreq) + break; + } + return ret; +} + +/* Note called with cinfo->lock held. */ +static int +pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, + struct nfs_commit_info *cinfo, + int max) +{ + struct list_head *src = &bucket->written; + struct list_head *dst = &bucket->committing; + int ret; + + ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max); + if (ret) { + cinfo->ds->nwritten -= ret; + cinfo->ds->ncommitting += ret; + bucket->clseg = bucket->wlseg; + if (list_empty(src)) + bucket->wlseg = NULL; + else + pnfs_get_lseg(bucket->clseg); + } + return ret; +} + +/* Move reqs from written to committing lists, returning count of number moved. + * Note called with cinfo->lock held. 
+ */ +int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, + int max) +{ + int i, rv = 0, cnt; + + for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) { + cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i], + cinfo, max); + max -= cnt; + rv += cnt; + } + return rv; +} +EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists); + +/* Pull everything off the committing lists and dump into @dst */ +void pnfs_generic_recover_commit_reqs(struct list_head *dst, + struct nfs_commit_info *cinfo) +{ + struct pnfs_commit_bucket *b; + struct pnfs_layout_segment *freeme; + int i; + +restart: + spin_lock(cinfo->lock); + for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { + if (pnfs_generic_transfer_commit_list(&b->written, dst, + cinfo, 0)) { + freeme = b->wlseg; + b->wlseg = NULL; + spin_unlock(cinfo->lock); + pnfs_put_lseg(freeme); + goto restart; + } + } + cinfo->ds->nwritten = 0; + spin_unlock(cinfo->lock); +} +EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs); + +static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx) +{ + struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; + struct pnfs_commit_bucket *bucket; + struct pnfs_layout_segment *freeme; + int i; + + for (i = idx; i < fl_cinfo->nbuckets; i++) { + bucket = &fl_cinfo->buckets[i]; + if (list_empty(&bucket->committing)) + continue; + nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo); + spin_lock(cinfo->lock); + freeme = bucket->clseg; + bucket->clseg = NULL; + spin_unlock(cinfo->lock); + pnfs_put_lseg(freeme); + } +} + +static unsigned int +pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo, + struct list_head *list) +{ + struct pnfs_ds_commit_info *fl_cinfo; + struct pnfs_commit_bucket *bucket; + struct nfs_commit_data *data; + int i; + unsigned int nreq = 0; + + fl_cinfo = cinfo->ds; + bucket = fl_cinfo->buckets; + for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) { + if (list_empty(&bucket->committing)) + continue; + data = nfs_commitdata_alloc(); + if (!data) + break; + data->ds_commit_index = i; + spin_lock(cinfo->lock); + data->lseg = bucket->clseg; + bucket->clseg = NULL; + spin_unlock(cinfo->lock); + list_add(&data->pages, list); + nreq++; + } + + /* Clean up on error */ + pnfs_generic_retry_commit(cinfo, i); + return nreq; +} + +/* This follows nfs_commit_list pretty closely */ +int +pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, + int how, struct nfs_commit_info *cinfo, + int (*initiate_commit)(struct nfs_commit_data *data, + int how)) +{ + struct nfs_commit_data *data, *tmp; + LIST_HEAD(list); + unsigned int nreq = 0; + + if (!list_empty(mds_pages)) { + data = nfs_commitdata_alloc(); + if (data != NULL) { + data->lseg = NULL; + list_add(&data->pages, &list); + nreq++; + } else { + nfs_retry_commit(mds_pages, NULL, cinfo); + pnfs_generic_retry_commit(cinfo, 0); + cinfo->completion_ops->error_cleanup(NFS_I(inode)); + return -ENOMEM; + } + } + + nreq += pnfs_generic_alloc_ds_commits(cinfo, &list); + + if (nreq == 0) { + cinfo->completion_ops->error_cleanup(NFS_I(inode)); + goto out; + } + + atomic_add(nreq, &cinfo->mds->rpcs_out); + + list_for_each_entry_safe(data, tmp, &list, pages) { + list_del_init(&data->pages); + if (!data->lseg) { + nfs_init_commit(data, mds_pages, NULL, cinfo); + nfs_initiate_commit(NFS_CLIENT(inode), data, + data->mds_ops, how, 0); + } else { + struct pnfs_commit_bucket *buckets; + + buckets = cinfo->ds->buckets; + nfs_init_commit(data, + &buckets[data->ds_commit_index].committing, + data->lseg, + 
cinfo); + initiate_commit(data, how); + } + } +out: + cinfo->ds->ncommitting = 0; + return PNFS_ATTEMPTED; +} +EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist); -- cgit v1.2.3 From 085d1e33a6a8495d9afa58ad2b8b7ea74d613515 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Thu, 11 Dec 2014 13:04:55 -0500 Subject: pnfs: Do not grab the commit_info lock twice when rescheduling writes Acked-by: Jeff Layton Signed-off-by: Tom Haynes --- fs/nfs/direct.c | 19 +++++++++++++++---- fs/nfs/pnfs.h | 15 --------------- fs/nfs/pnfs_nfs.c | 15 ++++++++------- 3 files changed, 23 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 10bf07280f4a..e84f764b9dcd 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -573,6 +573,20 @@ out: return result; } +static void +nfs_direct_write_scan_commit_list(struct inode *inode, + struct list_head *list, + struct nfs_commit_info *cinfo) +{ + spin_lock(cinfo->lock); +#ifdef CONFIG_NFS_V4_1 + if (cinfo->ds != NULL && cinfo->ds->nwritten != 0) + NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo); +#endif + nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0); + spin_unlock(cinfo->lock); +} + static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) { struct nfs_pageio_descriptor desc; @@ -582,10 +596,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) LIST_HEAD(failed); nfs_init_cinfo_from_dreq(&cinfo, dreq); - pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo); - spin_lock(cinfo.lock); - nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0); - spin_unlock(cinfo.lock); + nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); dreq->count = 0; get_dreq(dreq); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index f17663446acc..e94f6050e9b1 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -375,15 +375,6 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo, return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max); } -static inline void -pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list, - struct nfs_commit_info *cinfo) -{ - if (cinfo->ds == NULL || cinfo->ds->nwritten == 0) - return; - NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo); -} - static inline struct nfs_page * pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, struct page *page) @@ -554,12 +545,6 @@ pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo, return 0; } -static inline void -pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list, - struct nfs_commit_info *cinfo) -{ -} - static inline struct nfs_page * pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo, struct page *page) diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index e5f841cb6227..fd2a2f0e8cbb 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release); /* The generic layer is about to remove the req from the commit list. * If this will make the bucket empty, it will need to put the lseg reference. - * Note this is must be called holding the inode (/cinfo) lock + * Note this must be called holding the inode (/cinfo) lock */ void pnfs_generic_clear_request_commit(struct nfs_page *req, @@ -115,7 +115,6 @@ pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst, return ret; } -/* Note called with cinfo->lock held. 
*/ static int pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, struct nfs_commit_info *cinfo, @@ -125,6 +124,7 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, struct list_head *dst = &bucket->committing; int ret; + lockdep_assert_held(cinfo->lock); ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max); if (ret) { cinfo->ds->nwritten -= ret; @@ -138,14 +138,15 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket, return ret; } -/* Move reqs from written to committing lists, returning count of number moved. - * Note called with cinfo->lock held. +/* Move reqs from written to committing lists, returning count + * of number moved. */ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max) { int i, rv = 0, cnt; + lockdep_assert_held(cinfo->lock); for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) { cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i], cinfo, max); @@ -156,7 +157,7 @@ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, } EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists); -/* Pull everything off the committing lists and dump into @dst */ +/* Pull everything off the committing lists and dump into @dst. */ void pnfs_generic_recover_commit_reqs(struct list_head *dst, struct nfs_commit_info *cinfo) { @@ -164,8 +165,8 @@ void pnfs_generic_recover_commit_reqs(struct list_head *dst, struct pnfs_layout_segment *freeme; int i; + lockdep_assert_held(cinfo->lock); restart: - spin_lock(cinfo->lock); for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { if (pnfs_generic_transfer_commit_list(&b->written, dst, cinfo, 0)) { @@ -173,11 +174,11 @@ restart: b->wlseg = NULL; spin_unlock(cinfo->lock); pnfs_put_lseg(freeme); + spin_lock(cinfo->lock); goto restart; } } cinfo->ds->nwritten = 0; - spin_unlock(cinfo->lock); } EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs); -- cgit v1.2.3 From 875ae0694be48f3e3bdddd435b79abf52b680299 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Thu, 29 May 2014 21:06:57 +0800 Subject: nfs41: pull data server cache from file layout to generic pnfs Also pull nfs4_pnfs_ds_addr and nfs4_pnfs_ds to generic pnfs. They can all be reused by flexfile layout as well. 
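With the data server cache made generic, any file-based layout driver can share nfs4_pnfs_ds entries keyed by their address lists. A minimal sketch of the expected usage follows; example_decode_ds_addr() is a hypothetical stand-in for the driver's own XDR decoding, which this patch does not move:

/*
 * Hedged sketch of how a layout driver is expected to use the now-shared
 * data server cache.
 */
static struct nfs4_pnfs_ds *
example_get_data_server(struct xdr_stream *xdr, gfp_t gfp_flags)
{
	LIST_HEAD(dsaddrs);
	struct nfs4_pnfs_ds_addr *da;
	struct nfs4_pnfs_ds *ds;

	da = example_decode_ds_addr(xdr, gfp_flags);	/* hypothetical helper */
	if (!da)
		return NULL;
	list_add_tail(&da->da_node, &dsaddrs);

	/*
	 * Either finds a cached nfs4_pnfs_ds with the same address list and
	 * takes an extra ds_count reference, or inserts a new entry that
	 * adopts the addresses spliced off dsaddrs.
	 */
	ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);

	/* On a cache hit the addresses stay on dsaddrs; the caller frees them. */
	while (!list_empty(&dsaddrs)) {
		da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr, da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}
	return ds;
}

On teardown the driver drops its reference with nfs4_pnfs_ds_put(), which unhashes and frees the entry on the last put, which is exactly what nfs4_fl_free_deviceid() is converted to do in the diff below.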
Reviewed-by: Jeff Layton Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayout.h | 19 --- fs/nfs/filelayout/filelayoutdev.c | 235 +----------------------------------- fs/nfs/pnfs.h | 21 ++++ fs/nfs/pnfs_nfs.c | 242 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 265 insertions(+), 252 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h index a5ce9b4bf2f8..f97eea627c4f 100644 --- a/fs/nfs/filelayout/filelayout.h +++ b/fs/nfs/filelayout/filelayout.h @@ -56,24 +56,6 @@ enum stripetype4 { STRIPE_DENSE = 2 }; -/* Individual ip address */ -struct nfs4_pnfs_ds_addr { - struct sockaddr_storage da_addr; - size_t da_addrlen; - struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */ - char *da_remotestr; /* human readable addr+port */ -}; - -struct nfs4_pnfs_ds { - struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ - char *ds_remotestr; /* comma sep list of addrs */ - struct list_head ds_addrs; - struct nfs_client *ds_clp; - atomic_t ds_count; - unsigned long ds_state; -#define NFS4DS_CONNECTING 0 /* ds is establishing connection */ -}; - struct nfs4_file_layout_dsaddr { struct nfs4_deviceid_node id_node; u32 stripe_count; @@ -131,7 +113,6 @@ filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node); extern struct nfs_fh * nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j); -extern void print_ds(struct nfs4_pnfs_ds *ds); u32 nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset); u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j); struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index d21080aed9b2..fbfbb701159d 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -42,114 +42,6 @@ static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO; static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS; -/* - * Data server cache - * - * Data servers can be mapped to different device ids. - * nfs4_pnfs_ds reference counting - * - set to 1 on allocation - * - incremented when a device id maps a data server already in the cache. - * - decremented when deviceid is removed from the cache. - */ -static DEFINE_SPINLOCK(nfs4_ds_cache_lock); -static LIST_HEAD(nfs4_data_server_cache); - -/* Debug routines */ -void -print_ds(struct nfs4_pnfs_ds *ds) -{ - if (ds == NULL) { - printk("%s NULL device\n", __func__); - return; - } - printk(" ds %s\n" - " ref count %d\n" - " client %p\n" - " cl_exchange_flags %x\n", - ds->ds_remotestr, - atomic_read(&ds->ds_count), ds->ds_clp, - ds->ds_clp ? 
ds->ds_clp->cl_exchange_flags : 0); -} - -static bool -same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) -{ - struct sockaddr_in *a, *b; - struct sockaddr_in6 *a6, *b6; - - if (addr1->sa_family != addr2->sa_family) - return false; - - switch (addr1->sa_family) { - case AF_INET: - a = (struct sockaddr_in *)addr1; - b = (struct sockaddr_in *)addr2; - - if (a->sin_addr.s_addr == b->sin_addr.s_addr && - a->sin_port == b->sin_port) - return true; - break; - - case AF_INET6: - a6 = (struct sockaddr_in6 *)addr1; - b6 = (struct sockaddr_in6 *)addr2; - - /* LINKLOCAL addresses must have matching scope_id */ - if (ipv6_addr_src_scope(&a6->sin6_addr) == - IPV6_ADDR_SCOPE_LINKLOCAL && - a6->sin6_scope_id != b6->sin6_scope_id) - return false; - - if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) && - a6->sin6_port == b6->sin6_port) - return true; - break; - - default: - dprintk("%s: unhandled address family: %u\n", - __func__, addr1->sa_family); - return false; - } - - return false; -} - -static bool -_same_data_server_addrs_locked(const struct list_head *dsaddrs1, - const struct list_head *dsaddrs2) -{ - struct nfs4_pnfs_ds_addr *da1, *da2; - - /* step through both lists, comparing as we go */ - for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), - da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); - da1 != NULL && da2 != NULL; - da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), - da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { - if (!same_sockaddr((struct sockaddr *)&da1->da_addr, - (struct sockaddr *)&da2->da_addr)) - return false; - } - if (da1 == NULL && da2 == NULL) - return true; - - return false; -} - -/* - * Lookup DS by addresses. nfs4_ds_cache_lock is held - */ -static struct nfs4_pnfs_ds * -_data_server_lookup_locked(const struct list_head *dsaddrs) -{ - struct nfs4_pnfs_ds *ds; - - list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) - if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) - return ds; - return NULL; -} - /* * Create an rpc connection to the nfs4_pnfs_ds data server * Currently only supports IPv4 and IPv6 addresses @@ -195,30 +87,6 @@ out_put: goto out; } -static void -destroy_ds(struct nfs4_pnfs_ds *ds) -{ - struct nfs4_pnfs_ds_addr *da; - - dprintk("--> %s\n", __func__); - ifdebug(FACILITY) - print_ds(ds); - - nfs_put_client(ds->ds_clp); - - while (!list_empty(&ds->ds_addrs)) { - da = list_first_entry(&ds->ds_addrs, - struct nfs4_pnfs_ds_addr, - da_node); - list_del_init(&da->da_node); - kfree(da->da_remotestr); - kfree(da); - } - - kfree(ds->ds_remotestr); - kfree(ds); -} - void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) { @@ -229,112 +97,13 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) for (i = 0; i < dsaddr->ds_num; i++) { ds = dsaddr->ds_list[i]; - if (ds != NULL) { - if (atomic_dec_and_lock(&ds->ds_count, - &nfs4_ds_cache_lock)) { - list_del_init(&ds->ds_node); - spin_unlock(&nfs4_ds_cache_lock); - destroy_ds(ds); - } - } + if (ds != NULL) + nfs4_pnfs_ds_put(ds); } kfree(dsaddr->stripe_indices); kfree(dsaddr); } -/* - * Create a string with a human readable address and port to avoid - * complicated setup around many dprinks. 
- */ -static char * -nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags) -{ - struct nfs4_pnfs_ds_addr *da; - char *remotestr; - size_t len; - char *p; - - len = 3; /* '{', '}' and eol */ - list_for_each_entry(da, dsaddrs, da_node) { - len += strlen(da->da_remotestr) + 1; /* string plus comma */ - } - - remotestr = kzalloc(len, gfp_flags); - if (!remotestr) - return NULL; - - p = remotestr; - *(p++) = '{'; - len--; - list_for_each_entry(da, dsaddrs, da_node) { - size_t ll = strlen(da->da_remotestr); - - if (ll > len) - goto out_err; - - memcpy(p, da->da_remotestr, ll); - p += ll; - len -= ll; - - if (len < 1) - goto out_err; - (*p++) = ','; - len--; - } - if (len < 2) - goto out_err; - *(p++) = '}'; - *p = '\0'; - return remotestr; -out_err: - kfree(remotestr); - return NULL; -} - -static struct nfs4_pnfs_ds * -nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) -{ - struct nfs4_pnfs_ds *tmp_ds, *ds = NULL; - char *remotestr; - - if (list_empty(dsaddrs)) { - dprintk("%s: no addresses defined\n", __func__); - goto out; - } - - ds = kzalloc(sizeof(*ds), gfp_flags); - if (!ds) - goto out; - - /* this is only used for debugging, so it's ok if its NULL */ - remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags); - - spin_lock(&nfs4_ds_cache_lock); - tmp_ds = _data_server_lookup_locked(dsaddrs); - if (tmp_ds == NULL) { - INIT_LIST_HEAD(&ds->ds_addrs); - list_splice_init(dsaddrs, &ds->ds_addrs); - ds->ds_remotestr = remotestr; - atomic_set(&ds->ds_count, 1); - INIT_LIST_HEAD(&ds->ds_node); - ds->ds_clp = NULL; - list_add(&ds->ds_node, &nfs4_data_server_cache); - dprintk("%s add new data server %s\n", __func__, - ds->ds_remotestr); - } else { - kfree(remotestr); - kfree(ds); - atomic_inc(&tmp_ds->ds_count); - dprintk("%s data server %s found, inc'ed ds_count to %d\n", - __func__, tmp_ds->ds_remotestr, - atomic_read(&tmp_ds->ds_count)); - ds = tmp_ds; - } - spin_unlock(&nfs4_ds_cache_lock); -out: - return ds; -} - /* * Currently only supports ipv4, ipv6 and one multi-path address. 
*/ diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index e94f6050e9b1..b0168f1dd072 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -40,6 +40,24 @@ enum { NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */ }; +/* Individual ip address */ +struct nfs4_pnfs_ds_addr { + struct sockaddr_storage da_addr; + size_t da_addrlen; + struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */ + char *da_remotestr; /* human readable addr+port */ +}; + +struct nfs4_pnfs_ds { + struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ + char *ds_remotestr; /* comma sep list of addrs */ + struct list_head ds_addrs; + struct nfs_client *ds_clp; + atomic_t ds_count; + unsigned long ds_state; +#define NFS4DS_CONNECTING 0 /* ds is establishing connection */ +}; + struct pnfs_layout_segment { struct list_head pls_list; struct list_head pls_lc_list; @@ -291,6 +309,9 @@ int pnfs_generic_commit_pagelist(struct inode *inode, int how)); int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max); void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); +void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); +struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, + gfp_t gfp_flags); static inline struct nfs4_deviceid_node * nfs4_get_deviceid(struct nfs4_deviceid_node *d) diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index fd2a2f0e8cbb..3bb2b74cf600 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -13,6 +13,8 @@ #include "internal.h" #include "pnfs.h" +#define NFSDBG_FACILITY NFSDBG_PNFS + static void pnfs_generic_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo) { @@ -290,3 +292,243 @@ out: return PNFS_ATTEMPTED; } EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist); + +/* + * Data server cache + * + * Data servers can be mapped to different device ids. + * nfs4_pnfs_ds reference counting + * - set to 1 on allocation + * - incremented when a device id maps a data server already in the cache. + * - decremented when deviceid is removed from the cache. + */ +static DEFINE_SPINLOCK(nfs4_ds_cache_lock); +static LIST_HEAD(nfs4_data_server_cache); + +/* Debug routines */ +static void +print_ds(struct nfs4_pnfs_ds *ds) +{ + if (ds == NULL) { + printk(KERN_WARNING "%s NULL device\n", __func__); + return; + } + printk(KERN_WARNING " ds %s\n" + " ref count %d\n" + " client %p\n" + " cl_exchange_flags %x\n", + ds->ds_remotestr, + atomic_read(&ds->ds_count), ds->ds_clp, + ds->ds_clp ? 
ds->ds_clp->cl_exchange_flags : 0); +} + +static bool +same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) +{ + struct sockaddr_in *a, *b; + struct sockaddr_in6 *a6, *b6; + + if (addr1->sa_family != addr2->sa_family) + return false; + + switch (addr1->sa_family) { + case AF_INET: + a = (struct sockaddr_in *)addr1; + b = (struct sockaddr_in *)addr2; + + if (a->sin_addr.s_addr == b->sin_addr.s_addr && + a->sin_port == b->sin_port) + return true; + break; + + case AF_INET6: + a6 = (struct sockaddr_in6 *)addr1; + b6 = (struct sockaddr_in6 *)addr2; + + /* LINKLOCAL addresses must have matching scope_id */ + if (ipv6_addr_src_scope(&a6->sin6_addr) == + IPV6_ADDR_SCOPE_LINKLOCAL && + a6->sin6_scope_id != b6->sin6_scope_id) + return false; + + if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) && + a6->sin6_port == b6->sin6_port) + return true; + break; + + default: + dprintk("%s: unhandled address family: %u\n", + __func__, addr1->sa_family); + return false; + } + + return false; +} + +static bool +_same_data_server_addrs_locked(const struct list_head *dsaddrs1, + const struct list_head *dsaddrs2) +{ + struct nfs4_pnfs_ds_addr *da1, *da2; + + /* step through both lists, comparing as we go */ + for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), + da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); + da1 != NULL && da2 != NULL; + da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), + da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { + if (!same_sockaddr((struct sockaddr *)&da1->da_addr, + (struct sockaddr *)&da2->da_addr)) + return false; + } + if (da1 == NULL && da2 == NULL) + return true; + + return false; +} + +/* + * Lookup DS by addresses. nfs4_ds_cache_lock is held + */ +static struct nfs4_pnfs_ds * +_data_server_lookup_locked(const struct list_head *dsaddrs) +{ + struct nfs4_pnfs_ds *ds; + + list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) + if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) + return ds; + return NULL; +} + +static void destroy_ds(struct nfs4_pnfs_ds *ds) +{ + struct nfs4_pnfs_ds_addr *da; + + dprintk("--> %s\n", __func__); + ifdebug(FACILITY) + print_ds(ds); + + nfs_put_client(ds->ds_clp); + + while (!list_empty(&ds->ds_addrs)) { + da = list_first_entry(&ds->ds_addrs, + struct nfs4_pnfs_ds_addr, + da_node); + list_del_init(&da->da_node); + kfree(da->da_remotestr); + kfree(da); + } + + kfree(ds->ds_remotestr); + kfree(ds); +} + +void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds) +{ + if (atomic_dec_and_lock(&ds->ds_count, + &nfs4_ds_cache_lock)) { + list_del_init(&ds->ds_node); + spin_unlock(&nfs4_ds_cache_lock); + destroy_ds(ds); + } +} +EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put); + +/* + * Create a string with a human readable address and port to avoid + * complicated setup around many dprinks. 
+ */ +static char * +nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags) +{ + struct nfs4_pnfs_ds_addr *da; + char *remotestr; + size_t len; + char *p; + + len = 3; /* '{', '}' and eol */ + list_for_each_entry(da, dsaddrs, da_node) { + len += strlen(da->da_remotestr) + 1; /* string plus comma */ + } + + remotestr = kzalloc(len, gfp_flags); + if (!remotestr) + return NULL; + + p = remotestr; + *(p++) = '{'; + len--; + list_for_each_entry(da, dsaddrs, da_node) { + size_t ll = strlen(da->da_remotestr); + + if (ll > len) + goto out_err; + + memcpy(p, da->da_remotestr, ll); + p += ll; + len -= ll; + + if (len < 1) + goto out_err; + (*p++) = ','; + len--; + } + if (len < 2) + goto out_err; + *(p++) = '}'; + *p = '\0'; + return remotestr; +out_err: + kfree(remotestr); + return NULL; +} + +/* + * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if + * uncached and return cached struct nfs4_pnfs_ds. + */ +struct nfs4_pnfs_ds * +nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) +{ + struct nfs4_pnfs_ds *tmp_ds, *ds = NULL; + char *remotestr; + + if (list_empty(dsaddrs)) { + dprintk("%s: no addresses defined\n", __func__); + goto out; + } + + ds = kzalloc(sizeof(*ds), gfp_flags); + if (!ds) + goto out; + + /* this is only used for debugging, so it's ok if its NULL */ + remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags); + + spin_lock(&nfs4_ds_cache_lock); + tmp_ds = _data_server_lookup_locked(dsaddrs); + if (tmp_ds == NULL) { + INIT_LIST_HEAD(&ds->ds_addrs); + list_splice_init(dsaddrs, &ds->ds_addrs); + ds->ds_remotestr = remotestr; + atomic_set(&ds->ds_count, 1); + INIT_LIST_HEAD(&ds->ds_node); + ds->ds_clp = NULL; + list_add(&ds->ds_node, &nfs4_data_server_cache); + dprintk("%s add new data server %s\n", __func__, + ds->ds_remotestr); + } else { + kfree(remotestr); + kfree(ds); + atomic_inc(&tmp_ds->ds_count); + dprintk("%s data server %s found, inc'ed ds_count to %d\n", + __func__, tmp_ds->ds_remotestr, + atomic_read(&tmp_ds->ds_count)); + ds = tmp_ds; + } + spin_unlock(&nfs4_ds_cache_lock); +out: + return ds; +} +EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add); -- cgit v1.2.3 From 6b7f3cf96364eaf597940cb5c68a682894829915 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Thu, 29 May 2014 21:06:59 +0800 Subject: nfs41: pull decode_ds_addr from file layout to generic pnfs It can be reused by flexfile layout. Reviewed-by: Jeff Layton Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayoutdev.c | 152 +------------------------------------- fs/nfs/pnfs.h | 3 + fs/nfs/pnfs_nfs.c | 149 +++++++++++++++++++++++++++++++++++++ 3 files changed, 154 insertions(+), 150 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index fbfbb701159d..c7f6041a287f 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -31,7 +31,6 @@ #include #include #include -#include #include "../internal.h" #include "../nfs4session.h" @@ -104,153 +103,6 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) kfree(dsaddr); } -/* - * Currently only supports ipv4, ipv6 and one multi-path address. 
- */ -static struct nfs4_pnfs_ds_addr * -decode_ds_addr(struct net *net, struct xdr_stream *streamp, gfp_t gfp_flags) -{ - struct nfs4_pnfs_ds_addr *da = NULL; - char *buf, *portstr; - __be16 port; - int nlen, rlen; - int tmp[2]; - __be32 *p; - char *netid, *match_netid; - size_t len, match_netid_len; - char *startsep = ""; - char *endsep = ""; - - - /* r_netid */ - p = xdr_inline_decode(streamp, 4); - if (unlikely(!p)) - goto out_err; - nlen = be32_to_cpup(p++); - - p = xdr_inline_decode(streamp, nlen); - if (unlikely(!p)) - goto out_err; - - netid = kmalloc(nlen+1, gfp_flags); - if (unlikely(!netid)) - goto out_err; - - netid[nlen] = '\0'; - memcpy(netid, p, nlen); - - /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */ - p = xdr_inline_decode(streamp, 4); - if (unlikely(!p)) - goto out_free_netid; - rlen = be32_to_cpup(p); - - p = xdr_inline_decode(streamp, rlen); - if (unlikely(!p)) - goto out_free_netid; - - /* port is ".ABC.DEF", 8 chars max */ - if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) { - dprintk("%s: Invalid address, length %d\n", __func__, - rlen); - goto out_free_netid; - } - buf = kmalloc(rlen + 1, gfp_flags); - if (!buf) { - dprintk("%s: Not enough memory\n", __func__); - goto out_free_netid; - } - buf[rlen] = '\0'; - memcpy(buf, p, rlen); - - /* replace port '.' with '-' */ - portstr = strrchr(buf, '.'); - if (!portstr) { - dprintk("%s: Failed finding expected dot in port\n", - __func__); - goto out_free_buf; - } - *portstr = '-'; - - /* find '.' between address and port */ - portstr = strrchr(buf, '.'); - if (!portstr) { - dprintk("%s: Failed finding expected dot between address and " - "port\n", __func__); - goto out_free_buf; - } - *portstr = '\0'; - - da = kzalloc(sizeof(*da), gfp_flags); - if (unlikely(!da)) - goto out_free_buf; - - INIT_LIST_HEAD(&da->da_node); - - if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr, - sizeof(da->da_addr))) { - dprintk("%s: error parsing address %s\n", __func__, buf); - goto out_free_da; - } - - portstr++; - sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]); - port = htons((tmp[0] << 8) | (tmp[1])); - - switch (da->da_addr.ss_family) { - case AF_INET: - ((struct sockaddr_in *)&da->da_addr)->sin_port = port; - da->da_addrlen = sizeof(struct sockaddr_in); - match_netid = "tcp"; - match_netid_len = 3; - break; - - case AF_INET6: - ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port; - da->da_addrlen = sizeof(struct sockaddr_in6); - match_netid = "tcp6"; - match_netid_len = 4; - startsep = "["; - endsep = "]"; - break; - - default: - dprintk("%s: unsupported address family: %u\n", - __func__, da->da_addr.ss_family); - goto out_free_da; - } - - if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) { - dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n", - __func__, netid, match_netid); - goto out_free_da; - } - - /* save human readable address */ - len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7; - da->da_remotestr = kzalloc(len, gfp_flags); - - /* NULL is ok, only used for dprintk */ - if (da->da_remotestr) - snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep, - buf, endsep, ntohs(port)); - - dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr); - kfree(buf); - kfree(netid); - return da; - -out_free_da: - kfree(da); -out_free_buf: - dprintk("%s: Error parsing DS addr: %s\n", __func__, buf); - kfree(buf); -out_free_netid: - kfree(netid); -out_err: - return NULL; -} - /* Decode opaque device data and return the result */ struct nfs4_file_layout_dsaddr * 
nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, @@ -353,8 +205,8 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, mp_count = be32_to_cpup(p); /* multipath count */ for (j = 0; j < mp_count; j++) { - da = decode_ds_addr(server->nfs_client->cl_net, - &stream, gfp_flags); + da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, + &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index b0168f1dd072..403d7bb67c41 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -312,6 +312,9 @@ void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags); +struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, + struct xdr_stream *xdr, + gfp_t gfp_flags); static inline struct nfs4_deviceid_node * nfs4_get_deviceid(struct nfs4_deviceid_node *d) diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 3bb2b74cf600..81ec449138a8 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -9,6 +9,7 @@ #include #include +#include #include "internal.h" #include "pnfs.h" @@ -532,3 +533,151 @@ out: return ds; } EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add); + +/* + * Currently only supports ipv4, ipv6 and one multi-path address. + */ +struct nfs4_pnfs_ds_addr * +nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags) +{ + struct nfs4_pnfs_ds_addr *da = NULL; + char *buf, *portstr; + __be16 port; + int nlen, rlen; + int tmp[2]; + __be32 *p; + char *netid, *match_netid; + size_t len, match_netid_len; + char *startsep = ""; + char *endsep = ""; + + + /* r_netid */ + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + goto out_err; + nlen = be32_to_cpup(p++); + + p = xdr_inline_decode(xdr, nlen); + if (unlikely(!p)) + goto out_err; + + netid = kmalloc(nlen+1, gfp_flags); + if (unlikely(!netid)) + goto out_err; + + netid[nlen] = '\0'; + memcpy(netid, p, nlen); + + /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */ + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + goto out_free_netid; + rlen = be32_to_cpup(p); + + p = xdr_inline_decode(xdr, rlen); + if (unlikely(!p)) + goto out_free_netid; + + /* port is ".ABC.DEF", 8 chars max */ + if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) { + dprintk("%s: Invalid address, length %d\n", __func__, + rlen); + goto out_free_netid; + } + buf = kmalloc(rlen + 1, gfp_flags); + if (!buf) { + dprintk("%s: Not enough memory\n", __func__); + goto out_free_netid; + } + buf[rlen] = '\0'; + memcpy(buf, p, rlen); + + /* replace port '.' with '-' */ + portstr = strrchr(buf, '.'); + if (!portstr) { + dprintk("%s: Failed finding expected dot in port\n", + __func__); + goto out_free_buf; + } + *portstr = '-'; + + /* find '.' 
between address and port */ + portstr = strrchr(buf, '.'); + if (!portstr) { + dprintk("%s: Failed finding expected dot between address and " + "port\n", __func__); + goto out_free_buf; + } + *portstr = '\0'; + + da = kzalloc(sizeof(*da), gfp_flags); + if (unlikely(!da)) + goto out_free_buf; + + INIT_LIST_HEAD(&da->da_node); + + if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr, + sizeof(da->da_addr))) { + dprintk("%s: error parsing address %s\n", __func__, buf); + goto out_free_da; + } + + portstr++; + sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]); + port = htons((tmp[0] << 8) | (tmp[1])); + + switch (da->da_addr.ss_family) { + case AF_INET: + ((struct sockaddr_in *)&da->da_addr)->sin_port = port; + da->da_addrlen = sizeof(struct sockaddr_in); + match_netid = "tcp"; + match_netid_len = 3; + break; + + case AF_INET6: + ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port; + da->da_addrlen = sizeof(struct sockaddr_in6); + match_netid = "tcp6"; + match_netid_len = 4; + startsep = "["; + endsep = "]"; + break; + + default: + dprintk("%s: unsupported address family: %u\n", + __func__, da->da_addr.ss_family); + goto out_free_da; + } + + if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) { + dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n", + __func__, netid, match_netid); + goto out_free_da; + } + + /* save human readable address */ + len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7; + da->da_remotestr = kzalloc(len, gfp_flags); + + /* NULL is ok, only used for dprintk */ + if (da->da_remotestr) + snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep, + buf, endsep, ntohs(port)); + + dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr); + kfree(buf); + kfree(netid); + return da; + +out_free_da: + kfree(da); +out_free_buf: + dprintk("%s: Error parsing DS addr: %s\n", __func__, buf); + kfree(buf); +out_free_netid: + kfree(netid); +out_err: + return NULL; +} +EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr); -- cgit v1.2.3 From 7405f9e195aab95e147cc225f203d11fa74b65a8 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Thu, 29 May 2014 21:06:58 +0800 Subject: nfs41: pull nfs4_ds_connect from file layout to generic pnfs It can be reused by flexfiles layout client. 
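As a rough sketch of the resulting call pattern (not part of this patch): a layout driver now only keeps a thin wrapper around the generic connect helper. The wrapper name my_prepare_ds() and the MY_DS_TIMEO/MY_DS_RETRANS constants below are illustrative only, and later patches in this series grow the helper's argument list (auth flavor, then version/minorversion).

	/* Illustrative sketch; mirrors the converted file layout caller. */
	static struct nfs4_pnfs_ds *
	my_prepare_ds(struct pnfs_layout_segment *lseg, struct nfs4_pnfs_ds *ds,
		      struct nfs4_deviceid_node *devid)
	{
		struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);

		if (!ds->ds_clp)
			/* serializes racing callers via NFS4DS_CONNECTING */
			nfs4_pnfs_ds_connect(s, ds, devid, MY_DS_TIMEO,
					     MY_DS_RETRANS);
		/* on failure the helper marks @devid unavailable */
		return ds->ds_clp ? ds : NULL;
	}
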
Reviewed-by: Jeff Layton Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayoutdev.c | 78 +++---------------------------------- fs/nfs/pnfs.h | 3 ++ fs/nfs/pnfs_nfs.c | 81 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 73 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index c7f6041a287f..27bdd8ce177e 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -41,51 +41,6 @@ static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO; static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS; -/* - * Create an rpc connection to the nfs4_pnfs_ds data server - * Currently only supports IPv4 and IPv6 addresses - */ -static int -nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds) -{ - struct nfs_client *clp = ERR_PTR(-EIO); - struct nfs4_pnfs_ds_addr *da; - int status = 0; - - dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr, - mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor); - - list_for_each_entry(da, &ds->ds_addrs, da_node) { - dprintk("%s: DS %s: trying address %s\n", - __func__, ds->ds_remotestr, da->da_remotestr); - - clp = nfs4_set_ds_client(mds_srv->nfs_client, - (struct sockaddr *)&da->da_addr, - da->da_addrlen, IPPROTO_TCP, - dataserver_timeo, dataserver_retrans); - if (!IS_ERR(clp)) - break; - } - - if (IS_ERR(clp)) { - status = PTR_ERR(clp); - goto out; - } - - status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time); - if (status) - goto out_put; - - smp_wmb(); - ds->ds_clp = clp; - dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); -out: - return status; -out_put: - nfs_put_client(clp); - goto out; -} - void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr) { @@ -302,22 +257,7 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j) return flseg->fh_array[i]; } -static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds) -{ - might_sleep(); - wait_on_bit_action(&ds->ds_state, NFS4DS_CONNECTING, - nfs_wait_bit_killable, TASK_KILLABLE); -} - -static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds) -{ - smp_mb__before_atomic(); - clear_bit(NFS4DS_CONNECTING, &ds->ds_state); - smp_mb__after_atomic(); - wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING); -} - - +/* Upon return, either ds is connected, or ds is NULL */ struct nfs4_pnfs_ds * nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) { @@ -325,6 +265,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); struct nfs4_pnfs_ds *ret = ds; + struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); if (ds == NULL) { printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", @@ -336,18 +277,9 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) if (ds->ds_clp) goto out_test_devid; - if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { - struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); - int err; - - err = nfs4_ds_connect(s, ds); - if (err) - nfs4_mark_deviceid_unavailable(devid); - nfs4_clear_ds_conn_bit(ds); - } else { - /* Either ds is connected, or ds is NULL */ - nfs4_wait_ds_connect(ds); - } + nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, + dataserver_retrans); + out_test_devid: if (filelayout_test_devid_unavailable(devid)) ret = NULL; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 
403d7bb67c41..9a8937c31d97 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -312,6 +312,9 @@ void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags); +void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, + struct nfs4_deviceid_node *devid, unsigned int timeo, + unsigned int retrans); struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 81ec449138a8..5a92e76c6c53 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -11,6 +11,7 @@ #include #include +#include "nfs4session.h" #include "internal.h" #include "pnfs.h" @@ -534,6 +535,86 @@ out: } EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add); +static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds) +{ + might_sleep(); + wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING, + TASK_KILLABLE); +} + +static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds) +{ + smp_mb__before_atomic(); + clear_bit(NFS4DS_CONNECTING, &ds->ds_state); + smp_mb__after_atomic(); + wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING); +} + +static int _nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, + struct nfs4_pnfs_ds *ds, + unsigned int timeo, + unsigned int retrans) +{ + struct nfs_client *clp = ERR_PTR(-EIO); + struct nfs4_pnfs_ds_addr *da; + int status = 0; + + dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr, + mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor); + + list_for_each_entry(da, &ds->ds_addrs, da_node) { + dprintk("%s: DS %s: trying address %s\n", + __func__, ds->ds_remotestr, da->da_remotestr); + + clp = nfs4_set_ds_client(mds_srv->nfs_client, + (struct sockaddr *)&da->da_addr, + da->da_addrlen, IPPROTO_TCP, + timeo, retrans); + if (!IS_ERR(clp)) + break; + } + + if (IS_ERR(clp)) { + status = PTR_ERR(clp); + goto out; + } + + status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time); + if (status) + goto out_put; + + smp_wmb(); + ds->ds_clp = clp; + dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); +out: + return status; +out_put: + nfs_put_client(clp); + goto out; +} + +/* + * Create an rpc connection to the nfs4_pnfs_ds data server. + * Currently only supports IPv4 and IPv6 addresses. + * If connection fails, make devid unavailable. + */ +void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, + struct nfs4_deviceid_node *devid, unsigned int timeo, + unsigned int retrans) +{ + if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { + int err = 0; + + err = _nfs4_pnfs_ds_connect(mds_srv, ds, timeo, retrans); + if (err) + nfs4_mark_deviceid_unavailable(devid); + nfs4_clear_ds_conn_bit(ds); + } else { + nfs4_wait_ds_connect(ds); + } +} +EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect); + /* * Currently only supports ipv4, ipv6 and one multi-path address. */ -- cgit v1.2.3 From 064172f3459a914277aa309b2afd3bd5d1c3289a Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Thu, 29 May 2014 21:07:00 +0800 Subject: nfs41: allow LD to choose DS connection auth flavor flexfile layout may use different auth flavor as specified by MDS. 
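The practical effect for a layout driver is sketched below; the ds_flavor variable is hypothetical (e.g. a flavor carried in the layout or device XDR), while the file layout, converted in the hunks that follow, simply keeps forwarding the MDS rpc client's flavor.

	/* Hypothetical flexfiles-style caller: ds_flavor came from the MDS,
	 * not from the MDS rpc client. */
	nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
			     dataserver_retrans, ds_flavor);
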
Reviewed-by: Jeff Layton Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayoutdev.c | 3 ++- fs/nfs/internal.h | 3 ++- fs/nfs/nfs4client.c | 5 +++-- fs/nfs/pnfs.h | 2 +- fs/nfs/pnfs_nfs.c | 10 ++++++---- 5 files changed, 14 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index 27bdd8ce177e..5e4b0cea84c8 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -278,7 +278,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) goto out_test_devid; nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, - dataserver_retrans); + dataserver_retrans, + s->nfs_client->cl_rpcclient->cl_auth->au_flavor); out_test_devid: if (filelayout_test_devid_unavailable(devid)) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index efaa31c70fbe..7d7c36ff09fa 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -189,7 +189,8 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, const struct sockaddr *ds_addr, int ds_addrlen, int ds_proto, unsigned int ds_timeo, - unsigned int ds_retrans); + unsigned int ds_retrans, + rpc_authflavor_t au_flavor); extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *, struct inode *); #ifdef CONFIG_PROC_FS diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 953daa44a282..62d93a116790 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -849,7 +849,8 @@ error: */ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, const struct sockaddr *ds_addr, int ds_addrlen, - int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans) + int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans, + rpc_authflavor_t au_flavor) { struct nfs_client_initdata cl_init = { .addr = ds_addr, @@ -874,7 +875,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, */ nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans); clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr, - mds_clp->cl_rpcclient->cl_auth->au_flavor); + au_flavor); dprintk("<-- %s %p\n", __func__, clp); return clp; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 9a8937c31d97..2ea9e9a7d85e 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -314,7 +314,7 @@ struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags); void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, - unsigned int retrans); + unsigned int retrans, rpc_authflavor_t au_flavor); struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 5a92e76c6c53..106ee08ef52f 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -553,7 +553,8 @@ static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds) static int _nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, unsigned int timeo, - unsigned int retrans) + unsigned int retrans, + rpc_authflavor_t au_flavor) { struct nfs_client *clp = ERR_PTR(-EIO); struct nfs4_pnfs_ds_addr *da; @@ -569,7 +570,7 @@ static int _nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, clp = nfs4_set_ds_client(mds_srv->nfs_client, (struct sockaddr *)&da->da_addr, da->da_addrlen, IPPROTO_TCP, - timeo, retrans); + timeo, retrans, au_flavor); if (!IS_ERR(clp)) break; } @@ -600,12 +601,13 @@ out_put: */ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, 
struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, - unsigned int retrans) + unsigned int retrans, rpc_authflavor_t au_flavor) { if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { int err = 0; - err = _nfs4_pnfs_ds_connect(mds_srv, ds, timeo, retrans); + err = _nfs4_pnfs_ds_connect(mds_srv, ds, timeo, + retrans, au_flavor); if (err) nfs4_mark_deviceid_unavailable(devid); nfs4_clear_ds_conn_bit(ds); -- cgit v1.2.3 From 39280a5ae8443dcc1ab3bb5ebc205aab0855b849 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 30 May 2014 18:15:55 +0800 Subject: nfs41: move file layout macros to generic pnfs They can be reused by flexfile layout as well. Also add a code such that if read fails on one DS and there are other DSes available to use, don't resend through MDS but through pNFS so that client can read from other DSes. Reviewed-by: Jeff Layton Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayout.h | 10 ---------- fs/nfs/pnfs.h | 11 +++++++++++ 2 files changed, 11 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h index f97eea627c4f..2896cb833a11 100644 --- a/fs/nfs/filelayout/filelayout.h +++ b/fs/nfs/filelayout/filelayout.h @@ -32,13 +32,6 @@ #include "../pnfs.h" -/* - * Default data server connection timeout and retrans vaules. - * Set by module paramters dataserver_timeo and dataserver_retrans. - */ -#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */ -#define NFS4_DEF_DS_RETRANS 5 - /* * Field testing shows we need to support up to 4096 stripe indices. * We store each index as a u8 (u32 on the wire) to keep the memory footprint @@ -48,9 +41,6 @@ #define NFS4_PNFS_MAX_STRIPE_CNT 4096 #define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */ -/* error codes for internal use */ -#define NFS4ERR_RESET_TO_MDS 12001 - enum stripetype4 { STRIPE_SPARSE = 1, STRIPE_DENSE = 2 diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 2ea9e9a7d85e..aef89b347bdc 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -77,6 +77,17 @@ enum pnfs_try_status { #define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4" +/* + * Default data server connection timeout and retrans vaules. + * Set by module parameters dataserver_timeo and dataserver_retrans. + */ +#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */ +#define NFS4_DEF_DS_RETRANS 5 + +/* error codes for internal use */ +#define NFS4ERR_RESET_TO_MDS 12001 +#define NFS4ERR_RESET_TO_PNFS 12002 + enum { NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ -- cgit v1.2.3 From 1a04c6e1a26a43305fe124a0978a3e4be861af89 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 30 May 2014 18:15:57 +0800 Subject: nfsv3: introduce nfs3_set_ds_client The flexfiles layout wants to create DS connection over NFSv3. Add nfs3_set_ds_client to allow that to happen. 
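To make the intended use concrete, here is a sketch of a v3 data-server connect loop built on the new helper; it is essentially what a later patch in this series adds as _nfs4_pnfs_v3_ds_connect(), with the variable names taken from that code.

	/* Sketch: try each multipath address of the DS until one connects. */
	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		clp = nfs3_set_ds_client(mds_srv->nfs_client,
					 (struct sockaddr *)&da->da_addr,
					 da->da_addrlen, IPPROTO_TCP,
					 timeo, retrans, au_flavor);
		if (!IS_ERR(clp))
			break;
	}
	if (IS_ERR(clp))
		return PTR_ERR(clp);
	smp_wmb();		/* order stores before publishing ds->ds_clp */
	ds->ds_clp = clp;
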
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/internal.h | 4 ++++ fs/nfs/nfs3_fs.h | 2 ++ fs/nfs/nfs3client.c | 34 ++++++++++++++++++++++++++++++++++ fs/nfs/nfs3super.c | 2 +- include/linux/nfs_fs_sb.h | 9 +++++---- 5 files changed, 46 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 7d7c36ff09fa..7332ba1f693b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -193,6 +193,10 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, rpc_authflavor_t au_flavor); extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *, struct inode *); +extern struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp, + const struct sockaddr *ds_addr, int ds_addrlen, + int ds_proto, unsigned int ds_timeo, + unsigned int ds_retrans, rpc_authflavor_t au_flavor); #ifdef CONFIG_PROC_FS extern int __init nfs_fs_proc_init(void); extern void nfs_fs_proc_exit(void); diff --git a/fs/nfs/nfs3_fs.h b/fs/nfs/nfs3_fs.h index 333ae4068506..e134d6548ab7 100644 --- a/fs/nfs/nfs3_fs.h +++ b/fs/nfs/nfs3_fs.h @@ -30,5 +30,7 @@ struct nfs_server *nfs3_create_server(struct nfs_mount_info *, struct nfs_subver struct nfs_server *nfs3_clone_server(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t); +/* nfs3super.c */ +extern struct nfs_subversion nfs_v3; #endif /* __LINUX_FS_NFS_NFS3_FS_H */ diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c index 8c1b437c5403..52e2344bf9a1 100644 --- a/fs/nfs/nfs3client.c +++ b/fs/nfs/nfs3client.c @@ -64,3 +64,37 @@ struct nfs_server *nfs3_clone_server(struct nfs_server *source, nfs_init_server_aclclient(server); return server; } + +/* + * Set up a pNFS Data Server client over NFSv3. + * + * Return any existing nfs_client that matches server address,port,version + * and minorversion. + * + * For a new nfs_client, use a soft mount (default), a low retrans and a + * low timeout interval so that if a connection is lost, we retry through + * the MDS. + */ +struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp, + const struct sockaddr *ds_addr, int ds_addrlen, + int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans, + rpc_authflavor_t au_flavor) +{ + struct nfs_client_initdata cl_init = { + .addr = ds_addr, + .addrlen = ds_addrlen, + .nfs_mod = &nfs_v3, + .proto = ds_proto, + .net = mds_clp->cl_net, + }; + struct rpc_timeout ds_timeout; + struct nfs_client *clp; + + /* Use the MDS nfs_client cl_ipaddr. */ + nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans); + clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr, + au_flavor); + + return clp; +} +EXPORT_SYMBOL_GPL(nfs3_set_ds_client); diff --git a/fs/nfs/nfs3super.c b/fs/nfs/nfs3super.c index 6af29c2da352..5c4394e4656b 100644 --- a/fs/nfs/nfs3super.c +++ b/fs/nfs/nfs3super.c @@ -7,7 +7,7 @@ #include "nfs3_fs.h" #include "nfs.h" -static struct nfs_subversion nfs_v3 = { +struct nfs_subversion nfs_v3 = { .owner = THIS_MODULE, .nfs_fs = &nfs_fs_type, .rpc_vers = &nfs_version3, diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index ddea982355f3..5e1273d4de14 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -77,10 +77,6 @@ struct nfs_client { /* Client owner identifier */ const char * cl_owner_id; - /* Our own IP address, as a null-terminated string. - * This is used to generate the mv0 callback address. 
- */ - char cl_ipaddr[48]; u32 cl_cb_ident; /* v4.0 callback identifier */ const struct nfs4_minor_version_ops *cl_mvops; unsigned long cl_mig_gen; @@ -108,6 +104,11 @@ struct nfs_client { #define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */ #endif /* CONFIG_NFS_V4 */ + /* Our own IP address, as a null-terminated string. + * This is used to generate the mv0 callback address. + */ + char cl_ipaddr[48]; + #ifdef CONFIG_NFS_FSCACHE struct fscache_cookie *fscache; /* client index cache cookie */ #endif -- cgit v1.2.3 From 30626f9c32f0ad5e2c4173f10fb4b1358bbba6ec Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 30 May 2014 18:15:58 +0800 Subject: nfs41: allow LD to choose DS connection version/minor_version flexfile layout may need to set such when making DS connections. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayoutdev.c | 3 ++- fs/nfs/internal.h | 1 + fs/nfs/nfs4client.c | 4 ++-- fs/nfs/pnfs.h | 3 ++- fs/nfs/pnfs_nfs.c | 11 +++++++---- 5 files changed, 14 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index 5e4b0cea84c8..4f372e224603 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -278,7 +278,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) goto out_test_devid; nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, - dataserver_retrans, + dataserver_retrans, 4, + s->nfs_client->cl_minorversion, s->nfs_client->cl_rpcclient->cl_auth->au_flavor); out_test_devid: diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 7332ba1f693b..5543850268d2 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -190,6 +190,7 @@ extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, int ds_addrlen, int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans, + u32 minor_version, rpc_authflavor_t au_flavor); extern struct rpc_clnt *nfs4_find_or_create_ds_client(struct nfs_client *, struct inode *); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 62d93a116790..102d96777d42 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -850,14 +850,14 @@ error: struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, const struct sockaddr *ds_addr, int ds_addrlen, int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans, - rpc_authflavor_t au_flavor) + u32 minor_version, rpc_authflavor_t au_flavor) { struct nfs_client_initdata cl_init = { .addr = ds_addr, .addrlen = ds_addrlen, .nfs_mod = &nfs_v4, .proto = ds_proto, - .minorversion = mds_clp->cl_minorversion, + .minorversion = minor_version, .net = mds_clp->cl_net, }; struct rpc_timeout ds_timeout; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index aef89b347bdc..70ffec135696 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -325,7 +325,8 @@ struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags); void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, - unsigned int retrans, rpc_authflavor_t au_flavor); + unsigned int retrans, u32 versoin, u32 minor_version, + rpc_authflavor_t au_flavor); struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags); diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 106ee08ef52f..ad211a4e1874 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -554,6 +554,7 @@ static int _nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, 
unsigned int timeo, unsigned int retrans, + u32 minor_version, rpc_authflavor_t au_flavor) { struct nfs_client *clp = ERR_PTR(-EIO); @@ -570,7 +571,8 @@ static int _nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, clp = nfs4_set_ds_client(mds_srv->nfs_client, (struct sockaddr *)&da->da_addr, da->da_addrlen, IPPROTO_TCP, - timeo, retrans, au_flavor); + timeo, retrans, minor_version, + au_flavor); if (!IS_ERR(clp)) break; } @@ -601,13 +603,14 @@ out_put: */ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, - unsigned int retrans, rpc_authflavor_t au_flavor) + unsigned int retrans, u32 version, + u32 minor_version, rpc_authflavor_t au_flavor) { if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { int err = 0; - err = _nfs4_pnfs_ds_connect(mds_srv, ds, timeo, - retrans, au_flavor); + err = _nfs4_pnfs_ds_connect(mds_srv, ds, timeo, retrans, + minor_version, au_flavor); if (err) nfs4_mark_deviceid_unavailable(devid); nfs4_clear_ds_conn_bit(ds); -- cgit v1.2.3 From 5f01d9539496577b9ee62e213f4122a2a209550c Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 30 May 2014 18:15:59 +0800 Subject: nfs41: create NFSv3 DS connection if specified Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4super.c | 3 ++ fs/nfs/pnfs.h | 7 ++++- fs/nfs/pnfs_nfs.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 93 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 6f340f02f2ba..48cea3c30e5d 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -346,6 +346,9 @@ out: static void __exit exit_nfs_v4(void) { + /* Not called in the _init(), conditionally loaded */ + nfs4_pnfs_v3_ds_connect_unload(); + unregister_nfs_version(&nfs_v4); nfs4_unregister_sysctl(); nfs_idmap_quit(); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 70ffec135696..c39882191651 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -323,9 +323,10 @@ void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags); +void nfs4_pnfs_v3_ds_connect_unload(void); void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_deviceid_node *devid, unsigned int timeo, - unsigned int retrans, u32 versoin, u32 minor_version, + unsigned int retrans, u32 version, u32 minor_version, rpc_authflavor_t au_flavor); struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, @@ -615,6 +616,10 @@ static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) return NULL; } +static inline void nfs4_pnfs_v3_ds_connect_unload(void) +{ +} + #endif /* CONFIG_NFS_V4_1 */ #endif /* FS_NFS_PNFS_H */ diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index ad211a4e1874..23c851d4c9a9 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "nfs4session.h" #include "internal.h" @@ -550,7 +551,75 @@ static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds) wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING); } -static int _nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, +static struct nfs_client *(*get_v3_ds_connect)( + struct nfs_client *mds_clp, + const struct sockaddr *ds_addr, + int ds_addrlen, + int ds_proto, + unsigned int ds_timeo, + unsigned int ds_retrans, + rpc_authflavor_t au_flavor); + +static bool load_v3_ds_connect(void) +{ 
+ if (!get_v3_ds_connect) { + get_v3_ds_connect = symbol_request(nfs3_set_ds_client); + WARN_ON_ONCE(!get_v3_ds_connect); + } + + return(get_v3_ds_connect != NULL); +} + +void __exit nfs4_pnfs_v3_ds_connect_unload(void) +{ + if (get_v3_ds_connect) { + symbol_put(nfs3_set_ds_client); + get_v3_ds_connect = NULL; + } +} +EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload); + +static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv, + struct nfs4_pnfs_ds *ds, + unsigned int timeo, + unsigned int retrans, + rpc_authflavor_t au_flavor) +{ + struct nfs_client *clp = ERR_PTR(-EIO); + struct nfs4_pnfs_ds_addr *da; + int status = 0; + + dprintk("--> %s DS %s au_flavor %d\n", __func__, + ds->ds_remotestr, au_flavor); + + if (!load_v3_ds_connect()) + goto out; + + list_for_each_entry(da, &ds->ds_addrs, da_node) { + dprintk("%s: DS %s: trying address %s\n", + __func__, ds->ds_remotestr, da->da_remotestr); + + clp = get_v3_ds_connect(mds_srv->nfs_client, + (struct sockaddr *)&da->da_addr, + da->da_addrlen, IPPROTO_TCP, + timeo, retrans, au_flavor); + if (!IS_ERR(clp)) + break; + } + + if (IS_ERR(clp)) { + status = PTR_ERR(clp); + goto out; + } + + smp_wmb(); + ds->ds_clp = clp; + dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr); +out: + return status; +} + +static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, unsigned int timeo, unsigned int retrans, @@ -562,7 +631,7 @@ static int _nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, int status = 0; dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr, - mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor); + au_flavor); list_for_each_entry(da, &ds->ds_addrs, da_node) { dprintk("%s: DS %s: trying address %s\n", @@ -609,8 +678,19 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { int err = 0; - err = _nfs4_pnfs_ds_connect(mds_srv, ds, timeo, retrans, - minor_version, au_flavor); + if (version == 3) { + err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, + retrans, au_flavor); + } else if (version == 4) { + err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo, + retrans, minor_version, + au_flavor); + } else { + dprintk("%s: unsupported DS version %d\n", __func__, + version); + err = -EPROTONOSUPPORT; + } + if (err) nfs4_mark_deviceid_unavailable(devid); nfs4_clear_ds_conn_bit(ds); -- cgit v1.2.3 From abde71f4d3c027a30f8d725e1e22001313b4481a Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Mon, 9 Jun 2014 13:12:20 -0700 Subject: pnfs: Add nfs_rpc_ops in calls to nfs_initiate_pgio Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayout.c | 4 ++-- fs/nfs/internal.h | 1 + fs/nfs/pagelist.c | 6 ++++-- fs/nfs/read.c | 3 ++- fs/nfs/write.c | 6 +++--- include/linux/nfs_page.h | 1 + 6 files changed, 13 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index bc36ed350a68..25c4896887ca 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -501,7 +501,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr) hdr->mds_offset = offset; /* Perform an asynchronous read to ds */ - nfs_initiate_pgio(ds_clnt, hdr, + nfs_initiate_pgio(ds_clnt, hdr, NFS_PROTO(hdr->inode), &filelayout_read_call_ops, 0, RPC_TASK_SOFTCONN); return PNFS_ATTEMPTED; } @@ -542,7 +542,7 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync) hdr->args.offset = filelayout_get_dserver_offset(lseg, offset); /* Perform an asynchronous 
write */ - nfs_initiate_pgio(ds_clnt, hdr, + nfs_initiate_pgio(ds_clnt, hdr, NFS_PROTO(hdr->inode), &filelayout_write_call_ops, sync, RPC_TASK_SOFTCONN); return PNFS_ATTEMPTED; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 5543850268d2..1d15ffa94937 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -251,6 +251,7 @@ void nfs_pgio_header_free(struct nfs_pgio_header *); void nfs_pgio_data_destroy(struct nfs_pgio_header *); int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_header *, + const struct nfs_rpc_ops *, const struct rpc_call_ops *, int, int); void nfs_free_request(struct nfs_page *req); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 2b5e769beb16..35a2626a6922 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -597,6 +597,7 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata) } int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr, + const struct nfs_rpc_ops *rpc_ops, const struct rpc_call_ops *call_ops, int how, int flags) { struct rpc_task *task; @@ -616,7 +617,7 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr, }; int ret = 0; - hdr->rw_ops->rw_initiate(hdr, &msg, &task_setup_data, how); + hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how); dprintk("NFS: %5u initiated pgio call " "(req %s/%llu, %u bytes @ offset %llu)\n", @@ -792,7 +793,8 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) ret = nfs_generic_pgio(desc, hdr); if (ret == 0) ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode), - hdr, desc->pg_rpc_callops, + hdr, NFS_PROTO(hdr->inode), + desc->pg_rpc_callops, desc->pg_ioflags, 0); return ret; } diff --git a/fs/nfs/read.c b/fs/nfs/read.c index c91a4799c562..092ab499f2b6 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -168,13 +168,14 @@ out: static void nfs_initiate_read(struct nfs_pgio_header *hdr, struct rpc_message *msg, + const struct nfs_rpc_ops *rpc_ops, struct rpc_task_setup *task_setup_data, int how) { struct inode *inode = hdr->inode; int swap_flags = IS_SWAPFILE(inode) ? 
NFS_RPC_SWAPFLAGS : 0; task_setup_data->flags |= swap_flags; - NFS_PROTO(inode)->read_setup(hdr, msg); + rpc_ops->read_setup(hdr, msg); } static void diff --git a/fs/nfs/write.c b/fs/nfs/write.c index af3af685a9e3..54d4857e0e2b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1240,15 +1240,15 @@ static int flush_task_priority(int how) static void nfs_initiate_write(struct nfs_pgio_header *hdr, struct rpc_message *msg, + const struct nfs_rpc_ops *rpc_ops, struct rpc_task_setup *task_setup_data, int how) { - struct inode *inode = hdr->inode; int priority = flush_task_priority(how); task_setup_data->priority = priority; - NFS_PROTO(inode)->write_setup(hdr, msg); + rpc_ops->write_setup(hdr, msg); - nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client, + nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client, &task_setup_data->rpc_client, msg, hdr); } diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 6c3e06ee2fb7..4c3aa809ab95 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -69,6 +69,7 @@ struct nfs_rw_ops { struct inode *); void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); void (*rw_initiate)(struct nfs_pgio_header *, struct rpc_message *, + const struct nfs_rpc_ops *, struct rpc_task_setup *, int); }; -- cgit v1.2.3 From c36aae9ad95afa2f9a9e9109d989c21af221fabd Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 9 Jun 2014 07:10:14 +0800 Subject: nfs: allow different protocol in nfs_initiate_commit pnfs flexfile layout client may want to use NFSv3 ops rather than the default MDS v4 ops. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayout.c | 2 +- fs/nfs/internal.h | 1 + fs/nfs/pnfs_nfs.c | 1 + fs/nfs/write.c | 7 ++++--- 4 files changed, 7 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 25c4896887ca..e5a3c5b1398f 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -1055,7 +1055,7 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how) fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); if (fh) data->args.fh = fh; - return nfs_initiate_commit(ds_clnt, data, + return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode), &filelayout_commit_call_ops, how, RPC_TASK_SOFTCONN); out_err: diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 1d15ffa94937..98dee834e9d6 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -436,6 +436,7 @@ extern void nfs_write_prepare(struct rpc_task *task, void *calldata); extern void nfs_commit_prepare(struct rpc_task *task, void *calldata); extern int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, + const struct nfs_rpc_ops *nfs_ops, const struct rpc_call_ops *call_ops, int how, int flags); extern void nfs_init_commit(struct nfs_commit_data *data, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 23c851d4c9a9..c87f664587ee 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -278,6 +278,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, if (!data->lseg) { nfs_init_commit(data, mds_pages, NULL, cinfo); nfs_initiate_commit(NFS_CLIENT(inode), data, + NFS_PROTO(data->inode), data->mds_ops, how, 0); } else { struct pnfs_commit_bucket *buckets; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 54d4857e0e2b..8800bd3b235d 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1465,6 +1465,7 @@ void nfs_commitdata_release(struct nfs_commit_data *data) 
EXPORT_SYMBOL_GPL(nfs_commitdata_release); int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, + const struct nfs_rpc_ops *nfs_ops, const struct rpc_call_ops *call_ops, int how, int flags) { @@ -1486,7 +1487,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, .priority = priority, }; /* Set up the initial task struct. */ - NFS_PROTO(data->inode)->commit_setup(data, &msg); + nfs_ops->commit_setup(data, &msg); dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); @@ -1589,8 +1590,8 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, /* Set up the argument struct */ nfs_init_commit(data, head, NULL, cinfo); atomic_inc(&cinfo->mds->rpcs_out); - return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops, - how, 0); + return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), + data->mds_ops, how, 0); out_bad: nfs_retry_commit(head, NULL, cinfo); cinfo->completion_ops->error_cleanup(NFS_I(inode)); -- cgit v1.2.3 From cb04ad2a2bc8978e69f7f582ca2869909c4e0571 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Wed, 11 Jun 2014 05:24:15 +0800 Subject: nfs4: pass slot table to nfs40_setup_sequence flexclient needs this as there is no nfs_server to DS connection. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4_fs.h | 4 ++++ fs/nfs/nfs4proc.c | 24 +++++++++++++----------- 2 files changed, 17 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index a08178764cf9..90c4ffe084d7 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -443,6 +443,10 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid); extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid); extern void nfs_release_seqid(struct nfs_seqid *seqid); extern void nfs_free_seqid(struct nfs_seqid *seqid); +extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + struct rpc_task *task); extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index c347705b0161..e98dda7180c1 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -495,12 +495,11 @@ static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args) args->sa_privileged = 1; } -static int nfs40_setup_sequence(const struct nfs_server *server, - struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, - struct rpc_task *task) +int nfs40_setup_sequence(struct nfs4_slot_table *tbl, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + struct rpc_task *task) { - struct nfs4_slot_table *tbl = server->nfs_client->cl_slot_tbl; struct nfs4_slot *slot; /* slot already allocated? */ @@ -535,6 +534,7 @@ out_sleep: spin_unlock(&tbl->slot_tbl_lock); return -EAGAIN; } +EXPORT_SYMBOL_GPL(nfs40_setup_sequence); static int nfs40_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) @@ -777,7 +777,8 @@ static int nfs4_setup_sequence(const struct nfs_server *server, int ret = 0; if (!session) - return nfs40_setup_sequence(server, args, res, task); + return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, + args, res, task); dprintk("--> %s clp %p session %p sr_slot %u\n", __func__, session->clp, session, res->sr_slot ? 
@@ -818,7 +819,8 @@ static int nfs4_setup_sequence(const struct nfs_server *server, struct nfs4_sequence_res *res, struct rpc_task *task) { - return nfs40_setup_sequence(server, args, res, task); + return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, + args, res, task); } static int nfs4_sequence_done(struct rpc_task *task, @@ -1679,8 +1681,8 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata) { struct nfs4_opendata *data = calldata; - nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args, - &data->c_res.seq_res, task); + nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl, + &data->c_arg.seq_args, &data->c_res.seq_res, task); } static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata) @@ -5974,8 +5976,8 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata { struct nfs_release_lockowner_data *data = calldata; struct nfs_server *server = data->server; - nfs40_setup_sequence(server, &data->args.seq_args, - &data->res.seq_res, task); + nfs40_setup_sequence(server->nfs_client->cl_slot_tbl, + &data->args.seq_args, &data->res.seq_res, task); data->args.lock_owner.clientid = server->nfs_client->cl_clientid; data->timestamp = jiffies; } -- cgit v1.2.3 From 2c4b131dea469e4df5bd59ccb490063b69e06155 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Wed, 11 Jun 2014 05:24:16 +0800 Subject: nfs4: export nfs4_sequence_done Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4_fs.h | 2 ++ fs/nfs/nfs4proc.c | 9 +++++---- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 90c4ffe084d7..b3c771e2ac68 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -447,6 +447,8 @@ extern int nfs40_setup_sequence(struct nfs4_slot_table *tbl, struct nfs4_sequence_args *args, struct nfs4_sequence_res *res, struct rpc_task *task); +extern int nfs4_sequence_done(struct rpc_task *task, + struct nfs4_sequence_res *res); extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e98dda7180c1..f358262a95f9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -694,8 +694,7 @@ out_retry: } EXPORT_SYMBOL_GPL(nfs41_sequence_done); -static int nfs4_sequence_done(struct rpc_task *task, - struct nfs4_sequence_res *res) +int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) { if (res->sr_slot == NULL) return 1; @@ -703,6 +702,7 @@ static int nfs4_sequence_done(struct rpc_task *task, return nfs40_sequence_done(task, res); return nfs41_sequence_done(task, res); } +EXPORT_SYMBOL_GPL(nfs4_sequence_done); int nfs41_setup_sequence(struct nfs4_session *session, struct nfs4_sequence_args *args, @@ -823,11 +823,12 @@ static int nfs4_setup_sequence(const struct nfs_server *server, args, res, task); } -static int nfs4_sequence_done(struct rpc_task *task, - struct nfs4_sequence_res *res) +int nfs4_sequence_done(struct rpc_task *task, + struct nfs4_sequence_res *res) { return nfs40_sequence_done(task, res); } +EXPORT_SYMBOL_GPL(nfs4_sequence_done); #endif /* !CONFIG_NFS_V4_1 */ -- cgit v1.2.3 From 46a5ab4754cad6aeefd96feae8ba65db8655e1af Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 13 Jun 2014 23:02:25 +0800 Subject: nfs: allow to specify cred in nfs_initiate_pgio so that flexfile layout client can pass in DS credential instead of using user cred, which will be done in the next patch. 
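A sketch of the intended flexfiles-style call is below; ds_cred and my_read_call_ops are hypothetical, and the DS credential lookup itself is the "next patch" referred to above and is not shown here. The file layout, converted in the hunks that follow, simply keeps passing hdr->cred.

	/* Hypothetical DS read initiation using a DS credential. */
	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, NFS_PROTO(hdr->inode),
			  &my_read_call_ops, 0, RPC_TASK_SOFTCONN);
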
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/filelayout/filelayout.c | 11 ++++++----- fs/nfs/internal.h | 6 +++--- fs/nfs/pagelist.c | 8 +++++--- 3 files changed, 14 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index e5a3c5b1398f..bfa8547eb2d6 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -501,8 +501,9 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr) hdr->mds_offset = offset; /* Perform an asynchronous read to ds */ - nfs_initiate_pgio(ds_clnt, hdr, NFS_PROTO(hdr->inode), - &filelayout_read_call_ops, 0, RPC_TASK_SOFTCONN); + nfs_initiate_pgio(ds_clnt, hdr, hdr->cred, + NFS_PROTO(hdr->inode), &filelayout_read_call_ops, + 0, RPC_TASK_SOFTCONN); return PNFS_ATTEMPTED; } @@ -542,9 +543,9 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync) hdr->args.offset = filelayout_get_dserver_offset(lseg, offset); /* Perform an asynchronous write */ - nfs_initiate_pgio(ds_clnt, hdr, NFS_PROTO(hdr->inode), - &filelayout_write_call_ops, sync, - RPC_TASK_SOFTCONN); + nfs_initiate_pgio(ds_clnt, hdr, hdr->cred, + NFS_PROTO(hdr->inode), &filelayout_write_call_ops, + sync, RPC_TASK_SOFTCONN); return PNFS_ATTEMPTED; } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 98dee834e9d6..e9305e98b782 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -250,9 +250,9 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *); void nfs_pgio_header_free(struct nfs_pgio_header *); void nfs_pgio_data_destroy(struct nfs_pgio_header *); int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); -int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_header *, - const struct nfs_rpc_ops *, - const struct rpc_call_ops *, int, int); +int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr, + struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops, + const struct rpc_call_ops *call_ops, int how, int flags); void nfs_free_request(struct nfs_page *req); static inline void nfs_iocounter_init(struct nfs_io_counter *c) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 35a2626a6922..c4d175829880 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -597,14 +597,14 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata) } int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr, - const struct nfs_rpc_ops *rpc_ops, + struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops, const struct rpc_call_ops *call_ops, int how, int flags) { struct rpc_task *task; struct rpc_message msg = { .rpc_argp = &hdr->args, .rpc_resp = &hdr->res, - .rpc_cred = hdr->cred, + .rpc_cred = cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, @@ -793,7 +793,9 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) ret = nfs_generic_pgio(desc, hdr); if (ret == 0) ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode), - hdr, NFS_PROTO(hdr->inode), + hdr, + hdr->cred, + NFS_PROTO(hdr->inode), desc->pg_rpc_callops, desc->pg_ioflags, 0); return ret; -- cgit v1.2.3 From 16cecdf620eb23d2654a265d9b20e089370d7425 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sun, 22 Jun 2014 12:55:11 -0400 Subject: NFSv4.1/NFSv3: Add pNFS callbacks for nfs3_(read|write|commit)_done() Enable pNFS callbacks to allow flex files to work correctly with a NFSv3-enabled data server. 
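Sketch of how a layout driver is expected to hook this (the callback name is hypothetical): it installs pgio_done_cb before initiating I/O against a v3 DS, and nfs3_read_done()/nfs3_write_done() then dispatch to it instead of running the MDS-oriented completion path.

	/* Hypothetical layout-driver read completion hook. */
	static int my_ld_read_done_cb(struct rpc_task *task,
				      struct nfs_pgio_header *hdr)
	{
		/* layout-specific error handling, e.g. retry via another DS */
		return 0;
	}

	/* ...at I/O setup time, before nfs_initiate_pgio(): */
	hdr->pgio_done_cb = my_ld_read_done_cb;
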
Signed-off-by: Trond Myklebust --- fs/nfs/nfs3proc.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 524f9f837408..78e557c3ab87 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -800,6 +800,9 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) { struct inode *inode = hdr->inode; + if (hdr->pgio_done_cb != NULL) + return hdr->pgio_done_cb(task, hdr); + if (nfs3_async_handle_jukebox(task, inode)) return -EAGAIN; @@ -825,6 +828,9 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) { struct inode *inode = hdr->inode; + if (hdr->pgio_done_cb != NULL) + return hdr->pgio_done_cb(task, hdr); + if (nfs3_async_handle_jukebox(task, inode)) return -EAGAIN; if (task->tk_status >= 0) @@ -845,6 +851,9 @@ static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commi static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data) { + if (data->commit_done_cb != NULL) + return data->commit_done_cb(task, data); + if (nfs3_async_handle_jukebox(task, data->inode)) return -EAGAIN; nfs_refresh_inode(data->inode, data->res.fattr); -- cgit v1.2.3 From 36d3e3dcc93f1d3f70916ace76d94267b8948a2a Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Tue, 8 Jul 2014 06:21:10 +0800 Subject: nfs: set hostname when creating nfsv3 ds connection lockd assumes hostname exists otherwise kernel oops. It can be reproduced by following steps: 1. mount flexfile MDS 2. write some files 3. mount DS via nfsv3 BUG: unable to handle kernel NULL pointer dereference at (null) IP: [] strlen+0x2/0x20 PGD 0 Oops: 0000 [#1] SMP Modules linked in: nfsd(F) nfs_layout_flexfiles(F) rpcsec_gss_krb5(F) auth_rpcgss(F) nfsv4(F) dns_resolver(F) nfsv3(F) nfs_acl(F) nfs(F) lockd(F) sunrpc(F) fscache(F) ebtable_nat(F) nf_conntrack_netbios_ns(F) nf_conntrack_broadcast(F) ipt_MASQUERADE(F) ip6table_nat(F) nf_nat_ipv6(F) ip6table_mangle(F) ip6t_REJECT(F) nf_conntrack_ipv6(F) nf_defrag_ipv6(F) iptable_nat(F) nf_nat_ipv4(F) nf_nat(F) iptable_mangle(F) nf_conntrack_ipv4(F) nf_defrag_ipv4(F) xt_conntrack(F) nf_conntrack(F) ebtable_filter(F) ebtables(F) ip6table_filter(F) ip6_tables(F) bnep(F) snd_ens1371(F) snd_rawmidi(F) snd_ac97_codec(F) btusb(F) ac97_bus(F) snd_seq(F) snd_seq_device(F) snd_pcm(F) ppdev(F) bluetooth(F) 6lowpan_iphc(F) rfkill(F) vmw_balloon(F) snd_timer(F) snd(F) soundcore(F) gameport(F) i2c_piix4(F) e1000(F) vmw_vmci(F) parport_pc(F) parport(F) shpchp(F) uinput(F) xfs(F) libcrc32c(F) vmwgfx(F) ttm(F) drm(F) mptspi(F) scsi_transport_spi(F) mptscsih(F) mptbase(F) i2c_core(F) CPU: 0 PID: 10397 Comm: mount.nfs Tainted: GF 3.14.7-100.pd_client.001.fc16.x86_64 #1 Hardware name: VMware, Inc. 
VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/31/2013 task: ffff880008942600 ti: ffff880007990000 task.ti: ffff880007990000 RIP: 0010:[] [] strlen+0x2/0x20 RSP: 0018:ffff880007991aa0 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff880038d39c20 RCX: 0000000000000004 RDX: 0000000000000006 RSI: 0000000000000010 RDI: 0000000000000000 RBP: ffff880007991b38 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000014600 R11: 0000000000000400 R12: ffffffff81cc8580 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000004 FS: 00007f90cd2ef880(0000) GS:ffff88003f600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 0000000001710000 CR4: 00000000001407f0 Stack: ffffffffa045f52c ffff880001782230 ffff880004141e28 0006880007991ac8 ffffffff816dc14b ffff880000000000 ffff880038d39c20 0000000000000010 0000000481cc0006 0000000000000000 ffffffffa0410be8 000000000000c014 Call Trace: [] ? nlmclnt_lookup_host+0x4c/0x2c0 [lockd] [] ? _raw_spin_unlock_bh+0x1b/0x20 [] ? svc_destroy+0xb8/0x140 [sunrpc] [] nlmclnt_init+0x53/0xc0 [lockd] [] ? nfs_get_client+0x1cc/0x340 [nfs] [] nfs_start_lockd+0xa7/0xd0 [nfs] [] nfs_create_server+0x181/0x5c0 [nfs] [] nfs3_create_server+0x13/0x30 [nfsv3] [] nfs_try_mount+0x21c/0x300 [nfs] [] ? __kmalloc_track_caller+0x1ad/0x240 [] ? nfs_fs_mount+0xc37/0xd80 [nfs] [] nfs_fs_mount+0x2c5/0xd80 [nfs] [] ? nfs_clone_super+0x140/0x140 [nfs] [] ? nfs_clone_sb_security+0x40/0x40 [nfs] [] mount_fs+0x43/0x1b0 [] ? __alloc_percpu+0x10/0x20 [] vfs_kern_mount+0x76/0x120 [] do_mount+0x237/0xa80 Signed-off-by: Peng Tao Signed-off-by: Trond Myklebust --- fs/nfs/nfs3client.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c index 52e2344bf9a1..9e9fa347a948 100644 --- a/fs/nfs/nfs3client.c +++ b/fs/nfs/nfs3client.c @@ -1,5 +1,6 @@ #include #include +#include #include "internal.h" #include "nfs3_fs.h" @@ -89,6 +90,12 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_client *mds_clp, }; struct rpc_timeout ds_timeout; struct nfs_client *clp; + char buf[INET6_ADDRSTRLEN + 1]; + + /* fake a hostname because lockd wants it */ + if (rpc_ntop(ds_addr, buf, sizeof(buf)) <= 0) + return ERR_PTR(-EINVAL); + cl_init.hostname = buf; /* Use the MDS nfs_client cl_ipaddr. 
*/ nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans); -- cgit v1.2.3 From 72cff4494ea981202c8db6fd18940c8506f14db4 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Thu, 7 Aug 2014 10:12:38 +0800 Subject: nfs/flexclient: export pnfs_layoutcommit_inode flexfiles needs to start layoutcommit when necessary Signed-off-by: Peng Tao --- fs/nfs/pnfs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0a5dda4d85c2..2d25670bbe44 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1966,6 +1966,7 @@ clear_layoutcommitting: pnfs_clear_layoutcommitting(inode); goto out; } +EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode); struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) { -- cgit v1.2.3 From abb9a0079c7f06360b83a5dd27ce74b8dc6d01b6 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 22 Aug 2014 17:37:40 +0800 Subject: nfs41: close a small race window when adding new layout to global list Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/pnfs.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 2d25670bbe44..fa00b56f176a 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1288,7 +1288,6 @@ pnfs_update_layout(struct inode *ino, struct nfs_client *clp = server->nfs_client; struct pnfs_layout_hdr *lo; struct pnfs_layout_segment *lseg = NULL; - bool first; if (!pnfs_enabled_sb(NFS_SERVER(ino))) goto out; @@ -1321,16 +1320,15 @@ pnfs_update_layout(struct inode *ino, if (pnfs_layoutgets_blocked(lo, 0)) goto out_unlock; atomic_inc(&lo->plh_outstanding); - - first = list_empty(&lo->plh_layouts) ? true : false; spin_unlock(&ino->i_lock); - if (first) { + if (list_empty(&lo->plh_layouts)) { /* The lo must be on the clp list if there is any * chance of a CB_LAYOUTRECALL(FILE) coming in. */ spin_lock(&clp->cl_lock); - list_add_tail(&lo->plh_layouts, &server->layouts); + if (list_empty(&lo->plh_layouts)) + list_add_tail(&lo->plh_layouts, &server->layouts); spin_unlock(&clp->cl_lock); } -- cgit v1.2.3 From 9bf87482ddc6f8db884177a2a16b1a1dc12f8777 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 22 Aug 2014 17:37:41 +0800 Subject: nfs41: serialize first layoutget of a file Per RFC 5661 Errata 3208: | A client MAY always forget its layout state and associated | layout stateid at any time (See also section 12.5.5.1). | In such case, the client MUST use a non-layout stateid for the next | LAYOUTGET operation. This will signal the server that the client has | no more layouts on the file and its respective layout state can be | released before issuing a new layout in response to LAYOUTGET. In order to make such a signal unique to server, client needs to serialize all layoutgets using non-layout stateid. We implement this by serializing layoutgets when client has no layout segments at hand. 
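A minimal userspace model of that serialization, using pthreads in place of test_and_set_bit()/wait_on_bit() (all names are illustrative): only one thread performs the first layoutget for a file; the others wait and then retry from the top, like the lookup_again loop in the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool first_layoutget_busy;
static int  nsegs;                              /* stand-in for lo->plh_segs */

static void get_layout(int tid)
{
        pthread_mutex_lock(&lock);
again:
        if (nsegs == 0) {
                if (first_layoutget_busy) {
                        pthread_cond_wait(&cond, &lock);
                        goto again;             /* like lookup_again */
                }
                first_layoutget_busy = true;    /* we own the first LAYOUTGET */
                pthread_mutex_unlock(&lock);

                printf("thread %d: sends the serialized first LAYOUTGET\n", tid);

                pthread_mutex_lock(&lock);
                nsegs = 1;                      /* layout segment now cached */
                first_layoutget_busy = false;
                pthread_cond_broadcast(&cond);
        } else {
                printf("thread %d: uses the cached layout segment\n", tid);
        }
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
        get_layout((int)(long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        for (long i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, (void *)i);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        return 0;
}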
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/pnfs.c | 35 +++++++++++++++++++++++++++++++---- fs/nfs/pnfs.h | 1 + 2 files changed, 32 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index fa00b56f176a..7e1bac189d1c 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1288,6 +1288,7 @@ pnfs_update_layout(struct inode *ino, struct nfs_client *clp = server->nfs_client; struct pnfs_layout_hdr *lo; struct pnfs_layout_segment *lseg = NULL; + bool first; if (!pnfs_enabled_sb(NFS_SERVER(ino))) goto out; @@ -1295,6 +1296,8 @@ pnfs_update_layout(struct inode *ino, if (pnfs_within_mdsthreshold(ctx, ino, iomode)) goto out; +lookup_again: + first = false; spin_lock(&ino->i_lock); lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); if (lo == NULL) { @@ -1312,10 +1315,27 @@ pnfs_update_layout(struct inode *ino, if (pnfs_layout_io_test_failed(lo, iomode)) goto out_unlock; - /* Check to see if the layout for the given range already exists */ - lseg = pnfs_find_lseg(lo, &arg); - if (lseg) - goto out_unlock; + first = list_empty(&lo->plh_segs); + if (first) { + /* The first layoutget for the file. Need to serialize per + * RFC 5661 Errata 3208. + */ + if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, + &lo->plh_flags)) { + spin_unlock(&ino->i_lock); + wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET, + TASK_UNINTERRUPTIBLE); + pnfs_put_layout_hdr(lo); + goto lookup_again; + } + } else { + /* Check to see if the layout for the given range + * already exists + */ + lseg = pnfs_find_lseg(lo, &arg); + if (lseg) + goto out_unlock; + } if (pnfs_layoutgets_blocked(lo, 0)) goto out_unlock; @@ -1343,6 +1363,13 @@ pnfs_update_layout(struct inode *ino, lseg = send_layoutget(lo, ctx, &arg, gfp_flags); atomic_dec(&lo->plh_outstanding); out_put_layout_hdr: + if (first) { + unsigned long *bitlock = &lo->plh_flags; + + clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock); + smp_mb__after_atomic(); + wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET); + } pnfs_put_layout_hdr(lo); out: dprintk("%s: inode %s/%llu pNFS layout segment %s for " diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index c39882191651..4cf0d54e14c3 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -95,6 +95,7 @@ enum { NFS_LAYOUT_ROC, /* some lseg had roc bit set */ NFS_LAYOUT_RETURN, /* Return this layout ASAP */ NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ + NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ }; enum layoutdriver_policy_flags { -- cgit v1.2.3 From aabff4ddcac0d36dd26546f5b905c27682e7bf89 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Wed, 27 Aug 2014 10:47:14 +0800 Subject: nfs: save server READ/WRITE/COMMIT status Flexfiles layout would want to use them to report DS IO status. 
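The idea is small enough to sketch in plain C (names are illustrative and the wire handling is simplified): the decode path records the per-operation status in the result even when it then fails the call, so a layout driver can later report the data server's verdict to the MDS.

#include <stdio.h>

struct read_res {
        unsigned int op_status;   /* NFS status of the READ operation itself */
        unsigned int count;
};

static int decode_read_reply(unsigned int wire_status, unsigned int wire_count,
                             struct read_res *res)
{
        res->op_status = wire_status;        /* always recorded */
        if (wire_status != 0)
                return -1;                   /* caller still sees op_status */
        res->count = wire_count;
        return 0;
}

int main(void)
{
        struct read_res res = { 0, 0 };

        if (decode_read_reply(10008 /* NFS4ERR_DELAY */, 0, &res) < 0)
                printf("DS reported status %u, report it back to the MDS\n",
                       res.op_status);
        return 0;
}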
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs2xdr.c | 10 +++++++--- fs/nfs/nfs3xdr.c | 3 +++ fs/nfs/nfs4xdr.c | 3 +++ include/linux/nfs_xdr.h | 2 ++ 4 files changed, 15 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 5f61b83f4a1c..b4e03ed8599d 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -481,7 +481,8 @@ out_overflow: * void; * }; */ -static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result) +static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result, + __u32 *op_status) { enum nfs_stat status; int error; @@ -489,6 +490,8 @@ static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result) error = decode_stat(xdr, &status); if (unlikely(error)) goto out; + if (op_status) + *op_status = status; if (status != NFS_OK) goto out_default; error = decode_fattr(xdr, result); @@ -808,7 +811,7 @@ out_default: static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_fattr *result) { - return decode_attrstat(xdr, result); + return decode_attrstat(xdr, result, NULL); } static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr, @@ -865,6 +868,7 @@ static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr, error = decode_stat(xdr, &status); if (unlikely(error)) goto out; + result->op_status = status; if (status != NFS_OK) goto out_default; error = decode_fattr(xdr, result->fattr); @@ -882,7 +886,7 @@ static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr, { /* All NFSv2 writes are "file sync" writes */ result->verf->committed = NFS_FILE_SYNC; - return decode_attrstat(xdr, result->fattr); + return decode_attrstat(xdr, result->fattr, &result->op_status); } /** diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 8f4cbe7f4aa8..2a932fdc57cb 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1636,6 +1636,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr, error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; + result->op_status = status; if (status != NFS3_OK) goto out_status; error = decode_read3resok(xdr, result); @@ -1708,6 +1709,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr, error = decode_wcc_data(xdr, result->fattr); if (unlikely(error)) goto out; + result->op_status = status; if (status != NFS3_OK) goto out_status; error = decode_write3resok(xdr, result); @@ -2323,6 +2325,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req, error = decode_wcc_data(xdr, result->fattr); if (unlikely(error)) goto out; + result->op_status = status; if (status != NFS3_OK) goto out_status; error = decode_writeverf3(xdr, &result->verf->verifier); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index cb4376b78ed9..7d8d7a47f771 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -6567,6 +6567,7 @@ static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr, int status; status = decode_compound_hdr(xdr, &hdr); + res->op_status = hdr.status; if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); @@ -6592,6 +6593,7 @@ static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr, int status; status = decode_compound_hdr(xdr, &hdr); + res->op_status = hdr.status; if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); @@ -6621,6 +6623,7 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr, int 
status; status = decode_compound_hdr(xdr, &hdr); + res->op_status = hdr.status; if (status) goto out; status = decode_sequence(xdr, &res->seq_res, rqstp); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 467c84efb596..962f461c065d 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -513,6 +513,7 @@ struct nfs_pgio_res { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; __u32 count; + __u32 op_status; int eof; /* used by read */ struct nfs_writeverf * verf; /* used by write */ const struct nfs_server *server; /* used by write */ @@ -532,6 +533,7 @@ struct nfs_commitargs { struct nfs_commitres { struct nfs4_sequence_res seq_res; + __u32 op_status; struct nfs_fattr *fattr; struct nfs_writeverf *verf; const struct nfs_server *server; -- cgit v1.2.3 From 4579d6b897ee1b2557517fd536fb17eeb13481ad Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 6 Sep 2014 00:53:21 +0800 Subject: nfs41: pass iomode through layoutreturn args So that it is possible to return a specific iomode layouts. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4xdr.c | 2 +- fs/nfs/pnfs.c | 1 + include/linux/nfs_xdr.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 7d8d7a47f771..3c3ff633dd17 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -2012,7 +2012,7 @@ encode_layoutreturn(struct xdr_stream *xdr, p = reserve_space(xdr, 16); *p++ = cpu_to_be32(0); /* reclaim. always 0 for now */ *p++ = cpu_to_be32(args->layout_type); - *p++ = cpu_to_be32(IOMODE_ANY); + *p++ = cpu_to_be32(args->iomode); *p = cpu_to_be32(RETURN_FILE); p = reserve_space(xdr, 16); p = xdr_encode_hyper(p, 0); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 7e1bac189d1c..1b544c1a746c 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -914,6 +914,7 @@ _pnfs_return_layout(struct inode *ino) lrp->args.stateid = stateid; lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id; lrp->args.inode = ino; + lrp->args.iomode = IOMODE_ANY; lrp->args.layout = lo; lrp->clp = NFS_SERVER(ino)->nfs_client; lrp->cred = lo->plh_lc_cred; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 962f461c065d..4fd7793d45d1 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -293,6 +293,7 @@ struct nfs4_layoutreturn_args { struct nfs4_sequence_args seq_args; struct pnfs_layout_hdr *layout; struct inode *inode; + enum pnfs_iomode iomode; nfs4_stateid stateid; __u32 layout_type; }; -- cgit v1.2.3 From f40eb5d044e2eea3f866eeeeb45ca30753773cda Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 6 Sep 2014 00:53:22 +0800 Subject: nfs41: make a helper function to send layoutreturn It allows to specify different iomode to return. 
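A rough sketch of the refactor in plain C (structures and names are made up): the allocate-and-send logic lives in one helper parameterized by the iomode to return, so a full return passes "any" while an error path can return only the affected layouts.

#include <stdio.h>
#include <stdlib.h>

enum iomode { IOMODE_READ = 1, IOMODE_RW = 2, IOMODE_ANY = 3 };

struct layoutreturn_args { enum iomode iomode; };

static int send_layoutreturn(enum iomode iomode)
{
        struct layoutreturn_args *lrp = calloc(1, sizeof(*lrp));

        if (lrp == NULL)
                return -1;              /* ENOMEM: caller unwinds its state */
        lrp->iomode = iomode;
        printf("LAYOUTRETURN iomode=%d\n", (int)lrp->iomode);
        free(lrp);
        return 0;
}

int main(void)
{
        send_layoutreturn(IOMODE_ANY);  /* full return of the whole layout */
        send_layoutreturn(IOMODE_RW);   /* error path returning only the RW layout */
        return 0;
}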
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/pnfs.c | 53 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 1b544c1a746c..1b9720992608 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -845,6 +845,38 @@ static void pnfs_clear_layoutcommit(struct inode *inode, } } +static int +pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, + enum pnfs_iomode iomode) +{ + struct inode *ino = lo->plh_inode; + struct nfs4_layoutreturn *lrp; + int status = 0; + + lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); + if (unlikely(lrp == NULL)) { + status = -ENOMEM; + spin_lock(&ino->i_lock); + lo->plh_block_lgets--; + spin_unlock(&ino->i_lock); + pnfs_put_layout_hdr(lo); + goto out; + } + + lrp->args.stateid = stateid; + lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id; + lrp->args.inode = ino; + lrp->args.iomode = iomode; + lrp->args.layout = lo; + lrp->clp = NFS_SERVER(ino)->nfs_client; + lrp->cred = lo->plh_lc_cred; + + status = nfs4_proc_layoutreturn(lrp); +out: + dprintk("<-- %s status: %d\n", __func__, status); + return status; +} + /* * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr * when the layout segment list is empty. @@ -859,7 +891,6 @@ _pnfs_return_layout(struct inode *ino) struct pnfs_layout_hdr *lo = NULL; struct nfs_inode *nfsi = NFS_I(ino); LIST_HEAD(tmp_list); - struct nfs4_layoutreturn *lrp; nfs4_stateid stateid; int status = 0, empty; @@ -901,25 +932,7 @@ _pnfs_return_layout(struct inode *ino) spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); - lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); - if (unlikely(lrp == NULL)) { - status = -ENOMEM; - spin_lock(&ino->i_lock); - lo->plh_block_lgets--; - spin_unlock(&ino->i_lock); - pnfs_put_layout_hdr(lo); - goto out; - } - - lrp->args.stateid = stateid; - lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id; - lrp->args.inode = ino; - lrp->args.iomode = IOMODE_ANY; - lrp->args.layout = lo; - lrp->clp = NFS_SERVER(ino)->nfs_client; - lrp->cred = lo->plh_lc_cred; - - status = nfs4_proc_layoutreturn(lrp); + status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY); out: dprintk("<-- %s status: %d\n", __func__, status); return status; -- cgit v1.2.3 From 016256df3a7e9eeb3f4dea5ccd0e21a0b63841eb Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 6 Sep 2014 00:53:23 +0800 Subject: nfs41: add a helper to mark layout for return It marks all matching layout segments as NFS_LSEG_LAYOUTRETURN, which is an indicator for pnfs_put_lseg() to send layoutreturn, and also prevents pnfs_update_layout() from using the returning segments. Once it is set, it never gets cleared. It also sets proper io failure bit so that pnfs path can be retried after PNFS_LAYOUTGET_RETRY_TIMEOUT second. 
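The plh_return_iomode handling described above reduces to a small widening rule, sketched here in plain C (IOMODE_NONE stands in for the zero value used by the patch): the first failure records its iomode, and a later failure with a different iomode widens the pending return to IOMODE_ANY.

#include <assert.h>

enum iomode { IOMODE_NONE = 0, IOMODE_READ = 1, IOMODE_RW = 2, IOMODE_ANY = 3 };

static enum iomode widen_return_iomode(enum iomode pending, enum iomode failed)
{
        if (pending == IOMODE_NONE)
                return failed;          /* first failure sets the iomode */
        if (pending != failed)
                return IOMODE_ANY;      /* mixed failures: return everything */
        return pending;
}

int main(void)
{
        assert(widen_return_iomode(IOMODE_NONE, IOMODE_READ) == IOMODE_READ);
        assert(widen_return_iomode(IOMODE_READ, IOMODE_READ) == IOMODE_READ);
        assert(widen_return_iomode(IOMODE_READ, IOMODE_RW)   == IOMODE_ANY);
        return 0;
}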
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/pnfs.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/nfs/pnfs.h | 4 ++++ 2 files changed, 59 insertions(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 1b9720992608..0bd149baca71 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1479,6 +1479,61 @@ out_forget_reply: goto out; } +static void +pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, + struct list_head *tmp_list, + struct pnfs_layout_range *return_range) +{ + struct pnfs_layout_segment *lseg, *next; + + dprintk("%s:Begin lo %p\n", __func__, lo); + + if (list_empty(&lo->plh_segs)) + return; + + list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) + if (should_free_lseg(&lseg->pls_range, return_range)) { + dprintk("%s: marking lseg %p iomode %d " + "offset %llu length %llu\n", __func__, + lseg, lseg->pls_range.iomode, + lseg->pls_range.offset, + lseg->pls_range.length); + set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); + mark_lseg_invalid(lseg, tmp_list); + } +} + +void pnfs_error_mark_layout_for_return(struct inode *inode, + struct pnfs_layout_segment *lseg) +{ + struct pnfs_layout_hdr *lo = NFS_I(inode)->layout; + int iomode = pnfs_iomode_to_fail_bit(lseg->pls_range.iomode); + struct pnfs_layout_range range = { + .iomode = lseg->pls_range.iomode, + .offset = 0, + .length = NFS4_MAX_UINT64, + }; + LIST_HEAD(free_me); + + spin_lock(&inode->i_lock); + /* set failure bit so that pnfs path will be retried later */ + pnfs_layout_set_fail_bit(lo, iomode); + set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); + if (lo->plh_return_iomode == 0) + lo->plh_return_iomode = range.iomode; + else if (lo->plh_return_iomode != range.iomode) + lo->plh_return_iomode = IOMODE_ANY; + /* + * mark all matching lsegs so that we are sure to have no live + * segments at hand when sending layoutreturn. See pnfs_put_lseg() + * for how it works. 
+ */ + pnfs_mark_matching_lsegs_return(lo, &free_me, &range); + spin_unlock(&inode->i_lock); + pnfs_free_lseg_list(&free_me); +} +EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return); + void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 4cf0d54e14c3..bea2030eec74 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -38,6 +38,7 @@ enum { NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */ NFS_LSEG_ROC, /* roc bit received from server */ NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */ + NFS_LSEG_LAYOUTRETURN, /* layoutreturn bit set for layoutreturn */ }; /* Individual ip address */ @@ -184,6 +185,7 @@ struct pnfs_layout_hdr { u32 plh_barrier; /* ignore lower seqids */ unsigned long plh_retry_timestamp; unsigned long plh_flags; + enum pnfs_iomode plh_return_iomode; loff_t plh_lwb; /* last write byte for layoutcommit */ struct rpc_cred *plh_lc_cred; /* layoutcommit cred */ struct inode *plh_inode; @@ -274,6 +276,8 @@ void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *); int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *); struct nfs4_threshold *pnfs_mdsthreshold_alloc(void); +void pnfs_error_mark_layout_for_return(struct inode *inode, + struct pnfs_layout_segment *lseg); /* nfs4_deviceid_flags */ enum { -- cgit v1.2.3 From ce6ab4f238cb76d356229e97e1fefb7192388e13 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 6 Sep 2014 00:53:24 +0800 Subject: nfs41: don't use a layout if it is marked for returning And if we are to return the same type of layouts, don't bother sending more layoutgets. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4proc.c | 1 + fs/nfs/pnfs.c | 23 ++++++++++++++++++----- fs/nfs/pnfs.h | 1 + 3 files changed, 20 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f358262a95f9..19432842b2dc 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7540,6 +7540,7 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) return; if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, NFS_I(lgp->args.inode)->layout, + &lgp->args.range, lgp->args.ctx->state)) { rpc_exit(task, NFS4_OK); } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0bd149baca71..853b544f2efc 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -740,25 +740,37 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo, return !pnfs_seqid_is_newer(seqid, lo->plh_barrier); } +static bool +pnfs_layout_returning(const struct pnfs_layout_hdr *lo, + struct pnfs_layout_range *range) +{ + return test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) && + (lo->plh_return_iomode == IOMODE_ANY || + lo->plh_return_iomode == range->iomode); +} + /* lget is set to 1 if called from inside send_layoutget call chain */ static bool -pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget) +pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, + struct pnfs_layout_range *range, int lget) { return lo->plh_block_lgets || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || (list_empty(&lo->plh_segs) && - (atomic_read(&lo->plh_outstanding) > lget)); + (atomic_read(&lo->plh_outstanding) > lget)) || + pnfs_layout_returning(lo, range); } int pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, + struct pnfs_layout_range *range, struct nfs4_state *open_state) { int status = 0; dprintk("--> %s\n", __func__); 
spin_lock(&lo->plh_inode->i_lock); - if (pnfs_layoutgets_blocked(lo, 1)) { + if (pnfs_layoutgets_blocked(lo, range, 1)) { status = -EAGAIN; } else if (!nfs4_valid_open_stateid(open_state)) { status = -EBADF; @@ -1192,6 +1204,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, list_for_each_entry(lseg, &lo->plh_segs, pls_list) { if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) && + !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) && pnfs_lseg_range_match(&lseg->pls_range, range)) { ret = pnfs_get_lseg(lseg); break; @@ -1351,7 +1364,7 @@ lookup_again: goto out_unlock; } - if (pnfs_layoutgets_blocked(lo, 0)) + if (pnfs_layoutgets_blocked(lo, &arg, 0)) goto out_unlock; atomic_inc(&lo->plh_outstanding); spin_unlock(&ino->i_lock); @@ -1432,7 +1445,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) goto out_forget_reply; } - if (pnfs_layoutgets_blocked(lo, 1)) { + if (pnfs_layoutgets_blocked(lo, &lgp->args.range, 1)) { dprintk("%s forget reply due to state\n", __func__); goto out_forget_reply; } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index bea2030eec74..9e6edd1ebbc6 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -249,6 +249,7 @@ void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, bool update_barrier); int pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, + struct pnfs_layout_range *range, struct nfs4_state *open_state); int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, struct list_head *tmp_list, -- cgit v1.2.3 From aa1e0e3a8e3f16ff50a72a8c623d7e1c467383bc Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 6 Sep 2014 00:53:25 +0800 Subject: nfs41: send layoutreturn in last put_lseg If current lseg is the last lseg marked with NFS_LSEG_LAYOUTRETURN, send layoutreturn. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/pnfs.c | 38 +++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 853b544f2efc..e9acfcfdc9a9 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -50,6 +50,10 @@ static DEFINE_SPINLOCK(pnfs_spinlock); */ static LIST_HEAD(pnfs_modules_tbl); +static int +pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, + enum pnfs_iomode iomode); + /* Return the registered pnfs layout driver module matching given id */ static struct pnfs_layoutdriver_type * find_pnfs_driver_locked(u32 id) @@ -337,6 +341,29 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo, rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq); } +/* Return true if layoutreturn is needed */ +static bool +pnfs_layout_need_return(struct pnfs_layout_hdr *lo, + struct pnfs_layout_segment *lseg, + nfs4_stateid *stateid, enum pnfs_iomode *iomode) +{ + struct pnfs_layout_segment *s; + + if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) + return false; + + list_for_each_entry(s, &lo->plh_segs, pls_list) + if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) + return false; + + *stateid = lo->plh_stateid; + *iomode = lo->plh_return_iomode; + /* decreased in pnfs_send_layoutreturn() */ + lo->plh_block_lgets++; + lo->plh_return_iomode = 0; + return true; +} + void pnfs_put_lseg(struct pnfs_layout_segment *lseg) { @@ -352,11 +379,20 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) lo = lseg->pls_layout; inode = lo->plh_inode; if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { + bool need_return; + nfs4_stateid stateid; + enum pnfs_iomode iomode; + pnfs_get_layout_hdr(lo); pnfs_layout_remove_lseg(lo, lseg); + need_return = pnfs_layout_need_return(lo, 
lseg, + &stateid, &iomode); spin_unlock(&inode->i_lock); pnfs_free_lseg(lseg); - pnfs_put_layout_hdr(lo); + if (need_return) + pnfs_send_layoutreturn(lo, stateid, iomode); + else + pnfs_put_layout_hdr(lo); } } EXPORT_SYMBOL_GPL(pnfs_put_lseg); -- cgit v1.2.3 From e736a5b98c7aa98fe572990caf5fed9593c72a67 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 6 Sep 2014 00:53:26 +0800 Subject: nfs41: clear NFS_LAYOUT_RETURN if layoutreturn is sent or failed to send So that pnfs path is not disabled for ever. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4proc.c | 1 + fs/nfs/pnfs.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 19432842b2dc..e19b5dbe535a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7796,6 +7796,7 @@ static void nfs4_layoutreturn_release(void *calldata) spin_lock(&lo->plh_inode->i_lock); if (lrp->res.lrs_present) pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); + clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); pnfs_put_layout_hdr(lrp->args.layout); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index e9acfcfdc9a9..63992c826faf 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -921,6 +921,11 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, status = nfs4_proc_layoutreturn(lrp); out: + if (status) { + spin_lock(&ino->i_lock); + clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); + spin_unlock(&ino->i_lock); + } dprintk("<-- %s status: %d\n", __func__, status); return status; } -- cgit v1.2.3 From c220106fb45909719295474e2497ffe03e47dfb3 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 6 Sep 2014 00:53:29 +0800 Subject: nfs/filelayout: use pnfs_error_mark_layout_for_return Instead of calling layoutreturn directly, call pnfs_error_mark_layout_for_return to mark layouts for return and let generic code return layout when layout segments are freed. 
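The control-flow change can be sketched in a few lines of plain C (hypothetical names): the DS error handler only marks the layout, and the LAYOUTRETURN is sent later, when the last segment reference is dropped, keeping the RPC error path free of blocking work.

#include <stdbool.h>
#include <stdio.h>

static bool layout_marked_for_return;

static void mark_layout_for_return(void)
{
        layout_marked_for_return = true;
}

static void ds_error_handler(int status)
{
        printf("DS error %d: mark the layout, keep the error path cheap\n", status);
        mark_layout_for_return();       /* no LAYOUTRETURN RPC from here */
}

static void put_last_lseg(void)
{
        if (layout_marked_for_return)
                printf("last lseg dropped: send LAYOUTRETURN now\n");
}

int main(void)
{
        ds_error_handler(-5 /* EIO-like */);
        put_last_lseg();
        return 0;
}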
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes Conflicts: fs/nfs/filelayout/filelayout.c --- fs/nfs/filelayout/filelayout.c | 2 +- fs/nfs/pnfs_nfs.c | 10 ---------- 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index bfa8547eb2d6..5d2eadc65167 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -200,7 +200,7 @@ static int filelayout_async_handle_error(struct rpc_task *task, dprintk("%s DS connection error %d\n", __func__, task->tk_status); nfs4_mark_deviceid_unavailable(devid); - set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); + pnfs_error_mark_layout_for_return(inode, lseg); rpc_wake_up(&tbl->slot_tbl_waitq); /* fall through */ default: diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index c87f664587ee..55bff41180e8 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -18,20 +18,10 @@ #define NFSDBG_FACILITY NFSDBG_PNFS -static void pnfs_generic_fenceme(struct inode *inode, - struct pnfs_layout_hdr *lo) -{ - if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) - return; - pnfs_return_layout(inode); -} - void pnfs_generic_rw_release(void *data) { struct nfs_pgio_header *hdr = data; - struct pnfs_layout_hdr *lo = hdr->lseg->pls_layout; - pnfs_generic_fenceme(lo->plh_inode, lo); nfs_put_client(hdr->ds_clp); hdr->mds_ops->rpc_release(data); } -- cgit v1.2.3 From 2176bf4269a37a7742230ed6c91668241bfe1b2b Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Wed, 10 Sep 2014 15:44:18 -0400 Subject: nfs: introduce pg_cleanup op for pgio descriptors Add a new operation to nfs_pageio_ops that is called on nfs_pageio_complete. Signed-off-by: Weston Andros Adamson --- fs/nfs/pagelist.c | 5 ++++- include/linux/nfs_page.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index c4d175829880..1c031878c752 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1050,7 +1050,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, EXPORT_SYMBOL_GPL(nfs_pageio_resend); /** - * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor + * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor * @desc: pointer to io descriptor */ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) @@ -1062,6 +1062,9 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) if (!nfs_do_recoalesce(desc)) break; } + + if (desc->pg_ops->pg_cleanup) + desc->pg_ops->pg_cleanup(desc); } /** diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 4c3aa809ab95..479c566c4ddc 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -58,6 +58,7 @@ struct nfs_pageio_ops { size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *); int (*pg_doio)(struct nfs_pageio_descriptor *); + void (*pg_cleanup)(struct nfs_pageio_descriptor *); }; struct nfs_rw_ops { -- cgit v1.2.3 From 180bb5ec06ce3a95dccc751fbf6bf11d3003da98 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Wed, 10 Sep 2014 15:48:01 -0400 Subject: pnfs: release lseg in pnfs_generic_pg_cleanup This is needed to support mirrored writes - the first write can't just trash the lseg, we need to keep it around until all mirrors have written. 
Signed-off-by: Weston Andros Adamson --- fs/nfs/blocklayout/blocklayout.c | 2 ++ fs/nfs/filelayout/filelayout.c | 2 ++ fs/nfs/objlayout/objio_osd.c | 2 ++ fs/nfs/pnfs.c | 32 ++++++++++++++------------------ fs/nfs/pnfs.h | 1 + 5 files changed, 21 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 77fec6a55f57..1cac3c175d18 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -860,12 +860,14 @@ static const struct nfs_pageio_ops bl_pg_read_ops = { .pg_init = bl_pg_init_read, .pg_test = bl_pg_test_read, .pg_doio = pnfs_generic_pg_readpages, + .pg_cleanup = pnfs_generic_pg_cleanup, }; static const struct nfs_pageio_ops bl_pg_write_ops = { .pg_init = bl_pg_init_write, .pg_test = bl_pg_test_write, .pg_doio = pnfs_generic_pg_writepages, + .pg_cleanup = pnfs_generic_pg_cleanup, }; static struct pnfs_layoutdriver_type blocklayout_type = { diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 5d2eadc65167..2af32fc39d60 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -933,12 +933,14 @@ static const struct nfs_pageio_ops filelayout_pg_read_ops = { .pg_init = filelayout_pg_init_read, .pg_test = filelayout_pg_test, .pg_doio = pnfs_generic_pg_readpages, + .pg_cleanup = pnfs_generic_pg_cleanup, }; static const struct nfs_pageio_ops filelayout_pg_write_ops = { .pg_init = filelayout_pg_init_write, .pg_test = filelayout_pg_test, .pg_doio = pnfs_generic_pg_writepages, + .pg_cleanup = pnfs_generic_pg_cleanup, }; static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9e5bc42180e4..d00778077df1 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -607,12 +607,14 @@ static const struct nfs_pageio_ops objio_pg_read_ops = { .pg_init = objio_init_read, .pg_test = objio_pg_test, .pg_doio = pnfs_generic_pg_readpages, + .pg_cleanup = pnfs_generic_pg_cleanup, }; static const struct nfs_pageio_ops objio_pg_write_ops = { .pg_init = objio_init_write, .pg_test = objio_pg_test, .pg_doio = pnfs_generic_pg_writepages, + .pg_cleanup = pnfs_generic_pg_cleanup, }; static struct pnfs_layoutdriver_type objlayout_type = { diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 63992c826faf..2da2e771fefe 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1631,6 +1631,16 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, } EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write); +void +pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc) +{ + if (desc->pg_lseg) { + pnfs_put_lseg(desc->pg_lseg); + desc->pg_lseg = NULL; + } +} +EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup); + /* * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number * of bytes (maximum @req->wb_bytes) that can be coalesced. 
@@ -1756,11 +1766,9 @@ pnfs_do_write(struct nfs_pageio_descriptor *desc, struct pnfs_layout_segment *lseg = desc->pg_lseg; enum pnfs_try_status trypnfs; - desc->pg_lseg = NULL; trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how); if (trypnfs == PNFS_NOT_ATTEMPTED) pnfs_write_through_mds(desc, hdr); - pnfs_put_lseg(lseg); } static void pnfs_writehdr_free(struct nfs_pgio_header *hdr) @@ -1779,17 +1787,13 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); if (!hdr) { desc->pg_completion_ops->error_cleanup(&desc->pg_list); - pnfs_put_lseg(desc->pg_lseg); - desc->pg_lseg = NULL; return -ENOMEM; } nfs_pgheader_init(desc, hdr, pnfs_writehdr_free); + hdr->lseg = pnfs_get_lseg(desc->pg_lseg); ret = nfs_generic_pgio(desc, hdr); - if (ret != 0) { - pnfs_put_lseg(desc->pg_lseg); - desc->pg_lseg = NULL; - } else + if (!ret) pnfs_do_write(desc, hdr, desc->pg_ioflags); return ret; } @@ -1874,11 +1878,9 @@ pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) struct pnfs_layout_segment *lseg = desc->pg_lseg; enum pnfs_try_status trypnfs; - desc->pg_lseg = NULL; trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg); if (trypnfs == PNFS_NOT_ATTEMPTED) pnfs_read_through_mds(desc, hdr); - pnfs_put_lseg(lseg); } static void pnfs_readhdr_free(struct nfs_pgio_header *hdr) @@ -1897,18 +1899,12 @@ pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); if (!hdr) { desc->pg_completion_ops->error_cleanup(&desc->pg_list); - ret = -ENOMEM; - pnfs_put_lseg(desc->pg_lseg); - desc->pg_lseg = NULL; - return ret; + return -ENOMEM; } nfs_pgheader_init(desc, hdr, pnfs_readhdr_free); hdr->lseg = pnfs_get_lseg(desc->pg_lseg); ret = nfs_generic_pgio(desc, hdr); - if (ret != 0) { - pnfs_put_lseg(desc->pg_lseg); - desc->pg_lseg = NULL; - } else + if (!ret) pnfs_do_read(desc, hdr); return ret; } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 9e6edd1ebbc6..59c831efb5de 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -230,6 +230,7 @@ void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page * int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc); void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req, u64 wb_size); +void pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *); int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc); size_t pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req); -- cgit v1.2.3 From 309a1d65b11de24d172f7dbbc21583ebd649c912 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 5 Sep 2014 16:34:29 -0400 Subject: nfs: handle overlapping reqs in lock_and_join This is needed for mirrored DS support, where multuple requests cover the same range. Signed-off-by: Weston Andros Adamson --- fs/nfs/write.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 8800bd3b235d..e9974574b19a 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -473,13 +473,18 @@ try_again: do { /* * Subrequests are always contiguous, non overlapping - * and in order. If not, it's a programming error. + * and in order - but may be repeated (mirrored writes). 
*/ - WARN_ON_ONCE(subreq->wb_offset != - (head->wb_offset + total_bytes)); - - /* keep track of how many bytes this group covers */ - total_bytes += subreq->wb_bytes; + if (subreq->wb_offset == (head->wb_offset + total_bytes)) { + /* keep track of how many bytes this group covers */ + total_bytes += subreq->wb_bytes; + } else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset || + ((subreq->wb_offset + subreq->wb_bytes) > + (head->wb_offset + total_bytes)))) { + nfs_page_group_unlock(head); + spin_unlock(&inode->i_lock); + return ERR_PTR(-EIO); + } if (!nfs_lock_request(subreq)) { /* releases page group bit lock and -- cgit v1.2.3 From 6cccbb6f52dceec5f4faed8846ac05ae830640e6 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Tue, 16 Sep 2014 17:35:51 -0400 Subject: nfs: rename pgio header ds_idx to ds_commit_idx 'ds_commit_idx' is a better name - it is used to select the right commit bucket for pnfs. Signed-off-by: Weston Andros Adamson --- fs/nfs/direct.c | 14 ++++++-------- fs/nfs/filelayout/filelayout.c | 4 ++-- include/linux/nfs_xdr.h | 2 +- 3 files changed, 9 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index e84f764b9dcd..d7c2d430b04d 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -112,22 +112,22 @@ static inline int put_dreq(struct nfs_direct_req *dreq) * nfs_direct_select_verf - select the right verifier * @dreq - direct request possibly spanning multiple servers * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs - * @ds_idx - index of data server in data server list, only valid if ds_clp set + * @commit_idx - commit bucket index for the DS * * returns the correct verifier to use given the role of the server */ static struct nfs_writeverf * nfs_direct_select_verf(struct nfs_direct_req *dreq, struct nfs_client *ds_clp, - int ds_idx) + int commit_idx) { struct nfs_writeverf *verfp = &dreq->verf; #ifdef CONFIG_NFS_V4_1 if (ds_clp) { /* pNFS is in use, use the DS verf */ - if (ds_idx >= 0 && ds_idx < dreq->ds_cinfo.nbuckets) - verfp = &dreq->ds_cinfo.buckets[ds_idx].direct_verf; + if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) + verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf; else WARN_ON_ONCE(1); } @@ -148,8 +148,7 @@ static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq, { struct nfs_writeverf *verfp; - verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, - hdr->ds_idx); + verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx); WARN_ON_ONCE(verfp->committed >= 0); memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf)); WARN_ON_ONCE(verfp->committed < 0); @@ -169,8 +168,7 @@ static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq, { struct nfs_writeverf *verfp; - verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, - hdr->ds_idx); + verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx); if (verfp->committed < 0) { nfs_direct_set_hdr_verf(dreq, hdr); return 0; diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 2af32fc39d60..520cbc53e035 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -492,7 +492,7 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr) /* No multipath support. 
Use first DS */ atomic_inc(&ds->ds_clp->cl_count); hdr->ds_clp = ds->ds_clp; - hdr->ds_idx = idx; + hdr->ds_commit_idx = idx; fh = nfs4_fl_select_ds_fh(lseg, j); if (fh) hdr->args.fh = fh; @@ -536,7 +536,7 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync) hdr->pgio_done_cb = filelayout_write_done_cb; atomic_inc(&ds->ds_clp->cl_count); hdr->ds_clp = ds->ds_clp; - hdr->ds_idx = idx; + hdr->ds_commit_idx = idx; fh = nfs4_fl_select_ds_fh(lseg, j); if (fh) hdr->args.fh = fh; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 4fd7793d45d1..5bc99f04a550 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1328,7 +1328,7 @@ struct nfs_pgio_header { __u64 mds_offset; /* Filelayout dense stripe */ struct nfs_page_array page_array; struct nfs_client *ds_clp; /* pNFS data server */ - int ds_idx; /* ds index if ds_clp is set */ + int ds_commit_idx; /* ds index if ds_clp is set */ }; struct nfs_mds_commit_info { -- cgit v1.2.3 From b57ff1303a2d4d1484c7a82bd80a3e014d6cdf5e Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 5 Sep 2014 18:20:21 -0400 Subject: pnfs: pass ds_commit_idx through the commit path Pass ds_commit_idx through the nfs commit path. It's used to select the commit bucket when using pnfs and is ignored when not using pnfs. Several functions had to be changed: nfs_retry_commit, nfs_mark_request_commit, pnfs_mark_request_commit and the pnfs layout driver .mark_request_commit functions. Signed-off-by: Tom Haynes --- fs/nfs/direct.c | 5 +++-- fs/nfs/filelayout/filelayout.c | 3 ++- fs/nfs/internal.h | 6 ++++-- fs/nfs/pnfs.h | 9 +++++---- fs/nfs/pnfs_nfs.c | 4 ++-- fs/nfs/write.c | 14 ++++++++------ 6 files changed, 24 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index d7c2d430b04d..1ee41d74c31c 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -649,7 +649,7 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data) nfs_list_remove_request(req); if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { /* Note the rewrite will go through mds */ - nfs_mark_request_commit(req, NULL, &cinfo); + nfs_mark_request_commit(req, NULL, &cinfo, 0); } else nfs_release_request(req); nfs_unlock_and_release_request(req); @@ -748,7 +748,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) nfs_list_remove_request(req); if (request_commit) { kref_get(&req->wb_kref); - nfs_mark_request_commit(req, hdr->lseg, &cinfo); + nfs_mark_request_commit(req, hdr->lseg, &cinfo, + hdr->ds_commit_idx); } nfs_unlock_and_release_request(req); } diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 520cbc53e035..3c9769441f36 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -954,7 +954,8 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j) static void filelayout_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo) + struct nfs_commit_info *cinfo, + u32 ds_commit_idx) { struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e9305e98b782..05f9a87cdab4 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -450,13 +450,15 @@ int nfs_scan_commit(struct inode *inode, struct list_head *dst, struct nfs_commit_info *cinfo); void nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo); + struct nfs_commit_info *cinfo, + u32 
ds_commit_idx); int nfs_write_need_commit(struct nfs_pgio_header *); int nfs_generic_commit_list(struct inode *inode, struct list_head *head, int how, struct nfs_commit_info *cinfo); void nfs_retry_commit(struct list_head *page_list, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo); + struct nfs_commit_info *cinfo, + u32 ds_commit_idx); void nfs_commitdata_release(struct nfs_commit_data *data); void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, struct nfs_commit_info *cinfo); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 59c831efb5de..a0ab81cc9cf3 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -137,7 +137,8 @@ struct pnfs_layoutdriver_type { struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode); void (*mark_request_commit) (struct nfs_page *req, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo); + struct nfs_commit_info *cinfo, + u32 ds_commit_idx); void (*clear_request_commit) (struct nfs_page *req, struct nfs_commit_info *cinfo); int (*scan_commit_lists) (struct nfs_commit_info *cinfo, @@ -389,14 +390,14 @@ pnfs_generic_mark_devid_invalid(struct nfs4_deviceid_node *node) static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo) + struct nfs_commit_info *cinfo, u32 ds_commit_idx) { struct inode *inode = req->wb_context->dentry->d_inode; struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; if (lseg == NULL || ld->mark_request_commit == NULL) return false; - ld->mark_request_commit(req, lseg, cinfo); + ld->mark_request_commit(req, lseg, cinfo, ds_commit_idx); return true; } @@ -574,7 +575,7 @@ pnfs_get_ds_info(struct inode *inode) static inline bool pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo) + struct nfs_commit_info *cinfo, u32 ds_commit_idx) { return false; } diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 55bff41180e8..fdc4f6562bb7 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -188,7 +188,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx) bucket = &fl_cinfo->buckets[i]; if (list_empty(&bucket->committing)) continue; - nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo); + nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i); spin_lock(cinfo->lock); freeme = bucket->clseg; bucket->clseg = NULL; @@ -247,7 +247,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages, list_add(&data->pages, &list); nreq++; } else { - nfs_retry_commit(mds_pages, NULL, cinfo); + nfs_retry_commit(mds_pages, NULL, cinfo, 0); pnfs_generic_retry_commit(cinfo, 0); cinfo->completion_ops->error_cleanup(NFS_I(inode)); return -ENOMEM; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e9974574b19a..2bee165fddcf 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -847,9 +847,9 @@ EXPORT_SYMBOL_GPL(nfs_init_cinfo); */ void nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo) + struct nfs_commit_info *cinfo, u32 ds_commit_idx) { - if (pnfs_mark_request_commit(req, lseg, cinfo)) + if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx)) return; nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo); } @@ -905,7 +905,8 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) } if (nfs_write_need_commit(hdr)) { memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); - nfs_mark_request_commit(req, hdr->lseg, 
&cinfo); + nfs_mark_request_commit(req, hdr->lseg, &cinfo, + 0); goto next; } remove_req: @@ -1560,14 +1561,15 @@ EXPORT_SYMBOL_GPL(nfs_init_commit); void nfs_retry_commit(struct list_head *page_list, struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo) + struct nfs_commit_info *cinfo, + u32 ds_commit_idx) { struct nfs_page *req; while (!list_empty(page_list)) { req = nfs_list_entry(page_list->next); nfs_list_remove_request(req); - nfs_mark_request_commit(req, lseg, cinfo); + nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); if (!cinfo->dreq) { dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, @@ -1598,7 +1600,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), data->mds_ops, how, 0); out_bad: - nfs_retry_commit(head, NULL, cinfo); + nfs_retry_commit(head, NULL, cinfo, 0); cinfo->completion_ops->error_cleanup(NFS_I(inode)); return -ENOMEM; } -- cgit v1.2.3 From a7d42ddb3099727f58366fa006f850a219cce6c8 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 19 Sep 2014 10:55:07 -0400 Subject: nfs: add mirroring support to pgio layer This patch adds mirrored write support to the pgio layer. The default is to use one mirror, but pgio callers may define callbacks to change this to any value up to the (arbitrarily selected) limit of 16. The basic idea is to break out members of nfs_pageio_descriptor that cannot be shared between mirrored DSes and put them in a new structure. Signed-off-by: Weston Andros Adamson --- fs/nfs/direct.c | 17 ++- fs/nfs/internal.h | 1 + fs/nfs/objlayout/objio_osd.c | 3 +- fs/nfs/pagelist.c | 270 +++++++++++++++++++++++++++++++++++-------- fs/nfs/pnfs.c | 26 +++-- fs/nfs/read.c | 30 ++++- fs/nfs/write.c | 10 +- include/linux/nfs_page.h | 20 +++- include/linux/nfs_xdr.h | 1 + 9 files changed, 311 insertions(+), 67 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 1ee41d74c31c..0178d4fe8ab7 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -360,8 +360,14 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) spin_lock(&dreq->lock); if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0)) dreq->error = hdr->error; - else - dreq->count += hdr->good_bytes; + else { + /* + * FIXME: right now this only accounts for bytes written + * to the first mirror + */ + if (hdr->pgio_mirror_idx == 0) + dreq->count += hdr->good_bytes; + } spin_unlock(&dreq->lock); while (!list_empty(&hdr->pages)) { @@ -724,7 +730,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) dreq->error = hdr->error; } if (dreq->error == 0) { - dreq->count += hdr->good_bytes; + /* + * FIXME: right now this only accounts for bytes written + * to the first mirror + */ + if (hdr->pgio_mirror_idx == 0) + dreq->count += hdr->good_bytes; if (nfs_write_need_commit(hdr)) { if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) request_commit = true; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 05f9a87cdab4..ef1c703e487b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -469,6 +469,7 @@ void nfs_init_cinfo(struct nfs_commit_info *cinfo, struct nfs_direct_req *dreq); int nfs_key_timeout_notify(struct file *filp, struct inode *inode); bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx); +void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio); #ifdef CONFIG_MIGRATION extern int nfs_migrate_page(struct address_space *, diff 
--git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index d00778077df1..9a5f2ee6001f 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -537,11 +537,12 @@ int objio_write_pagelist(struct nfs_pgio_header *hdr, int how) static size_t objio_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req) { + struct nfs_pgio_mirror *mirror = &pgio->pg_mirrors[pgio->pg_mirror_idx]; unsigned int size; size = pnfs_generic_pg_test(pgio, prev, req); - if (!size || pgio->pg_count + req->wb_bytes > + if (!size || mirror->pg_count + req->wb_bytes > (unsigned long)pgio->pg_layout_private) return 0; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 1c031878c752..eec12b75c232 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -46,17 +46,22 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr, void (*release)(struct nfs_pgio_header *hdr)) { - hdr->req = nfs_list_entry(desc->pg_list.next); + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + + + hdr->req = nfs_list_entry(mirror->pg_list.next); hdr->inode = desc->pg_inode; hdr->cred = hdr->req->wb_context->cred; hdr->io_start = req_offset(hdr->req); - hdr->good_bytes = desc->pg_count; + hdr->good_bytes = mirror->pg_count; hdr->dreq = desc->pg_dreq; hdr->layout_private = desc->pg_layout_private; hdr->release = release; hdr->completion_ops = desc->pg_completion_ops; if (hdr->completion_ops->init_hdr) hdr->completion_ops->init_hdr(hdr); + + hdr->pgio_mirror_idx = desc->pg_mirror_idx; } EXPORT_SYMBOL_GPL(nfs_pgheader_init); @@ -480,7 +485,10 @@ nfs_wait_on_request(struct nfs_page *req) size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) { - if (desc->pg_count > desc->pg_bsize) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + + + if (mirror->pg_count > mirror->pg_bsize) { /* should never happen */ WARN_ON_ONCE(1); return 0; @@ -490,11 +498,11 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, * Limit the request size so that we can still allocate a page array * for it without upsetting the slab allocator. 
*/ - if (((desc->pg_count + req->wb_bytes) >> PAGE_SHIFT) * + if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * sizeof(struct page) > PAGE_SIZE) return 0; - return min(desc->pg_bsize - desc->pg_count, (size_t)req->wb_bytes); + return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); } EXPORT_SYMBOL_GPL(nfs_generic_pg_test); @@ -651,10 +659,18 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio); static int nfs_pgio_error(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { + struct nfs_pgio_mirror *mirror; + u32 midx; + set_bit(NFS_IOHDR_REDO, &hdr->flags); nfs_pgio_data_destroy(hdr); hdr->completion_ops->completion(hdr); - desc->pg_completion_ops->error_cleanup(&desc->pg_list); + /* TODO: Make sure it's right to clean up all mirrors here + * and not just hdr->pgio_mirror_idx */ + for (midx = 0; midx < desc->pg_mirror_count; midx++) { + mirror = &desc->pg_mirrors[midx]; + desc->pg_completion_ops->error_cleanup(&mirror->pg_list); + } return -ENOMEM; } @@ -671,6 +687,17 @@ static void nfs_pgio_release(void *calldata) hdr->completion_ops->completion(hdr); } +static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror, + unsigned int bsize) +{ + INIT_LIST_HEAD(&mirror->pg_list); + mirror->pg_bytes_written = 0; + mirror->pg_count = 0; + mirror->pg_bsize = bsize; + mirror->pg_base = 0; + mirror->pg_recoalesce = 0; +} + /** * nfs_pageio_init - initialise a page io descriptor * @desc: pointer to descriptor @@ -687,13 +714,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, size_t bsize, int io_flags) { - INIT_LIST_HEAD(&desc->pg_list); - desc->pg_bytes_written = 0; - desc->pg_count = 0; - desc->pg_bsize = bsize; - desc->pg_base = 0; + struct nfs_pgio_mirror *new; + int i; + desc->pg_moreio = 0; - desc->pg_recoalesce = 0; desc->pg_inode = inode; desc->pg_ops = pg_ops; desc->pg_completion_ops = compl_ops; @@ -703,6 +727,26 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, desc->pg_lseg = NULL; desc->pg_dreq = NULL; desc->pg_layout_private = NULL; + desc->pg_bsize = bsize; + + desc->pg_mirror_count = 1; + desc->pg_mirror_idx = 0; + + if (pg_ops->pg_get_mirror_count) { + /* until we have a request, we don't have an lseg and no + * idea how many mirrors there will be */ + new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX, + sizeof(struct nfs_pgio_mirror), GFP_KERNEL); + desc->pg_mirrors_dynamic = new; + desc->pg_mirrors = new; + + for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++) + nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize); + } else { + desc->pg_mirrors_dynamic = NULL; + desc->pg_mirrors = desc->pg_mirrors_static; + nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize); + } } EXPORT_SYMBOL_GPL(nfs_pageio_init); @@ -738,14 +782,16 @@ static void nfs_pgio_result(struct rpc_task *task, void *calldata) int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_page *req; struct page **pages, *last_page; - struct list_head *head = &desc->pg_list; + struct list_head *head = &mirror->pg_list; struct nfs_commit_info cinfo; unsigned int pagecount, pageused; - pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count); + pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count); if (!nfs_pgarray_set(&hdr->page_array, pagecount)) return nfs_pgio_error(desc, hdr); @@ -773,7 +819,7 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, desc->pg_ioflags &= ~FLUSH_COND_STABLE; /* Set up the argument struct */ - 
nfs_pgio_rpcsetup(hdr, desc->pg_count, 0, desc->pg_ioflags, &cinfo); + nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo); desc->pg_rpc_callops = &nfs_pgio_common_ops; return 0; } @@ -781,12 +827,17 @@ EXPORT_SYMBOL_GPL(nfs_generic_pgio); static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) { + struct nfs_pgio_mirror *mirror; struct nfs_pgio_header *hdr; int ret; + mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); if (!hdr) { - desc->pg_completion_ops->error_cleanup(&desc->pg_list); + /* TODO: make sure this is right with mirroring - or + * should it back out all mirrors? */ + desc->pg_completion_ops->error_cleanup(&mirror->pg_list); return -ENOMEM; } nfs_pgheader_init(desc, hdr, nfs_pgio_header_free); @@ -801,6 +852,49 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) return ret; } +/* + * nfs_pageio_setup_mirroring - determine if mirroring is to be used + * by calling the pg_get_mirror_count op + */ +static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + int mirror_count = 1; + + if (!pgio->pg_ops->pg_get_mirror_count) + return 0; + + mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req); + + if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) + return -EINVAL; + + if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic)) + return -EINVAL; + + pgio->pg_mirror_count = mirror_count; + + return 0; +} + +/* + * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1) + */ +void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio) +{ + pgio->pg_mirror_count = 1; + pgio->pg_mirror_idx = 0; +} + +static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio) +{ + pgio->pg_mirror_count = 1; + pgio->pg_mirror_idx = 0; + pgio->pg_mirrors = pgio->pg_mirrors_static; + kfree(pgio->pg_mirrors_dynamic); + pgio->pg_mirrors_dynamic = NULL; +} + static bool nfs_match_open_context(const struct nfs_open_context *ctx1, const struct nfs_open_context *ctx2) { @@ -867,19 +961,22 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev, static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_page *prev = NULL; - if (desc->pg_count != 0) { - prev = nfs_list_entry(desc->pg_list.prev); + + if (mirror->pg_count != 0) { + prev = nfs_list_entry(mirror->pg_list.prev); } else { if (desc->pg_ops->pg_init) desc->pg_ops->pg_init(desc, req); - desc->pg_base = req->wb_pgbase; + mirror->pg_base = req->wb_pgbase; } if (!nfs_can_coalesce_requests(prev, req, desc)) return 0; nfs_list_remove_request(req); - nfs_list_add_request(req, &desc->pg_list); - desc->pg_count += req->wb_bytes; + nfs_list_add_request(req, &mirror->pg_list); + mirror->pg_count += req->wb_bytes; return 1; } @@ -888,16 +985,19 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, */ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) { - if (!list_empty(&desc->pg_list)) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + + + if (!list_empty(&mirror->pg_list)) { int error = desc->pg_ops->pg_doio(desc); if (error < 0) desc->pg_error = error; else - desc->pg_bytes_written += desc->pg_count; + mirror->pg_bytes_written += mirror->pg_count; } - if (list_empty(&desc->pg_list)) { - desc->pg_count = 0; - desc->pg_base = 0; + if (list_empty(&mirror->pg_list)) { + mirror->pg_count = 
0; + mirror->pg_base = 0; } } @@ -915,10 +1015,14 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_page *subreq; unsigned int bytes_left = 0; unsigned int offset, pgbase; + WARN_ON_ONCE(desc->pg_mirror_idx >= desc->pg_mirror_count); + nfs_page_group_lock(req, false); subreq = req; @@ -938,7 +1042,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, nfs_pageio_doio(desc); if (desc->pg_error < 0) return 0; - if (desc->pg_recoalesce) + if (mirror->pg_recoalesce) return 0; /* retry add_request for this subreq */ nfs_page_group_lock(req, false); @@ -976,14 +1080,16 @@ err_ptr: static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; LIST_HEAD(head); do { - list_splice_init(&desc->pg_list, &head); - desc->pg_bytes_written -= desc->pg_count; - desc->pg_count = 0; - desc->pg_base = 0; - desc->pg_recoalesce = 0; + list_splice_init(&mirror->pg_list, &head); + mirror->pg_bytes_written -= mirror->pg_count; + mirror->pg_count = 0; + mirror->pg_base = 0; + mirror->pg_recoalesce = 0; + desc->pg_moreio = 0; while (!list_empty(&head)) { @@ -997,11 +1103,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) return 0; break; } - } while (desc->pg_recoalesce); + } while (mirror->pg_recoalesce); return 1; } -int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, +static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc, struct nfs_page *req) { int ret; @@ -1014,9 +1120,78 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, break; ret = nfs_do_recoalesce(desc); } while (ret); + return ret; } +int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, + struct nfs_page *req) +{ + u32 midx; + unsigned int pgbase, offset, bytes; + struct nfs_page *dupreq, *lastreq; + + pgbase = req->wb_pgbase; + offset = req->wb_offset; + bytes = req->wb_bytes; + + nfs_pageio_setup_mirroring(desc, req); + + for (midx = 0; midx < desc->pg_mirror_count; midx++) { + if (midx) { + nfs_page_group_lock(req, false); + + /* find the last request */ + for (lastreq = req->wb_head; + lastreq->wb_this_page != req->wb_head; + lastreq = lastreq->wb_this_page) + ; + + dupreq = nfs_create_request(req->wb_context, + req->wb_page, lastreq, pgbase, bytes); + + if (IS_ERR(dupreq)) { + nfs_page_group_unlock(req); + return 0; + } + + nfs_lock_request(dupreq); + nfs_page_group_unlock(req); + dupreq->wb_offset = offset; + dupreq->wb_index = req->wb_index; + } else + dupreq = req; + + desc->pg_mirror_idx = midx; + if (!nfs_pageio_add_request_mirror(desc, dupreq)) + return 0; + } + + return 1; +} + +/* + * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an + * nfs_pageio_descriptor + * @desc: pointer to io descriptor + */ +static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, + u32 mirror_idx) +{ + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx]; + u32 restore_idx = desc->pg_mirror_idx; + + desc->pg_mirror_idx = mirror_idx; + for (;;) { + nfs_pageio_doio(desc); + if (!mirror->pg_recoalesce) + break; + if (!nfs_do_recoalesce(desc)) + break; + } + desc->pg_mirror_idx = restore_idx; +} + /* * nfs_pageio_resend - Transfer requests to new descriptor and resend * @hdr - the pgio header to move request from @@ -1055,16 +1230,14 @@ 
EXPORT_SYMBOL_GPL(nfs_pageio_resend); */ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) { - for (;;) { - nfs_pageio_doio(desc); - if (!desc->pg_recoalesce) - break; - if (!nfs_do_recoalesce(desc)) - break; - } + u32 midx; + + for (midx = 0; midx < desc->pg_mirror_count; midx++) + nfs_pageio_complete_mirror(desc, midx); if (desc->pg_ops->pg_cleanup) desc->pg_ops->pg_cleanup(desc); + nfs_pageio_cleanup_mirroring(desc); } /** @@ -1080,10 +1253,17 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) */ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) { - if (!list_empty(&desc->pg_list)) { - struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev); - if (index != prev->wb_index + 1) - nfs_pageio_complete(desc); + struct nfs_pgio_mirror *mirror; + struct nfs_page *prev; + u32 midx; + + for (midx = 0; midx < desc->pg_mirror_count; midx++) { + mirror = &desc->pg_mirrors[midx]; + if (!list_empty(&mirror->pg_list)) { + prev = nfs_list_entry(mirror->pg_list.prev); + if (index != prev->wb_index + 1) + nfs_pageio_complete_mirror(desc, midx); + } } } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 2da2e771fefe..5f7c422ebb5d 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1646,8 +1646,8 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup); * of bytes (maximum @req->wb_bytes) that can be coalesced. */ size_t -pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, - struct nfs_page *req) +pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, + struct nfs_page *prev, struct nfs_page *req) { unsigned int size; u64 seg_end, req_start, seg_left; @@ -1729,10 +1729,12 @@ static void pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { - list_splice_tail_init(&hdr->pages, &desc->pg_list); + list_splice_tail_init(&hdr->pages, &mirror->pg_list); nfs_pageio_reset_write_mds(desc); - desc->pg_recoalesce = 1; + mirror->pg_recoalesce = 1; } nfs_pgio_data_destroy(hdr); } @@ -1781,12 +1783,14 @@ EXPORT_SYMBOL_GPL(pnfs_writehdr_free); int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_header *hdr; int ret; hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); if (!hdr) { - desc->pg_completion_ops->error_cleanup(&desc->pg_list); + desc->pg_completion_ops->error_cleanup(&mirror->pg_list); return -ENOMEM; } nfs_pgheader_init(desc, hdr, pnfs_writehdr_free); @@ -1795,6 +1799,7 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) ret = nfs_generic_pgio(desc, hdr); if (!ret) pnfs_do_write(desc, hdr, desc->pg_ioflags); + return ret; } EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); @@ -1839,10 +1844,13 @@ static void pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + + if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { - list_splice_tail_init(&hdr->pages, &desc->pg_list); + list_splice_tail_init(&hdr->pages, &mirror->pg_list); nfs_pageio_reset_read_mds(desc); - desc->pg_recoalesce = 1; + mirror->pg_recoalesce = 1; } nfs_pgio_data_destroy(hdr); } @@ -1893,12 +1901,14 @@ EXPORT_SYMBOL_GPL(pnfs_readhdr_free); int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) { + struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct 
nfs_pgio_header *hdr; int ret; hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); if (!hdr) { - desc->pg_completion_ops->error_cleanup(&desc->pg_list); + desc->pg_completion_ops->error_cleanup(&mirror->pg_list); return -ENOMEM; } nfs_pgheader_init(desc, hdr, pnfs_readhdr_free); diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 092ab499f2b6..568ecf0a880f 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -70,8 +70,15 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_read); void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio) { + struct nfs_pgio_mirror *mirror; + pgio->pg_ops = &nfs_pgio_rw_ops; - pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize; + + /* read path should never have more than one mirror */ + WARN_ON_ONCE(pgio->pg_mirror_count != 1); + + mirror = &pgio->pg_mirrors[0]; + mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize; } EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); @@ -81,6 +88,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, struct nfs_page *new; unsigned int len; struct nfs_pageio_descriptor pgio; + struct nfs_pgio_mirror *pgm; len = nfs_page_length(page); if (len == 0) @@ -97,7 +105,13 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, &nfs_async_read_completion_ops); nfs_pageio_add_request(&pgio, new); nfs_pageio_complete(&pgio); - NFS_I(inode)->read_io += pgio.pg_bytes_written; + + /* It doesn't make sense to do mirrored reads! */ + WARN_ON_ONCE(pgio.pg_mirror_count != 1); + + pgm = &pgio.pg_mirrors[0]; + NFS_I(inode)->read_io += pgm->pg_bytes_written; + return 0; } @@ -352,6 +366,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) { struct nfs_pageio_descriptor pgio; + struct nfs_pgio_mirror *pgm; struct nfs_readdesc desc = { .pgio = &pgio, }; @@ -387,10 +402,15 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, &nfs_async_read_completion_ops); ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); - nfs_pageio_complete(&pgio); - NFS_I(inode)->read_io += pgio.pg_bytes_written; - npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + + /* It doesn't make sense to do mirrored reads! 
*/ + WARN_ON_ONCE(pgio.pg_mirror_count != 1); + + pgm = &pgio.pg_mirrors[0]; + NFS_I(inode)->read_io += pgm->pg_bytes_written; + npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; nfs_add_stats(inode, NFSIOS_READPAGES, npages); read_complete: put_nfs_open_context(desc.ctx); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 2bee165fddcf..ceacfeeb28c2 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -906,7 +906,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) if (nfs_write_need_commit(hdr)) { memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); nfs_mark_request_commit(req, hdr->lseg, &cinfo, - 0); + hdr->pgio_mirror_idx); goto next; } remove_req: @@ -1304,8 +1304,14 @@ EXPORT_SYMBOL_GPL(nfs_pageio_init_write); void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) { + struct nfs_pgio_mirror *mirror; + pgio->pg_ops = &nfs_pgio_rw_ops; - pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; + + nfs_pageio_stop_mirroring(pgio); + + mirror = &pgio->pg_mirrors[0]; + mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; } EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 479c566c4ddc..3eb072dbce83 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -58,6 +58,8 @@ struct nfs_pageio_ops { size_t (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *); int (*pg_doio)(struct nfs_pageio_descriptor *); + unsigned int (*pg_get_mirror_count)(struct nfs_pageio_descriptor *, + struct nfs_page *); void (*pg_cleanup)(struct nfs_pageio_descriptor *); }; @@ -74,15 +76,17 @@ struct nfs_rw_ops { struct rpc_task_setup *, int); }; -struct nfs_pageio_descriptor { +struct nfs_pgio_mirror { struct list_head pg_list; unsigned long pg_bytes_written; size_t pg_count; size_t pg_bsize; unsigned int pg_base; - unsigned char pg_moreio : 1, - pg_recoalesce : 1; + unsigned char pg_recoalesce : 1; +}; +struct nfs_pageio_descriptor { + unsigned char pg_moreio : 1; struct inode *pg_inode; const struct nfs_pageio_ops *pg_ops; const struct nfs_rw_ops *pg_rw_ops; @@ -93,8 +97,18 @@ struct nfs_pageio_descriptor { struct pnfs_layout_segment *pg_lseg; struct nfs_direct_req *pg_dreq; void *pg_layout_private; + unsigned int pg_bsize; /* default bsize for mirrors */ + + u32 pg_mirror_count; + struct nfs_pgio_mirror *pg_mirrors; + struct nfs_pgio_mirror pg_mirrors_static[1]; + struct nfs_pgio_mirror *pg_mirrors_dynamic; + u32 pg_mirror_idx; /* current mirror */ }; +/* arbitrarily selected limit to number of mirrors */ +#define NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX 16 + #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx, diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 5bc99f04a550..6400a1e01aa4 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1329,6 +1329,7 @@ struct nfs_pgio_header { struct nfs_page_array page_array; struct nfs_client *ds_clp; /* pNFS data server */ int ds_commit_idx; /* ds index if ds_clp is set */ + int pgio_mirror_idx;/* mirror index in pgio layer */ }; struct nfs_mds_commit_info { -- cgit v1.2.3 From 0a00b77b331a0e4aac461d4e920677661256918a Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 19 Sep 2014 12:48:33 -0400 Subject: nfs: mirroring support for direct io The current mirroring code only notices short writes to the first mirror. 
This patch keeps per-mirror byte counts and only considers a byte to be written once all mirrors report so. Signed-off-by: Weston Andros Adamson --- fs/nfs/direct.c | 71 +++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 0178d4fe8ab7..651387bbfd9f 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -66,6 +66,10 @@ static struct kmem_cache *nfs_direct_cachep; /* * This represents a set of asynchronous requests that we're waiting on */ +struct nfs_direct_mirror { + ssize_t count; +}; + struct nfs_direct_req { struct kref kref; /* release manager */ @@ -78,6 +82,10 @@ struct nfs_direct_req { /* completion state */ atomic_t io_count; /* i/os we're waiting for */ spinlock_t lock; /* protect completion state */ + + struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX]; + int mirror_count; + ssize_t count, /* bytes actually processed */ bytes_left, /* bytes left to be sent */ error; /* any reported error */ @@ -108,6 +116,29 @@ static inline int put_dreq(struct nfs_direct_req *dreq) return atomic_dec_and_test(&dreq->io_count); } +static void +nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr) +{ + int i; + ssize_t count; + + WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count); + + dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes; + + if (hdr->pgio_mirror_idx == 0) + dreq->count += hdr->good_bytes; + + /* update the dreq->count by finding the minimum agreed count from all + * mirrors */ + count = dreq->mirrors[0].count; + + for (i = 1; i < dreq->mirror_count; i++) + count = min(count, dreq->mirrors[i].count); + + dreq->count = count; +} + /* * nfs_direct_select_verf - select the right verifier * @dreq - direct request possibly spanning multiple servers @@ -241,6 +272,18 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, cinfo->completion_ops = &nfs_direct_commit_completion_ops; } +static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq, + struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + int mirror_count = 1; + + if (pgio->pg_ops->pg_get_mirror_count) + mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req); + + dreq->mirror_count = mirror_count; +} + static inline struct nfs_direct_req *nfs_direct_req_alloc(void) { struct nfs_direct_req *dreq; @@ -255,6 +298,7 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void) INIT_LIST_HEAD(&dreq->mds_cinfo.list); dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */ INIT_WORK(&dreq->work, nfs_direct_write_schedule_work); + dreq->mirror_count = 1; spin_lock_init(&dreq->lock); return dreq; @@ -360,14 +404,9 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) spin_lock(&dreq->lock); if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0)) dreq->error = hdr->error; - else { - /* - * FIXME: right now this only accounts for bytes written - * to the first mirror - */ - if (hdr->pgio_mirror_idx == 0) - dreq->count += hdr->good_bytes; - } + else + nfs_direct_good_bytes(dreq, hdr); + spin_unlock(&dreq->lock); while (!list_empty(&hdr->pages)) { @@ -598,17 +637,23 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) LIST_HEAD(reqs); struct nfs_commit_info cinfo; LIST_HEAD(failed); + int i; nfs_init_cinfo_from_dreq(&cinfo, dreq); nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); dreq->count = 0; + for (i = 0; i < dreq->mirror_count; i++) + 
dreq->mirrors[i].count = 0; get_dreq(dreq); nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false, &nfs_direct_write_completion_ops); desc.pg_dreq = dreq; + req = nfs_list_entry(reqs.next); + nfs_direct_setup_mirroring(dreq, &desc, req); + list_for_each_entry_safe(req, tmp, &reqs, wb_list) { if (!nfs_pageio_add_request(&desc, req)) { nfs_list_remove_request(req); @@ -730,12 +775,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) dreq->error = hdr->error; } if (dreq->error == 0) { - /* - * FIXME: right now this only accounts for bytes written - * to the first mirror - */ - if (hdr->pgio_mirror_idx == 0) - dreq->count += hdr->good_bytes; + nfs_direct_good_bytes(dreq, hdr); if (nfs_write_need_commit(hdr)) { if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) request_commit = true; @@ -841,6 +881,9 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, result = PTR_ERR(req); break; } + + nfs_direct_setup_mirroring(dreq, &desc, req); + nfs_lock_request(req); req->wb_index = pos >> PAGE_SHIFT; req->wb_offset = pos & ~PAGE_MASK; -- cgit v1.2.3 From 80c76fe314c2859d5aac94b5d66d2b9895aa73d4 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Wed, 1 Oct 2014 12:58:25 -0400 Subject: pnfs: fail comparison when bucket verifier not set This skips the WARN_ON_ONCE, but doesnt change behavior (the memcmp would fail). Signed-off-by: Weston Andros Adamson Signed-off-by: Tom Haynes --- fs/nfs/direct.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 651387bbfd9f..eb814789f700 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -222,7 +222,11 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, verfp = nfs_direct_select_verf(dreq, data->ds_clp, data->ds_commit_index); - WARN_ON_ONCE(verfp->committed < 0); + + /* verifier not set so always fail */ + if (verfp->committed < 0) + return 1; + return memcmp(verfp, &data->verf, sizeof(struct nfs_writeverf)); } -- cgit v1.2.3 From 566f8737630390b743d79e26e4ac855fe2758129 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Fri, 10 Oct 2014 23:25:46 +0800 Subject: nfs41: add a debug warning if we destroy an unempty layout So that we can detect the case if some layout segments are still pinned which is surely a bug that we need to fix. Signed-off-by: Peng Tao --- fs/nfs/pnfs.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 5f7c422ebb5d..e123cfce54ee 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -242,6 +242,8 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) struct inode *inode = lo->plh_inode; if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) { + if (!list_empty(&lo->plh_segs)) + WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); pnfs_detach_layout_hdr(lo); spin_unlock(&inode->i_lock); pnfs_free_layout_hdr(lo); -- cgit v1.2.3 From 47af81f29556a45493e5c87289c3c16ce911096c Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 10 Nov 2014 08:35:34 +0800 Subject: nfs: only reset desc->pg_mirror_idx when mirroring is supported so that we don't reset desc->pg_mirror_idx for read unnecessarily. Remove WARN_ON_ONCE from __nfs_pageio_add_request to allow LD to set pg_mirror_idx for read where pg_mirror_count is always 1. 
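For illustration only (not an extra hunk of this patch), the resulting pattern is that every assignment of pg_mirror_idx in the generic code is guarded by the new helper, so a read descriptor with pg_mirror_count == 1 keeps whatever index the layout driver stored there:

	/* sketch: only touch pg_mirror_idx when real mirroring is active;
	 * desc and midx are the descriptor and mirror index already in
	 * scope in the callers patched below */
	if (nfs_pgio_has_mirroring(desc))
		desc->pg_mirror_idx = midx;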
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/internal.h | 7 +++++++ fs/nfs/pagelist.c | 8 ++++---- 2 files changed, 11 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index ef1c703e487b..5be06bcafa2f 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -6,6 +6,7 @@ #include #include #include +#include #define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS) @@ -261,6 +262,12 @@ static inline void nfs_iocounter_init(struct nfs_io_counter *c) atomic_set(&c->io_count, 0); } +static inline bool nfs_pgio_has_mirroring(struct nfs_pageio_descriptor *desc) +{ + WARN_ON_ONCE(desc->pg_mirror_count < 1); + return desc->pg_mirror_count > 1; +} + /* nfs2xdr.c */ extern struct rpc_procinfo nfs_procedures[]; extern int nfs2_decode_dirent(struct xdr_stream *, diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index eec12b75c232..f9d8c4691149 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1021,8 +1021,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, unsigned int bytes_left = 0; unsigned int offset, pgbase; - WARN_ON_ONCE(desc->pg_mirror_idx >= desc->pg_mirror_count); - nfs_page_group_lock(req, false); subreq = req; @@ -1162,7 +1160,8 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, } else dupreq = req; - desc->pg_mirror_idx = midx; + if (nfs_pgio_has_mirroring(desc)) + desc->pg_mirror_idx = midx; if (!nfs_pageio_add_request_mirror(desc, dupreq)) return 0; } @@ -1181,7 +1180,8 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx]; u32 restore_idx = desc->pg_mirror_idx; - desc->pg_mirror_idx = mirror_idx; + if (nfs_pgio_has_mirroring(desc)) + desc->pg_mirror_idx = mirror_idx; for (;;) { nfs_pageio_doio(desc); if (!mirror->pg_recoalesce) -- cgit v1.2.3 From 48d635f14a544c2b3ca870d2c7349b41160496d2 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 10 Nov 2014 08:35:35 +0800 Subject: nfs: add nfs_pgio_current_mirror helper Let it return current nfs_pgio_mirror in use depending on pg_mirror_count. For read, we always use pg_mirrors[0], so this effectively gives us freedom to use pg_mirror_idx to track the actual mirror to read from through out the IO stack. 
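A minimal usage sketch (assumed caller, not part of this patch): code stops indexing pg_mirrors[] with pg_mirror_idx directly and asks the helper, which falls back to mirror 0 whenever mirroring is not in use:

	/* sketch: pick the mirror queue to work on via the helper */
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	/* e.g. queue a request on the active mirror */
	nfs_list_add_request(req, &mirror->pg_list);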
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/internal.h | 2 ++ fs/nfs/objlayout/objio_osd.c | 2 +- fs/nfs/pagelist.c | 25 +++++++++++++++++-------- fs/nfs/pnfs.c | 9 ++++----- 4 files changed, 24 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 5be06bcafa2f..ffe4b7ac9e6b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -255,6 +255,8 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr, struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops, const struct rpc_call_ops *call_ops, int how, int flags); void nfs_free_request(struct nfs_page *req); +struct nfs_pgio_mirror * +nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc); static inline void nfs_iocounter_init(struct nfs_io_counter *c) { diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9a5f2ee6001f..24e1d7403c0b 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -537,7 +537,7 @@ int objio_write_pagelist(struct nfs_pgio_header *hdr, int how) static size_t objio_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req) { - struct nfs_pgio_mirror *mirror = &pgio->pg_mirrors[pgio->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(pgio); unsigned int size; size = pnfs_generic_pg_test(pgio, prev, req); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index f9d8c4691149..960c99f75d3f 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -42,11 +42,20 @@ static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) return p->pagevec != NULL; } +struct nfs_pgio_mirror * +nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc) +{ + return nfs_pgio_has_mirroring(desc) ? 
+ &desc->pg_mirrors[desc->pg_mirror_idx] : + &desc->pg_mirrors[0]; +} +EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror); + void nfs_pgheader_init(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr, void (*release)(struct nfs_pgio_header *hdr)) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); hdr->req = nfs_list_entry(mirror->pg_list.next); @@ -485,7 +494,7 @@ nfs_wait_on_request(struct nfs_page *req) size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); if (mirror->pg_count > mirror->pg_bsize) { @@ -782,7 +791,7 @@ static void nfs_pgio_result(struct rpc_task *task, void *calldata) int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); struct nfs_page *req; struct page **pages, @@ -831,7 +840,7 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) struct nfs_pgio_header *hdr; int ret; - mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + mirror = nfs_pgio_current_mirror(desc); hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); if (!hdr) { @@ -961,7 +970,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev, static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); struct nfs_page *prev = NULL; @@ -985,7 +994,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, */ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); if (!list_empty(&mirror->pg_list)) { @@ -1015,7 +1024,7 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); struct nfs_page *subreq; unsigned int bytes_left = 0; @@ -1078,7 +1087,7 @@ err_ptr: static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); LIST_HEAD(head); do { diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index e123cfce54ee..b822b1749643 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1731,7 +1731,7 @@ static void pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { list_splice_tail_init(&hdr->pages, &mirror->pg_list); @@ -1785,7 +1785,7 @@ EXPORT_SYMBOL_GPL(pnfs_writehdr_free); int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); struct nfs_pgio_header 
*hdr; int ret; @@ -1846,8 +1846,7 @@ static void pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; - + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { list_splice_tail_init(&hdr->pages, &mirror->pg_list); @@ -1903,7 +1902,7 @@ EXPORT_SYMBOL_GPL(pnfs_readhdr_free); int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) { - struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[desc->pg_mirror_idx]; + struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); struct nfs_pgio_header *hdr; int ret; -- cgit v1.2.3 From ceb11e13df3e78b450730c615037133c57b90c3b Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 10 Nov 2014 08:35:38 +0800 Subject: pnfs: allow LD to ask to resend read through pnfs If current IO cannot be completed due to some transient errors, LD may want to ask generic layer to resend the request through pnfs again. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/pnfs.c | 15 ++++++++++++++- fs/nfs/pnfs.h | 2 ++ 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index b822b1749643..685af4fb39ca 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1880,15 +1880,28 @@ pnfs_try_to_read_data(struct nfs_pgio_header *hdr, return trypnfs; } +/* Resend all requests through pnfs. */ +int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr) +{ + struct nfs_pageio_descriptor pgio; + + nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops); + return nfs_pageio_resend(&pgio, hdr); +} +EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs); + static void pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr) { const struct rpc_call_ops *call_ops = desc->pg_rpc_callops; struct pnfs_layout_segment *lseg = desc->pg_lseg; enum pnfs_try_status trypnfs; + int err = 0; trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg); - if (trypnfs == PNFS_NOT_ATTEMPTED) + if (trypnfs == PNFS_TRY_AGAIN) + err = pnfs_read_resend_pnfs(hdr); + if (trypnfs == PNFS_NOT_ATTEMPTED || err) pnfs_read_through_mds(desc, hdr); } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index a0ab81cc9cf3..84c25cd476f8 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -72,6 +72,7 @@ struct pnfs_layout_segment { enum pnfs_try_status { PNFS_ATTEMPTED = 0, PNFS_NOT_ATTEMPTED = 1, + PNFS_TRY_AGAIN = 2, }; #ifdef CONFIG_NFS_V4_1 @@ -268,6 +269,7 @@ int _pnfs_return_layout(struct inode *); int pnfs_commit_and_return_layout(struct inode *); void pnfs_ld_write_done(struct nfs_pgio_header *); void pnfs_ld_read_done(struct nfs_pgio_header *); +int pnfs_read_resend_pnfs(struct nfs_pgio_header *); struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, loff_t pos, -- cgit v1.2.3 From 15eb67c15342d212b0c8a540b6d6bd2dfad52a63 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 17 Nov 2014 09:30:36 +0800 Subject: nfs41: add range to layoutreturn args So that callers can specify which range to return. 
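For illustration, a caller that wants the old whole-file behaviour now fills the range explicitly, roughly as the pnfs.c hunk below does:

	/* sketch: request return of the entire layout, any iomode */
	lrp->args.range.iomode = IOMODE_ANY;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;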
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4xdr.c | 6 +++--- fs/nfs/pnfs.c | 4 +++- include/linux/nfs_xdr.h | 2 +- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 3c3ff633dd17..56d4c91a48f3 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -2012,11 +2012,11 @@ encode_layoutreturn(struct xdr_stream *xdr, p = reserve_space(xdr, 16); *p++ = cpu_to_be32(0); /* reclaim. always 0 for now */ *p++ = cpu_to_be32(args->layout_type); - *p++ = cpu_to_be32(args->iomode); + *p++ = cpu_to_be32(args->range.iomode); *p = cpu_to_be32(RETURN_FILE); p = reserve_space(xdr, 16); - p = xdr_encode_hyper(p, 0); - p = xdr_encode_hyper(p, NFS4_MAX_UINT64); + p = xdr_encode_hyper(p, args->range.offset); + p = xdr_encode_hyper(p, args->range.length); spin_lock(&args->inode->i_lock); encode_nfs4_stateid(xdr, &args->stateid); spin_unlock(&args->inode->i_lock); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 685af4fb39ca..9549b89e494b 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -916,7 +916,9 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, lrp->args.stateid = stateid; lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id; lrp->args.inode = ino; - lrp->args.iomode = iomode; + lrp->args.range.iomode = iomode; + lrp->args.range.offset = 0; + lrp->args.range.length = NFS4_MAX_UINT64; lrp->args.layout = lo; lrp->clp = NFS_SERVER(ino)->nfs_client; lrp->cred = lo->plh_lc_cred; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 6400a1e01aa4..363792356d25 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -293,7 +293,7 @@ struct nfs4_layoutreturn_args { struct nfs4_sequence_args seq_args; struct pnfs_layout_hdr *layout; struct inode *inode; - enum pnfs_iomode iomode; + struct pnfs_layout_range range; nfs4_stateid stateid; __u32 layout_type; }; -- cgit v1.2.3 From 6c16605d6ef0dfb2e154119700d58b85c6b4dc71 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 17 Nov 2014 09:30:40 +0800 Subject: nfs41: allow async version layoutreturn Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4proc.c | 11 +++++++++-- fs/nfs/pnfs.c | 11 ++++++----- fs/nfs/pnfs.h | 2 +- 3 files changed, 16 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e19b5dbe535a..2397c0f080d3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7810,7 +7810,7 @@ static const struct rpc_call_ops nfs4_layoutreturn_call_ops = { .rpc_release = nfs4_layoutreturn_release, }; -int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp) +int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) { struct rpc_task *task; struct rpc_message msg = { @@ -7824,16 +7824,23 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp) .rpc_message = &msg, .callback_ops = &nfs4_layoutreturn_call_ops, .callback_data = lrp, + .flags = RPC_TASK_ASYNC, }; - int status; + int status = 0; dprintk("--> %s\n", __func__); nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); + if (sync == false) + goto out; + status = nfs4_wait_for_completion_rpc_task(task); + if (status != 0) + goto out; status = task->tk_status; trace_nfs4_layoutreturn(lrp->args.inode, status); +out: dprintk("<-- %s status=%d\n", __func__, status); rpc_put_task(task); return status; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 9549b89e494b..0a0e209e8262 100644 --- a/fs/nfs/pnfs.c +++ 
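As an illustration only (no such hunk is part of this change), a layout driver holding the header under the inode lock can request that behaviour with nothing more than:

	/* sketch: ask for LAYOUTRETURN at last close/delegation return,
	 * while LAYOUTGET and pnfs I/O remain usable in the meantime */
	set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);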
b/fs/nfs/pnfs.c @@ -52,7 +52,7 @@ static LIST_HEAD(pnfs_modules_tbl); static int pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, - enum pnfs_iomode iomode); + enum pnfs_iomode iomode, bool sync); /* Return the registered pnfs layout driver module matching given id */ static struct pnfs_layoutdriver_type * @@ -392,7 +392,8 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) spin_unlock(&inode->i_lock); pnfs_free_lseg(lseg); if (need_return) - pnfs_send_layoutreturn(lo, stateid, iomode); + pnfs_send_layoutreturn(lo, stateid, iomode, + true); else pnfs_put_layout_hdr(lo); } @@ -897,7 +898,7 @@ static void pnfs_clear_layoutcommit(struct inode *inode, static int pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, - enum pnfs_iomode iomode) + enum pnfs_iomode iomode, bool sync) { struct inode *ino = lo->plh_inode; struct nfs4_layoutreturn *lrp; @@ -923,7 +924,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, lrp->clp = NFS_SERVER(ino)->nfs_client; lrp->cred = lo->plh_lc_cred; - status = nfs4_proc_layoutreturn(lrp); + status = nfs4_proc_layoutreturn(lrp, sync); out: if (status) { spin_lock(&ino->i_lock); @@ -989,7 +990,7 @@ _pnfs_return_layout(struct inode *ino) spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); - status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY); + status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); out: dprintk("<-- %s status: %d\n", __func__, status); return status; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 84c25cd476f8..b79f494d59ac 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -219,7 +219,7 @@ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *dev, struct rpc_cred *cred); extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags); -extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp); +extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync); /* pnfs.c */ void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo); -- cgit v1.2.3 From 193e3aa2ccfb5a53acf7a690b80a1e415b74dbd7 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 17 Nov 2014 09:30:41 +0800 Subject: nfs41: introduce NFS_LAYOUT_RETURN_BEFORE_CLOSE When it is set, generic pnfs would try to send layoutreturn right before last close/delegation_return regard less NFS_LAYOUT_ROC is set or not. LD can then make sure layoutreturn is always sent rather than being omitted. The difference against NFS_LAYOUT_RETURN is that NFS_LAYOUT_RETURN_BEFORE_CLOSE does not block usage of the layout so LD can set it and expect generic layer to try pnfs path at the same time. 
Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/nfs4proc.c | 2 ++ fs/nfs/pnfs.c | 40 +++++++++++++++++++++++++++++++++------- fs/nfs/pnfs.h | 1 + 3 files changed, 36 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 2397c0f080d3..7e1a97a54f99 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7797,6 +7797,8 @@ static void nfs4_layoutreturn_release(void *calldata) if (lrp->res.lrs_present) pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); + clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); + rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); pnfs_put_layout_hdr(lrp->args.layout); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0a0e209e8262..d3c2ca71a76d 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -909,6 +909,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, status = -ENOMEM; spin_lock(&ino->i_lock); lo->plh_block_lgets--; + rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq); spin_unlock(&ino->i_lock); pnfs_put_layout_hdr(lo); goto out; @@ -926,11 +927,6 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, status = nfs4_proc_layoutreturn(lrp, sync); out: - if (status) { - spin_lock(&ino->i_lock); - clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); - spin_unlock(&ino->i_lock); - } dprintk("<-- %s status: %d\n", __func__, status); return status; } @@ -1028,8 +1024,9 @@ bool pnfs_roc(struct inode *ino) { struct pnfs_layout_hdr *lo; struct pnfs_layout_segment *lseg, *tmp; + nfs4_stateid stateid; LIST_HEAD(tmp_list); - bool found = false; + bool found = false, layoutreturn = false; spin_lock(&ino->i_lock); lo = NFS_I(ino)->layout; @@ -1050,7 +1047,20 @@ bool pnfs_roc(struct inode *ino) return true; out_nolayout: + if (lo) { + stateid = lo->plh_stateid; + layoutreturn = + test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &lo->plh_flags); + if (layoutreturn) { + lo->plh_block_lgets++; + pnfs_get_layout_hdr(lo); + } + } spin_unlock(&ino->i_lock); + if (layoutreturn) + pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, 0, + NFS4_MAX_UINT64, true); return false; } @@ -1085,8 +1095,9 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) struct nfs_inode *nfsi = NFS_I(ino); struct pnfs_layout_hdr *lo; struct pnfs_layout_segment *lseg; + nfs4_stateid stateid; u32 current_seqid; - bool found = false; + bool found = false, layoutreturn = false; spin_lock(&ino->i_lock); list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) @@ -1103,7 +1114,22 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) */ *barrier = current_seqid + atomic_read(&lo->plh_outstanding); out: + if (!found) { + stateid = lo->plh_stateid; + layoutreturn = + test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &lo->plh_flags); + if (layoutreturn) { + lo->plh_block_lgets++; + pnfs_get_layout_hdr(lo); + } + } spin_unlock(&ino->i_lock); + if (layoutreturn) { + rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); + pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, 0, + NFS4_MAX_UINT64, false); + } return found; } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index b79f494d59ac..080bf90498d4 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -96,6 +96,7 @@ enum { NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ NFS_LAYOUT_ROC, /* some lseg had roc bit set */ NFS_LAYOUT_RETURN, /* Return this layout ASAP */ + 
NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */ NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ }; -- cgit v1.2.3 From 27b6f53987d61822a858b4680c3727bfb19e620a Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 20 Oct 2014 14:44:38 +0800 Subject: nfs/flexfiles: send layoutreturn before freeing lseg Otherwise we'll lose error tracking information when encoding layoutreturn. pnfs_put_lseg may be called from rpc callbacks. So we should not call pnfs_send_layoutreturn directly because it can deadlock in the rpc layer. Signed-off-by: Peng Tao Signed-off-by: Tom Haynes --- fs/nfs/pnfs.c | 81 +++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index d3c2ca71a76d..108a619861e5 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -346,8 +346,7 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo, /* Return true if layoutreturn is needed */ static bool pnfs_layout_need_return(struct pnfs_layout_hdr *lo, - struct pnfs_layout_segment *lseg, - nfs4_stateid *stateid, enum pnfs_iomode *iomode) + struct pnfs_layout_segment *lseg) { struct pnfs_layout_segment *s; @@ -355,17 +354,54 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo, return false; list_for_each_entry(s, &lo->plh_segs, pls_list) - if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) + if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) return false; - *stateid = lo->plh_stateid; - *iomode = lo->plh_return_iomode; - /* decreased in pnfs_send_layoutreturn() */ - lo->plh_block_lgets++; - lo->plh_return_iomode = 0; return true; } +static void pnfs_layoutreturn_free_lseg(struct work_struct *work) +{ + struct pnfs_layout_segment *lseg; + struct pnfs_layout_hdr *lo; + struct inode *inode; + + lseg = container_of(work, struct pnfs_layout_segment, pls_work); + WARN_ON(atomic_read(&lseg->pls_refcount)); + lo = lseg->pls_layout; + inode = lo->plh_inode; + + spin_lock(&inode->i_lock); + if (pnfs_layout_need_return(lo, lseg)) { + nfs4_stateid stateid; + enum pnfs_iomode iomode; + + stateid = lo->plh_stateid; + iomode = lo->plh_return_iomode; + /* decreased in pnfs_send_layoutreturn() */ + lo->plh_block_lgets++; + lo->plh_return_iomode = 0; + spin_unlock(&inode->i_lock); + + pnfs_send_layoutreturn(lo, stateid, iomode, true); + spin_lock(&inode->i_lock); + } else + /* match pnfs_get_layout_hdr #2 in pnfs_put_lseg */ + pnfs_put_layout_hdr(lo); + pnfs_layout_remove_lseg(lo, lseg); + spin_unlock(&inode->i_lock); + pnfs_free_lseg(lseg); + /* match pnfs_get_layout_hdr #1 in pnfs_put_lseg */ + pnfs_put_layout_hdr(lo); +} + +static void +pnfs_layoutreturn_free_lseg_async(struct pnfs_layout_segment *lseg) +{ + INIT_WORK(&lseg->pls_work, pnfs_layoutreturn_free_lseg); + queue_work(nfsiod_workqueue, &lseg->pls_work); +} + void pnfs_put_lseg(struct pnfs_layout_segment *lseg) { @@ -381,21 +417,18 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) lo = lseg->pls_layout; inode = lo->plh_inode; if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { - bool need_return; - nfs4_stateid stateid; - enum pnfs_iomode iomode; - pnfs_get_layout_hdr(lo); - pnfs_layout_remove_lseg(lo, lseg); - need_return = pnfs_layout_need_return(lo, lseg, - &stateid, &iomode); - spin_unlock(&inode->i_lock); - pnfs_free_lseg(lseg); - if (need_return) - pnfs_send_layoutreturn(lo, stateid, iomode, - true); - else + if (pnfs_layout_need_return(lo, lseg)) { + 
spin_unlock(&inode->i_lock); + /* hdr reference dropped in nfs4_layoutreturn_release */ + pnfs_get_layout_hdr(lo); + pnfs_layoutreturn_free_lseg_async(lseg); + } else { + pnfs_layout_remove_lseg(lo, lseg); + spin_unlock(&inode->i_lock); + pnfs_free_lseg(lseg); pnfs_put_layout_hdr(lo); + } } } EXPORT_SYMBOL_GPL(pnfs_put_lseg); @@ -1059,8 +1092,7 @@ out_nolayout: } spin_unlock(&ino->i_lock); if (layoutreturn) - pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, 0, - NFS4_MAX_UINT64, true); + pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); return false; } @@ -1127,8 +1159,7 @@ out: spin_unlock(&ino->i_lock); if (layoutreturn) { rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); - pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, 0, - NFS4_MAX_UINT64, false); + pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); } return found; } -- cgit v1.2.3 From c829013dca33110d57c7f625443b716bd7a17671 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 1 Dec 2014 08:22:18 +0800 Subject: nfs41: add NFS_LAYOUT_RETRY_LAYOUTGET to layout header flags Use it to indicate that LD wants to retry layoutget. LD can set it whenever it wants the common pnfs code to return and retry pnfs path through a new layout. The bit gets cleared when client does a new layoutget, when client closes the file (ROC case), or when kernel needs to evict the inode (non-ROC case). Signed-off-by: Peng Tao --- fs/nfs/pnfs.c | 3 +++ fs/nfs/pnfs.h | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 108a619861e5..893f6b5afe6a 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -615,6 +615,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi) pnfs_get_layout_hdr(lo); pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED); pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED); + pnfs_clear_retry_layoutget(lo); spin_unlock(&nfsi->vfs_inode.i_lock); pnfs_free_lseg_list(&tmp_list); pnfs_put_layout_hdr(lo); @@ -1066,6 +1067,7 @@ bool pnfs_roc(struct inode *ino) if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) goto out_nolayout; + pnfs_clear_retry_layoutget(lo); list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list) if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { mark_lseg_invalid(lseg, &tmp_list); @@ -1491,6 +1493,7 @@ lookup_again: arg.length = PAGE_CACHE_ALIGN(arg.length); lseg = send_layoutget(lo, ctx, &arg, gfp_flags); + pnfs_clear_retry_layoutget(lo); atomic_dec(&lo->plh_outstanding); out_put_layout_hdr: if (first) { diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 080bf90498d4..fed6ae067acb 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -99,6 +99,7 @@ enum { NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */ NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ + NFS_LAYOUT_RETRY_LAYOUTGET, /* Retry layoutget */ }; enum layoutdriver_policy_flags { @@ -350,6 +351,23 @@ nfs4_get_deviceid(struct nfs4_deviceid_node *d) return d; } +static inline void pnfs_set_retry_layoutget(struct pnfs_layout_hdr *lo) +{ + if (!test_and_set_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags)) + atomic_inc(&lo->plh_refcount); +} + +static inline void pnfs_clear_retry_layoutget(struct pnfs_layout_hdr *lo) +{ + if (test_and_clear_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags)) + atomic_dec(&lo->plh_refcount); +} + +static inline bool pnfs_should_retry_layoutget(struct pnfs_layout_hdr *lo) +{ + return 
test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags); +} + static inline struct pnfs_layout_segment * pnfs_get_lseg(struct pnfs_layout_segment *lseg) { -- cgit v1.2.3 From 012fa16dca0da6c487dd066829ff0b0954925fe6 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 1 Dec 2014 08:22:21 +0800 Subject: nfs: add a helper to set NFS_ODIRECT_RESCHED_WRITES to direct writes To allow pnfs LD to ask direct writes to be resend. Signed-off-by: Peng Tao --- fs/nfs/direct.c | 6 ++++++ fs/nfs/internal.h | 1 + 2 files changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index eb814789f700..4fad6b727eb4 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -116,6 +116,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq) return atomic_dec_and_test(&dreq->io_count); } +void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq) +{ + dreq->flags = NFS_ODIRECT_RESCHED_WRITES; +} +EXPORT_SYMBOL_GPL(nfs_direct_set_resched_writes); + static void nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr) { diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index ffe4b7ac9e6b..44e84960a26f 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -502,6 +502,7 @@ static inline void nfs_inode_dio_wait(struct inode *inode) inode_dio_wait(inode); } extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq); +extern void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq); /* nfs4proc.c */ extern void __nfs4_read_done_cb(struct nfs_pgio_header *); -- cgit v1.2.3 From aa8a45ee974dfe3ffe290daaf5db457afae56fde Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 1 Dec 2014 08:22:23 +0800 Subject: nfs41: wait for LAYOUTRETURN before retrying LAYOUTGET Also take care to stop waiting if someone clears retry bit. Signed-off-by: Peng Tao --- fs/nfs/nfs4proc.c | 4 +++- fs/nfs/pnfs.c | 39 ++++++++++++++++++++++++++++++++++++++- fs/nfs/pnfs.h | 5 ++++- 3 files changed, 45 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 7e1a97a54f99..44c600aac907 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7796,7 +7796,9 @@ static void nfs4_layoutreturn_release(void *calldata) spin_lock(&lo->plh_inode->i_lock); if (lrp->res.lrs_present) pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); - clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); + clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); + smp_mb__after_atomic(); + wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); lo->plh_block_lgets--; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 893f6b5afe6a..c4c9fe606ae6 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1398,6 +1398,26 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx, return ret; } +/* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */ +static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key) +{ + if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags)) + return 1; + return nfs_wait_bit_killable(key); +} + +static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) +{ + /* + * send layoutcommit as it can hold up layoutreturn due to lseg + * reference + */ + pnfs_layoutcommit_inode(lo->plh_inode, false); + return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN, + pnfs_layoutget_retry_bit_wait, + TASK_UNINTERRUPTIBLE); +} + /* * Layout segment is retreived from the server if not cached. 
* The appropriate layout segment is referenced and returned to the caller. @@ -1444,7 +1464,8 @@ lookup_again: } /* if LAYOUTGET already failed once we don't try again */ - if (pnfs_layout_io_test_failed(lo, iomode)) + if (pnfs_layout_io_test_failed(lo, iomode) && + !pnfs_should_retry_layoutget(lo)) goto out_unlock; first = list_empty(&lo->plh_segs); @@ -1469,6 +1490,22 @@ lookup_again: goto out_unlock; } + /* + * Because we free lsegs before sending LAYOUTRETURN, we need to wait + * for LAYOUTRETURN even if first is true. + */ + if (!lseg && pnfs_should_retry_layoutget(lo) && + test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) { + spin_unlock(&ino->i_lock); + dprintk("%s wait for layoutreturn\n", __func__); + if (pnfs_prepare_to_retry_layoutget(lo)) { + pnfs_put_layout_hdr(lo); + dprintk("%s retrying\n", __func__); + goto lookup_again; + } + goto out_put_layout_hdr; + } + if (pnfs_layoutgets_blocked(lo, &arg, 0)) goto out_unlock; atomic_inc(&lo->plh_outstanding); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index fed6ae067acb..49a466708400 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -359,8 +359,11 @@ static inline void pnfs_set_retry_layoutget(struct pnfs_layout_hdr *lo) static inline void pnfs_clear_retry_layoutget(struct pnfs_layout_hdr *lo) { - if (test_and_clear_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags)) + if (test_and_clear_bit(NFS_LAYOUT_RETRY_LAYOUTGET, &lo->plh_flags)) { atomic_dec(&lo->plh_refcount); + /* wake up waiters for LAYOUTRETURN as that is not needed */ + wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); + } } static inline bool pnfs_should_retry_layoutget(struct pnfs_layout_hdr *lo) -- cgit v1.2.3 From 5fadeb47dcc5c30d4b6cf481b4a78689eab59443 Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Mon, 19 Jan 2015 12:41:16 +0800 Subject: nfs: count DIO good bytes correctly with mirroring When resending to MDS, we might resend multiple mirroring requests to MDS. As a result, nfs_direct_good_bytes() ends up counting bytes multiple times, causing application to get wrong return results in read/write syscalls. Fix it by tracking start of a dreq and checking the range of pgio header. 
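In outline, the capped accounting looks like this (a sketch of the hunk below; dreq->io_start is the new field recording the file offset where the direct request began):

	/* sketch: a header can only grow its mirror's count up to the end
	 * of the byte range it covers, so a request resent to the MDS is
	 * never counted twice */
	ssize_t count = dreq->mirrors[hdr->pgio_mirror_idx].count;

	if (count + dreq->io_start < hdr->io_start + hdr->good_bytes)
		dreq->mirrors[hdr->pgio_mirror_idx].count =
			hdr->io_start + hdr->good_bytes - dreq->io_start;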
Cc: Weston Andros Adamson Signed-off-by: Peng Tao --- fs/nfs/direct.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 4fad6b727eb4..3715b4957abc 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -88,6 +88,7 @@ struct nfs_direct_req { ssize_t count, /* bytes actually processed */ bytes_left, /* bytes left to be sent */ + io_start, /* start of IO */ error; /* any reported error */ struct completion completion; /* wait for i/o completion */ @@ -130,10 +131,11 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr) WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count); - dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes; - - if (hdr->pgio_mirror_idx == 0) - dreq->count += hdr->good_bytes; + count = dreq->mirrors[hdr->pgio_mirror_idx].count; + if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) { + count = hdr->io_start + hdr->good_bytes - dreq->io_start; + dreq->mirrors[hdr->pgio_mirror_idx].count = count; + } /* update the dreq->count by finding the minimum agreed count from all * mirrors */ @@ -594,6 +596,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, dreq->inode = inode; dreq->bytes_left = count; + dreq->io_start = pos; dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); l_ctx = nfs_get_lock_context(dreq->ctx); if (IS_ERR(l_ctx)) { @@ -1002,6 +1005,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter, dreq->inode = inode; dreq->bytes_left = count; + dreq->io_start = pos; dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); l_ctx = nfs_get_lock_context(dreq->ctx); if (IS_ERR(l_ctx)) { -- cgit v1.2.3 From d67ae825a59d639e4d8b82413af84d854617a87e Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Thu, 11 Dec 2014 17:02:04 -0500 Subject: pnfs/flexfiles: Add the FlexFile Layout Driver The flexfile layout is a new layout that extends the file layout. 
It is currently being drafted as a specification at https://datatracker.ietf.org/doc/draft-ietf-nfsv4-layout-types/ Signed-off-by: Weston Andros Adamson Signed-off-by: Tom Haynes Signed-off-by: Tao Peng --- fs/nfs/Kconfig | 5 + fs/nfs/Makefile | 1 + fs/nfs/flexfilelayout/Makefile | 5 + fs/nfs/flexfilelayout/flexfilelayout.c | 1574 +++++++++++++++++++++++++++++ fs/nfs/flexfilelayout/flexfilelayout.h | 155 +++ fs/nfs/flexfilelayout/flexfilelayoutdev.c | 552 ++++++++++ fs/nfs/idmap.c | 3 +- fs/nfs/nfs4proc.c | 4 +- fs/nfs/pnfs.c | 32 +- fs/nfs/pnfs.h | 1 + include/linux/nfs4.h | 1 + include/linux/nfs_idmap.h | 2 + include/linux/sunrpc/metrics.h | 2 + 13 files changed, 2325 insertions(+), 12 deletions(-) create mode 100644 fs/nfs/flexfilelayout/Makefile create mode 100644 fs/nfs/flexfilelayout/flexfilelayout.c create mode 100644 fs/nfs/flexfilelayout/flexfilelayout.h create mode 100644 fs/nfs/flexfilelayout/flexfilelayoutdev.c (limited to 'fs') diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 3dece03f2fc8..c7abc10279af 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -128,6 +128,11 @@ config PNFS_OBJLAYOUT depends on NFS_V4_1 && SCSI_OSD_ULD default NFS_V4 +config PNFS_FLEXFILE_LAYOUT + tristate + depends on NFS_V4_1 && NFS_V3 + default m + config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN string "NFSv4.1 Implementation ID Domain" depends on NFS_V4_1 diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile index 23abffa8a4ce..1e987acf20c9 100644 --- a/fs/nfs/Makefile +++ b/fs/nfs/Makefile @@ -33,3 +33,4 @@ nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/ obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayout/ obj-$(CONFIG_PNFS_BLOCK) += blocklayout/ +obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += flexfilelayout/ diff --git a/fs/nfs/flexfilelayout/Makefile b/fs/nfs/flexfilelayout/Makefile new file mode 100644 index 000000000000..1d2c9f6bbcd4 --- /dev/null +++ b/fs/nfs/flexfilelayout/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the pNFS Flexfile Layout Driver kernel module +# +obj-$(CONFIG_PNFS_FLEXFILE_LAYOUT) += nfs_layout_flexfiles.o +nfs_layout_flexfiles-y := flexfilelayout.o flexfilelayoutdev.o diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c new file mode 100644 index 000000000000..f29fb7d7e8f8 --- /dev/null +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -0,0 +1,1574 @@ +/* + * Module for pnfs flexfile layout driver. + * + * Copyright (c) 2014, Primary Data, Inc. All rights reserved. 
+ *
+ * Tao Peng
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include "flexfilelayout.h"
+#include "../nfs4session.h"
+#include "../internal.h"
+#include "../delegation.h"
+#include "../nfs4trace.h"
+#include "../iostat.h"
+#include "../nfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
+
+static struct pnfs_layout_hdr *
+ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+	struct nfs4_flexfile_layout *ffl;
+
+	ffl = kzalloc(sizeof(*ffl), gfp_flags);
+	if (ffl) {
+		INIT_LIST_HEAD(&ffl->error_list);
+		return &ffl->generic_hdr;
+	} else
+		return NULL;
+}
+
+static void
+ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	struct nfs4_ff_layout_ds_err *err, *n;
+
+	list_for_each_entry_safe(err, n, &FF_LAYOUT_FROM_HDR(lo)->error_list,
+				 list) {
+		list_del(&err->list);
+		kfree(err);
+	}
+	kfree(FF_LAYOUT_FROM_HDR(lo));
+}
+
+static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
+	if (unlikely(p == NULL))
+		return -ENOBUFS;
+	memcpy(stateid, p, NFS4_STATEID_SIZE);
+	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
+		p[0], p[1], p[2], p[3]);
+	return 0;
+}
+
+static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
+	if (unlikely(!p))
+		return -ENOBUFS;
+	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
+	nfs4_print_deviceid(devid);
+	return 0;
+}
+
+static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(!p))
+		return -ENOBUFS;
+	fh->size = be32_to_cpup(p++);
+	if (fh->size > sizeof(struct nfs_fh)) {
+		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
+		       fh->size);
+		return -EOVERFLOW;
+	}
+	/* fh.data */
+	p = xdr_inline_decode(xdr, fh->size);
+	if (unlikely(!p))
+		return -ENOBUFS;
+	memcpy(&fh->data, p, fh->size);
+	dprintk("%s: fh len %d\n", __func__, fh->size);
+
+	return 0;
+}
+
+/*
+ * Currently only stringified uids and gids are accepted.
+ * I.e., Kerberos is not supported to the DSes, so no principals.
+ *
+ * That means that one common function will suffice, but when
+ * principals are added, this should be split to accommodate
+ * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
+ */ +static int +decode_name(struct xdr_stream *xdr, u32 *id) +{ + __be32 *p; + int len; + + /* opaque_length(4)*/ + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -ENOBUFS; + len = be32_to_cpup(p++); + if (len < 0) + return -EINVAL; + + dprintk("%s: len %u\n", __func__, len); + + /* opaque body */ + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -ENOBUFS; + + if (!nfs_map_string_to_numeric((char *)p, len, id)) + return -EINVAL; + + return 0; +} + +static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls) +{ + int i; + + if (fls->mirror_array) { + for (i = 0; i < fls->mirror_array_cnt; i++) { + /* normally mirror_ds is freed in + * .free_deviceid_node but we still do it here + * for .alloc_lseg error path */ + if (fls->mirror_array[i]) { + kfree(fls->mirror_array[i]->fh_versions); + nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds); + kfree(fls->mirror_array[i]); + } + } + kfree(fls->mirror_array); + fls->mirror_array = NULL; + } +} + +static int ff_layout_check_layout(struct nfs4_layoutget_res *lgr) +{ + int ret = 0; + + dprintk("--> %s\n", __func__); + + /* FIXME: remove this check when layout segment support is added */ + if (lgr->range.offset != 0 || + lgr->range.length != NFS4_MAX_UINT64) { + dprintk("%s Only whole file layouts supported. Use MDS i/o\n", + __func__); + ret = -EINVAL; + } + + dprintk("--> %s returns %d\n", __func__, ret); + return ret; +} + +static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls) +{ + if (fls) { + ff_layout_free_mirror_array(fls); + kfree(fls); + } +} + +static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls) +{ + struct nfs4_ff_layout_mirror *tmp; + int i, j; + + for (i = 0; i < fls->mirror_array_cnt - 1; i++) { + for (j = i + 1; j < fls->mirror_array_cnt; j++) + if (fls->mirror_array[i]->efficiency < + fls->mirror_array[j]->efficiency) { + tmp = fls->mirror_array[i]; + fls->mirror_array[i] = fls->mirror_array[j]; + fls->mirror_array[j] = tmp; + } + } +} + +static struct pnfs_layout_segment * +ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, + struct nfs4_layoutget_res *lgr, + gfp_t gfp_flags) +{ + struct pnfs_layout_segment *ret; + struct nfs4_ff_layout_segment *fls = NULL; + struct xdr_stream stream; + struct xdr_buf buf; + struct page *scratch; + u64 stripe_unit; + u32 mirror_array_cnt; + __be32 *p; + int i, rc; + + dprintk("--> %s\n", __func__); + scratch = alloc_page(gfp_flags); + if (!scratch) + return ERR_PTR(-ENOMEM); + + xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, + lgr->layoutp->len); + xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); + + /* stripe unit and mirror_array_cnt */ + rc = -EIO; + p = xdr_inline_decode(&stream, 8 + 4); + if (!p) + goto out_err_free; + + p = xdr_decode_hyper(p, &stripe_unit); + mirror_array_cnt = be32_to_cpup(p++); + dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__, + stripe_unit, mirror_array_cnt); + + if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT || + mirror_array_cnt == 0) + goto out_err_free; + + rc = -ENOMEM; + fls = kzalloc(sizeof(*fls), gfp_flags); + if (!fls) + goto out_err_free; + + fls->mirror_array_cnt = mirror_array_cnt; + fls->stripe_unit = stripe_unit; + fls->mirror_array = kcalloc(fls->mirror_array_cnt, + sizeof(fls->mirror_array[0]), gfp_flags); + if (fls->mirror_array == NULL) + goto out_err_free; + + for (i = 0; i < fls->mirror_array_cnt; i++) { + struct nfs4_deviceid devid; + struct nfs4_deviceid_node *idnode; + u32 ds_count; + u32 fh_count; + int j; + + 
rc = -EIO; + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_err_free; + ds_count = be32_to_cpup(p); + + /* FIXME: allow for striping? */ + if (ds_count != 1) + goto out_err_free; + + fls->mirror_array[i] = + kzalloc(sizeof(struct nfs4_ff_layout_mirror), + gfp_flags); + if (fls->mirror_array[i] == NULL) { + rc = -ENOMEM; + goto out_err_free; + } + + spin_lock_init(&fls->mirror_array[i]->lock); + fls->mirror_array[i]->ds_count = ds_count; + + /* deviceid */ + rc = decode_deviceid(&stream, &devid); + if (rc) + goto out_err_free; + + idnode = nfs4_find_get_deviceid(NFS_SERVER(lh->plh_inode), + &devid, lh->plh_lc_cred, + gfp_flags); + /* + * upon success, mirror_ds is allocated by previous + * getdeviceinfo, or newly by .alloc_deviceid_node + * nfs4_find_get_deviceid failure is indeed getdeviceinfo falure + */ + if (idnode) + fls->mirror_array[i]->mirror_ds = + FF_LAYOUT_MIRROR_DS(idnode); + else + goto out_err_free; + + /* efficiency */ + rc = -EIO; + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_err_free; + fls->mirror_array[i]->efficiency = be32_to_cpup(p); + + /* stateid */ + rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid); + if (rc) + goto out_err_free; + + /* fh */ + p = xdr_inline_decode(&stream, 4); + if (!p) + goto out_err_free; + fh_count = be32_to_cpup(p); + + fls->mirror_array[i]->fh_versions = + kzalloc(fh_count * sizeof(struct nfs_fh), + gfp_flags); + if (fls->mirror_array[i]->fh_versions == NULL) { + rc = -ENOMEM; + goto out_err_free; + } + + for (j = 0; j < fh_count; j++) { + rc = decode_nfs_fh(&stream, + &fls->mirror_array[i]->fh_versions[j]); + if (rc) + goto out_err_free; + } + + fls->mirror_array[i]->fh_versions_cnt = fh_count; + + /* user */ + rc = decode_name(&stream, &fls->mirror_array[i]->uid); + if (rc) + goto out_err_free; + + /* group */ + rc = decode_name(&stream, &fls->mirror_array[i]->gid); + if (rc) + goto out_err_free; + + dprintk("%s: uid %d gid %d\n", __func__, + fls->mirror_array[i]->uid, + fls->mirror_array[i]->gid); + } + + ff_layout_sort_mirrors(fls); + rc = ff_layout_check_layout(lgr); + if (rc) + goto out_err_free; + + ret = &fls->generic_hdr; + dprintk("<-- %s (success)\n", __func__); +out_free_page: + __free_page(scratch); + return ret; +out_err_free: + _ff_layout_free_lseg(fls); + ret = ERR_PTR(rc); + dprintk("<-- %s (%d)\n", __func__, rc); + goto out_free_page; +} + +static bool ff_layout_has_rw_segments(struct pnfs_layout_hdr *layout) +{ + struct pnfs_layout_segment *lseg; + + list_for_each_entry(lseg, &layout->plh_segs, pls_list) + if (lseg->pls_range.iomode == IOMODE_RW) + return true; + + return false; +} + +static void +ff_layout_free_lseg(struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); + int i; + + dprintk("--> %s\n", __func__); + + for (i = 0; i < fls->mirror_array_cnt; i++) { + if (fls->mirror_array[i]) { + nfs4_ff_layout_put_deviceid(fls->mirror_array[i]->mirror_ds); + fls->mirror_array[i]->mirror_ds = NULL; + if (fls->mirror_array[i]->cred) { + put_rpccred(fls->mirror_array[i]->cred); + fls->mirror_array[i]->cred = NULL; + } + } + } + + if (lseg->pls_range.iomode == IOMODE_RW) { + struct nfs4_flexfile_layout *ffl; + struct inode *inode; + + ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout); + inode = ffl->generic_hdr.plh_inode; + spin_lock(&inode->i_lock); + if (!ff_layout_has_rw_segments(lseg->pls_layout)) { + ffl->commit_info.nbuckets = 0; + kfree(ffl->commit_info.buckets); + ffl->commit_info.buckets = NULL; + } + spin_unlock(&inode->i_lock); + } + 
_ff_layout_free_lseg(fls); +} + +/* Return 1 until we have multiple lsegs support */ +static int +ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls) +{ + return 1; +} + +static int +ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg, + struct nfs_commit_info *cinfo, + gfp_t gfp_flags) +{ + struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); + struct pnfs_commit_bucket *buckets; + int size; + + if (cinfo->ds->nbuckets != 0) { + /* This assumes there is only one RW lseg per file. + * To support multiple lseg per file, we need to + * change struct pnfs_commit_bucket to allow dynamic + * increasing nbuckets. + */ + return 0; + } + + size = ff_layout_get_lseg_count(fls) * FF_LAYOUT_MIRROR_COUNT(lseg); + + buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket), + gfp_flags); + if (!buckets) + return -ENOMEM; + else { + int i; + + spin_lock(cinfo->lock); + if (cinfo->ds->nbuckets != 0) + kfree(buckets); + else { + cinfo->ds->buckets = buckets; + cinfo->ds->nbuckets = size; + for (i = 0; i < size; i++) { + INIT_LIST_HEAD(&buckets[i].written); + INIT_LIST_HEAD(&buckets[i].committing); + /* mark direct verifier as unset */ + buckets[i].direct_verf.committed = + NFS_INVALID_STABLE_HOW; + } + } + spin_unlock(cinfo->lock); + return 0; + } +} + +static struct nfs4_pnfs_ds * +ff_layout_choose_best_ds_for_read(struct nfs_pageio_descriptor *pgio, + int *best_idx) +{ + struct nfs4_ff_layout_segment *fls; + struct nfs4_pnfs_ds *ds; + int idx; + + fls = FF_LAYOUT_LSEG(pgio->pg_lseg); + /* mirrors are sorted by efficiency */ + for (idx = 0; idx < fls->mirror_array_cnt; idx++) { + ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, idx, false); + if (ds) { + *best_idx = idx; + return ds; + } + } + + return NULL; +} + +static void +ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + struct nfs_pgio_mirror *pgm; + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_pnfs_ds *ds; + int ds_idx; + + /* Use full layout for now */ + if (!pgio->pg_lseg) + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_READ, + GFP_KERNEL); + /* If no lseg, fall back to read through mds */ + if (pgio->pg_lseg == NULL) + goto out_mds; + + ds = ff_layout_choose_best_ds_for_read(pgio, &ds_idx); + if (!ds) + goto out_mds; + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx); + + pgio->pg_mirror_idx = ds_idx; + + /* read always uses only one mirror - idx 0 for pgio layer */ + pgm = &pgio->pg_mirrors[0]; + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; + + return; +out_mds: + pnfs_put_lseg(pgio->pg_lseg); + pgio->pg_lseg = NULL; + nfs_pageio_reset_read_mds(pgio); +} + +static void +ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + struct nfs4_ff_layout_mirror *mirror; + struct nfs_pgio_mirror *pgm; + struct nfs_commit_info cinfo; + struct nfs4_pnfs_ds *ds; + int i; + int status; + + if (!pgio->pg_lseg) + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_RW, + GFP_NOFS); + /* If no lseg, fall back to write through mds */ + if (pgio->pg_lseg == NULL) + goto out_mds; + + nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq); + status = ff_layout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS); + if (status < 0) + goto out_mds; + + /* Use a direct mapping of ds_idx to pgio mirror_idx */ + if (WARN_ON_ONCE(pgio->pg_mirror_count != + FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))) + goto out_mds; + + for (i = 0; i < pgio->pg_mirror_count; i++) { 
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true); + if (!ds) + goto out_mds; + pgm = &pgio->pg_mirrors[i]; + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; + } + + return; + +out_mds: + pnfs_put_lseg(pgio->pg_lseg); + pgio->pg_lseg = NULL; + nfs_pageio_reset_write_mds(pgio); +} + +static unsigned int +ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio, + struct nfs_page *req) +{ + if (!pgio->pg_lseg) + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + 0, + NFS4_MAX_UINT64, + IOMODE_RW, + GFP_NOFS); + if (pgio->pg_lseg) + return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg); + + /* no lseg means that pnfs is not in use, so no mirroring here */ + pnfs_put_lseg(pgio->pg_lseg); + pgio->pg_lseg = NULL; + nfs_pageio_reset_write_mds(pgio); + return 1; +} + +static const struct nfs_pageio_ops ff_layout_pg_read_ops = { + .pg_init = ff_layout_pg_init_read, + .pg_test = pnfs_generic_pg_test, + .pg_doio = pnfs_generic_pg_readpages, + .pg_cleanup = pnfs_generic_pg_cleanup, +}; + +static const struct nfs_pageio_ops ff_layout_pg_write_ops = { + .pg_init = ff_layout_pg_init_write, + .pg_test = pnfs_generic_pg_test, + .pg_doio = pnfs_generic_pg_writepages, + .pg_get_mirror_count = ff_layout_pg_get_mirror_count_write, + .pg_cleanup = pnfs_generic_pg_cleanup, +}; + +static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) +{ + struct rpc_task *task = &hdr->task; + + pnfs_layoutcommit_inode(hdr->inode, false); + + if (retry_pnfs) { + dprintk("%s Reset task %5u for i/o through pNFS " + "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, + hdr->task.tk_pid, + hdr->inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(hdr->inode), + hdr->args.count, + (unsigned long long)hdr->args.offset); + + if (!hdr->dreq) { + struct nfs_open_context *ctx; + + ctx = nfs_list_entry(hdr->pages.next)->wb_context; + set_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags); + hdr->completion_ops->error_cleanup(&hdr->pages); + } else { + nfs_direct_set_resched_writes(hdr->dreq); + /* fake unstable write to let common nfs resend pages */ + hdr->verf.committed = NFS_UNSTABLE; + hdr->good_bytes = 0; + } + return; + } + + if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { + dprintk("%s Reset task %5u for i/o through MDS " + "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, + hdr->task.tk_pid, + hdr->inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(hdr->inode), + hdr->args.count, + (unsigned long long)hdr->args.offset); + + task->tk_status = pnfs_write_done_resend_to_mds(hdr); + } +} + +static void ff_layout_reset_read(struct nfs_pgio_header *hdr) +{ + struct rpc_task *task = &hdr->task; + + pnfs_layoutcommit_inode(hdr->inode, false); + + if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { + dprintk("%s Reset task %5u for i/o through MDS " + "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, + hdr->task.tk_pid, + hdr->inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(hdr->inode), + hdr->args.count, + (unsigned long long)hdr->args.offset); + + task->tk_status = pnfs_read_done_resend_to_mds(hdr); + } +} + +static int ff_layout_async_handle_error_v4(struct rpc_task *task, + struct nfs4_state *state, + struct nfs_client *clp, + struct pnfs_layout_segment *lseg, + int idx) +{ + struct pnfs_layout_hdr *lo = lseg->pls_layout; + struct inode *inode = lo->plh_inode; + struct nfs_server *mds_server = NFS_SERVER(inode); + + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + struct nfs_client *mds_client = 
mds_server->nfs_client; + struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; + + if (task->tk_status >= 0) + return 0; + + switch (task->tk_status) { + /* MDS state errors */ + case -NFS4ERR_DELEG_REVOKED: + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: + if (state == NULL) + break; + nfs_remove_bad_delegation(state->inode); + case -NFS4ERR_OPENMODE: + if (state == NULL) + break; + if (nfs4_schedule_stateid_recovery(mds_server, state) < 0) + goto out_bad_stateid; + goto wait_on_recovery; + case -NFS4ERR_EXPIRED: + if (state != NULL) { + if (nfs4_schedule_stateid_recovery(mds_server, state) < 0) + goto out_bad_stateid; + } + nfs4_schedule_lease_recovery(mds_client); + goto wait_on_recovery; + /* DS session errors */ + case -NFS4ERR_BADSESSION: + case -NFS4ERR_BADSLOT: + case -NFS4ERR_BAD_HIGH_SLOT: + case -NFS4ERR_DEADSESSION: + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + case -NFS4ERR_SEQ_FALSE_RETRY: + case -NFS4ERR_SEQ_MISORDERED: + dprintk("%s ERROR %d, Reset session. Exchangeid " + "flags 0x%x\n", __func__, task->tk_status, + clp->cl_exchange_flags); + nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); + break; + case -NFS4ERR_DELAY: + case -NFS4ERR_GRACE: + rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); + break; + case -NFS4ERR_RETRY_UNCACHED_REP: + break; + /* Invalidate Layout errors */ + case -NFS4ERR_PNFS_NO_LAYOUT: + case -ESTALE: /* mapped NFS4ERR_STALE */ + case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ + case -EISDIR: /* mapped NFS4ERR_ISDIR */ + case -NFS4ERR_FHEXPIRED: + case -NFS4ERR_WRONG_TYPE: + dprintk("%s Invalid layout error %d\n", __func__, + task->tk_status); + /* + * Destroy layout so new i/o will get a new layout. + * Layout will not be destroyed until all current lseg + * references are put. Mark layout as invalid to resend failed + * i/o and all i/o waiting on the slot table to the MDS until + * layout is destroyed and a new valid layout is obtained. + */ + pnfs_destroy_layout(NFS_I(inode)); + rpc_wake_up(&tbl->slot_tbl_waitq); + goto reset; + /* RPC connection errors */ + case -ECONNREFUSED: + case -EHOSTDOWN: + case -EHOSTUNREACH: + case -ENETUNREACH: + case -EIO: + case -ETIMEDOUT: + case -EPIPE: + dprintk("%s DS connection error %d\n", __func__, + task->tk_status); + nfs4_mark_deviceid_unavailable(devid); + rpc_wake_up(&tbl->slot_tbl_waitq); + /* fall through */ + default: + if (ff_layout_has_available_ds(lseg)) + return -NFS4ERR_RESET_TO_PNFS; +reset: + dprintk("%s Retry through MDS. 
Error %d\n", __func__, + task->tk_status); + return -NFS4ERR_RESET_TO_MDS; + } +out: + task->tk_status = 0; + return -EAGAIN; +out_bad_stateid: + task->tk_status = -EIO; + return 0; +wait_on_recovery: + rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL); + if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0) + rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task); + goto out; +} + +/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ +static int ff_layout_async_handle_error_v3(struct rpc_task *task, + struct pnfs_layout_segment *lseg, + int idx) +{ + struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); + + if (task->tk_status >= 0) + return 0; + + if (task->tk_status != -EJUKEBOX) { + dprintk("%s DS connection error %d\n", __func__, + task->tk_status); + nfs4_mark_deviceid_unavailable(devid); + if (ff_layout_has_available_ds(lseg)) + return -NFS4ERR_RESET_TO_PNFS; + else + return -NFS4ERR_RESET_TO_MDS; + } + + if (task->tk_status == -EJUKEBOX) + nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY); + task->tk_status = 0; + rpc_restart_call(task); + rpc_delay(task, NFS_JUKEBOX_RETRY_TIME); + return -EAGAIN; +} + +static int ff_layout_async_handle_error(struct rpc_task *task, + struct nfs4_state *state, + struct nfs_client *clp, + struct pnfs_layout_segment *lseg, + int idx) +{ + int vers = clp->cl_nfs_mod->rpc_vers->number; + + switch (vers) { + case 3: + return ff_layout_async_handle_error_v3(task, lseg, idx); + case 4: + return ff_layout_async_handle_error_v4(task, state, clp, + lseg, idx); + default: + /* should never happen */ + WARN_ON_ONCE(1); + return 0; + } +} + +static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, + int idx, u64 offset, u64 length, + u32 status, int opnum) +{ + struct nfs4_ff_layout_mirror *mirror; + int err; + + mirror = FF_LAYOUT_COMP(lseg, idx); + err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), + mirror, offset, length, status, opnum, + GFP_NOIO); + dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status); +} + +/* NFS_PROTO call done callback routines */ + +static int ff_layout_read_done_cb(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + struct inode *inode; + int err; + + trace_nfs4_pnfs_read(hdr, task->tk_status); + if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status) + hdr->res.op_status = NFS4ERR_NXIO; + if (task->tk_status < 0 && hdr->res.op_status) + ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, + hdr->args.offset, hdr->args.count, + hdr->res.op_status, OP_READ); + err = ff_layout_async_handle_error(task, hdr->args.context->state, + hdr->ds_clp, hdr->lseg, + hdr->pgio_mirror_idx); + + switch (err) { + case -NFS4ERR_RESET_TO_PNFS: + set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &hdr->lseg->pls_layout->plh_flags); + pnfs_read_resend_pnfs(hdr); + return task->tk_status; + case -NFS4ERR_RESET_TO_MDS: + inode = hdr->lseg->pls_layout->plh_inode; + pnfs_error_mark_layout_for_return(inode, hdr->lseg); + ff_layout_reset_read(hdr); + return task->tk_status; + case -EAGAIN: + rpc_restart_call_prepare(task); + return -EAGAIN; + } + + return 0; +} + +/* + * We reference the rpc_cred of the first WRITE that triggers the need for + * a LAYOUTCOMMIT, and use it to send the layoutcommit compound. + * rfc5661 is not clear about which credential should be used. 
+ * + * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so + * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751 + * we always send layoutcommit after DS writes. + */ +static void +ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr) +{ + pnfs_set_layoutcommit(hdr); + dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino, + (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb); +} + +static bool +ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx) +{ + /* No mirroring for now */ + struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx); + + return ff_layout_test_devid_unavailable(node); +} + +static int ff_layout_read_prepare_common(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { + rpc_exit(task, -EIO); + return -EIO; + } + if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) { + dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid); + if (ff_layout_has_available_ds(hdr->lseg)) + pnfs_read_resend_pnfs(hdr); + else + ff_layout_reset_read(hdr); + rpc_exit(task, 0); + return -EAGAIN; + } + hdr->pgio_done_cb = ff_layout_read_done_cb; + + return 0; +} + +/* + * Call ops for the async read/write cases + * In the case of dense layouts, the offset needs to be reset to its + * original value. + */ +static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (ff_layout_read_prepare_common(task, hdr)) + return; + + rpc_call_start(task); +} + +static int ff_layout_setup_sequence(struct nfs_client *ds_clp, + struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, + struct rpc_task *task) +{ + if (ds_clp->cl_session) + return nfs41_setup_sequence(ds_clp->cl_session, + args, + res, + task); + return nfs40_setup_sequence(ds_clp->cl_slot_tbl, + args, + res, + task); +} + +static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (ff_layout_read_prepare_common(task, hdr)) + return; + + if (ff_layout_setup_sequence(hdr->ds_clp, + &hdr->args.seq_args, + &hdr->res.seq_res, + task)) + return; + + if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, + hdr->args.lock_context, FMODE_READ) == -EIO) + rpc_exit(task, -EIO); /* lost lock, terminate I/O */ +} + +static void ff_layout_read_call_done(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); + + if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && + task->tk_status == 0) { + nfs4_sequence_done(task, &hdr->res.seq_res); + return; + } + + /* Note this may cause RPC to be resent */ + hdr->mds_ops->rpc_call_done(task, hdr); +} + +static void ff_layout_read_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + rpc_count_iostats_metrics(task, + &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]); +} + +static int ff_layout_write_done_cb(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + struct inode *inode; + int err; + + trace_nfs4_pnfs_write(hdr, task->tk_status); + if (task->tk_status == -ETIMEDOUT && !hdr->res.op_status) + hdr->res.op_status = NFS4ERR_NXIO; + if (task->tk_status < 0 && hdr->res.op_status) + ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx, + hdr->args.offset, hdr->args.count, + hdr->res.op_status, OP_WRITE); + err = ff_layout_async_handle_error(task, hdr->args.context->state, + 
hdr->ds_clp, hdr->lseg, + hdr->pgio_mirror_idx); + + switch (err) { + case -NFS4ERR_RESET_TO_PNFS: + case -NFS4ERR_RESET_TO_MDS: + inode = hdr->lseg->pls_layout->plh_inode; + pnfs_error_mark_layout_for_return(inode, hdr->lseg); + if (err == -NFS4ERR_RESET_TO_PNFS) { + pnfs_set_retry_layoutget(hdr->lseg->pls_layout); + ff_layout_reset_write(hdr, true); + } else { + pnfs_clear_retry_layoutget(hdr->lseg->pls_layout); + ff_layout_reset_write(hdr, false); + } + return task->tk_status; + case -EAGAIN: + rpc_restart_call_prepare(task); + return -EAGAIN; + } + + if (hdr->res.verf->committed == NFS_FILE_SYNC || + hdr->res.verf->committed == NFS_DATA_SYNC) + ff_layout_set_layoutcommit(hdr); + + return 0; +} + +static int ff_layout_commit_done_cb(struct rpc_task *task, + struct nfs_commit_data *data) +{ + struct inode *inode; + int err; + + trace_nfs4_pnfs_commit_ds(data, task->tk_status); + if (task->tk_status == -ETIMEDOUT && !data->res.op_status) + data->res.op_status = NFS4ERR_NXIO; + if (task->tk_status < 0 && data->res.op_status) + ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index, + data->args.offset, data->args.count, + data->res.op_status, OP_COMMIT); + err = ff_layout_async_handle_error(task, NULL, data->ds_clp, + data->lseg, data->ds_commit_index); + + switch (err) { + case -NFS4ERR_RESET_TO_PNFS: + case -NFS4ERR_RESET_TO_MDS: + inode = data->lseg->pls_layout->plh_inode; + pnfs_error_mark_layout_for_return(inode, data->lseg); + if (err == -NFS4ERR_RESET_TO_PNFS) + pnfs_set_retry_layoutget(data->lseg->pls_layout); + else + pnfs_clear_retry_layoutget(data->lseg->pls_layout); + pnfs_generic_prepare_to_resend_writes(data); + return -EAGAIN; + case -EAGAIN: + rpc_restart_call_prepare(task); + return -EAGAIN; + } + + if (data->verf.committed == NFS_UNSTABLE) + pnfs_commit_set_layoutcommit(data); + + return 0; +} + +static int ff_layout_write_prepare_common(struct rpc_task *task, + struct nfs_pgio_header *hdr) +{ + if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { + rpc_exit(task, -EIO); + return -EIO; + } + + if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) { + bool retry_pnfs; + + retry_pnfs = ff_layout_has_available_ds(hdr->lseg); + dprintk("%s task %u reset io to %s\n", __func__, + task->tk_pid, retry_pnfs ? 
"pNFS" : "MDS"); + ff_layout_reset_write(hdr, retry_pnfs); + rpc_exit(task, 0); + return -EAGAIN; + } + + return 0; +} + +static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (ff_layout_write_prepare_common(task, hdr)) + return; + + rpc_call_start(task); +} + +static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (ff_layout_write_prepare_common(task, hdr)) + return; + + if (ff_layout_setup_sequence(hdr->ds_clp, + &hdr->args.seq_args, + &hdr->res.seq_res, + task)) + return; + + if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, + hdr->args.lock_context, FMODE_WRITE) == -EIO) + rpc_exit(task, -EIO); /* lost lock, terminate I/O */ +} + +static void ff_layout_write_call_done(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && + task->tk_status == 0) { + nfs4_sequence_done(task, &hdr->res.seq_res); + return; + } + + /* Note this may cause RPC to be resent */ + hdr->mds_ops->rpc_call_done(task, hdr); +} + +static void ff_layout_write_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_pgio_header *hdr = data; + + rpc_count_iostats_metrics(task, + &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]); +} + +static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) +{ + rpc_call_start(task); +} + +static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) +{ + struct nfs_commit_data *wdata = data; + + ff_layout_setup_sequence(wdata->ds_clp, + &wdata->args.seq_args, + &wdata->res.seq_res, + task); +} + +static void ff_layout_commit_count_stats(struct rpc_task *task, void *data) +{ + struct nfs_commit_data *cdata = data; + + rpc_count_iostats_metrics(task, + &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]); +} + +static const struct rpc_call_ops ff_layout_read_call_ops_v3 = { + .rpc_call_prepare = ff_layout_read_prepare_v3, + .rpc_call_done = ff_layout_read_call_done, + .rpc_count_stats = ff_layout_read_count_stats, + .rpc_release = pnfs_generic_rw_release, +}; + +static const struct rpc_call_ops ff_layout_read_call_ops_v4 = { + .rpc_call_prepare = ff_layout_read_prepare_v4, + .rpc_call_done = ff_layout_read_call_done, + .rpc_count_stats = ff_layout_read_count_stats, + .rpc_release = pnfs_generic_rw_release, +}; + +static const struct rpc_call_ops ff_layout_write_call_ops_v3 = { + .rpc_call_prepare = ff_layout_write_prepare_v3, + .rpc_call_done = ff_layout_write_call_done, + .rpc_count_stats = ff_layout_write_count_stats, + .rpc_release = pnfs_generic_rw_release, +}; + +static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { + .rpc_call_prepare = ff_layout_write_prepare_v4, + .rpc_call_done = ff_layout_write_call_done, + .rpc_count_stats = ff_layout_write_count_stats, + .rpc_release = pnfs_generic_rw_release, +}; + +static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = { + .rpc_call_prepare = ff_layout_commit_prepare_v3, + .rpc_call_done = pnfs_generic_write_commit_done, + .rpc_count_stats = ff_layout_commit_count_stats, + .rpc_release = pnfs_generic_commit_release, +}; + +static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = { + .rpc_call_prepare = ff_layout_commit_prepare_v4, + .rpc_call_done = pnfs_generic_write_commit_done, + .rpc_count_stats = ff_layout_commit_count_stats, + .rpc_release = pnfs_generic_commit_release, +}; + +static enum pnfs_try_status +ff_layout_read_pagelist(struct 
nfs_pgio_header *hdr) +{ + struct pnfs_layout_segment *lseg = hdr->lseg; + struct nfs4_pnfs_ds *ds; + struct rpc_clnt *ds_clnt; + struct rpc_cred *ds_cred; + loff_t offset = hdr->args.offset; + u32 idx = hdr->pgio_mirror_idx; + int vers; + struct nfs_fh *fh; + + dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", + __func__, hdr->inode->i_ino, + hdr->args.pgbase, (size_t)hdr->args.count, offset); + + ds = nfs4_ff_layout_prepare_ds(lseg, idx, false); + if (!ds) + goto out_failed; + + ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp, + hdr->inode); + if (IS_ERR(ds_clnt)) + goto out_failed; + + ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred); + if (IS_ERR(ds_cred)) + goto out_failed; + + vers = nfs4_ff_layout_ds_version(lseg, idx); + + dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__, + ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers); + + atomic_inc(&ds->ds_clp->cl_count); + hdr->ds_clp = ds->ds_clp; + fh = nfs4_ff_layout_select_ds_fh(lseg, idx); + if (fh) + hdr->args.fh = fh; + + /* + * Note that if we ever decide to split across DSes, + * then we may need to handle dense-like offsets. + */ + hdr->args.offset = offset; + hdr->mds_offset = offset; + + /* Perform an asynchronous read to ds */ + nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, + vers == 3 ? &ff_layout_read_call_ops_v3 : + &ff_layout_read_call_ops_v4, + 0, RPC_TASK_SOFTCONN); + + return PNFS_ATTEMPTED; + +out_failed: + if (ff_layout_has_available_ds(lseg)) + return PNFS_TRY_AGAIN; + return PNFS_NOT_ATTEMPTED; +} + +/* Perform async writes. */ +static enum pnfs_try_status +ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) +{ + struct pnfs_layout_segment *lseg = hdr->lseg; + struct nfs4_pnfs_ds *ds; + struct rpc_clnt *ds_clnt; + struct rpc_cred *ds_cred; + loff_t offset = hdr->args.offset; + int vers; + struct nfs_fh *fh; + int idx = hdr->pgio_mirror_idx; + + ds = nfs4_ff_layout_prepare_ds(lseg, idx, true); + if (!ds) + return PNFS_NOT_ATTEMPTED; + + ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp, + hdr->inode); + if (IS_ERR(ds_clnt)) + return PNFS_NOT_ATTEMPTED; + + ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred); + if (IS_ERR(ds_cred)) + return PNFS_NOT_ATTEMPTED; + + vers = nfs4_ff_layout_ds_version(lseg, idx); + + dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d vers %d\n", + __func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count, + offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), + vers); + + hdr->pgio_done_cb = ff_layout_write_done_cb; + atomic_inc(&ds->ds_clp->cl_count); + hdr->ds_clp = ds->ds_clp; + hdr->ds_commit_idx = idx; + fh = nfs4_ff_layout_select_ds_fh(lseg, idx); + if (fh) + hdr->args.fh = fh; + + /* + * Note that if we ever decide to split across DSes, + * then we may need to handle dense-like offsets. + */ + hdr->args.offset = offset; + + /* Perform an asynchronous write */ + nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops, + vers == 3 ? &ff_layout_write_call_ops_v3 : + &ff_layout_write_call_ops_v4, + sync, RPC_TASK_SOFTCONN); + return PNFS_ATTEMPTED; +} + +static void +ff_layout_mark_request_commit(struct nfs_page *req, + struct pnfs_layout_segment *lseg, + struct nfs_commit_info *cinfo, + u32 ds_commit_idx) +{ + struct list_head *list; + struct pnfs_commit_bucket *buckets; + + spin_lock(cinfo->lock); + buckets = cinfo->ds->buckets; + list = &buckets[ds_commit_idx].written; + if (list_empty(list)) { + /* Non-empty buckets hold a reference on the lseg. 
That ref + * is normally transferred to the COMMIT call and released + * there. It could also be released if the last req is pulled + * off due to a rewrite, in which case it will be done in + * pnfs_common_clear_request_commit + */ + WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); + buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); + } + set_bit(PG_COMMIT_TO_DS, &req->wb_flags); + cinfo->ds->nwritten++; + + /* nfs_request_add_commit_list(). We need to add req to list without + * dropping cinfo lock. + */ + set_bit(PG_CLEAN, &(req)->wb_flags); + nfs_list_add_request(req, list); + cinfo->mds->ncommit++; + spin_unlock(cinfo->lock); + if (!cinfo->dreq) { + inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); + inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, + BDI_RECLAIMABLE); + __mark_inode_dirty(req->wb_context->dentry->d_inode, + I_DIRTY_DATASYNC); + } +} + +static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) +{ + return i; +} + +static struct nfs_fh * +select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i) +{ + struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg); + + /* FIXME: Assume that there is only one NFS version available + * for the DS. + */ + return &flseg->mirror_array[i]->fh_versions[0]; +} + +static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how) +{ + struct pnfs_layout_segment *lseg = data->lseg; + struct nfs4_pnfs_ds *ds; + struct rpc_clnt *ds_clnt; + struct rpc_cred *ds_cred; + u32 idx; + int vers; + struct nfs_fh *fh; + + idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); + ds = nfs4_ff_layout_prepare_ds(lseg, idx, true); + if (!ds) + goto out_err; + + ds_clnt = nfs4_ff_find_or_create_ds_client(lseg, idx, ds->ds_clp, + data->inode); + if (IS_ERR(ds_clnt)) + goto out_err; + + ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred); + if (IS_ERR(ds_cred)) + goto out_err; + + vers = nfs4_ff_layout_ds_version(lseg, idx); + + dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__, + data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count), + vers); + data->commit_done_cb = ff_layout_commit_done_cb; + data->cred = ds_cred; + atomic_inc(&ds->ds_clp->cl_count); + data->ds_clp = ds->ds_clp; + fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); + if (fh) + data->args.fh = fh; + return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops, + vers == 3 ? 
&ff_layout_commit_call_ops_v3 : + &ff_layout_commit_call_ops_v4, + how, RPC_TASK_SOFTCONN); +out_err: + pnfs_generic_prepare_to_resend_writes(data); + pnfs_generic_commit_release(data); + return -EAGAIN; +} + +static int +ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, + int how, struct nfs_commit_info *cinfo) +{ + return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo, + ff_layout_initiate_commit); +} + +static struct pnfs_ds_commit_info * +ff_layout_get_ds_info(struct inode *inode) +{ + struct pnfs_layout_hdr *layout = NFS_I(inode)->layout; + + if (layout == NULL) + return NULL; + + return &FF_LAYOUT_FROM_HDR(layout)->commit_info; +} + +static void +ff_layout_free_deveiceid_node(struct nfs4_deviceid_node *d) +{ + nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds, + id_node)); +} + +static int ff_layout_encode_ioerr(struct nfs4_flexfile_layout *flo, + struct xdr_stream *xdr, + const struct nfs4_layoutreturn_args *args) +{ + struct pnfs_layout_hdr *hdr = &flo->generic_hdr; + __be32 *start; + int count = 0, ret = 0; + + start = xdr_reserve_space(xdr, 4); + if (unlikely(!start)) + return -E2BIG; + + /* This assume we always return _ALL_ layouts */ + spin_lock(&hdr->plh_inode->i_lock); + ret = ff_layout_encode_ds_ioerr(flo, xdr, &count, &args->range); + spin_unlock(&hdr->plh_inode->i_lock); + + *start = cpu_to_be32(count); + + return ret; +} + +/* report nothing for now */ +static void ff_layout_encode_iostats(struct nfs4_flexfile_layout *flo, + struct xdr_stream *xdr, + const struct nfs4_layoutreturn_args *args) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 4); + if (likely(p)) + *p = cpu_to_be32(0); +} + +static struct nfs4_deviceid_node * +ff_layout_alloc_deviceid_node(struct nfs_server *server, + struct pnfs_device *pdev, gfp_t gfp_flags) +{ + struct nfs4_ff_layout_ds *dsaddr; + + dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags); + if (!dsaddr) + return NULL; + return &dsaddr->id_node; +} + +static void +ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo, + struct xdr_stream *xdr, + const struct nfs4_layoutreturn_args *args) +{ + struct nfs4_flexfile_layout *flo = FF_LAYOUT_FROM_HDR(lo); + __be32 *start; + + dprintk("%s: Begin\n", __func__); + start = xdr_reserve_space(xdr, 4); + BUG_ON(!start); + + if (ff_layout_encode_ioerr(flo, xdr, args)) + goto out; + + ff_layout_encode_iostats(flo, xdr, args); +out: + *start = cpu_to_be32((xdr->p - start - 1) * 4); + dprintk("%s: Return\n", __func__); +} + +static struct pnfs_layoutdriver_type flexfilelayout_type = { + .id = LAYOUT_FLEX_FILES, + .name = "LAYOUT_FLEX_FILES", + .owner = THIS_MODULE, + .alloc_layout_hdr = ff_layout_alloc_layout_hdr, + .free_layout_hdr = ff_layout_free_layout_hdr, + .alloc_lseg = ff_layout_alloc_lseg, + .free_lseg = ff_layout_free_lseg, + .pg_read_ops = &ff_layout_pg_read_ops, + .pg_write_ops = &ff_layout_pg_write_ops, + .get_ds_info = ff_layout_get_ds_info, + .free_deviceid_node = ff_layout_free_deveiceid_node, + .mark_request_commit = ff_layout_mark_request_commit, + .clear_request_commit = pnfs_generic_clear_request_commit, + .scan_commit_lists = pnfs_generic_scan_commit_lists, + .recover_commit_reqs = pnfs_generic_recover_commit_reqs, + .commit_pagelist = ff_layout_commit_pagelist, + .read_pagelist = ff_layout_read_pagelist, + .write_pagelist = ff_layout_write_pagelist, + .alloc_deviceid_node = ff_layout_alloc_deviceid_node, + .encode_layoutreturn = ff_layout_encode_layoutreturn, +}; + +static int __init nfs4flexfilelayout_init(void) +{ + 
printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n", + __func__); + return pnfs_register_layoutdriver(&flexfilelayout_type); +} + +static void __exit nfs4flexfilelayout_exit(void) +{ + printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n", + __func__); + pnfs_unregister_layoutdriver(&flexfilelayout_type); +} + +MODULE_ALIAS("nfs-layouttype4-4"); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("The NFSv4 flexfile layout driver"); + +module_init(nfs4flexfilelayout_init); +module_exit(nfs4flexfilelayout_exit); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h new file mode 100644 index 000000000000..070f20445b2d --- /dev/null +++ b/fs/nfs/flexfilelayout/flexfilelayout.h @@ -0,0 +1,155 @@ +/* + * NFSv4 flexfile layout driver data structures. + * + * Copyright (c) 2014, Primary Data, Inc. All rights reserved. + * + * Tao Peng + */ + +#ifndef FS_NFS_NFS4FLEXFILELAYOUT_H +#define FS_NFS_NFS4FLEXFILELAYOUT_H + +#include "../pnfs.h" + +/* XXX: Let's filter out insanely large mirror count for now to avoid oom + * due to network error etc. */ +#define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096 + +struct nfs4_ff_ds_version { + u32 version; + u32 minor_version; + u32 rsize; + u32 wsize; + bool tightly_coupled; +}; + +/* chained in global deviceid hlist */ +struct nfs4_ff_layout_ds { + struct nfs4_deviceid_node id_node; + u32 ds_versions_cnt; + struct nfs4_ff_ds_version *ds_versions; + struct nfs4_pnfs_ds *ds; +}; + +struct nfs4_ff_layout_ds_err { + struct list_head list; /* linked in mirror error_list */ + u64 offset; + u64 length; + int status; + enum nfs_opnum4 opnum; + nfs4_stateid stateid; + struct nfs4_deviceid deviceid; +}; + +struct nfs4_ff_layout_mirror { + u32 ds_count; + u32 efficiency; + struct nfs4_ff_layout_ds *mirror_ds; + u32 fh_versions_cnt; + struct nfs_fh *fh_versions; + nfs4_stateid stateid; + struct nfs4_string user_name; + struct nfs4_string group_name; + u32 uid; + u32 gid; + struct rpc_cred *cred; + spinlock_t lock; +}; + +struct nfs4_ff_layout_segment { + struct pnfs_layout_segment generic_hdr; + u64 stripe_unit; + u32 mirror_array_cnt; + struct nfs4_ff_layout_mirror **mirror_array; +}; + +struct nfs4_flexfile_layout { + struct pnfs_layout_hdr generic_hdr; + struct pnfs_ds_commit_info commit_info; + struct list_head error_list; /* nfs4_ff_layout_ds_err */ +}; + +static inline struct nfs4_flexfile_layout * +FF_LAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo) +{ + return container_of(lo, struct nfs4_flexfile_layout, generic_hdr); +} + +static inline struct nfs4_ff_layout_segment * +FF_LAYOUT_LSEG(struct pnfs_layout_segment *lseg) +{ + return container_of(lseg, + struct nfs4_ff_layout_segment, + generic_hdr); +} + +static inline struct nfs4_deviceid_node * +FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx) +{ + if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt || + FF_LAYOUT_LSEG(lseg)->mirror_array[idx] == NULL || + FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds == NULL) + return NULL; + return &FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds->id_node; +} + +static inline struct nfs4_ff_layout_ds * +FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node) +{ + return container_of(node, struct nfs4_ff_layout_ds, id_node); +} + +static inline struct nfs4_ff_layout_mirror * +FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx) +{ + if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt) + return NULL; + return FF_LAYOUT_LSEG(lseg)->mirror_array[idx]; +} + +static inline u32 
+FF_LAYOUT_MIRROR_COUNT(struct pnfs_layout_segment *lseg) +{ + return FF_LAYOUT_LSEG(lseg)->mirror_array_cnt; +} + +static inline bool +ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node) +{ + return nfs4_test_deviceid_unavailable(node); +} + +static inline int +nfs4_ff_layout_ds_version(struct pnfs_layout_segment *lseg, u32 ds_idx) +{ + return FF_LAYOUT_COMP(lseg, ds_idx)->mirror_ds->ds_versions[0].version; +} + +struct nfs4_ff_layout_ds * +nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + gfp_t gfp_flags); +void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds); +void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds); +int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo, + struct nfs4_ff_layout_mirror *mirror, u64 offset, + u64 length, int status, enum nfs_opnum4 opnum, + gfp_t gfp_flags); +int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo, + struct xdr_stream *xdr, int *count, + const struct pnfs_layout_range *range); +struct nfs_fh * +nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx); + +struct nfs4_pnfs_ds * +nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, + bool fail_return); + +struct rpc_clnt * +nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, + u32 ds_idx, + struct nfs_client *ds_clp, + struct inode *inode); +struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, + u32 ds_idx, struct rpc_cred *mdscred); +bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg); +#endif /* FS_NFS_NFS4FLEXFILELAYOUT_H */ diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c new file mode 100644 index 000000000000..3bbb16b3066f --- /dev/null +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -0,0 +1,552 @@ +/* + * Device operations for the pnfs nfs4 file layout driver. + * + * Copyright (c) 2014, Primary Data, Inc. All rights reserved. 
+ * + * Tao Peng + */ + +#include +#include +#include +#include + +#include "../internal.h" +#include "../nfs4session.h" +#include "flexfilelayout.h" + +#define NFSDBG_FACILITY NFSDBG_PNFS_LD + +static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO; +static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS; + +void nfs4_ff_layout_put_deviceid(struct nfs4_ff_layout_ds *mirror_ds) +{ + if (mirror_ds) + nfs4_put_deviceid_node(&mirror_ds->id_node); +} + +void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds) +{ + nfs4_print_deviceid(&mirror_ds->id_node.deviceid); + nfs4_pnfs_ds_put(mirror_ds->ds); + kfree(mirror_ds); +} + +/* Decode opaque device data and construct new_ds using it */ +struct nfs4_ff_layout_ds * +nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, + gfp_t gfp_flags) +{ + struct xdr_stream stream; + struct xdr_buf buf; + struct page *scratch; + struct list_head dsaddrs; + struct nfs4_pnfs_ds_addr *da; + struct nfs4_ff_layout_ds *new_ds = NULL; + struct nfs4_ff_ds_version *ds_versions = NULL; + u32 mp_count; + u32 version_count; + __be32 *p; + int i, ret = -ENOMEM; + + /* set up xdr stream */ + scratch = alloc_page(gfp_flags); + if (!scratch) + goto out_err; + + new_ds = kzalloc(sizeof(struct nfs4_ff_layout_ds), gfp_flags); + if (!new_ds) + goto out_scratch; + + nfs4_init_deviceid_node(&new_ds->id_node, + server, + &pdev->dev_id); + INIT_LIST_HEAD(&dsaddrs); + + xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen); + xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); + + /* multipath count */ + p = xdr_inline_decode(&stream, 4); + if (unlikely(!p)) + goto out_err_drain_dsaddrs; + mp_count = be32_to_cpup(p); + dprintk("%s: multipath ds count %d\n", __func__, mp_count); + + for (i = 0; i < mp_count; i++) { + /* multipath ds */ + da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, + &stream, gfp_flags); + if (da) + list_add_tail(&da->da_node, &dsaddrs); + } + if (list_empty(&dsaddrs)) { + dprintk("%s: no suitable DS addresses found\n", + __func__); + ret = -ENOMEDIUM; + goto out_err_drain_dsaddrs; + } + + /* version count */ + p = xdr_inline_decode(&stream, 4); + if (unlikely(!p)) + goto out_err_drain_dsaddrs; + version_count = be32_to_cpup(p); + dprintk("%s: version count %d\n", __func__, version_count); + + ds_versions = kzalloc(version_count * sizeof(struct nfs4_ff_ds_version), + gfp_flags); + if (!ds_versions) + goto out_scratch; + + for (i = 0; i < version_count; i++) { + /* 20 = version(4) + minor_version(4) + rsize(4) + wsize(4) + + * tightly_coupled(4) */ + p = xdr_inline_decode(&stream, 20); + if (unlikely(!p)) + goto out_err_drain_dsaddrs; + ds_versions[i].version = be32_to_cpup(p++); + ds_versions[i].minor_version = be32_to_cpup(p++); + ds_versions[i].rsize = nfs_block_size(be32_to_cpup(p++), NULL); + ds_versions[i].wsize = nfs_block_size(be32_to_cpup(p++), NULL); + ds_versions[i].tightly_coupled = be32_to_cpup(p); + + if (ds_versions[i].rsize > NFS_MAX_FILE_IO_SIZE) + ds_versions[i].rsize = NFS_MAX_FILE_IO_SIZE; + if (ds_versions[i].wsize > NFS_MAX_FILE_IO_SIZE) + ds_versions[i].wsize = NFS_MAX_FILE_IO_SIZE; + + if (ds_versions[i].version != 3 || ds_versions[i].minor_version != 0) { + dprintk("%s: [%d] unsupported ds version %d-%d\n", __func__, + i, ds_versions[i].version, + ds_versions[i].minor_version); + ret = -EPROTONOSUPPORT; + goto out_err_drain_dsaddrs; + } + + dprintk("%s: [%d] vers %u minor_ver %u rsize %u wsize %u coupled %d\n", + __func__, i, ds_versions[i].version, + 
ds_versions[i].minor_version, + ds_versions[i].rsize, + ds_versions[i].wsize, + ds_versions[i].tightly_coupled); + } + + new_ds->ds_versions = ds_versions; + new_ds->ds_versions_cnt = version_count; + + new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + if (!new_ds->ds) + goto out_err_drain_dsaddrs; + + /* If DS was already in cache, free ds addrs */ + while (!list_empty(&dsaddrs)) { + da = list_first_entry(&dsaddrs, + struct nfs4_pnfs_ds_addr, + da_node); + list_del_init(&da->da_node); + kfree(da->da_remotestr); + kfree(da); + } + + __free_page(scratch); + return new_ds; + +out_err_drain_dsaddrs: + while (!list_empty(&dsaddrs)) { + da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr, + da_node); + list_del_init(&da->da_node); + kfree(da->da_remotestr); + kfree(da); + } + + kfree(ds_versions); +out_scratch: + __free_page(scratch); +out_err: + kfree(new_ds); + + dprintk("%s ERROR: returning %d\n", __func__, ret); + return NULL; +} + +static u64 +end_offset(u64 start, u64 len) +{ + u64 end; + + end = start + len; + return end >= start ? end : NFS4_MAX_UINT64; +} + +static void extend_ds_error(struct nfs4_ff_layout_ds_err *err, + u64 offset, u64 length) +{ + u64 end; + + end = max_t(u64, end_offset(err->offset, err->length), + end_offset(offset, length)); + err->offset = min_t(u64, err->offset, offset); + err->length = end - err->offset; +} + +static bool ds_error_can_merge(struct nfs4_ff_layout_ds_err *err, u64 offset, + u64 length, int status, enum nfs_opnum4 opnum, + nfs4_stateid *stateid, + struct nfs4_deviceid *deviceid) +{ + return err->status == status && err->opnum == opnum && + nfs4_stateid_match(&err->stateid, stateid) && + !memcmp(&err->deviceid, deviceid, sizeof(*deviceid)) && + end_offset(err->offset, err->length) >= offset && + err->offset <= end_offset(offset, length); +} + +static bool merge_ds_error(struct nfs4_ff_layout_ds_err *old, + struct nfs4_ff_layout_ds_err *new) +{ + if (!ds_error_can_merge(old, new->offset, new->length, new->status, + new->opnum, &new->stateid, &new->deviceid)) + return false; + + extend_ds_error(old, new->offset, new->length); + return true; +} + +static bool +ff_layout_add_ds_error_locked(struct nfs4_flexfile_layout *flo, + struct nfs4_ff_layout_ds_err *dserr) +{ + struct nfs4_ff_layout_ds_err *err; + + list_for_each_entry(err, &flo->error_list, list) { + if (merge_ds_error(err, dserr)) { + return true; + } + } + + list_add(&dserr->list, &flo->error_list); + return false; +} + +static bool +ff_layout_update_ds_error(struct nfs4_flexfile_layout *flo, u64 offset, + u64 length, int status, enum nfs_opnum4 opnum, + nfs4_stateid *stateid, struct nfs4_deviceid *deviceid) +{ + bool found = false; + struct nfs4_ff_layout_ds_err *err; + + list_for_each_entry(err, &flo->error_list, list) { + if (ds_error_can_merge(err, offset, length, status, opnum, + stateid, deviceid)) { + found = true; + extend_ds_error(err, offset, length); + break; + } + } + + return found; +} + +int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo, + struct nfs4_ff_layout_mirror *mirror, u64 offset, + u64 length, int status, enum nfs_opnum4 opnum, + gfp_t gfp_flags) +{ + struct nfs4_ff_layout_ds_err *dserr; + bool needfree; + + if (status == 0) + return 0; + + if (mirror->mirror_ds == NULL) + return -EINVAL; + + spin_lock(&flo->generic_hdr.plh_inode->i_lock); + if (ff_layout_update_ds_error(flo, offset, length, status, opnum, + &mirror->stateid, + &mirror->mirror_ds->id_node.deviceid)) { + spin_unlock(&flo->generic_hdr.plh_inode->i_lock); + return 0; + } + 
spin_unlock(&flo->generic_hdr.plh_inode->i_lock); + dserr = kmalloc(sizeof(*dserr), gfp_flags); + if (!dserr) + return -ENOMEM; + + INIT_LIST_HEAD(&dserr->list); + dserr->offset = offset; + dserr->length = length; + dserr->status = status; + dserr->opnum = opnum; + nfs4_stateid_copy(&dserr->stateid, &mirror->stateid); + memcpy(&dserr->deviceid, &mirror->mirror_ds->id_node.deviceid, + NFS4_DEVICEID4_SIZE); + + spin_lock(&flo->generic_hdr.plh_inode->i_lock); + needfree = ff_layout_add_ds_error_locked(flo, dserr); + spin_unlock(&flo->generic_hdr.plh_inode->i_lock); + if (needfree) + kfree(dserr); + + return 0; +} + +/* currently we only support AUTH_NONE and AUTH_SYS */ +static rpc_authflavor_t +nfs4_ff_layout_choose_authflavor(struct nfs4_ff_layout_mirror *mirror) +{ + if (mirror->uid == (u32)-1) + return RPC_AUTH_NULL; + return RPC_AUTH_UNIX; +} + +/* fetch cred for NFSv3 DS */ +static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror, + struct nfs4_pnfs_ds *ds) +{ + if (ds->ds_clp && !mirror->cred && + mirror->mirror_ds->ds_versions[0].version == 3) { + struct rpc_auth *auth = ds->ds_clp->cl_rpcclient->cl_auth; + struct rpc_cred *cred; + struct auth_cred acred = { + .uid = make_kuid(&init_user_ns, mirror->uid), + .gid = make_kgid(&init_user_ns, mirror->gid), + }; + + /* AUTH_NULL ignores acred */ + cred = auth->au_ops->lookup_cred(auth, &acred, 0); + if (IS_ERR(cred)) { + dprintk("%s: lookup_cred failed with %ld\n", + __func__, PTR_ERR(cred)); + return PTR_ERR(cred); + } else { + mirror->cred = cred; + } + } + return 0; +} + +struct nfs_fh * +nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx) +{ + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx); + struct nfs_fh *fh = NULL; + struct nfs4_deviceid_node *devid; + + if (mirror == NULL || mirror->mirror_ds == NULL || + mirror->mirror_ds->ds == NULL) { + printk(KERN_ERR "NFS: %s: No data server for mirror offset index %d\n", + __func__, mirror_idx); + if (mirror && mirror->mirror_ds) { + devid = &mirror->mirror_ds->id_node; + pnfs_generic_mark_devid_invalid(devid); + } + goto out; + } + + /* FIXME: For now assume there is only 1 version available for the DS */ + fh = &mirror->fh_versions[0]; +out: + return fh; +} + +/* Upon return, either ds is connected, or ds is NULL */ +struct nfs4_pnfs_ds * +nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, + bool fail_return) +{ + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx); + struct nfs4_pnfs_ds *ds = NULL; + struct nfs4_deviceid_node *devid; + struct inode *ino = lseg->pls_layout->plh_inode; + struct nfs_server *s = NFS_SERVER(ino); + unsigned int max_payload; + rpc_authflavor_t flavor; + + if (mirror == NULL || mirror->mirror_ds == NULL || + mirror->mirror_ds->ds == NULL) { + printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", + __func__, ds_idx); + if (mirror && mirror->mirror_ds) { + devid = &mirror->mirror_ds->id_node; + pnfs_generic_mark_devid_invalid(devid); + } + goto out; + } + + devid = &mirror->mirror_ds->id_node; + if (ff_layout_test_devid_unavailable(devid)) + goto out; + + ds = mirror->mirror_ds->ds; + /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */ + smp_rmb(); + if (ds->ds_clp) + goto out; + + flavor = nfs4_ff_layout_choose_authflavor(mirror); + + /* FIXME: For now we assume the server sent only one version of NFS + * to use for the DS. 
+ */ + nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, + dataserver_retrans, + mirror->mirror_ds->ds_versions[0].version, + mirror->mirror_ds->ds_versions[0].minor_version, + flavor); + + /* connect success, check rsize/wsize limit */ + if (ds->ds_clp) { + max_payload = + nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient), + NULL); + if (mirror->mirror_ds->ds_versions[0].rsize > max_payload) + mirror->mirror_ds->ds_versions[0].rsize = max_payload; + if (mirror->mirror_ds->ds_versions[0].wsize > max_payload) + mirror->mirror_ds->ds_versions[0].wsize = max_payload; + } else { + ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), + mirror, lseg->pls_range.offset, + lseg->pls_range.length, NFS4ERR_NXIO, + OP_ILLEGAL, GFP_NOIO); + if (fail_return) { + pnfs_error_mark_layout_for_return(ino, lseg); + if (ff_layout_has_available_ds(lseg)) + pnfs_set_retry_layoutget(lseg->pls_layout); + else + pnfs_clear_retry_layoutget(lseg->pls_layout); + + } else { + if (ff_layout_has_available_ds(lseg)) + set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &lseg->pls_layout->plh_flags); + else { + pnfs_error_mark_layout_for_return(ino, lseg); + pnfs_clear_retry_layoutget(lseg->pls_layout); + } + } + } + + if (ff_layout_update_mirror_cred(mirror, ds)) + ds = NULL; +out: + return ds; +} + +struct rpc_cred * +ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx, + struct rpc_cred *mdscred) +{ + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx); + struct rpc_cred *cred = ERR_PTR(-EINVAL); + + if (!nfs4_ff_layout_prepare_ds(lseg, ds_idx, true)) + goto out; + + if (mirror && mirror->cred) + cred = mirror->cred; + else + cred = mdscred; +out: + return cred; +} + +/** +* Find or create a DS rpc client with the MDS server rpc client auth flavor +* in the nfs_client cl_ds_clients list.
+*/ +struct rpc_clnt * +nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg, u32 ds_idx, + struct nfs_client *ds_clp, struct inode *inode) +{ + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx); + + switch (mirror->mirror_ds->ds_versions[0].version) { + case 3: + /* For NFSv3 DS, flavor is set when creating DS connections */ + return ds_clp->cl_rpcclient; + case 4: + return nfs4_find_or_create_ds_client(ds_clp, inode); + default: + BUG(); + } +} + +static bool is_range_intersecting(u64 offset1, u64 length1, + u64 offset2, u64 length2) +{ + u64 end1 = end_offset(offset1, length1); + u64 end2 = end_offset(offset2, length2); + + return (end1 == NFS4_MAX_UINT64 || end1 > offset2) && + (end2 == NFS4_MAX_UINT64 || end2 > offset1); +} + +/* called with inode i_lock held */ +int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo, + struct xdr_stream *xdr, int *count, + const struct pnfs_layout_range *range) +{ + struct nfs4_ff_layout_ds_err *err, *n; + __be32 *p; + + list_for_each_entry_safe(err, n, &flo->error_list, list) { + if (!is_range_intersecting(err->offset, err->length, + range->offset, range->length)) + continue; + /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE) + * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4) + */ + p = xdr_reserve_space(xdr, + 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); + if (unlikely(!p)) + return -ENOBUFS; + p = xdr_encode_hyper(p, err->offset); + p = xdr_encode_hyper(p, err->length); + p = xdr_encode_opaque_fixed(p, &err->stateid, + NFS4_STATEID_SIZE); + p = xdr_encode_opaque_fixed(p, &err->deviceid, + NFS4_DEVICEID4_SIZE); + *p++ = cpu_to_be32(err->status); + *p++ = cpu_to_be32(err->opnum); + *count += 1; + list_del(&err->list); + kfree(err); + dprintk("%s: offset %llu length %llu status %d op %d count %d\n", + __func__, err->offset, err->length, err->status, + err->opnum, *count); + } + + return 0; +} + +bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg) +{ + struct nfs4_ff_layout_mirror *mirror; + struct nfs4_deviceid_node *devid; + int idx; + + for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) { + mirror = FF_LAYOUT_COMP(lseg, idx); + if (mirror && mirror->mirror_ds) { + devid = &mirror->mirror_ds->id_node; + if (!ff_layout_test_devid_unavailable(devid)) + return true; + } + } + + return false; +} + +module_param(dataserver_retrans, uint, 0644); +MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client " + "retries a request before it attempts further " + " recovery action."); +module_param(dataserver_timeo, uint, 0644); +MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the " + "NFSv4.1 client waits for a response from a " + " data server before it retries an NFS request."); diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 2f5db844c172..857e2a99acc8 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -152,7 +152,7 @@ void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *f nfs_fattr_free_group_name(fattr); } -static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res) +int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res) { unsigned long val; char buf[16]; @@ -166,6 +166,7 @@ static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *re *res = val; return 1; } +EXPORT_SYMBOL_GPL(nfs_map_string_to_numeric); static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 
44c600aac907..ca6dda0f68bb 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7796,9 +7796,7 @@ static void nfs4_layoutreturn_release(void *calldata) spin_lock(&lo->plh_inode->i_lock); if (lrp->res.lrs_present) pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); - clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); - smp_mb__after_atomic(); - wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); + pnfs_clear_layoutreturn_waitbit(lo); clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); lo->plh_block_lgets--; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c4c9fe606ae6..0fb0f1920a1f 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -910,7 +910,9 @@ send_layoutget(struct pnfs_layout_hdr *lo, pnfs_layout_io_set_failed(lo, range->iomode); } return NULL; - } + } else + pnfs_layout_clear_fail_bit(lo, + pnfs_iomode_to_fail_bit(range->iomode)); return lseg; } @@ -930,6 +932,13 @@ static void pnfs_clear_layoutcommit(struct inode *inode, } } +void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo) +{ + clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); + smp_mb__after_atomic(); + wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); +} + static int pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, enum pnfs_iomode iomode, bool sync) @@ -943,6 +952,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, status = -ENOMEM; spin_lock(&ino->i_lock); lo->plh_block_lgets--; + pnfs_clear_layoutreturn_waitbit(lo); rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq); spin_unlock(&ino->i_lock); pnfs_put_layout_hdr(lo); @@ -1418,6 +1428,15 @@ static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo) TASK_UNINTERRUPTIBLE); } +static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo) +{ + unsigned long *bitlock = &lo->plh_flags; + + clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock); + smp_mb__after_atomic(); + wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET); +} + /* * Layout segment is retreived from the server if not cached. * The appropriate layout segment is referenced and returned to the caller. 
@@ -1499,6 +1518,8 @@ lookup_again: spin_unlock(&ino->i_lock); dprintk("%s wait for layoutreturn\n", __func__); if (pnfs_prepare_to_retry_layoutget(lo)) { + if (first) + pnfs_clear_first_layoutget(lo); pnfs_put_layout_hdr(lo); dprintk("%s retrying\n", __func__); goto lookup_again; @@ -1533,13 +1554,8 @@ lookup_again: pnfs_clear_retry_layoutget(lo); atomic_dec(&lo->plh_outstanding); out_put_layout_hdr: - if (first) { - unsigned long *bitlock = &lo->plh_flags; - - clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock); - smp_mb__after_atomic(); - wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET); - } + if (first) + pnfs_clear_first_layoutget(lo); pnfs_put_layout_hdr(lo); out: dprintk("%s: inode %s/%llu pNFS layout segment %s for " diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 49a466708400..7642021484bf 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -278,6 +278,7 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, u64 count, enum pnfs_iomode iomode, gfp_t gfp_flags); +void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo); void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *); diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 022b761dbf0a..de7c91ca427e 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -516,6 +516,7 @@ enum pnfs_layouttype { LAYOUT_NFSV4_1_FILES = 1, LAYOUT_OSD2_OBJECTS = 2, LAYOUT_BLOCK_VOLUME = 3, + LAYOUT_FLEX_FILES = 4, }; /* used for both layout return and recall */ diff --git a/include/linux/nfs_idmap.h b/include/linux/nfs_idmap.h index 0f4b79da6584..333844e38f66 100644 --- a/include/linux/nfs_idmap.h +++ b/include/linux/nfs_idmap.h @@ -73,5 +73,7 @@ int nfs_map_group_to_gid(const struct nfs_server *, const char *, size_t, kgid_t int nfs_map_uid_to_name(const struct nfs_server *, kuid_t, char *, size_t); int nfs_map_gid_to_group(const struct nfs_server *, kgid_t, char *, size_t); +int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res); + extern unsigned int nfs_idmap_cache_timeout; #endif /* NFS_IDMAP_H */ diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 89f2ca178873..7e61a17030a4 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -89,6 +89,8 @@ void rpc_free_iostats(struct rpc_iostats *); static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } static inline void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) {} +static inline void rpc_count_iostats_metrics(const struct rpc_task *, + struct rpc_iostats *) {} static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} static inline void rpc_free_iostats(struct rpc_iostats *stats) {} -- cgit v1.2.3 From cb5d04bc39e914124e811ea55f3034d2379a5f6c Mon Sep 17 00:00:00 2001 From: Peng Tao Date: Sat, 24 Jan 2015 22:14:52 +0800 Subject: nfs41: .init_read and .init_write can be called with valid pg_lseg With pgio refactoring in v3.15, .init_read and .init_write can be called with valid pgio->pg_lseg. file layout was fixed at that time by commit c6194271f (pnfs: filelayout: support non page aligned layouts). But the generic helper still needs to be fixed. 
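As a rough sketch of the intended pattern (illustrative only, not the patch text itself): the generic init helpers should reuse a pg_lseg the caller already attached and only fall back to a fresh lookup when it is still NULL. The function names are the ones used in the hunks that follow.

	if (pgio->pg_lseg == NULL)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, req->wb_context,
						   req_offset(req), rd_size,
						   IOMODE_READ, GFP_KERNEL);
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);	/* no layout: fall back to the MDS */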
Cc: stable@vger.kernel.org # 3.15+ Signed-off-by: Peng Tao --- fs/nfs/pnfs.c | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0fb0f1920a1f..c7be9b997f5e 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1711,19 +1711,19 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r { u64 rd_size = req->wb_bytes; - WARN_ON_ONCE(pgio->pg_lseg != NULL); - - if (pgio->pg_dreq == NULL) - rd_size = i_size_read(pgio->pg_inode) - req_offset(req); - else - rd_size = nfs_dreq_bytes_left(pgio->pg_dreq); - - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - req->wb_context, - req_offset(req), - rd_size, - IOMODE_READ, - GFP_KERNEL); + if (pgio->pg_lseg == NULL) { + if (pgio->pg_dreq == NULL) + rd_size = i_size_read(pgio->pg_inode) - req_offset(req); + else + rd_size = nfs_dreq_bytes_left(pgio->pg_dreq); + + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + req_offset(req), + rd_size, + IOMODE_READ, + GFP_KERNEL); + } /* If no lseg, fall back to read through mds */ if (pgio->pg_lseg == NULL) nfs_pageio_reset_read_mds(pgio); @@ -1735,14 +1735,13 @@ void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req, u64 wb_size) { - WARN_ON_ONCE(pgio->pg_lseg != NULL); - - pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, - req->wb_context, - req_offset(req), - wb_size, - IOMODE_RW, - GFP_NOFS); + if (pgio->pg_lseg == NULL) + pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, + req->wb_context, + req_offset(req), + wb_size, + IOMODE_RW, + GFP_NOFS); /* If no lseg, fall back to write through mds */ if (pgio->pg_lseg == NULL) nfs_pageio_reset_write_mds(pgio); -- cgit v1.2.3 From 7c13789e3e6c66dbcaade1760087429240eb3d27 Mon Sep 17 00:00:00 2001 From: Weston Andros Adamson Date: Fri, 30 Jan 2015 11:01:02 -0500 Subject: pnfs: lookup new lseg at lseg boundary Before mirroring support was added, the pageio descriptor's pg_lseg was set to null when an RPC was sent. Because of this, pg_init was called at lseg boundaries with pg_lseg = NULL, and it could be set to the new lseg. 
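A condensed sketch of the boundary handling this introduces, simplified from the hunk below: when a request starts past the end of the current segment, the old lseg is dropped via pg_cleanup and pg_init is re-run so a new segment can be referenced.

	if (req_start >= seg_end) {
		if (pgio->pg_ops->pg_cleanup)
			pgio->pg_ops->pg_cleanup(pgio);		/* drop the old lseg */
		if (pgio->pg_ops->pg_init)
			pgio->pg_ops->pg_init(pgio, req);	/* look up a new one */
		return 0;
	}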
Signed-off-by: Weston Andros Adamson --- fs/nfs/pnfs.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c7be9b997f5e..9304984bde80 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1788,10 +1788,16 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, seg_end = end_offset(pgio->pg_lseg->pls_range.offset, pgio->pg_lseg->pls_range.length); req_start = req_offset(req); - WARN_ON_ONCE(req_start > seg_end); + WARN_ON_ONCE(req_start >= seg_end); /* start of request is past the last byte of this segment */ - if (req_start >= seg_end) + if (req_start >= seg_end) { + /* reference the new lseg */ + if (pgio->pg_ops->pg_cleanup) + pgio->pg_ops->pg_cleanup(pgio); + if (pgio->pg_ops->pg_init) + pgio->pg_ops->pg_init(pgio, req); return 0; + } /* adjust 'size' iff there are fewer bytes left in the * segment than what nfs_generic_pg_test returned */ -- cgit v1.2.3 From 03a9a42a1a7e5b3e7919ddfacc1d1cc81882a955 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 30 Jan 2015 18:12:28 -0500 Subject: SUNRPC: NULL utsname dereference on NFS umount during namespace cleanup MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix an Oopsable condition when nsm_mon_unmon is called as part of the namespace cleanup, which now apparently happens after the utsname has been freed. Link: http://lkml.kernel.org/r/20150125220604.090121ae@neptune.home Reported-by: Bruno Prémont Cc: stable@vger.kernel.org # 3.18 Signed-off-by: Trond Myklebust --- fs/lockd/mon.c | 13 +++++++++---- include/linux/sunrpc/clnt.h | 3 ++- net/sunrpc/clnt.c | 12 +++++++----- net/sunrpc/rpcb_clnt.c | 8 ++++++-- 4 files changed, 24 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 1cc6ec51e6b1..47a32b6d9b90 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -65,7 +65,7 @@ static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm) return (struct sockaddr *)&nsm->sm_addr; } -static struct rpc_clnt *nsm_create(struct net *net) +static struct rpc_clnt *nsm_create(struct net *net, const char *nodename) { struct sockaddr_in sin = { .sin_family = AF_INET, @@ -77,6 +77,7 @@ static struct rpc_clnt *nsm_create(struct net *net) .address = (struct sockaddr *)&sin, .addrsize = sizeof(sin), .servername = "rpc.statd", + .nodename = nodename, .program = &nsm_program, .version = NSM_VERSION, .authflavor = RPC_AUTH_NULL, @@ -102,7 +103,7 @@ out: return clnt; } -static struct rpc_clnt *nsm_client_get(struct net *net) +static struct rpc_clnt *nsm_client_get(struct net *net, const char *nodename) { struct rpc_clnt *clnt, *new; struct lockd_net *ln = net_generic(net, lockd_net_id); @@ -111,7 +112,7 @@ static struct rpc_clnt *nsm_client_get(struct net *net) if (clnt != NULL) goto out; - clnt = new = nsm_create(net); + clnt = new = nsm_create(net, nodename); if (IS_ERR(clnt)) goto out; @@ -190,19 +191,23 @@ int nsm_monitor(const struct nlm_host *host) struct nsm_res res; int status; struct rpc_clnt *clnt; + const char *nodename = NULL; dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name); if (nsm->sm_monitored) return 0; + if (host->h_rpcclnt) + nodename = host->h_rpcclnt->cl_nodename; + /* * Choose whether to record the caller_name or IP address of * this peer in the local rpc.statd's database. */ nsm->sm_mon_name = nsm_use_hostnames ? 
nsm->sm_name : nsm->sm_addrbuf; - clnt = nsm_client_get(host->net); + clnt = nsm_client_get(host->net, nodename); if (IS_ERR(clnt)) { status = PTR_ERR(clnt); dprintk("lockd: failed to create NSM upcall transport, " diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index d86acc63b25f..598ba80ec30c 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -57,7 +57,7 @@ struct rpc_clnt { const struct rpc_timeout *cl_timeout; /* Timeout strategy */ int cl_nodelen; /* nodename length */ - char cl_nodename[UNX_MAXNODENAME]; + char cl_nodename[UNX_MAXNODENAME+1]; struct rpc_pipe_dir_head cl_pipedir_objects; struct rpc_clnt * cl_parent; /* Points to parent of clones */ struct rpc_rtt cl_rtt_default; @@ -112,6 +112,7 @@ struct rpc_create_args { struct sockaddr *saddress; const struct rpc_timeout *timeout; const char *servername; + const char *nodename; const struct rpc_program *program; u32 prognumber; /* overrides program->number */ u32 version; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 05da12a33945..3f5d4d48f0cb 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -286,10 +286,8 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) { - clnt->cl_nodelen = strlen(nodename); - if (clnt->cl_nodelen > UNX_MAXNODENAME) - clnt->cl_nodelen = UNX_MAXNODENAME; - memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen); + clnt->cl_nodelen = strlcpy(clnt->cl_nodename, + nodename, sizeof(clnt->cl_nodename)); } static int rpc_client_register(struct rpc_clnt *clnt, @@ -365,6 +363,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, const struct rpc_version *version; struct rpc_clnt *clnt = NULL; const struct rpc_timeout *timeout; + const char *nodename = args->nodename; int err; /* sanity check the name before trying to print it */ @@ -420,8 +419,10 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, atomic_set(&clnt->cl_count, 1); + if (nodename == NULL) + nodename = utsname()->nodename; /* save the nodename */ - rpc_clnt_set_nodename(clnt, utsname()->nodename); + rpc_clnt_set_nodename(clnt, nodename); err = rpc_client_register(clnt, args->authflavor, args->client_name); if (err) @@ -576,6 +577,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, if (xprt == NULL) goto out_err; args->servername = xprt->servername; + args->nodename = clnt->cl_nodename; new = rpc_new_client(args, xprt, clnt); if (IS_ERR(new)) { diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 05202012bcfc..cf5770d8f49a 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -355,7 +355,8 @@ out: return result; } -static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname, +static struct rpc_clnt *rpcb_create(struct net *net, const char *nodename, + const char *hostname, struct sockaddr *srvaddr, size_t salen, int proto, u32 version) { @@ -365,6 +366,7 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname, .address = srvaddr, .addrsize = salen, .servername = hostname, + .nodename = nodename, .program = &rpcb_program, .version = version, .authflavor = RPC_AUTH_UNIX, @@ -740,7 +742,9 @@ void rpcb_getport_async(struct rpc_task *task) dprintk("RPC: %5u %s: trying rpcbind version %u\n", task->tk_pid, __func__, bind_version); - rpcb_clnt = rpcb_create(xprt->xprt_net, xprt->servername, sap, salen, + rpcb_clnt = rpcb_create(xprt->xprt_net, + 
clnt->cl_nodename, + xprt->servername, sap, salen, + xprt->prot, bind_version); if (IS_ERR(rpcb_clnt)) { status = PTR_ERR(rpcb_clnt); -- cgit v1.2.3 From adf305f77878880fa5868a7179979da93be68d83 Mon Sep 17 00:00:00 2001 From: Javi Merino Date: Thu, 15 Jan 2015 16:17:45 +0000 Subject: sysfs: fix warning when creating a sysfs group without attributes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When attempting to create a group without attrs, the warning prints the name of the group. However, the check for name being a NULL pointer is wrong: it uses the pointer to the name when it's NULL. Fix it to use the name if present, otherwise just put an empty string. Cc: Bruno Prémont Cc: Greg Kroah-Hartman Signed-off-by: Javi Merino Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/group.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index 7d2a860ba788..2554d8835b48 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -99,7 +99,7 @@ static int internal_create_group(struct kobject *kobj, int update, return -EINVAL; if (!grp->attrs && !grp->bin_attrs) { WARN(1, "sysfs: (bin_)attrs not set by subsystem for group: %s/%s\n", - kobj->name, grp->name ? "" : grp->name); + kobj->name, grp->name ?: ""); return -EINVAL; } if (grp->name) { -- cgit v1.2.3 From 2e90b1c45e34240eeeacab0b37d5f8f739462bdc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 27 Nov 2014 21:50:31 -0500 Subject: rxrpc: make the users of rxrpc_kernel_send_data() set kvec-backed msg_iter properly Use iov_iter_kvec() there, get rid of set_fs() games - now that rxrpc_send_data() uses iov_iter primitives, it'll handle ITER_KVEC just fine. Signed-off-by: Al Viro --- fs/afs/rxrpc.c | 14 +++++++------- net/rxrpc/ar-output.c | 3 --- 2 files changed, 7 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 06e14bfb3496..dbc732e9a5c0 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -306,8 +306,8 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, _debug("- range %u-%u%s", offset, to, msg->msg_flags ? " [more]" : ""); - iov_iter_init(&msg->msg_iter, WRITE, - (struct iovec *) iov, 1, to - offset); + iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, + iov, 1, to - offset); /* have to change the state *before* sending the last * packet as RxRPC might give us the reply before it @@ -384,7 +384,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, msg.msg_name = NULL; msg.msg_namelen = 0; - iov_iter_init(&msg.msg_iter, WRITE, (struct iovec *)iov, 1, + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, call->request_size); msg.msg_control = NULL; msg.msg_controllen = 0; @@ -770,7 +770,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, void afs_send_empty_reply(struct afs_call *call) { struct msghdr msg; - struct iovec iov[1]; + struct kvec iov[1]; _enter(""); @@ -778,7 +778,7 @@ void afs_send_empty_reply(struct afs_call *call) iov[0].iov_len = 0; msg.msg_name = NULL; msg.msg_namelen = 0; - iov_iter_init(&msg.msg_iter, WRITE, iov, 0, 0); /* WTF? */ + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 0, 0); /* WTF?
*/ msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; @@ -805,7 +805,7 @@ void afs_send_empty_reply(struct afs_call *call) void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) { struct msghdr msg; - struct iovec iov[1]; + struct kvec iov[1]; int n; _enter(""); @@ -814,7 +814,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) iov[0].iov_len = len; msg.msg_name = NULL; msg.msg_namelen = 0; - iov_iter_init(&msg.msg_iter, WRITE, iov, 1, len); + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len); msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = 0; diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index 963a5b91f3e8..8331c95e1522 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c @@ -232,10 +232,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg, call->state != RXRPC_CALL_SERVER_SEND_REPLY) { ret = -EPROTO; /* request phase complete for this client call */ } else { - mm_segment_t oldfs = get_fs(); - set_fs(KERNEL_DS); ret = rxrpc_send_data(NULL, call->socket, call, msg, len); - set_fs(oldfs); } release_sock(&call->socket->sk); -- cgit v1.2.3 From 6ae373394c4257bad562817aa60464ff7fe8f9c4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 30 Jan 2015 14:21:14 -0500 Subject: NFSv4.1: Ask for no delegation on OPEN if using O_DIRECT If we're using NFSv4.1, then we have the ability to let the server know whether or not we believe that returning a delegation as part of our OPEN request would be useful. The feature needs to be used with care, since the client sending the request doesn't necessarily know how other clients are using that file, and how they may be affected by the delegation. For this reason, our initial use of the feature will be to let the server know when the client believes that handing out a delegation would not be useful. The first application for this function is when opening the file using O_DIRECT. 
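A hedged userspace illustration, with a made-up path: on an NFSv4.1 mount, an application that opens a file with O_DIRECT will now cause the client's OPEN to carry the NFS4_SHARE_WANT_NO_DELEG hint introduced below.

	#define _GNU_SOURCE
	#include <fcntl.h>

	int fd = open("/mnt/nfs/scratch/data.bin", O_RDWR | O_DIRECT, 0600);
	/* the client maps O_DIRECT to "want no delegation" in its OPEN share_access */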
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 30 +++++++++++++++++++ fs/nfs/nfs4xdr.c | 79 +++++++++++++++++++++++++++++++++---------------- include/linux/nfs_xdr.h | 2 ++ 3 files changed, 85 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6e1c9b2d92c5..cd4295d84d54 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -940,6 +940,31 @@ static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server, return true; } +static u32 +nfs4_map_atomic_open_share(struct nfs_server *server, + fmode_t fmode, int openflags) +{ + u32 res = 0; + + switch (fmode & (FMODE_READ | FMODE_WRITE)) { + case FMODE_READ: + res = NFS4_SHARE_ACCESS_READ; + break; + case FMODE_WRITE: + res = NFS4_SHARE_ACCESS_WRITE; + break; + case FMODE_READ|FMODE_WRITE: + res = NFS4_SHARE_ACCESS_BOTH; + } + if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1)) + goto out; + /* Want no delegation if we're using O_DIRECT */ + if (openflags & O_DIRECT) + res |= NFS4_SHARE_WANT_NO_DELEG; +out: + return res; +} + static enum open_claim_type4 nfs4_map_atomic_open_claim(struct nfs_server *server, enum open_claim_type4 claim) @@ -1002,6 +1027,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, atomic_inc(&sp->so_count); p->o_arg.open_flags = flags; p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); + p->o_arg.share_access = nfs4_map_atomic_open_share(server, + fmode, flags); /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS * will return permission denied for all bits until close */ if (!(flags & O_EXCL)) { @@ -2695,6 +2722,9 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) goto out_wait; } } + calldata->arg.share_access = + nfs4_map_atomic_open_share(NFS_SERVER(inode), + calldata->arg.fmode, 0); nfs_fattr_init(calldata->res.fattr); calldata->timestamp = jiffies; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index a2329d69502b..e23a0a664e12 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1351,24 +1351,12 @@ static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struc encode_string(xdr, name->len, name->name); } -static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode) +static void encode_share_access(struct xdr_stream *xdr, u32 share_access) { __be32 *p; p = reserve_space(xdr, 8); - switch (fmode & (FMODE_READ|FMODE_WRITE)) { - case FMODE_READ: - *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ); - break; - case FMODE_WRITE: - *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE); - break; - case FMODE_READ|FMODE_WRITE: - *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH); - break; - default: - *p++ = cpu_to_be32(0); - } + *p++ = cpu_to_be32(share_access); *p = cpu_to_be32(0); /* for linux, share_deny = 0 always */ } @@ -1380,7 +1368,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena * owner 4 = 32 */ encode_nfs4_seqid(xdr, arg->seqid); - encode_share_access(xdr, arg->fmode); + encode_share_access(xdr, arg->share_access); p = reserve_space(xdr, 36); p = xdr_encode_hyper(p, arg->clientid); *p++ = cpu_to_be32(24); @@ -1535,7 +1523,7 @@ static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_close encode_op_hdr(xdr, OP_OPEN_DOWNGRADE, decode_open_downgrade_maxsz, hdr); encode_nfs4_stateid(xdr, &arg->stateid); encode_nfs4_seqid(xdr, arg->seqid); - encode_share_access(xdr, arg->fmode); + encode_share_access(xdr, arg->share_access); } static void @@ -4935,20 +4923,13 @@ out_overflow: return -EIO; } -static int decode_delegation(struct 
xdr_stream *xdr, struct nfs_openres *res) +static int decode_rw_delegation(struct xdr_stream *xdr, + uint32_t delegation_type, + struct nfs_openres *res) { __be32 *p; - uint32_t delegation_type; int status; - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - delegation_type = be32_to_cpup(p); - if (delegation_type == NFS4_OPEN_DELEGATE_NONE) { - res->delegation_type = 0; - return 0; - } status = decode_stateid(xdr, &res->delegation); if (unlikely(status)) return status; @@ -4972,6 +4953,52 @@ out_overflow: return -EIO; } +static int decode_no_delegation(struct xdr_stream *xdr, struct nfs_openres *res) +{ + __be32 *p; + uint32_t why_no_delegation; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + goto out_overflow; + why_no_delegation = be32_to_cpup(p); + switch (why_no_delegation) { + case WND4_CONTENTION: + case WND4_RESOURCE: + xdr_inline_decode(xdr, 4); + /* Ignore for now */ + } + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res) +{ + __be32 *p; + uint32_t delegation_type; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + goto out_overflow; + delegation_type = be32_to_cpup(p); + res->delegation_type = 0; + switch (delegation_type) { + case NFS4_OPEN_DELEGATE_NONE: + return 0; + case NFS4_OPEN_DELEGATE_READ: + case NFS4_OPEN_DELEGATE_WRITE: + return decode_rw_delegation(xdr, delegation_type, res); + case NFS4_OPEN_DELEGATE_NONE_EXT: + return decode_no_delegation(xdr, res); + } + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) { __be32 *p; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 81401125ab2d..2c35e2affa6f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -326,6 +326,7 @@ struct nfs_openargs { struct nfs_seqid * seqid; int open_flags; fmode_t fmode; + u32 share_access; u32 access; __u64 clientid; struct stateowner_id id; @@ -393,6 +394,7 @@ struct nfs_closeargs { nfs4_stateid stateid; struct nfs_seqid * seqid; fmode_t fmode; + u32 share_access; const u32 * bitmask; }; -- cgit v1.2.3 From 0ae45f63d4ef8d8eeec49c7d8b44a1775fff13e8 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 2 Feb 2015 00:37:00 -0500 Subject: vfs: add support for a lazytime mount option Add a new mount option which enables a new "lazytime" mode. This mode causes atime, mtime, and ctime updates to only be made to the in-memory version of the inode. The on-disk times will only get updated when (a) the inode needs to be updated for some non-time-related change, (b) userspace calls fsync(), syncfs() or sync(), or (c) just before an undeleted inode is evicted from memory. This is OK according to POSIX because there are no guarantees after a crash unless userspace explicitly requests it via an fsync(2) call. For workloads which feature a large number of random writes to a preallocated file, the lazytime mount option significantly reduces writes to the inode table. The repeated 4k writes to a single block will result in undesirable stress on flash devices and SMR disk drives. Even on conventional HDDs, the repeated writes to the inode table block will trigger Adjacent Track Interference (ATI) remediation latencies, which very negatively impact long tail latencies --- which is a very big deal for web serving tiers (for example).
Google-Bug-Id: 18297052 Signed-off-by: Theodore Ts'o Signed-off-by: Al Viro --- fs/ext4/inode.c | 6 ++++ fs/fs-writeback.c | 62 +++++++++++++++++++++++++++++++++------- fs/gfs2/file.c | 4 +-- fs/inode.c | 56 +++++++++++++++++++++++++----------- fs/jfs/file.c | 2 +- fs/libfs.c | 2 +- fs/proc_namespace.c | 1 + fs/sync.c | 8 ++++++ include/linux/backing-dev.h | 1 + include/linux/fs.h | 5 ++++ include/trace/events/writeback.h | 60 +++++++++++++++++++++++++++++++++++++- include/uapi/linux/fs.h | 4 ++- mm/backing-dev.c | 10 +++++-- 13 files changed, 186 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5653fa42930b..628df5ba44a6 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4840,11 +4840,17 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) * If the inode is marked synchronous, we don't honour that here - doing * so would cause a commit on atime updates, which we don't bother doing. * We handle synchronous inodes at the highest possible level. + * + * If only the I_DIRTY_TIME flag is set, we can skip everything. If + * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need + * to copy into the on-disk inode structure are the timestamp files. */ void ext4_dirty_inode(struct inode *inode, int flags) { handle_t *handle; + if (flags == I_DIRTY_TIME) + return; handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); if (IS_ERR(handle)) goto out; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 2d609a5fbfea..004686191354 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -247,14 +247,19 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) return ret; } +#define EXPIRE_DIRTY_ATIME 0x0001 + /* * Move expired (dirtied before work->older_than_this) dirty inodes from * @delaying_queue to @dispatch_queue. */ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, + int flags, struct wb_writeback_work *work) { + unsigned long *older_than_this = NULL; + unsigned long expire_time; LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; @@ -262,13 +267,21 @@ static int move_expired_inodes(struct list_head *delaying_queue, int do_sb_sort = 0; int moved = 0; + if ((flags & EXPIRE_DIRTY_ATIME) == 0) + older_than_this = work->older_than_this; + else if ((work->reason == WB_REASON_SYNC) == 0) { + expire_time = jiffies - (HZ * 86400); + older_than_this = &expire_time; + } while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (work->older_than_this && - inode_dirtied_after(inode, *work->older_than_this)) + if (older_than_this && + inode_dirtied_after(inode, *older_than_this)) break; list_move(&inode->i_wb_list, &tmp); moved++; + if (flags & EXPIRE_DIRTY_ATIME) + set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); if (sb_is_blkdev_sb(inode->i_sb)) continue; if (sb && sb != inode->i_sb) @@ -309,9 +322,12 @@ out: static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) { int moved; + assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); + moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, + EXPIRE_DIRTY_ATIME, work); trace_writeback_queue_io(wb, work, moved); } @@ -435,6 +451,8 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * updates after data IO completion. 
*/ redirty_tail(inode, wb); + } else if (inode->i_state & I_DIRTY_TIME) { + list_move(&inode->i_wb_list, &wb->b_dirty_time); } else { /* The inode is clean. Remove from writeback lists. */ list_del_init(&inode->i_wb_list); @@ -481,7 +499,13 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_lock(&inode->i_lock); dirty = inode->i_state & I_DIRTY; - inode->i_state &= ~I_DIRTY; + if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) && + (inode->i_state & I_DIRTY_TIME)) || + (inode->i_state & I_DIRTY_TIME_EXPIRED)) { + dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; + trace_writeback_lazytime(inode); + } + inode->i_state &= ~dirty; /* * Paired with smp_mb() in __mark_inode_dirty(). This allows @@ -501,8 +525,10 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_unlock(&inode->i_lock); + if (dirty & I_DIRTY_TIME) + mark_inode_dirty_sync(inode); /* Don't write the inode if only I_DIRTY_PAGES was set */ - if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { + if (dirty & ~I_DIRTY_PAGES) { int err = write_inode(inode, wbc); if (ret == 0) ret = err; @@ -550,7 +576,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, * make sure inode is on some writeback list and leave it there unless * we have completely cleaned the inode. */ - if (!(inode->i_state & I_DIRTY) && + if (!(inode->i_state & I_DIRTY_ALL) && (wbc->sync_mode != WB_SYNC_ALL || !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) goto out; @@ -565,7 +591,7 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb, * If inode is clean, remove it from writeback lists. Otherwise don't * touch it. See comment above for explanation. */ - if (!(inode->i_state & I_DIRTY)) + if (!(inode->i_state & I_DIRTY_ALL)) list_del_init(&inode->i_wb_list); spin_unlock(&wb->list_lock); inode_sync_complete(inode); @@ -707,7 +733,7 @@ static long writeback_sb_inodes(struct super_block *sb, wrote += write_chunk - wbc.nr_to_write; spin_lock(&wb->list_lock); spin_lock(&inode->i_lock); - if (!(inode->i_state & I_DIRTY)) + if (!(inode->i_state & I_DIRTY_ALL)) wrote++; requeue_inode(inode, wb, &wbc); inode_sync_complete(inode); @@ -1145,16 +1171,20 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode) * page->mapping->host, so the page-dirtying time is recorded in the internal * blockdev inode. 
*/ +#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) void __mark_inode_dirty(struct inode *inode, int flags) { struct super_block *sb = inode->i_sb; struct backing_dev_info *bdi = NULL; + int dirtytime; + + trace_writeback_mark_inode_dirty(inode, flags); /* * Don't do this for I_DIRTY_PAGES - that doesn't actually * dirty the inode itself */ - if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { + if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) { trace_writeback_dirty_inode_start(inode, flags); if (sb->s_op->dirty_inode) @@ -1162,6 +1192,9 @@ void __mark_inode_dirty(struct inode *inode, int flags) trace_writeback_dirty_inode(inode, flags); } + if (flags & I_DIRTY_INODE) + flags &= ~I_DIRTY_TIME; + dirtytime = flags & I_DIRTY_TIME; /* * Paired with smp_mb() in __writeback_single_inode() for the @@ -1169,16 +1202,21 @@ void __mark_inode_dirty(struct inode *inode, int flags) */ smp_mb(); - if ((inode->i_state & flags) == flags) + if (((inode->i_state & flags) == flags) || + (dirtytime && (inode->i_state & I_DIRTY_INODE))) return; if (unlikely(block_dump)) block_dump___mark_inode_dirty(inode); spin_lock(&inode->i_lock); + if (dirtytime && (inode->i_state & I_DIRTY_INODE)) + goto out_unlock_inode; if ((inode->i_state & flags) != flags) { const int was_dirty = inode->i_state & I_DIRTY; + if (flags & I_DIRTY_INODE) + inode->i_state &= ~I_DIRTY_TIME; inode->i_state |= flags; /* @@ -1225,8 +1263,10 @@ void __mark_inode_dirty(struct inode *inode, int flags) } inode->dirtied_when = jiffies; - list_move(&inode->i_wb_list, &bdi->wb.b_dirty); + list_move(&inode->i_wb_list, dirtytime ? + &bdi->wb.b_dirty_time : &bdi->wb.b_dirty); spin_unlock(&bdi->wb.list_lock); + trace_writeback_dirty_inode_enqueue(inode); if (wakeup_bdi) bdi_wakeup_thread_delayed(bdi); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 6e600abf694a..15c44cf457cc 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -655,7 +655,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end, { struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; - int sync_state = inode->i_state & I_DIRTY; + int sync_state = inode->i_state & I_DIRTY_ALL; struct gfs2_inode *ip = GFS2_I(inode); int ret = 0, ret1 = 0; @@ -668,7 +668,7 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end, if (!gfs2_is_jdata(ip)) sync_state &= ~I_DIRTY_PAGES; if (datasync) - sync_state &= ~I_DIRTY_SYNC; + sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME); if (sync_state) { ret = sync_inode_metadata(inode, 1); diff --git a/fs/inode.c b/fs/inode.c index aa149e7262ac..4feb85cc125f 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -18,6 +18,7 @@ #include /* for inode_has_buffers */ #include #include +#include #include "internal.h" /* @@ -30,7 +31,7 @@ * inode_sb_list_lock protects: * sb->s_inodes, inode->i_sb_list * bdi->wb.list_lock protects: - * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list + * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list * inode_hash_lock protects: * inode_hashtable, inode->i_hash * @@ -416,7 +417,8 @@ static void inode_lru_list_add(struct inode *inode) */ void inode_add_lru(struct inode *inode) { - if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) && + if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC | + I_FREEING | I_WILL_FREE)) && !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE) inode_lru_list_add(inode); } @@ -647,7 +649,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) spin_unlock(&inode->i_lock); continue; } - if (inode->i_state & 
I_DIRTY && !kill_dirty) { + if (inode->i_state & I_DIRTY_ALL && !kill_dirty) { spin_unlock(&inode->i_lock); busy = 1; continue; @@ -1432,11 +1434,20 @@ static void iput_final(struct inode *inode) */ void iput(struct inode *inode) { - if (inode) { - BUG_ON(inode->i_state & I_CLEAR); - - if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) - iput_final(inode); + if (!inode) + return; + BUG_ON(inode->i_state & I_CLEAR); +retry: + if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) { + if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) { + atomic_inc(&inode->i_count); + inode->i_state &= ~I_DIRTY_TIME; + spin_unlock(&inode->i_lock); + trace_writeback_lazytime_iput(inode); + mark_inode_dirty_sync(inode); + goto retry; + } + iput_final(inode); } } EXPORT_SYMBOL(iput); @@ -1495,14 +1506,9 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, return 0; } -/* - * This does the actual work of updating an inodes time or version. Must have - * had called mnt_want_write() before calling this. - */ -static int update_time(struct inode *inode, struct timespec *time, int flags) +int generic_update_time(struct inode *inode, struct timespec *time, int flags) { - if (inode->i_op->update_time) - return inode->i_op->update_time(inode, time, flags); + int iflags = I_DIRTY_TIME; if (flags & S_ATIME) inode->i_atime = *time; @@ -1512,9 +1518,27 @@ static int update_time(struct inode *inode, struct timespec *time, int flags) inode->i_ctime = *time; if (flags & S_MTIME) inode->i_mtime = *time; - mark_inode_dirty_sync(inode); + + if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION)) + iflags |= I_DIRTY_SYNC; + __mark_inode_dirty(inode, iflags); return 0; } +EXPORT_SYMBOL(generic_update_time); + +/* + * This does the actual work of updating an inodes time or version. Must have + * had called mnt_want_write() before calling this. + */ +static int update_time(struct inode *inode, struct timespec *time, int flags) +{ + int (*update_time)(struct inode *, struct timespec *, int); + + update_time = inode->i_op->update_time ? 
inode->i_op->update_time : + generic_update_time; + + return update_time(inode, time, flags); +} /** * touch_atime - update the access time diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 33aa0cc1f8b8..10815f8dfd8b 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -39,7 +39,7 @@ int jfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) return rc; mutex_lock(&inode->i_mutex); - if (!(inode->i_state & I_DIRTY) || + if (!(inode->i_state & I_DIRTY_ALL) || (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { /* Make sure committed changes hit the disk */ jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1); diff --git a/fs/libfs.c b/fs/libfs.c index 005843ce5dbd..b2ffdb045be4 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -948,7 +948,7 @@ int __generic_file_fsync(struct file *file, loff_t start, loff_t end, mutex_lock(&inode->i_mutex); ret = sync_mapping_buffers(inode->i_mapping); - if (!(inode->i_state & I_DIRTY)) + if (!(inode->i_state & I_DIRTY_ALL)) goto out; if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) goto out; diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 0f96f71ab32b..8db932da4009 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -44,6 +44,7 @@ static int show_sb_opts(struct seq_file *m, struct super_block *sb) { MS_SYNCHRONOUS, ",sync" }, { MS_DIRSYNC, ",dirsync" }, { MS_MANDLOCK, ",mand" }, + { MS_LAZYTIME, ",lazytime" }, { 0, NULL } }; const struct proc_fs_info *fs_infop; diff --git a/fs/sync.c b/fs/sync.c index 01d9f18a70b5..fbc98ee62044 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -177,8 +177,16 @@ SYSCALL_DEFINE1(syncfs, int, fd) */ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) { + struct inode *inode = file->f_mapping->host; + if (!file->f_op->fsync) return -EINVAL; + if (!datasync && (inode->i_state & I_DIRTY_TIME)) { + spin_lock(&inode->i_lock); + inode->i_state &= ~I_DIRTY_TIME; + spin_unlock(&inode->i_lock); + mark_inode_dirty_sync(inode); + } return file->f_op->fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range); diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 5da6012b7a14..4cdf7336f64a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -55,6 +55,7 @@ struct bdi_writeback { struct list_head b_dirty; /* dirty inodes */ struct list_head b_io; /* parked for writeback */ struct list_head b_more_io; /* parked for more writeback */ + struct list_head b_dirty_time; /* time stamps are dirty */ spinlock_t list_lock; /* protects the b_* lists */ }; diff --git a/include/linux/fs.h b/include/linux/fs.h index 42efe13077b6..cd027ce2c705 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1746,8 +1746,12 @@ struct super_operations { #define __I_DIO_WAKEUP 9 #define I_DIO_WAKEUP (1 << I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) +#define I_DIRTY_TIME (1 << 11) +#define __I_DIRTY_TIME_EXPIRED 12 +#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) +#define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) extern void __mark_inode_dirty(struct inode *, int); static inline void mark_inode_dirty(struct inode *inode) @@ -1910,6 +1914,7 @@ extern int current_umask(void); extern void ihold(struct inode * inode); extern void iput(struct inode *); +extern int generic_update_time(struct inode *, struct timespec *, int); static inline struct inode *file_inode(const struct file *f) { diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 
cee02d65ab3f..5ecb4c234625 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -18,6 +18,8 @@ {I_FREEING, "I_FREEING"}, \ {I_CLEAR, "I_CLEAR"}, \ {I_SYNC, "I_SYNC"}, \ + {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ + {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \ {I_REFERENCED, "I_REFERENCED"} \ ) @@ -68,6 +70,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, TP_STRUCT__entry ( __array(char, name, 32) __field(unsigned long, ino) + __field(unsigned long, state) __field(unsigned long, flags) ), @@ -78,16 +81,25 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template, strncpy(__entry->name, bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); __entry->ino = inode->i_ino; + __entry->state = inode->i_state; __entry->flags = flags; ), - TP_printk("bdi %s: ino=%lu flags=%s", + TP_printk("bdi %s: ino=%lu state=%s flags=%s", __entry->name, __entry->ino, + show_inode_state(__entry->state), show_inode_state(__entry->flags) ) ); +DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty, + + TP_PROTO(struct inode *inode, int flags), + + TP_ARGS(inode, flags) +); + DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start, TP_PROTO(struct inode *inode, int flags), @@ -598,6 +610,52 @@ DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, TP_ARGS(inode, wbc, nr_to_write) ); +DECLARE_EVENT_CLASS(writeback_lazytime_template, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field(unsigned long, ino ) + __field(unsigned long, state ) + __field( __u16, mode ) + __field(unsigned long, dirtied_when ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->mode = inode->i_mode; + __entry->dirtied_when = inode->dirtied_when; + ), + + TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, __entry->dirtied_when, + show_inode_state(__entry->state), __entry->mode) +); + +DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + #endif /* _TRACE_WRITEBACK_H */ /* This part must be outside protection */ diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 3735fa0a6784..9b964a5920af 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -90,6 +90,7 @@ struct inodes_stat_t { #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define MS_I_VERSION (1<<23) /* Update inode I_version field */ #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ /* These sb flags are internal to the kernel */ #define MS_NOSEC (1<<28) @@ -100,7 +101,8 @@ struct inodes_stat_t { /* * Superblock flags that can be altered by MS_REMOUNT */ -#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION) +#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ + MS_LAZYTIME) /* * Old magic mount flag and mask diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 0ae0df55000b..915feea94c66 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -69,10 +69,10 @@ static int bdi_debug_stats_show(struct seq_file *m, 
void *v) unsigned long background_thresh; unsigned long dirty_thresh; unsigned long bdi_thresh; - unsigned long nr_dirty, nr_io, nr_more_io; + unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time; struct inode *inode; - nr_dirty = nr_io = nr_more_io = 0; + nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0; spin_lock(&wb->list_lock); list_for_each_entry(inode, &wb->b_dirty, i_wb_list) nr_dirty++; @@ -80,6 +80,9 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) nr_io++; list_for_each_entry(inode, &wb->b_more_io, i_wb_list) nr_more_io++; + list_for_each_entry(inode, &wb->b_dirty_time, i_wb_list) + if (inode->i_state & I_DIRTY_TIME) + nr_dirty_time++; spin_unlock(&wb->list_lock); global_dirty_limits(&background_thresh, &dirty_thresh); @@ -98,6 +101,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) "b_dirty: %10lu\n" "b_io: %10lu\n" "b_more_io: %10lu\n" + "b_dirty_time: %10lu\n" "bdi_list: %10u\n" "state: %10lx\n", (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), @@ -111,6 +115,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v) nr_dirty, nr_io, nr_more_io, + nr_dirty_time, !list_empty(&bdi->bdi_list), bdi->state); #undef K @@ -418,6 +423,7 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) INIT_LIST_HEAD(&wb->b_dirty); INIT_LIST_HEAD(&wb->b_io); INIT_LIST_HEAD(&wb->b_more_io); + INIT_LIST_HEAD(&wb->b_dirty_time); spin_lock_init(&wb->list_lock); INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn); } -- cgit v1.2.3 From fe032c422c5ba562ba9c2d316f55e258e03259c6 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 2 Feb 2015 00:37:01 -0500 Subject: vfs: add find_inode_nowait() function Add a new function find_inode_nowait() which is an even more general version of ilookup5_nowait(). It is designed for callers which need very fine grained control over when the function is allowed to block or increment the inode's reference count. Signed-off-by: Theodore Ts'o Signed-off-by: Al Viro --- fs/inode.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/fs.h | 5 +++++ 2 files changed, 55 insertions(+) (limited to 'fs') diff --git a/fs/inode.c b/fs/inode.c index 4feb85cc125f..740cba79c2b9 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1284,6 +1284,56 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino) } EXPORT_SYMBOL(ilookup); +/** + * find_inode_nowait - find an inode in the inode cache + * @sb: super block of file system to search + * @hashval: hash value (usually inode number) to search for + * @match: callback used for comparisons between inodes + * @data: opaque data pointer to pass to @match + * + * Search for the inode specified by @hashval and @data in the inode + * cache, where the helper function @match will return 0 if the inode + * does not match, 1 if the inode does match, and -1 if the search + * should be stopped. The @match function must be responsible for + * taking the i_lock spin_lock and checking i_state for an inode being + * freed or being initialized, and incrementing the reference count + * before returning 1. It also must not sleep, since it is called with + * the inode_hash_lock spinlock held. + * + * This is an even more generalized version of ilookup5() when the + * function must never block --- find_inode() can block in + * __wait_on_freeing_inode() --- or when the caller can not increment + * the reference count because the resulting iput() might cause an + * inode eviction. The tradeoff is that the @match function must be + * very carefully implemented.
+ */ +struct inode *find_inode_nowait(struct super_block *sb, + unsigned long hashval, + int (*match)(struct inode *, unsigned long, + void *), + void *data) +{ + struct hlist_head *head = inode_hashtable + hash(sb, hashval); + struct inode *inode, *ret_inode = NULL; + int mval; + + spin_lock(&inode_hash_lock); + hlist_for_each_entry(inode, head, i_hash) { + if (inode->i_sb != sb) + continue; + mval = match(inode, hashval, data); + if (mval == 0) + continue; + if (mval == 1) + ret_inode = inode; + goto out; + } +out: + spin_unlock(&inode_hash_lock); + return ret_inode; +} +EXPORT_SYMBOL(find_inode_nowait); + int insert_inode_locked(struct inode *inode) { struct super_block *sb = inode->i_sb; diff --git a/include/linux/fs.h b/include/linux/fs.h index cd027ce2c705..5ea8b6e46a3d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2441,6 +2441,11 @@ extern struct inode *ilookup(struct super_block *sb, unsigned long ino); extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); extern struct inode * iget_locked(struct super_block *, unsigned long); +extern struct inode *find_inode_nowait(struct super_block *, + unsigned long, + int (*match)(struct inode *, + unsigned long, void *), + void *data); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); #ifdef CONFIG_DEBUG_LOCK_ALLOC -- cgit v1.2.3 From a26f49926da938f47561f386be56a83dd37a496d Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 2 Feb 2015 00:37:02 -0500 Subject: ext4: add optimization for the lazytime mount option Add an optimization for the MS_LAZYTIME mount option so that we will opportunistically write out any inodes with the I_DIRTY_TIME flag set in a particular inode table block when we need to update some inode in that inode table block anyway. Also add some temporary code so that we can set the lazytime mount option without needing a modified /sbin/mount program which can set MS_LAZYTIME. We can eventually make this go away once util-linux has added support. 
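A hedged example, with a placeholder device and mount point: until util-linux can pass MS_LAZYTIME itself, the temporary "lazytime" option string added here lets the mode be requested through the ordinary ext4-specific mount option list.

	#include <sys/mount.h>

	mount("/dev/sdb1", "/mnt/scratch", "ext4", 0, "lazytime");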
Google-Bug-Id: 18297052 Signed-off-by: Theodore Ts'o Signed-off-by: Al Viro --- fs/ext4/inode.c | 64 +++++++++++++++++++++++++++++++++++++++++++-- fs/ext4/super.c | 10 +++++++ include/trace/events/ext4.h | 30 +++++++++++++++++++++ 3 files changed, 102 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 628df5ba44a6..9193ea130dcb 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4139,6 +4139,65 @@ static int ext4_inode_blocks_set(handle_t *handle, return 0; } +struct other_inode { + unsigned long orig_ino; + struct ext4_inode *raw_inode; +}; + +static int other_inode_match(struct inode * inode, unsigned long ino, + void *data) +{ + struct other_inode *oi = (struct other_inode *) data; + + if ((inode->i_ino != ino) || + (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | + I_DIRTY_SYNC | I_DIRTY_DATASYNC)) || + ((inode->i_state & I_DIRTY_TIME) == 0)) + return 0; + spin_lock(&inode->i_lock); + if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW | + I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) && + (inode->i_state & I_DIRTY_TIME)) { + struct ext4_inode_info *ei = EXT4_I(inode); + + inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + spin_unlock(&inode->i_lock); + + spin_lock(&ei->i_raw_lock); + EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode); + EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode); + EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode); + ext4_inode_csum_set(inode, oi->raw_inode, ei); + spin_unlock(&ei->i_raw_lock); + trace_ext4_other_inode_update_time(inode, oi->orig_ino); + return -1; + } + spin_unlock(&inode->i_lock); + return -1; +} + +/* + * Opportunistically update the other time fields for other inodes in + * the same inode table block. + */ +static void ext4_update_other_inodes_time(struct super_block *sb, + unsigned long orig_ino, char *buf) +{ + struct other_inode oi; + unsigned long ino; + int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; + int inode_size = EXT4_INODE_SIZE(sb); + + oi.orig_ino = orig_ino; + ino = orig_ino & ~(inodes_per_block - 1); + for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { + if (ino == orig_ino) + continue; + oi.raw_inode = (struct ext4_inode *) buf; + (void) find_inode_nowait(sb, ino, other_inode_match, &oi); + } +} + /* * Post the struct inode info into an on-disk inode location in the * buffer-cache. 
This gobbles the caller's reference to the @@ -4248,10 +4307,11 @@ static int ext4_do_update_inode(handle_t *handle, cpu_to_le16(ei->i_extra_isize); } } - ext4_inode_csum_set(inode, raw_inode, ei); - spin_unlock(&ei->i_raw_lock); + if (inode->i_sb->s_flags & MS_LAZYTIME) + ext4_update_other_inodes_time(inode->i_sb, inode->i_ino, + bh->b_data); BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); rc = ext4_handle_dirty_metadata(handle, NULL, bh); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 74c5f53595fb..362b23c8497a 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1139,6 +1139,7 @@ enum { Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, + Opt_lazytime, Opt_nolazytime, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, Opt_dioread_nolock, Opt_dioread_lock, @@ -1202,6 +1203,8 @@ static const match_table_t tokens = { {Opt_i_version, "i_version"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, + {Opt_lazytime, "lazytime"}, + {Opt_nolazytime, "nolazytime"}, {Opt_nodelalloc, "nodelalloc"}, {Opt_removed, "mblk_io_submit"}, {Opt_removed, "nomblk_io_submit"}, @@ -1459,6 +1462,12 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, case Opt_i_version: sb->s_flags |= MS_I_VERSION; return 1; + case Opt_lazytime: + sb->s_flags |= MS_LAZYTIME; + return 1; + case Opt_nolazytime: + sb->s_flags &= ~MS_LAZYTIME; + return 1; } for (m = ext4_mount_opts; m->token != Opt_err; m++) @@ -5020,6 +5029,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } #endif + *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); kfree(orig_data); return 0; diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 6cfb841fea7c..6e5abd6d38a2 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -73,6 +73,36 @@ struct extent_status; { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}) +TRACE_EVENT(ext4_other_inode_update_time, + TP_PROTO(struct inode *inode, ino_t orig_ino), + + TP_ARGS(inode, orig_ino), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( ino_t, orig_ino ) + __field( uid_t, uid ) + __field( gid_t, gid ) + __field( __u16, mode ) + ), + + TP_fast_assign( + __entry->orig_ino = orig_ino; + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->uid = i_uid_read(inode); + __entry->gid = i_gid_read(inode); + __entry->mode = inode->i_mode; + ), + + TP_printk("dev %d,%d orig_ino %lu ino %lu mode 0%o uid %u gid %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->orig_ino, + (unsigned long) __entry->ino, __entry->mode, + __entry->uid, __entry->gid) +); + TRACE_EVENT(ext4_free_inode, TP_PROTO(struct inode *inode), -- cgit v1.2.3 From 8650b8a058502d6957ba13dfb5544724fa038118 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 21 Jan 2015 11:40:00 +0100 Subject: nfsd: pNFS block layout driver Add a small shim between core nfsd and filesystems to translate the somewhat cumbersome pNFS data structures and semantics to something more palatable for Linux filesystems. Thanks to Rick McNeal for the old prototype pNFS blocklayout server code, which gave a lot of inspiration to this version even if no code is left from it. 
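On the filesystem side the shim is driven through three export operations: get_uuid() to describe the shared volume, map_blocks() to hand out extents for LAYOUTGET, and commit_blocks() to commit client writes at LAYOUTCOMMIT time; nfsd4_setup_layout_type() further down only enables the block layout when all three are present. A rough sketch of what an exporting filesystem provides, with the prototypes inferred from the call sites in blocklayout.c, assuming the companion exportfs change that adds these methods to struct export_operations; the demo_* names are invented here:

#include <linux/exportfs.h>
#include <linux/fs.h>

static int demo_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset)
{
	/* Fill buf with a signature that identifies the shared volume and
	 * report where that signature lives on disk via *len and *offset. */
	return -ENOSYS;
}

static int demo_map_blocks(struct inode *inode, loff_t offset, u64 len,
			   struct iomap *iomap, bool write,
			   u32 *device_generation)
{
	/* Map [offset, offset + len) to an extent; for write layouts this
	 * may allocate unwritten blocks, as the XFS implementation does. */
	return -ENXIO;
}

static int demo_commit_blocks(struct inode *inode, struct iomap *iomaps,
			      int nr_iomaps, struct iattr *iattr)
{
	/* Convert the extents the client wrote and apply the size and
	 * timestamp updates carried by LAYOUTCOMMIT. */
	return 0;
}

static const struct export_operations demo_export_ops = {
	/* ... the usual fh_to_dentry etc. ... */
	.get_uuid	= demo_get_uuid,
	.map_blocks	= demo_map_blocks,
	.commit_blocks	= demo_commit_blocks,
};
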
Signed-off-by: Christoph Hellwig --- .../filesystems/nfs/pnfs-block-server.txt | 37 ++++ fs/nfsd/Makefile | 2 +- fs/nfsd/blocklayout.c | 189 +++++++++++++++++++++ fs/nfsd/blocklayoutxdr.c | 157 +++++++++++++++++ fs/nfsd/blocklayoutxdr.h | 62 +++++++ fs/nfsd/nfs4layouts.c | 8 + fs/nfsd/pnfs.h | 1 + 7 files changed, 455 insertions(+), 1 deletion(-) create mode 100644 Documentation/filesystems/nfs/pnfs-block-server.txt create mode 100644 fs/nfsd/blocklayout.c create mode 100644 fs/nfsd/blocklayoutxdr.c create mode 100644 fs/nfsd/blocklayoutxdr.h (limited to 'fs') diff --git a/Documentation/filesystems/nfs/pnfs-block-server.txt b/Documentation/filesystems/nfs/pnfs-block-server.txt new file mode 100644 index 000000000000..2143673cf154 --- /dev/null +++ b/Documentation/filesystems/nfs/pnfs-block-server.txt @@ -0,0 +1,37 @@ +pNFS block layout server user guide + +The Linux NFS server now supports the pNFS block layout extension. In this +case the NFS server acts as Metadata Server (MDS) for pNFS, which in addition +to handling all the metadata access to the NFS export also hands out layouts +to the clients to directly access the underlying block devices that are +shared with the client. + +To use pNFS block layouts with with the Linux NFS server the exported file +system needs to support the pNFS block layouts (currently just XFS), and the +file system must sit on shared storage (typically iSCSI) that is accessible +to the clients in addition to the MDS. As of now the file system needs to +sit directly on the exported volume, striping or concatenation of +volumes on the MDS and clients is not supported yet. + +On the server, pNFS block volume support is automatically if the file system +support it. On the client make sure the kernel has the CONFIG_PNFS_BLOCK +option enabled, the blkmapd daemon from nfs-utils is running, and the +file system is mounted using the NFSv4.1 protocol version (mount -o vers=4.1). + +If the nfsd server needs to fence a non-responding client it calls +/sbin/nfsd-recall-failed with the first argument set to the IP address of +the client, and the second argument set to the device node without the /dev +prefix for the file system to be fenced. Below is an example file that shows +how to translate the device into a serial number from SCSI EVPD 0x80: + +cat > /sbin/nfsd-recall-failed << EOF +#!/bin/sh + +CLIENT="$1" +DEV="/dev/$2" +EVPD=`sg_inq --page=0x80 ${DEV} | \ + grep "Unit serial number:" | \ + awk -F ': ' '{print $2}'` + +echo "fencing client ${CLIENT} serial ${EVPD}" >> /var/log/pnfsd-fence.log +EOF diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile index 6cba933880c5..9a6028e120c6 100644 --- a/fs/nfsd/Makefile +++ b/fs/nfsd/Makefile @@ -17,4 +17,4 @@ nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \ nfs4acl.o nfs4callback.o nfs4recover.o -nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o +nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o blocklayout.o blocklayoutxdr.o diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c new file mode 100644 index 000000000000..cdbc78c72542 --- /dev/null +++ b/fs/nfsd/blocklayout.c @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2014 Christoph Hellwig. 
+ */ +#include +#include +#include + +#include + +#include "blocklayoutxdr.h" +#include "pnfs.h" + +#define NFSDDBG_FACILITY NFSDDBG_PNFS + + +static int +nfsd4_block_get_device_info_simple(struct super_block *sb, + struct nfsd4_getdeviceinfo *gdp) +{ + struct pnfs_block_deviceaddr *dev; + struct pnfs_block_volume *b; + + dev = kzalloc(sizeof(struct pnfs_block_deviceaddr) + + sizeof(struct pnfs_block_volume), GFP_KERNEL); + if (!dev) + return -ENOMEM; + gdp->gd_device = dev; + + dev->nr_volumes = 1; + b = &dev->volumes[0]; + + b->type = PNFS_BLOCK_VOLUME_SIMPLE; + b->simple.sig_len = PNFS_BLOCK_UUID_LEN; + return sb->s_export_op->get_uuid(sb, b->simple.sig, &b->simple.sig_len, + &b->simple.offset); +} + +static __be32 +nfsd4_block_proc_getdeviceinfo(struct super_block *sb, + struct nfsd4_getdeviceinfo *gdp) +{ + if (sb->s_bdev != sb->s_bdev->bd_contains) + return nfserr_inval; + return nfserrno(nfsd4_block_get_device_info_simple(sb, gdp)); +} + +static __be32 +nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, + struct nfsd4_layoutget *args) +{ + struct nfsd4_layout_seg *seg = &args->lg_seg; + struct super_block *sb = inode->i_sb; + u32 block_size = (1 << inode->i_blkbits); + struct pnfs_block_extent *bex; + struct iomap iomap; + u32 device_generation = 0; + int error; + + /* + * We do not attempt to support I/O smaller than the fs block size, + * or not aligned to it. + */ + if (args->lg_minlength < block_size) { + dprintk("pnfsd: I/O too small\n"); + goto out_layoutunavailable; + } + if (seg->offset & (block_size - 1)) { + dprintk("pnfsd: I/O misaligned\n"); + goto out_layoutunavailable; + } + + /* + * Some clients barf on non-zero block numbers for NONE or INVALID + * layouts, so make sure to zero the whole structure. + */ + error = -ENOMEM; + bex = kzalloc(sizeof(*bex), GFP_KERNEL); + if (!bex) + goto out_error; + args->lg_content = bex; + + error = sb->s_export_op->map_blocks(inode, seg->offset, seg->length, + &iomap, seg->iomode != IOMODE_READ, + &device_generation); + if (error) { + if (error == -ENXIO) + goto out_layoutunavailable; + goto out_error; + } + + if (iomap.length < args->lg_minlength) { + dprintk("pnfsd: extent smaller than minlength\n"); + goto out_layoutunavailable; + } + + switch (iomap.type) { + case IOMAP_MAPPED: + if (seg->iomode == IOMODE_READ) + bex->es = PNFS_BLOCK_READ_DATA; + else + bex->es = PNFS_BLOCK_READWRITE_DATA; + bex->soff = (iomap.blkno << 9); + break; + case IOMAP_UNWRITTEN: + if (seg->iomode & IOMODE_RW) { + /* + * Crack monkey special case from section 2.3.1. 
+ */ + if (args->lg_minlength == 0) { + dprintk("pnfsd: no soup for you!\n"); + goto out_layoutunavailable; + } + + bex->es = PNFS_BLOCK_INVALID_DATA; + bex->soff = (iomap.blkno << 9); + break; + } + /*FALLTHRU*/ + case IOMAP_HOLE: + if (seg->iomode == IOMODE_READ) { + bex->es = PNFS_BLOCK_NONE_DATA; + break; + } + /*FALLTHRU*/ + case IOMAP_DELALLOC: + default: + WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type); + goto out_layoutunavailable; + } + + error = nfsd4_set_deviceid(&bex->vol_id, fhp, device_generation); + if (error) + goto out_error; + bex->foff = iomap.offset; + bex->len = iomap.length; + + seg->offset = iomap.offset; + seg->length = iomap.length; + + dprintk("GET: %lld:%lld %d\n", bex->foff, bex->len, bex->es); + return 0; + +out_error: + seg->length = 0; + return nfserrno(error); +out_layoutunavailable: + seg->length = 0; + return nfserr_layoutunavailable; +} + +static __be32 +nfsd4_block_proc_layoutcommit(struct inode *inode, + struct nfsd4_layoutcommit *lcp) +{ + loff_t new_size = lcp->lc_last_wr + 1; + struct iattr iattr = { .ia_valid = 0 }; + struct iomap *iomaps; + int nr_iomaps; + int error; + + nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, + lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); + if (nr_iomaps < 0) + return nfserrno(nr_iomaps); + + if (lcp->lc_mtime.tv_nsec == UTIME_NOW || + timespec_compare(&lcp->lc_mtime, &inode->i_mtime) < 0) + lcp->lc_mtime = current_fs_time(inode->i_sb); + iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME; + iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime; + + if (new_size > i_size_read(inode)) { + iattr.ia_valid |= ATTR_SIZE; + iattr.ia_size = new_size; + } + + error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps, + nr_iomaps, &iattr); + kfree(iomaps); + return nfserrno(error); +} + +const struct nfsd4_layout_ops bl_layout_ops = { + .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo, + .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo, + .proc_layoutget = nfsd4_block_proc_layoutget, + .encode_layoutget = nfsd4_block_encode_layoutget, + .proc_layoutcommit = nfsd4_block_proc_layoutcommit, +}; diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c new file mode 100644 index 000000000000..9da89fddab33 --- /dev/null +++ b/fs/nfsd/blocklayoutxdr.c @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2014 Christoph Hellwig. 
+ */ +#include +#include +#include + +#include "nfsd.h" +#include "blocklayoutxdr.h" + +#define NFSDDBG_FACILITY NFSDDBG_PNFS + + +__be32 +nfsd4_block_encode_layoutget(struct xdr_stream *xdr, + struct nfsd4_layoutget *lgp) +{ + struct pnfs_block_extent *b = lgp->lg_content; + int len = sizeof(__be32) + 5 * sizeof(__be64) + sizeof(__be32); + __be32 *p; + + p = xdr_reserve_space(xdr, sizeof(__be32) + len); + if (!p) + return nfserr_toosmall; + + *p++ = cpu_to_be32(len); + *p++ = cpu_to_be32(1); /* we always return a single extent */ + + p = xdr_encode_opaque_fixed(p, &b->vol_id, + sizeof(struct nfsd4_deviceid)); + p = xdr_encode_hyper(p, b->foff); + p = xdr_encode_hyper(p, b->len); + p = xdr_encode_hyper(p, b->soff); + *p++ = cpu_to_be32(b->es); + return 0; +} + +static int +nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b) +{ + __be32 *p; + int len; + + switch (b->type) { + case PNFS_BLOCK_VOLUME_SIMPLE: + len = 4 + 4 + 8 + 4 + b->simple.sig_len; + p = xdr_reserve_space(xdr, len); + if (!p) + return -ETOOSMALL; + + *p++ = cpu_to_be32(b->type); + *p++ = cpu_to_be32(1); /* single signature */ + p = xdr_encode_hyper(p, b->simple.offset); + p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len); + break; + default: + return -ENOTSUPP; + } + + return len; +} + +__be32 +nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr, + struct nfsd4_getdeviceinfo *gdp) +{ + struct pnfs_block_deviceaddr *dev = gdp->gd_device; + int len = sizeof(__be32), ret, i; + __be32 *p; + + p = xdr_reserve_space(xdr, len + sizeof(__be32)); + if (!p) + return nfserr_resource; + + for (i = 0; i < dev->nr_volumes; i++) { + ret = nfsd4_block_encode_volume(xdr, &dev->volumes[i]); + if (ret < 0) + return nfserrno(ret); + len += ret; + } + + /* + * Fill in the overall length and number of volumes at the beginning + * of the layout. 
+ */ + *p++ = cpu_to_be32(len); + *p++ = cpu_to_be32(dev->nr_volumes); + return 0; +} + +int +nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, + u32 block_size) +{ + struct iomap *iomaps; + u32 nr_iomaps, expected, i; + + if (len < sizeof(u32)) { + dprintk("%s: extent array too small: %u\n", __func__, len); + return -EINVAL; + } + + nr_iomaps = be32_to_cpup(p++); + expected = sizeof(__be32) + nr_iomaps * NFS4_BLOCK_EXTENT_SIZE; + if (len != expected) { + dprintk("%s: extent array size mismatch: %u/%u\n", + __func__, len, expected); + return -EINVAL; + } + + iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL); + if (!iomaps) { + dprintk("%s: failed to allocate extent array\n", __func__); + return -ENOMEM; + } + + for (i = 0; i < nr_iomaps; i++) { + struct pnfs_block_extent bex; + + memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid)); + p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid)); + + p = xdr_decode_hyper(p, &bex.foff); + if (bex.foff & (block_size - 1)) { + dprintk("%s: unaligned offset %lld\n", + __func__, bex.foff); + goto fail; + } + p = xdr_decode_hyper(p, &bex.len); + if (bex.len & (block_size - 1)) { + dprintk("%s: unaligned length %lld\n", + __func__, bex.foff); + goto fail; + } + p = xdr_decode_hyper(p, &bex.soff); + if (bex.soff & (block_size - 1)) { + dprintk("%s: unaligned disk offset %lld\n", + __func__, bex.soff); + goto fail; + } + bex.es = be32_to_cpup(p++); + if (bex.es != PNFS_BLOCK_READWRITE_DATA) { + dprintk("%s: incorrect extent state %d\n", + __func__, bex.es); + goto fail; + } + + iomaps[i].offset = bex.foff; + iomaps[i].length = bex.len; + } + + *iomapp = iomaps; + return nr_iomaps; +fail: + kfree(iomaps); + return -EINVAL; +} diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h new file mode 100644 index 000000000000..fdc79037c0e7 --- /dev/null +++ b/fs/nfsd/blocklayoutxdr.h @@ -0,0 +1,62 @@ +#ifndef _NFSD_BLOCKLAYOUTXDR_H +#define _NFSD_BLOCKLAYOUTXDR_H 1 + +#include +#include "xdr4.h" + +struct iomap; +struct xdr_stream; + +enum pnfs_block_extent_state { + PNFS_BLOCK_READWRITE_DATA = 0, + PNFS_BLOCK_READ_DATA = 1, + PNFS_BLOCK_INVALID_DATA = 2, + PNFS_BLOCK_NONE_DATA = 3, +}; + +struct pnfs_block_extent { + struct nfsd4_deviceid vol_id; + u64 foff; + u64 len; + u64 soff; + enum pnfs_block_extent_state es; +}; +#define NFS4_BLOCK_EXTENT_SIZE 44 + +enum pnfs_block_volume_type { + PNFS_BLOCK_VOLUME_SIMPLE = 0, + PNFS_BLOCK_VOLUME_SLICE = 1, + PNFS_BLOCK_VOLUME_CONCAT = 2, + PNFS_BLOCK_VOLUME_STRIPE = 3, +}; + +/* + * Random upper cap for the uuid length to avoid unbounded allocation. + * Not actually limited by the protocol. 
+ */ +#define PNFS_BLOCK_UUID_LEN 128 + +struct pnfs_block_volume { + enum pnfs_block_volume_type type; + union { + struct { + u64 offset; + u32 sig_len; + u8 sig[PNFS_BLOCK_UUID_LEN]; + } simple; + }; +}; + +struct pnfs_block_deviceaddr { + u32 nr_volumes; + struct pnfs_block_volume volumes[]; +}; + +__be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr, + struct nfsd4_getdeviceinfo *gdp); +__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr, + struct nfsd4_layoutget *lgp); +int nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp, + u32 block_size); + +#endif /* _NFSD_BLOCKLAYOUTXDR_H */ diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index 60137c54b2f7..3c1bfa155571 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -26,6 +26,7 @@ static struct nfsd4_callback_ops nfsd4_cb_layout_ops; static const struct lock_manager_operations nfsd4_layouts_lm_ops; const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = { + [LAYOUT_BLOCK_VOLUME] = &bl_layout_ops, }; /* pNFS device ID to export fsid mapping */ @@ -115,8 +116,15 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp, void nfsd4_setup_layout_type(struct svc_export *exp) { + struct super_block *sb = exp->ex_path.mnt->mnt_sb; + if (exp->ex_flags & NFSEXP_NOPNFS) return; + + if (sb->s_export_op->get_uuid && + sb->s_export_op->map_blocks && + sb->s_export_op->commit_blocks) + exp->ex_layout_type = LAYOUT_BLOCK_VOLUME; } static void diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h index a9616a4e13cd..fedb4d620a81 100644 --- a/fs/nfsd/pnfs.h +++ b/fs/nfsd/pnfs.h @@ -34,6 +34,7 @@ struct nfsd4_layout_ops { }; extern const struct nfsd4_layout_ops *nfsd4_layout_ops[]; +extern const struct nfsd4_layout_ops bl_layout_ops; __be32 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stateid_t *stateid, -- cgit v1.2.3 From ea7c38fef0b774a5dc16fb0ca5935f0ae8568176 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 5 Feb 2015 15:13:24 -0500 Subject: NFSv4: Ensure we reference the inode for return-on-close in delegreturn If we have to do a return-on-close in the delegreturn code, then we must ensure that the inode and super block remain referenced. 
Cc: Peng Tao Cc: stable@vger.kernel.org # 3.17.x Signed-off-by: Trond Myklebust Reviewed-by: Peng Tao --- fs/nfs/internal.h | 22 +++++++++++++++++++++- fs/nfs/nfs4proc.c | 14 +++++++++----- fs/nfs/super.c | 9 ++++++--- 3 files changed, 36 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index a98cf2006179..21469e6e3834 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -391,7 +391,7 @@ extern struct rpc_stat nfs_rpcstat; extern int __init register_nfs_fs(void); extern void __exit unregister_nfs_fs(void); -extern void nfs_sb_active(struct super_block *sb); +extern bool nfs_sb_active(struct super_block *sb); extern void nfs_sb_deactive(struct super_block *sb); /* namespace.c */ @@ -514,6 +514,26 @@ extern int nfs41_walk_client_list(struct nfs_client *clp, struct nfs_client **result, struct rpc_cred *cred); +static inline struct inode *nfs_igrab_and_active(struct inode *inode) +{ + inode = igrab(inode); + if (inode != NULL && !nfs_sb_active(inode->i_sb)) { + iput(inode); + inode = NULL; + } + return inode; +} + +static inline void nfs_iput_and_deactive(struct inode *inode) +{ + if (inode != NULL) { + struct super_block *sb = inode->i_sb; + + iput(inode); + nfs_sb_deactive(sb); + } +} + /* * Determine the device name as a string */ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index cd4295d84d54..dd892a4e7eb3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5175,9 +5175,13 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) static void nfs4_delegreturn_release(void *calldata) { struct nfs4_delegreturndata *data = calldata; + struct inode *inode = data->inode; - if (data->roc) - pnfs_roc_release(data->inode); + if (inode) { + if (data->roc) + pnfs_roc_release(inode); + nfs_iput_and_deactive(inode); + } kfree(calldata); } @@ -5234,9 +5238,9 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co nfs_fattr_init(data->res.fattr); data->timestamp = jiffies; data->rpc_status = 0; - data->inode = inode; - data->roc = list_empty(&NFS_I(inode)->open_files) ? - pnfs_roc(inode) : false; + data->inode = nfs_igrab_and_active(inode); + if (data->inode) + data->roc = nfs4_roc(inode); task_setup_data.callback_data = data; msg.rpc_argp = &data->args; diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 31a11b0e885d..368d9395d2e7 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -405,12 +405,15 @@ void __exit unregister_nfs_fs(void) unregister_filesystem(&nfs_fs_type); } -void nfs_sb_active(struct super_block *sb) +bool nfs_sb_active(struct super_block *sb) { struct nfs_server *server = NFS_SB(sb); - if (atomic_inc_return(&server->active) == 1) - atomic_inc(&sb->s_active); + if (!atomic_inc_not_zero(&sb->s_active)) + return false; + if (atomic_inc_return(&server->active) != 1) + atomic_dec(&sb->s_active); + return true; } EXPORT_SYMBOL_GPL(nfs_sb_active); -- cgit v1.2.3 From 472e259449819d939b5a5188b6f4c7d59aa4304c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 5 Feb 2015 16:50:30 -0500 Subject: NFSv4.1: Pin the inode and super block in asynchronous layoutcommit If we're sending an asynchronous layoutcommit, then we need to ensure that the inode and the super block remain pinned. 
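The fix reuses the nfs_igrab_and_active()/nfs_iput_and_deactive() helpers introduced for delegreturn above; stripped of the RPC details the pattern looks roughly like this (an illustrative sketch only, the demo_* names are not from the patch and it assumes fs/nfs context, i.e. #include "internal.h"):

#include <linux/fs.h>
#include <linux/slab.h>

struct demo_calldata {
	struct inode *inode;
	/* ... per-call RPC state ... */
};

static int demo_start_async_op(struct demo_calldata *data, struct inode *inode)
{
	/* Pin the inode and its superblock before going asynchronous. */
	data->inode = nfs_igrab_and_active(inode);
	if (data->inode == NULL)
		return -EAGAIN;		/* fs is going away; bail out */

	/* ... set up and launch the RPC_TASK_ASYNC rpc task, stashing
	 * 'data' as the callback data ... */
	return 0;
}

static void demo_async_op_release(struct demo_calldata *data)
{
	/* Runs from the rpc release callback once the task is done. */
	nfs_iput_and_deactive(data->inode);
	kfree(data);
}
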
Signed-off-by: Trond Myklebust Reviewed-by: Peng Tao --- fs/nfs/nfs4proc.c | 19 +++++++++++-------- include/linux/nfs_xdr.h | 1 + 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index dd892a4e7eb3..e092b8540e2e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7989,6 +7989,7 @@ static void nfs4_layoutcommit_release(void *calldata) nfs_post_op_update_inode_force_wcc(data->args.inode, data->res.fattr); put_rpccred(data->cred); + nfs_iput_and_deactive(data->inode); kfree(data); } @@ -8013,7 +8014,6 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) .rpc_message = &msg, .callback_ops = &nfs4_layoutcommit_ops, .callback_data = data, - .flags = RPC_TASK_ASYNC, }; struct rpc_task *task; int status = 0; @@ -8024,18 +8024,21 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) data->args.lastbytewritten, data->args.inode->i_ino); + if (!sync) { + data->inode = nfs_igrab_and_active(data->args.inode); + if (data->inode == NULL) { + nfs4_layoutcommit_release(data); + return -EAGAIN; + } + task_setup_data.flags = RPC_TASK_ASYNC; + } nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); - if (sync == false) - goto out; - status = nfs4_wait_for_completion_rpc_task(task); - if (status != 0) - goto out; - status = task->tk_status; + if (sync) + status = task->tk_status; trace_nfs4_layoutcommit(data->args.inode, status); -out: dprintk("%s: status %d\n", __func__, status); rpc_put_task(task); return status; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 2c35e2affa6f..bb0d56f737e0 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -285,6 +285,7 @@ struct nfs4_layoutcommit_data { struct nfs_fattr fattr; struct list_head lseg_list; struct rpc_cred *cred; + struct inode *inode; struct nfs4_layoutcommit_args args; struct nfs4_layoutcommit_res res; }; -- cgit v1.2.3 From 5a0ec8acb945e302ce819b4a9787796ccf284548 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 5 Feb 2015 16:35:16 -0500 Subject: NFSv4.1: Pin the inode and super block in asynchronous layoutreturns If we're sending an asynchronous layoutreturn, then we need to ensure that the inode and the super block remain pinned. 
Cc: Peng Tao Signed-off-by: Trond Myklebust Reviewed-by: Peng Tao --- fs/nfs/nfs4proc.c | 19 +++++++++++-------- include/linux/nfs_xdr.h | 1 + 2 files changed, 12 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e092b8540e2e..2e7c9f7a6f7c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7856,6 +7856,7 @@ static void nfs4_layoutreturn_release(void *calldata) lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); pnfs_put_layout_hdr(lrp->args.layout); + nfs_iput_and_deactive(lrp->inode); kfree(calldata); dprintk("<-- %s\n", __func__); } @@ -7880,23 +7881,25 @@ int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync) .rpc_message = &msg, .callback_ops = &nfs4_layoutreturn_call_ops, .callback_data = lrp, - .flags = RPC_TASK_ASYNC, }; int status = 0; dprintk("--> %s\n", __func__); + if (!sync) { + lrp->inode = nfs_igrab_and_active(lrp->args.inode); + if (!lrp->inode) { + nfs4_layoutreturn_release(lrp); + return -EAGAIN; + } + task_setup_data.flags |= RPC_TASK_ASYNC; + } nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); - if (sync == false) - goto out; - status = nfs4_wait_for_completion_rpc_task(task); - if (status != 0) - goto out; - status = task->tk_status; + if (sync) + status = task->tk_status; trace_nfs4_layoutreturn(lrp->args.inode, status); -out: dprintk("<-- %s status=%d\n", __func__, status); rpc_put_task(task); return status; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index bb0d56f737e0..38d96ba935c2 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -310,6 +310,7 @@ struct nfs4_layoutreturn { struct nfs4_layoutreturn_res res; struct rpc_cred *cred; struct nfs_client *clp; + struct inode *inode; int rpc_status; }; -- cgit v1.2.3 From e4af440aaf390ac1d39b26ef6cf4a28bcb6a5979 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 5 Feb 2015 17:05:08 -0500 Subject: NFSv4.1: pnfs_send_layoutreturn should use GFP_NOFS In we want to be able to call pnfs_send_layoutreturn() from within the writeback path, we really want it to use GFP_NOFS in order to prevent recursion. Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 703501d3ed19..a1d8620e8cb7 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -948,7 +948,7 @@ pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, nfs4_stateid stateid, struct nfs4_layoutreturn *lrp; int status = 0; - lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); + lrp = kzalloc(sizeof(*lrp), GFP_NOFS); if (unlikely(lrp == NULL)) { status = -ENOMEM; spin_lock(&ino->i_lock); -- cgit v1.2.3 From 4ef2e4f84c523ebbc930ce05fa27b9b1350f4a4b Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 5 Feb 2015 17:27:39 -0500 Subject: NFSv4.1: Fix pnfs_put_lseg races pnfs_layoutreturn_free_lseg_async() can also race with inode put in the general case. We can now fix this, and also simplify the code. 
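The rewritten pnfs_put_lseg() below follows a common two-stage put idiom: drop the reference cheaply while it cannot be the last one, and only take the protecting lock when it might be. In isolation the idiom looks like this (illustrative sketch; the demo_* names are invented and not part of the patch):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
	atomic_t refcount;
	struct list_head list;
	spinlock_t *owner_lock;	/* lock of the structure holding the list */
};

static void demo_put(struct demo_obj *obj)
{
	/* Fast path: decrement as long as the count cannot reach zero. */
	if (atomic_add_unless(&obj->refcount, -1, 1))
		return;

	/* Slow path: this may be the final put, so take the owner's lock
	 * before the count can hit zero and unlink under it. */
	if (atomic_dec_and_lock(&obj->refcount, obj->owner_lock)) {
		list_del(&obj->list);
		spin_unlock(obj->owner_lock);
		kfree(obj);
	}
}
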
Cc: Peng Tao Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 53 +++++++++++++++++++---------------------------------- 1 file changed, 19 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index a1d8620e8cb7..107b321be7d4 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -361,14 +361,9 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo, return true; } -static void pnfs_layoutreturn_free_lseg(struct work_struct *work) +static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, + struct pnfs_layout_hdr *lo, struct inode *inode) { - struct pnfs_layout_segment *lseg; - struct pnfs_layout_hdr *lo; - struct inode *inode; - - lseg = container_of(work, struct pnfs_layout_segment, pls_work); - WARN_ON(atomic_read(&lseg->pls_refcount)); lo = lseg->pls_layout; inode = lo->plh_inode; @@ -383,24 +378,12 @@ static void pnfs_layoutreturn_free_lseg(struct work_struct *work) lo->plh_block_lgets++; lo->plh_return_iomode = 0; spin_unlock(&inode->i_lock); + pnfs_get_layout_hdr(lo); - pnfs_send_layoutreturn(lo, stateid, iomode, true); - spin_lock(&inode->i_lock); + /* Send an async layoutreturn so we dont deadlock */ + pnfs_send_layoutreturn(lo, stateid, iomode, false); } else - /* match pnfs_get_layout_hdr #2 in pnfs_put_lseg */ - pnfs_put_layout_hdr(lo); - pnfs_layout_remove_lseg(lo, lseg); - spin_unlock(&inode->i_lock); - pnfs_free_lseg(lseg); - /* match pnfs_get_layout_hdr #1 in pnfs_put_lseg */ - pnfs_put_layout_hdr(lo); -} - -static void -pnfs_layoutreturn_free_lseg_async(struct pnfs_layout_segment *lseg) -{ - INIT_WORK(&lseg->pls_work, pnfs_layoutreturn_free_lseg); - queue_work(nfsiod_workqueue, &lseg->pls_work); + spin_unlock(&inode->i_lock); } void @@ -415,21 +398,23 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, atomic_read(&lseg->pls_refcount), test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); + + /* Handle the case where refcount != 1 */ + if (atomic_add_unless(&lseg->pls_refcount, -1, 1)) + return; + lo = lseg->pls_layout; inode = lo->plh_inode; + /* Do we need a layoutreturn? */ + if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) + pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); + if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { pnfs_get_layout_hdr(lo); - if (pnfs_layout_need_return(lo, lseg)) { - spin_unlock(&inode->i_lock); - /* hdr reference dropped in nfs4_layoutreturn_release */ - pnfs_get_layout_hdr(lo); - pnfs_layoutreturn_free_lseg_async(lseg); - } else { - pnfs_layout_remove_lseg(lo, lseg); - spin_unlock(&inode->i_lock); - pnfs_free_lseg(lseg); - pnfs_put_layout_hdr(lo); - } + pnfs_layout_remove_lseg(lo, lseg); + spin_unlock(&inode->i_lock); + pnfs_free_lseg(lseg); + pnfs_put_layout_hdr(lo); } } EXPORT_SYMBOL_GPL(pnfs_put_lseg); -- cgit v1.2.3 From c23ae6017835b5bc9b9ec9d5d9c2b1523053f503 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 12 Jan 2015 14:52:15 -0500 Subject: nfsd: default NFSv4.2 to on The code seems to work. The protocol looks stable. The kernel's version defaults can be overridden by rpc.nfsd arguments. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfssvc.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 314f5c8f8f1a..9277cc91c21b 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -119,6 +119,7 @@ struct svc_program nfsd_program = { static bool nfsd_supported_minorversions[NFSD_SUPPORTED_MINOR_VERSION + 1] = { [0] = 1, [1] = 1, + [2] = 1, }; int nfsd_vers(int vers, enum vers_op change) -- cgit v1.2.3 From 480486b4733d5bc7d9fe765b34bc6c2b72d5c12e Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Mon, 9 Feb 2015 17:48:32 -0800 Subject: pnfs/flexfiles: Do not dprintk after the free Found by 0-DAY kernel test infrastructure: fs/nfs/flexfilelayout/flexfilelayoutdev.c:520:13-16: ERROR: reference preceded by free on line 518 fs/nfs/flexfilelayout/flexfilelayoutdev.c:520:26-29: ERROR: reference preceded by free on line 518 fs/nfs/flexfilelayout/flexfilelayoutdev.c:520:39-42: ERROR: reference preceded by free on line 518 fs/nfs/flexfilelayout/flexfilelayoutdev.c:521:3-6: ERROR: reference preceded by free on line 518 Reported-by: Julia Lawall Signed-off-by: Tom Haynes Signed-off-by: Trond Myklebust --- fs/nfs/flexfilelayout/flexfilelayoutdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index 3bbb16b3066f..e2c01f204a95 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -515,10 +515,10 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo, *p++ = cpu_to_be32(err->opnum); *count += 1; list_del(&err->list); - kfree(err); dprintk("%s: offset %llu length %llu status %d op %d count %d\n", __func__, err->offset, err->length, err->status, err->opnum, *count); + kfree(err); } return 0; -- cgit v1.2.3 From 88cff0f0fbcf64cb6c2fbad6cf57e2725475d0ee Mon Sep 17 00:00:00 2001 From: hujianyang Date: Tue, 10 Feb 2015 11:28:57 +0800 Subject: UBIFS: return -EINVAL if log head is empty CS node is recognized as a sign in UBIFS log replay mechanism. Log relaying during mount should find the CS node in log head at beginning and then replay the following uncommitted buds. Here is a bug in log replay path: If the log head, which is indicated by @log_lnum in mst_node, is empty, current UBIFS replay nothing and directly mount the partition without any warning. This action will put filesystem in an abnormal state, e.g. space management in LPT area is incorrect to the real space usage in main area. We reproduced this bug by fault injection: turn log head leb into all 0xFF. UBIFS driver mount the polluted partition normally. But errors occur while running fs_stress on this mount: [89068.055183] UBI error: ubi_io_read: error -74 (ECC error) while reading 59 bytes from PEB 711:33088, read 59 bytes [89068.179877] UBIFS error (pid 10517): ubifs_check_node: bad magic 0x101031, expected 0x6101831 [89068.179882] UBIFS error (pid 10517): ubifs_check_node: bad node at LEB 591:28992 [89068.179891] Not a node, first 24 bytes: [89068.179892] 00000000: 31 10 10 00 37 84 64 04 10 04 00 00 00 00 00 00 20 00 00 00 02 01 00 00 1...7.d......... ....... [89068.180282] UBIFS error (pid 10517): ubifs_read_node: expected node type 2 This patch fix the problem by checking *lnum* to guarantee the empty leb is not log head leb and return an error if the log head leb is incorrectly empty. After this, we could catch *log head empty* error in place. 
Signed-off-by: hujianyang Signed-off-by: Artem Bityutskiy --- fs/ubifs/replay.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c index 3187925e9879..9b40a1c5e160 100644 --- a/fs/ubifs/replay.c +++ b/fs/ubifs/replay.c @@ -1028,9 +1028,22 @@ int ubifs_replay_journal(struct ubifs_info *c) do { err = replay_log_leb(c, lnum, 0, c->sbuf); - if (err == 1) - /* We hit the end of the log */ - break; + if (err == 1) { + if (lnum != c->lhead_lnum) + /* We hit the end of the log */ + break; + + /* + * The head of the log must always start with the + * "commit start" node on a properly formatted UBIFS. + * But we found no nodes at all, which means that + * someting went wrong and we cannot proceed mounting + * the file-system. + */ + ubifs_err("no UBIFS nodes found at the log head LEB %d:%d, possibly corrupted", + lnum, 0); + err = -EINVAL; + } if (err) goto out; lnum = ubifs_next_log_lnum(c, lnum); -- cgit v1.2.3 From 4c21462acc530bb81c6ae30e5bbd0b06f8c50626 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 10 Feb 2015 11:03:22 +0300 Subject: pnfs: delete an unintended goto There was an extra goto here where it shouldn't be, because of a merge error. Fixes: e2c63e091e29 ('Merge branch 'flexfiles'') Signed-off-by: Dan Carpenter Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 107b321be7d4..4f802b02fbb9 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1078,7 +1078,6 @@ bool pnfs_roc(struct inode *ino) goto out_noroc; } - goto out_noroc; pnfs_clear_retry_layoutget(lo); list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list) if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { -- cgit v1.2.3 From d8ba1f971497c19cf80da1ea5391a46a5f9fbd41 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 11 Feb 2015 17:27:55 -0500 Subject: NFSv4.1: Fix a kfree() of uninitialised pointers in decode_cb_sequence_args If the call to decode_rc_list() fails due to a memory allocation error, then we need to truncate the array size to ensure that we only call kfree() on those pointer that were allocated. Reported-by: David Ramos Fixes: 4aece6a19cf7f ("nfs41: cb_sequence xdr implementation") Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust --- fs/nfs/callback_xdr.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index f4ccfe6521ec..02f8d09e119f 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, for (i = 0; i < args->csa_nrclists; i++) { status = decode_rc_list(xdr, &args->csa_rclists[i]); - if (status) + if (status) { + args->csa_nrclists = i; goto out_free; + } } } status = 0; -- cgit v1.2.3 From a4f743a6bb201662962fa888e3f978583d61691e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 11 Feb 2015 17:49:13 -0500 Subject: NFSv4.1: Convert open-coded array allocation calls to kmalloc_array() For added overflow protection... 
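The point of the conversion: with a wire-supplied count such as rcl_nrefcalls, the open-coded multiplication can wrap around and silently under-allocate, while kmalloc_array() fails the allocation instead. In sketch form (illustrative only, not from the patch):

#include <linux/slab.h>
#include <linux/types.h>

static u32 *demo_alloc_ids(u32 n)
{
	/* The open-coded form kmalloc(n * sizeof(u32), GFP_KERNEL) can wrap
	 * for a huge, attacker-supplied n and return a buffer that is too
	 * small; kmalloc_array() does a checked multiply and returns NULL
	 * on overflow, so the caller sees a clean allocation failure. */
	return kmalloc_array(n, sizeof(u32), GFP_KERNEL);
}
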
Signed-off-by: Trond Myklebust --- fs/nfs/callback_xdr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 02f8d09e119f..19ca95cdfd9b 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -313,7 +313,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, goto out; } - args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL); + args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL); if (!args->devs) { status = htonl(NFS4ERR_DELAY); goto out; @@ -415,7 +415,7 @@ static __be32 decode_rc_list(struct xdr_stream *xdr, rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t)); if (unlikely(p == NULL)) goto out; - rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls * + rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls, sizeof(*rc_list->rcl_refcalls), GFP_KERNEL); if (unlikely(rc_list->rcl_refcalls == NULL)) -- cgit v1.2.3 From 871f599f4a869d24ef98b0217f19f0cc55ff59ac Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 9 Jan 2015 16:27:17 -0800 Subject: f2fs: avoid infinite loop on cp_error If cp_error is set, we should avoid all the infinite loop. In f2fs_sync_file, there is a hole, and this patch fixes that. Signed-off-by: Jaegeuk Kim --- fs/f2fs/file.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 5df336757d6c..710adc987937 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -247,6 +247,10 @@ go_write: sync_nodes: sync_node_pages(sbi, ino, &wbc); + /* if cp_error was enabled, we should avoid infinite loop */ + if (unlikely(f2fs_cp_error(sbi))) + goto out; + if (need_inode_block_update(sbi, ino)) { mark_inode_dirty_sync(inode); f2fs_write_inode(inode, NULL); -- cgit v1.2.3 From 9066c6a7eb5492dda0ccd48bb78be52feff9043c Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Sat, 10 Jan 2015 20:09:52 +0800 Subject: f2fs: fix wrong memory footprint statistics in debugfs Our value of memory footprint statistics showed in debugfs is not calculated correctly. Fix it in this patch. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/debug.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 1f0fb58072e7..b45daab97f0b 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -163,13 +163,20 @@ static void update_mem_info(struct f2fs_sb_info *sbi) si->base_mem += sizeof(struct f2fs_nm_info); si->base_mem += __bitmap_size(sbi, NAT_BITMAP); +get_cache: + si->cache_mem = 0; + /* build gc */ - si->base_mem += sizeof(struct f2fs_gc_kthread); + if (sbi->gc_thread) + si->cache_mem += sizeof(struct f2fs_gc_kthread); + + /* build merge flush thread */ + if (SM_I(sbi)->cmd_control_info) + si->cache_mem += sizeof(struct flush_cmd_control); -get_cache: /* free nids */ - si->cache_mem = NM_I(sbi)->fcnt; - si->cache_mem += NM_I(sbi)->nat_cnt; + si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid); + si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry); npages = NODE_MAPPING(sbi)->nrpages; si->cache_mem += npages << PAGE_CACHE_SHIFT; npages = META_MAPPING(sbi)->nrpages; -- cgit v1.2.3 From 6f0aacbc3c1d71078d0f9eb47f8c422bb58fffd7 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Sat, 10 Jan 2015 21:37:36 -0800 Subject: f2fs: update memory footprint information This patch adds missing memory usages, and splits them in detail. 
Signed-off-by: Jaegeuk Kim --- fs/f2fs/debug.c | 24 +++++++++++++++++------- fs/f2fs/f2fs.h | 2 +- 2 files changed, 18 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index b45daab97f0b..0f721f6a1147 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -177,13 +177,18 @@ get_cache: /* free nids */ si->cache_mem += NM_I(sbi)->fcnt * sizeof(struct free_nid); si->cache_mem += NM_I(sbi)->nat_cnt * sizeof(struct nat_entry); - npages = NODE_MAPPING(sbi)->nrpages; - si->cache_mem += npages << PAGE_CACHE_SHIFT; - npages = META_MAPPING(sbi)->nrpages; - si->cache_mem += npages << PAGE_CACHE_SHIFT; + si->cache_mem += NM_I(sbi)->dirty_nat_cnt * + sizeof(struct nat_entry_set); + si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); si->cache_mem += sbi->n_dirty_dirs * sizeof(struct inode_entry); for (i = 0; i <= UPDATE_INO; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); + + si->page_mem = 0; + npages = NODE_MAPPING(sbi)->nrpages; + si->page_mem += npages << PAGE_CACHE_SHIFT; + npages = META_MAPPING(sbi)->nrpages; + si->page_mem += npages << PAGE_CACHE_SHIFT; } static int stat_show(struct seq_file *s, void *v) @@ -301,9 +306,14 @@ static int stat_show(struct seq_file *s, void *v) /* memory footprint */ update_mem_info(si->sbi); - seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n", - (si->base_mem + si->cache_mem) >> 10, - si->base_mem >> 10, si->cache_mem >> 10); + seq_printf(s, "\nMemory: %u KB\n", + (si->base_mem + si->cache_mem + si->page_mem) >> 10); + seq_printf(s, " - static: %u KB\n", + si->base_mem >> 10); + seq_printf(s, " - cached: %u KB\n", + si->cache_mem >> 10); + seq_printf(s, " - paged : %u KB\n", + si->page_mem >> 10); } mutex_unlock(&f2fs_stat_mutex); return 0; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 209532cbee43..c2cf040d5284 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1530,7 +1530,7 @@ struct f2fs_stat_info { unsigned int segment_count[2]; unsigned int block_count[2]; unsigned int inplace_count; - unsigned base_mem, cache_mem; + unsigned base_mem, cache_mem, page_mem; }; static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi) -- cgit v1.2.3 From 30a5537f9a9e91937aad6a47f55683f7ce0be257 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 14 Jan 2015 16:34:24 -0800 Subject: f2fs: trigger correct checkpoint during umount This patch fixes to trigger checkpoint with umount flag when kill_sb was called. In kill_sb, f2fs_sync_fs was finally called, but at this time, f2fs can't do checkpoint with CP_UMOUNT. After then, f2fs_put_super is not doing checkpoint, since it is not dirty. So, this patch adds a flag to indicate f2fs_sync_fs is called during umount. 
Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 1 + fs/f2fs/super.c | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index c2cf040d5284..1795ce231118 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -525,6 +525,7 @@ struct f2fs_sb_info { struct f2fs_super_block *raw_super; /* raw super block pointer */ int s_dirty; /* dirty flag for checkpoint */ bool need_fsck; /* need fsck.f2fs to fix */ + bool s_closing; /* specify unmounting */ /* for node-related operations */ struct f2fs_nm_info *nm_info; /* node manager */ diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 0e97974bbbbd..84f95cd4076a 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -487,7 +487,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync) if (sync) { struct cp_control cpc; - cpc.reason = test_opt(sbi, FASTBOOT) ? CP_UMOUNT : CP_SYNC; + cpc.reason = (test_opt(sbi, FASTBOOT) || sbi->s_closing) ? + CP_UMOUNT : CP_SYNC; mutex_lock(&sbi->gc_mutex); write_checkpoint(sbi, &cpc); mutex_unlock(&sbi->gc_mutex); @@ -1190,11 +1191,18 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags, return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super); } +static void kill_f2fs_super(struct super_block *sb) +{ + if (sb->s_root) + F2FS_SB(sb)->s_closing = true; + kill_block_super(sb); +} + static struct file_system_type f2fs_fs_type = { .owner = THIS_MODULE, .name = "f2fs", .mount = f2fs_mount, - .kill_sb = kill_block_super, + .kill_sb = kill_f2fs_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("f2fs"); -- cgit v1.2.3 From 85dc2f2c6c84e99e9864ef660f79683aaad85f42 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 14 Jan 2015 17:41:41 -0800 Subject: f2fs: do checkpoint when umount flag is not set If the previous checkpoint was done without CP_UMOUNT flag, it needs to do checkpoint with CP_UMOUNT for the next fast boot. Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 3 ++- fs/f2fs/super.c | 9 +++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 9f5317c9ad72..231d8c9fea0a 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1043,7 +1043,8 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) mutex_lock(&sbi->cp_mutex); - if (!sbi->s_dirty && cpc->reason != CP_DISCARD) + if (!sbi->s_dirty && + cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT) goto out; if (unlikely(f2fs_cp_error(sbi))) goto out; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 84f95cd4076a..0d627f2d1828 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -447,8 +447,13 @@ static void f2fs_put_super(struct super_block *sb) f2fs_destroy_stats(sbi); stop_gc_thread(sbi); - /* We don't need to do checkpoint when it's clean */ - if (sbi->s_dirty) { + /* + * We don't need to do checkpoint when superblock is clean. + * But, the previous checkpoint was not done by umount, it needs to do + * clean checkpoint again. + */ + if (sbi->s_dirty || + !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) { struct cp_control cpc = { .reason = CP_UMOUNT, }; -- cgit v1.2.3 From 1601839e9e5bd5726d744c9c5919f87dc808bbcc Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 19 Jan 2015 20:24:37 +0800 Subject: f2fs: fix to release count of meta page in ->invalidatepage We will encounter deadloop in below scenario: 1. 
increase page count for F2FS_DIRTY_META type in following path: ->recover_fsync_data ->recover_data ->do_recover_data ->recover_data_page ->change_curseg ->write_sum_page ->set_page_dirty 2. fail in recover_data() 3. invalidate meta pages in truncate_inode_pages_final without decreasing page count. 4. deadloop when sync_meta_pages as page count will always be non-zero. message: NMI watchdog: BUG: soft lockup - CPU#0 stuck for 22s! [] pagevec_lookup_tag+0x27/0x30 [] sync_meta_pages+0x87/0x160 [f2fs] [] recover_fsync_data+0xeb9/0xf10 [f2fs] [] f2fs_fill_super+0x888/0x980 [f2fs] [] mount_bdev+0x16a/0x1a0 [] f2fs_mount+0x1f/0x30 [f2fs] [] mount_fs+0x36/0x170 [] vfs_kern_mount+0x55/0xe0 [] do_mount+0x1df/0x9f0 [] SyS_mount+0x70/0xb0 [] sysenter_do_call+0x12/0x12 To avoid page count leak, let's add ->invalidatepage and ->releasepage in f2fs_meta_aops as f2fs_node_aops to release meta page count correctly. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 231d8c9fea0a..79f82819086f 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -302,16 +302,35 @@ static int f2fs_set_meta_page_dirty(struct page *page) if (!PageDirty(page)) { __set_page_dirty_nobuffers(page); inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META); + SetPagePrivate(page); f2fs_trace_pid(page); return 1; } return 0; } +static void f2fs_invalidate_meta_page(struct page *page, unsigned int offset, + unsigned int length) +{ + struct inode *inode = page->mapping->host; + + if (PageDirty(page)) + dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_META); + ClearPagePrivate(page); +} + +static int f2fs_release_meta_page(struct page *page, gfp_t wait) +{ + ClearPagePrivate(page); + return 1; +} + const struct address_space_operations f2fs_meta_aops = { .writepage = f2fs_write_meta_page, .writepages = f2fs_write_meta_pages, .set_page_dirty = f2fs_set_meta_page_dirty, + .invalidatepage = f2fs_invalidate_meta_page, + .releasepage = f2fs_release_meta_page, }; static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) -- cgit v1.2.3 From bc4a1f873b9db010f5b0971ee5f2987d9be32c36 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 22 Jan 2015 14:48:28 -0800 Subject: f2fs: leave comment for code readability During the recovery, any xattr blocks should not be found, since they are written into cold log, not the warm node chain. Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/recovery.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index c4211a5862df..57603a7127f7 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -346,6 +346,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, if (IS_INODE(page)) { recover_inline_xattr(inode, page); } else if (f2fs_has_xattr_block(ofs_of_node(page))) { + /* + * Deprecated; xattr blocks should be found from cold log. + * But, we should remain this for backward compatibility. + */ recover_xattr_data(inode, page, blkaddr); goto out; } -- cgit v1.2.3 From d49f3e890290bd1db047d02335401026d1886472 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 23 Jan 2015 20:36:04 +0800 Subject: f2fs: add F2FS_IOC_GETVERSION support In this patch we add the FS_IOC_GETVERSION ioctl for getting i_generation from inode, after that, users can list file's generation number by using "lsattr -v". 
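On the user-space side this is the standard FS_IOC_GETVERSION interface; a minimal hypothetical reader (not part of the patch) looks like the sketch below, using an int-sized argument to match the put_user() copy done by the handler that follows:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* FS_IOC_GETVERSION */

int main(int argc, char **argv)
{
	int fd, gen = 0;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETVERSION, &gen) < 0) {
		perror("FS_IOC_GETVERSION");
		close(fd);
		return 1;
	}
	printf("generation: %u\n", (unsigned int)gen);
	close(fd);
	return 0;
}
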
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 1 + fs/f2fs/file.c | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 1795ce231118..5e16085d98d3 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -202,6 +202,7 @@ static inline bool __has_cursum_space(struct f2fs_summary_block *sum, int size, */ #define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS #define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS +#define F2FS_IOC_GETVERSION FS_IOC_GETVERSION #define F2FS_IOCTL_MAGIC 0xf5 #define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 710adc987937..ec17d05d6553 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -926,6 +926,13 @@ out: return ret; } +static int f2fs_ioc_getversion(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + + return put_user(inode->i_generation, (int __user *)arg); +} + static int f2fs_ioc_start_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); @@ -1061,6 +1068,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_getflags(filp, arg); case F2FS_IOC_SETFLAGS: return f2fs_ioc_setflags(filp, arg); + case F2FS_IOC_GETVERSION: + return f2fs_ioc_getversion(filp, arg); case F2FS_IOC_START_ATOMIC_WRITE: return f2fs_ioc_start_atomic_write(filp); case F2FS_IOC_COMMIT_ATOMIC_WRITE: -- cgit v1.2.3 From f28e503429644a794d9bf2793189ff0b0887eb21 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Fri, 23 Jan 2015 20:37:53 +0800 Subject: f2fs: use f2fs_radix_tree_insert to clean codes No modification in functionality, just clean codes with f2fs_radix_tree_insert. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/gc.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 40887d3c9d01..67860b6712f3 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -356,11 +356,8 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode) } new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS); new_ie->inode = inode; -retry: - if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) { - cond_resched(); - goto retry; - } + + f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie); list_add_tail(&new_ie->list, &gc_list->ilist); } -- cgit v1.2.3 From 3b6709b771b3335856643df8f6e0158db150a438 Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Sat, 24 Jan 2015 17:06:25 +0800 Subject: f2fs: fix a bug of inheriting default ACL from parent Introduced by a6dda0e63e97122ce9e0ba04367e37cca28315fa "f2fs: use generic posix ACL infrastructure". 
When testing default acl, gets in recent kernel (3.19.0-rc5), user::rwx group::r-x other::r-x default:user::rwx default:group::r-x default:group:root:rwx default:mask::rwx default:other::r-x ]# getfacl testdir/ user::rwx group::rwx // missing an acl "group:root:rwx" inherited from parent other::r-x default:user::rwx default:group::r-x default:group:root:rwx default:mask::rwx default:other::r-x Signed-off-by: Kinglong Mee Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/acl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index 7f12d28ad94c..742202779bd5 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -396,7 +396,7 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage, posix_acl_release(default_acl); } if (acl) { - if (error) + if (!error) error = __f2fs_set_acl(inode, ACL_TYPE_ACCESS, acl, ipage); posix_acl_release(acl); -- cgit v1.2.3 From feeb0debfb3454726ebea3871cecac35e144d1bb Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 26 Jan 2015 20:22:55 +0800 Subject: f2fs: make truncate_inline_date static 1. make truncate_inline_date static; 2. remove parameter @from of truncate_inline_date as callers only pass zero. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/f2fs.h | 1 - fs/f2fs/inline.c | 25 +++++++++---------------- 2 files changed, 9 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 5e16085d98d3..9e1fcc1f64ee 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1649,7 +1649,6 @@ int f2fs_read_inline_data(struct inode *, struct page *); int f2fs_convert_inline_page(struct dnode_of_data *, struct page *); int f2fs_convert_inline_inode(struct inode *); int f2fs_write_inline_data(struct inode *, struct page *); -void truncate_inline_data(struct page *, u64); bool recover_inline_data(struct inode *, struct page *); struct f2fs_dir_entry *find_in_inline_dir(struct inode *, struct qstr *, struct page **); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index fa2aa2f7bb96..1484c00133cd 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -50,6 +50,12 @@ void read_inline_data(struct page *page, struct page *ipage) SetPageUptodate(page); } +static void truncate_inline_data(struct page *ipage) +{ + f2fs_wait_on_page_writeback(ipage, NODE); + memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA); +} + int f2fs_read_inline_data(struct inode *inode, struct page *page) { struct page *ipage; @@ -125,7 +131,7 @@ no_update: set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE); /* clear inline data and flag after data writeback */ - truncate_inline_data(dn->inode_page, 0); + truncate_inline_data(dn->inode_page); clear_out: stat_dec_inline_inode(dn->inode); f2fs_clear_inline_inode(dn->inode); @@ -198,19 +204,6 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) return 0; } -void truncate_inline_data(struct page *ipage, u64 from) -{ - void *addr; - - if (from >= MAX_INLINE_DATA) - return; - - f2fs_wait_on_page_writeback(ipage, NODE); - - addr = inline_data_addr(ipage); - memset(addr + from, 0, MAX_INLINE_DATA - from); -} - bool recover_inline_data(struct inode *inode, struct page *npage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -252,7 +245,7 @@ process_inline: if (f2fs_has_inline_data(inode)) { ipage = get_node_page(sbi, inode->i_ino); f2fs_bug_on(sbi, IS_ERR(ipage)); - truncate_inline_data(ipage, 0); + truncate_inline_data(ipage); f2fs_clear_inline_inode(inode); update_inode(inode, ipage); 
f2fs_put_page(ipage, 1); @@ -370,7 +363,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, set_page_dirty(page); /* clear inline dir and flag after data writeback */ - truncate_inline_data(ipage, 0); + truncate_inline_data(ipage); stat_dec_inline_dir(dir); clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); -- cgit v1.2.3 From 88dd8934194f6d1db7f824c03d1eee169cb891b0 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 26 Jan 2015 20:24:21 +0800 Subject: f2fs: clean up {in,de}create_sleep_time Use pointer parameter @wait to pass result in {in,de}create_sleep_time for cleanup. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/gc.c | 8 ++++---- fs/f2fs/gc.h | 28 ++++++++++++++-------------- 2 files changed, 18 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 67860b6712f3..ba89e27f394f 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -44,7 +44,7 @@ static int gc_thread_func(void *data) break; if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { - wait_ms = increase_sleep_time(gc_th, wait_ms); + increase_sleep_time(gc_th, &wait_ms); continue; } @@ -65,15 +65,15 @@ static int gc_thread_func(void *data) continue; if (!is_idle(sbi)) { - wait_ms = increase_sleep_time(gc_th, wait_ms); + increase_sleep_time(gc_th, &wait_ms); mutex_unlock(&sbi->gc_mutex); continue; } if (has_enough_invalid_blocks(sbi)) - wait_ms = decrease_sleep_time(gc_th, wait_ms); + decrease_sleep_time(gc_th, &wait_ms); else - wait_ms = increase_sleep_time(gc_th, wait_ms); + increase_sleep_time(gc_th, &wait_ms); stat_inc_bggc_count(sbi); diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 524543a6a34a..d5ff97c5e394 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -66,26 +66,26 @@ static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi) return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100; } -static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait) +static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th, + long *wait) { - if (wait == gc_th->no_gc_sleep_time) - return wait; + if (*wait == gc_th->no_gc_sleep_time) + return; - wait += gc_th->min_sleep_time; - if (wait > gc_th->max_sleep_time) - wait = gc_th->max_sleep_time; - return wait; + *wait += gc_th->min_sleep_time; + if (*wait > gc_th->max_sleep_time) + *wait = gc_th->max_sleep_time; } -static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait) +static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th, + long *wait) { - if (wait == gc_th->no_gc_sleep_time) - wait = gc_th->max_sleep_time; + if (*wait == gc_th->no_gc_sleep_time) + *wait = gc_th->max_sleep_time; - wait -= gc_th->min_sleep_time; - if (wait <= gc_th->min_sleep_time) - wait = gc_th->min_sleep_time; - return wait; + *wait -= gc_th->min_sleep_time; + if (*wait <= gc_th->min_sleep_time) + *wait = gc_th->min_sleep_time; } static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) -- cgit v1.2.3 From caf0047e7e1e60a7ad1d655d3b81b32e2dfb6095 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 28 Jan 2015 17:48:42 +0800 Subject: f2fs: merge flags in struct f2fs_sb_info Currently, there are several variables with Boolean type as below: struct f2fs_sb_info { ... int s_dirty; bool need_fsck; bool s_closing; ... bool por_doing; ... } For this there are some issues: 1. there are some space of f2fs_sb_info is wasted due to aligning after Boolean type variables by compiler. 2. 
if we continuously add new flag into f2fs_sb_info, structure will be messed up. So in this patch, we try to: 1. switch s_dirty to Boolean type variable since it has two status 0/1. 2. merge s_dirty/need_fsck/s_closing/por_doing variables into s_flag. 3. introduce an enum type which can indicate different states of sbi. 4. use new introduced universal interfaces is_sbi_flag_set/{set,clear}_sbi_flag to operate flags for sbi. After that, above issues will be fixed. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 12 ++++++------ fs/f2fs/data.c | 2 +- fs/f2fs/f2fs.h | 30 ++++++++++++++++++++---------- fs/f2fs/node.c | 4 ++-- fs/f2fs/recovery.c | 4 ++-- fs/f2fs/segment.h | 10 +++++----- fs/f2fs/super.c | 15 ++++++++------- include/trace/events/f2fs.h | 4 ++-- 8 files changed, 46 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 79f82819086f..19021414d195 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -190,7 +190,7 @@ static int f2fs_write_meta_page(struct page *page, trace_f2fs_writepage(page, META); - if (unlikely(sbi->por_doing)) + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0)) goto redirty_out; @@ -485,7 +485,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi) if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG)) return; - sbi->por_doing = true; + set_sbi_flag(sbi, SBI_POR_DOING); start_blk = __start_cp_addr(sbi) + 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); @@ -506,7 +506,7 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi) } /* clear Orphan Flag */ clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG); - sbi->por_doing = false; + clear_sbi_flag(sbi, SBI_POR_DOING); return; } @@ -973,7 +973,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) else clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); - if (sbi->need_fsck) + if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) set_ckpt_flags(ckpt, CP_FSCK_FLAG); /* update SIT/NAT bitmap */ @@ -1047,7 +1047,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) return; clear_prefree_segments(sbi); - F2FS_RESET_SB_DIRT(sbi); + clear_sbi_flag(sbi, SBI_IS_DIRTY); } /* @@ -1062,7 +1062,7 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) mutex_lock(&sbi->cp_mutex); - if (!sbi->s_dirty && + if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT) goto out; if (unlikely(f2fs_cp_error(sbi))) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index a7b905cb05f8..6d9e6e4ce439 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -818,7 +818,7 @@ static int f2fs_write_data_page(struct page *page, zero_user_segment(page, offset, PAGE_CACHE_SIZE); write: - if (unlikely(sbi->por_doing)) + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (f2fs_is_drop_cache(inode)) goto out; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 9e1fcc1f64ee..5abe0836c179 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -28,7 +28,7 @@ do { \ if (unlikely(condition)) { \ WARN_ON(1); \ - sbi->need_fsck = true; \ + set_sbi_flag(sbi, SBI_NEED_FSCK); \ } \ } while (0) #define f2fs_down_write(x, y) down_write(x) @@ -519,14 +519,20 @@ struct inode_management { unsigned long ino_num; /* number of entries */ }; +/* For s_flag in struct f2fs_sb_info */ +enum { + SBI_IS_DIRTY, /* dirty flag for checkpoint */ + SBI_IS_CLOSE, /* specify unmounting */ + 
SBI_NEED_FSCK, /* need fsck.f2fs to fix */ + SBI_POR_DOING, /* recovery is doing or not */ +}; + struct f2fs_sb_info { struct super_block *sb; /* pointer to VFS super block */ struct proc_dir_entry *s_proc; /* proc entry */ struct buffer_head *raw_super_buf; /* buffer head of raw sb */ struct f2fs_super_block *raw_super; /* raw super block pointer */ - int s_dirty; /* dirty flag for checkpoint */ - bool need_fsck; /* need fsck.f2fs to fix */ - bool s_closing; /* specify unmounting */ + int s_flag; /* flags for sbi */ /* for node-related operations */ struct f2fs_nm_info *nm_info; /* node manager */ @@ -546,7 +552,6 @@ struct f2fs_sb_info { struct rw_semaphore cp_rwsem; /* blocking FS operations */ struct rw_semaphore node_write; /* locking node writes */ struct mutex writepages; /* mutex for writepages() */ - bool por_doing; /* recovery is doing or not */ wait_queue_head_t cp_wait; struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ @@ -699,14 +704,19 @@ static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi) return sbi->node_inode->i_mapping; } -static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi) +static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type) +{ + return sbi->s_flag & (0x01 << type); +} + +static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) { - sbi->s_dirty = 1; + sbi->s_flag |= (0x01 << type); } -static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi) +static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type) { - sbi->s_dirty = 0; + sbi->s_flag &= ~(0x01 << type); } static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp) @@ -818,7 +828,7 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) { atomic_inc(&sbi->nr_pages[count_type]); - F2FS_SET_SB_DIRT(sbi); + set_sbi_flag(sbi, SBI_IS_DIRTY); } static inline void inode_inc_dirty_pages(struct inode *inode) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index d7c143626705..05e6faa71cff 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -588,7 +588,7 @@ static void truncate_node(struct dnode_of_data *dn) } invalidate: clear_node_page_dirty(dn->node_page); - F2FS_SET_SB_DIRT(sbi); + set_sbi_flag(sbi, SBI_IS_DIRTY); f2fs_put_page(dn->node_page, 1); @@ -1284,7 +1284,7 @@ static int f2fs_write_node_page(struct page *page, trace_f2fs_writepage(page, NODE); - if (unlikely(sbi->por_doing)) + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; if (unlikely(f2fs_cp_error(sbi))) goto redirty_out; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 57603a7127f7..41afb9534bbd 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -508,7 +508,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) INIT_LIST_HEAD(&inode_list); /* step #1: find fsynced inode numbers */ - sbi->por_doing = true; + set_sbi_flag(sbi, SBI_POR_DOING); /* prevent checkpoint */ mutex_lock(&sbi->cp_mutex); @@ -541,7 +541,7 @@ out: truncate_inode_pages_final(META_MAPPING(sbi)); } - sbi->por_doing = false; + clear_sbi_flag(sbi, SBI_POR_DOING); if (err) { discard_next_dnode(sbi, blkaddr); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 7f327c0ba4e3..421d5794b0c8 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -460,7 +460,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed) int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES); int dent_secs = 
get_blocktype_secs(sbi, F2FS_DIRTY_DENTS); - if (unlikely(sbi->por_doing)) + if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) return false; return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs + @@ -599,13 +599,13 @@ static inline void check_block_count(struct f2fs_sb_info *sbi, static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno) { if (segno > TOTAL_SEGS(sbi) - 1) - sbi->need_fsck = true; + set_sbi_flag(sbi, SBI_NEED_FSCK); } static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr) { if (blk_addr < SEG0_BLKADDR(sbi) || blk_addr >= MAX_BLKADDR(sbi)) - sbi->need_fsck = true; + set_sbi_flag(sbi, SBI_NEED_FSCK); } /* @@ -616,11 +616,11 @@ static inline void check_block_count(struct f2fs_sb_info *sbi, { /* check segment usage */ if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg) - sbi->need_fsck = true; + set_sbi_flag(sbi, SBI_NEED_FSCK); /* check boundary of a given segment number */ if (segno > TOTAL_SEGS(sbi) - 1) - sbi->need_fsck = true; + set_sbi_flag(sbi, SBI_NEED_FSCK); } #endif diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 0d627f2d1828..c3aa72f9c8c8 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -452,7 +452,7 @@ static void f2fs_put_super(struct super_block *sb) * But, the previous checkpoint was not done by umount, it needs to do * clean checkpoint again. */ - if (sbi->s_dirty || + if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) { struct cp_control cpc = { .reason = CP_UMOUNT, @@ -492,8 +492,9 @@ int f2fs_sync_fs(struct super_block *sb, int sync) if (sync) { struct cp_control cpc; - cpc.reason = (test_opt(sbi, FASTBOOT) || sbi->s_closing) ? - CP_UMOUNT : CP_SYNC; + cpc.reason = (test_opt(sbi, FASTBOOT) || + is_sbi_flag_set(sbi, SBI_IS_CLOSE)) ? 
+ CP_UMOUNT : CP_SYNC; mutex_lock(&sbi->gc_mutex); write_checkpoint(sbi, &cpc); mutex_unlock(&sbi->gc_mutex); @@ -895,7 +896,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi) atomic_set(&sbi->nr_pages[i], 0); sbi->dir_level = DEF_DIR_LEVEL; - sbi->need_fsck = false; + clear_sbi_flag(sbi, SBI_NEED_FSCK); } /* @@ -1006,7 +1007,7 @@ try_onemore: mutex_init(&sbi->writepages); mutex_init(&sbi->cp_mutex); init_rwsem(&sbi->node_write); - sbi->por_doing = false; + clear_sbi_flag(sbi, SBI_POR_DOING); spin_lock_init(&sbi->stat_lock); init_rwsem(&sbi->read_io.io_rwsem); @@ -1130,7 +1131,7 @@ try_onemore: goto free_proc; if (!retry) - sbi->need_fsck = true; + set_sbi_flag(sbi, SBI_NEED_FSCK); /* recover fsynced data */ if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { @@ -1199,7 +1200,7 @@ static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags, static void kill_f2fs_super(struct super_block *sb) { if (sb->s_root) - F2FS_SB(sb)->s_closing = true; + set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE); kill_block_super(sb); } diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 13992f3c1445..5e1c0292250c 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -184,13 +184,13 @@ TRACE_EVENT(f2fs_sync_fs, TP_STRUCT__entry( __field(dev_t, dev) - __field(int, dirty) + __field(bool, dirty) __field(int, wait) ), TP_fast_assign( __entry->dev = sb->s_dev; - __entry->dirty = F2FS_SB(sb)->s_dirty; + __entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY); __entry->wait = wait; ), -- cgit v1.2.3 From dabc4a5c60f796a8e7171272284ba077f9c1e439 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 23 Jan 2015 17:41:39 -0800 Subject: f2fs: fix not to drop mount options when retrying fill_super If wrong mount option was requested, f2fs tries to fill_super again. But, during the next trial, f2fs has no valid mount options, since parse_options deleted all the separators in the original string. 
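To see why the retry loses the options: f2fs's parse_options() walks the string with strsep(), which overwrites every separator with a NUL, so nothing parseable is left for the second fill_super attempt. Below is a minimal userspace sketch of that pitfall and of the fix pattern (parse a throwaway copy, keep the original); it is illustrative code, not the kernel implementation, which uses kstrdup()/kfree() as in the diff that follows.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: a userspace model of why a strsep()-based option
 * parser must work on a private copy when the caller may have to parse
 * the same string again on a retry. */
static void parse_options(char *opts)
{
        char *p;

        /* strsep() overwrites each ',' with '\0', destroying the buffer
         * it walks as a reusable option string. */
        while ((p = strsep(&opts, ",")) != NULL)
                printf("option: %s\n", p);
}

int main(void)
{
        char data[] = "background_gc=on,discard,inline_data";
        char *options = strdup(data);   /* parse a throwaway copy... */

        if (!options)
                return 1;
        parse_options(options);
        free(options);

        /* ...so the original survives for a second fill_super attempt */
        printf("original: %s\n", data);
        return 0;
}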
Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/super.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index c3aa72f9c8c8..146e310fbf04 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -951,6 +951,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) struct inode *root; long err = -EINVAL; bool retry = true; + char *options = NULL; int i; try_onemore: @@ -982,9 +983,15 @@ try_onemore: set_opt(sbi, POSIX_ACL); #endif /* parse mount options */ - err = parse_options(sb, (char *)data); - if (err) + options = kstrdup((const char *)data, GFP_KERNEL); + if (data && !options) { + err = -ENOMEM; goto free_sb_buf; + } + + err = parse_options(sb, options); + if (err) + goto free_options; sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); sb->s_max_links = F2FS_LINK_MAX; @@ -1028,7 +1035,7 @@ try_onemore: if (IS_ERR(sbi->meta_inode)) { f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); err = PTR_ERR(sbi->meta_inode); - goto free_sb_buf; + goto free_options; } err = get_valid_checkpoint(sbi); @@ -1153,6 +1160,7 @@ try_onemore: if (err) goto free_kobj; } + kfree(options); return 0; free_kobj: @@ -1177,6 +1185,8 @@ free_cp: free_meta_inode: make_bad_inode(sbi->meta_inode); iput(sbi->meta_inode); +free_options: + kfree(options); free_sb_buf: brelse(raw_super_buf); free_sbi: -- cgit v1.2.3 From 2d834bf9ac8e40d9ae2a2dcde2348bd028f87ec4 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 23 Jan 2015 18:33:46 -0800 Subject: f2fs: support norecovery mount option This patch adds a mount option, norecovery, which is mostly same as disable_roll_forward. The only difference is that norecovery should be activated with read-only mount option. This can be used when user wants to check whether f2fs is mountable or not without any recovery process. (e.g., xfstests/200) Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- Documentation/filesystems/f2fs.txt | 2 ++ fs/f2fs/super.c | 8 ++++++++ 2 files changed, 10 insertions(+) (limited to 'fs') diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index e0950c483c22..6758aa358475 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -106,6 +106,8 @@ background_gc=%s Turn on/off cleaning operations, namely garbage Default value for this option is on. So garbage collection is on by default. disable_roll_forward Disable the roll-forward recovery routine +norecovery Disable the roll-forward recovery routine, mounted read- + only (i.e., -o ro,disable_roll_forward) discard Issue discard/TRIM commands when a segment is cleaned. 
no_heap Disable heap-style segment allocation which finds free segments for data from the beginning of main area, while diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 146e310fbf04..06d903b5d6c8 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -42,6 +42,7 @@ static struct kset *f2fs_kset; enum { Opt_gc_background, Opt_disable_roll_forward, + Opt_norecovery, Opt_discard, Opt_noheap, Opt_user_xattr, @@ -62,6 +63,7 @@ enum { static match_table_t f2fs_tokens = { {Opt_gc_background, "background_gc=%s"}, {Opt_disable_roll_forward, "disable_roll_forward"}, + {Opt_norecovery, "norecovery"}, {Opt_discard, "discard"}, {Opt_noheap, "no_heap"}, {Opt_user_xattr, "user_xattr"}, @@ -287,6 +289,12 @@ static int parse_options(struct super_block *sb, char *options) case Opt_disable_roll_forward: set_opt(sbi, DISABLE_ROLL_FORWARD); break; + case Opt_norecovery: + /* this option mounts f2fs with ro */ + set_opt(sbi, DISABLE_ROLL_FORWARD); + if (!f2fs_readonly(sb)) + return -EINVAL; + break; case Opt_discard: set_opt(sbi, DISCARD); break; -- cgit v1.2.3 From 11504a8e7e20e2ff2e68176850e9b83b8bae760e Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 23 Jan 2015 18:43:45 -0800 Subject: f2fs: avoid write_checkpoint if f2fs is mounted readonly Do not change any partition when f2fs is changed to readonly mode. Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 19021414d195..22165fb1d0d1 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -1067,6 +1067,8 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) goto out; if (unlikely(f2fs_cp_error(sbi))) goto out; + if (f2fs_readonly(sbi->sb)) + goto out; if (block_operations(sbi)) goto out; -- cgit v1.2.3 From 119ee9144534141822462e3e8a5ccc8dc537f712 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Thu, 29 Jan 2015 11:45:33 -0800 Subject: f2fs: split UMOUNT and FASTBOOT flags This patch adds FASTBOOT flag into checkpoint as follows. - CP_UMOUNT_FLAG is set when system is umounted. - CP_FASTBOOT_FLAG is set when intermediate checkpoint having node summaries was done. So, if you get CP_UMOUNT_FLAG from checkpoint, the system was umounted cleanly. Instead, if there was sudden-power-off, you can get CP_FASTBOOT_FLAG or nothing. 
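As a rough, standalone model of the new logic (not the kernel code: the sbi state is reduced to two booleans, and the CP_UMOUNT_FLAG value is assumed from the mainline header since only CP_FASTBOOT_FLAG appears in this series):

#include <stdbool.h>
#include <stdio.h>

enum cp_reason { CP_UMOUNT, CP_FASTBOOT, CP_SYNC, CP_DISCARD };

#define CP_UMOUNT_FLAG   0x00000001u    /* assumed value; not shown in this hunk */
#define CP_FASTBOOT_FLAG 0x00000020u    /* as added by this patch */

/* mirrors __get_cp_reason(): unmount wins over fastboot, default is sync */
static enum cp_reason get_cp_reason(bool fastboot_opt, bool closing)
{
        enum cp_reason reason = CP_SYNC;

        if (fastboot_opt)
                reason = CP_FASTBOOT;
        if (closing)
                reason = CP_UMOUNT;
        return reason;
}

/* mirrors __remain_node_summaries(): both checkpoint kinds keep node summaries */
static bool remain_node_summaries(enum cp_reason reason)
{
        return reason == CP_UMOUNT || reason == CP_FASTBOOT;
}

/* mirrors __exist_node_summaries(): either on-disk flag means the summaries
 * are present, but only CP_UMOUNT_FLAG proves a clean unmount */
static bool exist_node_summaries(unsigned int ckpt_flags)
{
        return ckpt_flags & (CP_UMOUNT_FLAG | CP_FASTBOOT_FLAG);
}

int main(void)
{
        unsigned int flags = CP_FASTBOOT_FLAG;  /* e.g. power cut after a fastboot checkpoint */

        printf("reason at umount: %d\n", get_cp_reason(true, true));
        printf("summaries written for fastboot: %d\n", remain_node_summaries(CP_FASTBOOT));
        printf("summaries on disk: %d, clean umount: %d\n",
               exist_node_summaries(flags), !!(flags & CP_UMOUNT_FLAG));
        return 0;
}

The effect of the split is that node summaries are still written for both checkpoint kinds, while CP_UMOUNT_FLAG alone certifies a clean shutdown.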
Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 19 +++++++++++++------ fs/f2fs/f2fs.h | 23 +++++++++++++++++++++++ fs/f2fs/gc.c | 3 +-- fs/f2fs/segment.c | 11 +++++------ fs/f2fs/super.c | 5 ++--- include/linux/f2fs_fs.h | 1 + include/trace/events/f2fs.h | 1 + 7 files changed, 46 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 22165fb1d0d1..f7cdcad31943 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -956,17 +956,24 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + orphan_blocks); - if (cpc->reason == CP_UMOUNT) { - set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + if (__remain_node_summaries(cpc->reason)) ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+ cp_payload_blks + data_sum_blocks + orphan_blocks + NR_CURSEG_NODE_TYPE); - } else { - clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + else ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + cp_payload_blks + data_sum_blocks + orphan_blocks); - } + + if (cpc->reason == CP_UMOUNT) + set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + else + clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); + + if (cpc->reason == CP_FASTBOOT) + set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG); + else + clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG); if (orphan_num) set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); @@ -1010,7 +1017,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) write_data_summaries(sbi, start_blk); start_blk += data_sum_blocks; - if (cpc->reason == CP_UMOUNT) { + if (__remain_node_summaries(cpc->reason)) { write_node_summaries(sbi, start_blk); start_blk += NR_CURSEG_NODE_TYPE; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 5abe0836c179..8231a599a305 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -100,6 +100,7 @@ enum { enum { CP_UMOUNT, + CP_FASTBOOT, CP_SYNC, CP_DISCARD, }; @@ -764,6 +765,28 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) up_write(&sbi->cp_rwsem); } +static inline int __get_cp_reason(struct f2fs_sb_info *sbi) +{ + int reason = CP_SYNC; + + if (test_opt(sbi, FASTBOOT)) + reason = CP_FASTBOOT; + if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) + reason = CP_UMOUNT; + return reason; +} + +static inline bool __remain_node_summaries(int reason) +{ + return (reason == CP_UMOUNT || reason == CP_FASTBOOT); +} + +static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi) +{ + return (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG) || + is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FASTBOOT_FLAG)); +} + /* * Check whether the given nid is within node id range. */ diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index ba89e27f394f..76adbc3641f1 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -698,8 +698,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi) .iroot = RADIX_TREE_INIT(GFP_NOFS), }; - cpc.reason = test_opt(sbi, FASTBOOT) ? 
CP_UMOUNT : CP_SYNC; - + cpc.reason = __get_cp_reason(sbi); gc_more: if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) goto stop; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 31c4e5702c7d..5ea57ec153d1 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1401,7 +1401,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) segno = le32_to_cpu(ckpt->cur_data_segno[type]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]); - if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) + if (__exist_node_summaries(sbi)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); else blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); @@ -1410,7 +1410,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) CURSEG_HOT_NODE]); blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]); - if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) + if (__exist_node_summaries(sbi)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE); else @@ -1421,7 +1421,7 @@ static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) sum = (struct f2fs_summary_block *)page_address(new); if (IS_NODESEG(type)) { - if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) { + if (__exist_node_summaries(sbi)) { struct f2fs_summary *ns = &sum->entries[0]; int i; for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { @@ -1470,7 +1470,7 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi) type = CURSEG_HOT_NODE; } - if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) + if (__exist_node_summaries(sbi)) ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type), NR_CURSEG_TYPE - type, META_CP); @@ -1567,8 +1567,7 @@ void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { - if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) - write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); + write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); } int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type, diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 06d903b5d6c8..bfeab3c81a48 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -500,9 +500,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync) if (sync) { struct cp_control cpc; - cpc.reason = (test_opt(sbi, FASTBOOT) || - is_sbi_flag_set(sbi, SBI_IS_CLOSE)) ? 
- CP_UMOUNT : CP_SYNC; + cpc.reason = __get_cp_reason(sbi); + mutex_lock(&sbi->gc_mutex); write_checkpoint(sbi, &cpc); mutex_unlock(&sbi->gc_mutex); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index e993b0bc9abf..09805720f4bf 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -87,6 +87,7 @@ struct f2fs_super_block { /* * For checkpoint */ +#define CP_FASTBOOT_FLAG 0x00000020 #define CP_FSCK_FLAG 0x00000010 #define CP_ERROR_FLAG 0x00000008 #define CP_COMPACT_SUM_FLAG 0x00000004 diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 5e1c0292250c..69629826c2ba 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -72,6 +72,7 @@ #define show_cpreason(type) \ __print_symbolic(type, \ { CP_UMOUNT, "Umount" }, \ + { CP_FASTBOOT, "Fastboot" }, \ { CP_SYNC, "Sync" }, \ { CP_DISCARD, "Discard" }) -- cgit v1.2.3 From 081d78c2fc4ab2fef4cdf1100dd22155c73f8657 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 23 Jan 2015 19:16:59 -0800 Subject: f2fs: should fail mount when trying to recover data on read-only dev If device is read-only, we should not proceed data recovery. But, if the previous checkpoint was done by normal clean shutdown, it's safe to proceed the recovery, since there will be no data to be recovered. Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/super.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index bfeab3c81a48..1e92c2ea6bc1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1149,6 +1149,15 @@ try_onemore: /* recover fsynced data */ if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { + /* + * mount should be failed, when device has readonly mode, and + * previous checkpoint was not done by clean system shutdown. + */ + if (bdev_read_only(sb->s_bdev) && + !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) { + err = -EROFS; + goto free_kobj; + } err = recover_fsync_data(sbi); if (err) { f2fs_msg(sb, KERN_ERR, -- cgit v1.2.3 From f68daeebba5a697f31f64c07b8693fa678981819 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 30 Jan 2015 11:39:08 -0800 Subject: f2fs: keep PagePrivate during releasepage If PagePrivate is removed by releasepage, f2fs loses counting dirty pages. e.g., try_to_release_page will not release page when the page is dirty, but our releasepage removes PagePrivate. [] try_to_release_page+0x35/0x50 [] invalidate_inode_pages2_range+0x2f9/0x3b0 [] ? truncate_blocks+0x384/0x4d0 [f2fs] [] ? f2fs_direct_IO+0x283/0x290 [f2fs] [] ? 
get_data_block_fiemap+0x20/0x20 [f2fs] [] generic_file_direct_write+0x163/0x170 [] __generic_file_write_iter+0x2a6/0x350 [] generic_file_write_iter+0x3f/0xb0 [] new_sync_write+0x81/0xb0 [] vfs_write+0xb7/0x1f0 [] SyS_write+0x49/0xb0 [] system_call_fastpath+0x16/0x1b Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 4 ++++ fs/f2fs/data.c | 4 ++++ fs/f2fs/node.c | 4 ++++ 3 files changed, 12 insertions(+) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index f7cdcad31943..470fa58606b2 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -321,6 +321,10 @@ static void f2fs_invalidate_meta_page(struct page *page, unsigned int offset, static int f2fs_release_meta_page(struct page *page, gfp_t wait) { + /* If this is dirty page, keep PagePrivate */ + if (PageDirty(page)) + return 0; + ClearPagePrivate(page); return 1; } diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 6d9e6e4ce439..27dc3fd9fb60 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1130,6 +1130,10 @@ static void f2fs_invalidate_data_page(struct page *page, unsigned int offset, static int f2fs_release_data_page(struct page *page, gfp_t wait) { + /* If this is dirty page, keep PagePrivate */ + if (PageDirty(page)) + return 0; + ClearPagePrivate(page); return 1; } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 05e6faa71cff..9feda386ed80 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1381,6 +1381,10 @@ static void f2fs_invalidate_node_page(struct page *page, unsigned int offset, static int f2fs_release_node_page(struct page *page, gfp_t wait) { + /* If this is dirty page, keep PagePrivate */ + if (PageDirty(page)) + return 0; + ClearPagePrivate(page); return 1; } -- cgit v1.2.3 From d24bdcbfc694026b5cc283cddf47e38f5a7b685d Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 30 Jan 2015 16:43:11 -0800 Subject: f2fs: show the number of writeback pages in stat This patch adds the # of writeback pages in stat info. 
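The new 'wb' figure comes from the same per-type page accounting that inc_page_count()/dec_page_count() maintain in sbi->nr_pages[]. A tiny C11 model of that bookkeeping, trimmed to a few counter types and not the f2fs code itself:

#include <stdatomic.h>
#include <stdio.h>

/* trimmed-down model of sbi->nr_pages[]; the real enum has more entries */
enum count_type { F2FS_DIRTY_DENTS, F2FS_DIRTY_NODES, F2FS_DIRTY_META,
                  F2FS_INMEM_PAGES, F2FS_WRITEBACK, NR_COUNT_TYPE };

static atomic_long nr_pages[NR_COUNT_TYPE];

static void inc_page_count(enum count_type t) { atomic_fetch_add(&nr_pages[t], 1); }
static void dec_page_count(enum count_type t) { atomic_fetch_sub(&nr_pages[t], 1); }
static long get_pages(enum count_type t) { return atomic_load(&nr_pages[t]); }

int main(void)
{
        inc_page_count(F2FS_WRITEBACK);         /* bio submission side */
        printf(" - inmem: %4ld, wb: %4ld\n",
               get_pages(F2FS_INMEM_PAGES), get_pages(F2FS_WRITEBACK));
        dec_page_count(F2FS_WRITEBACK);         /* write end_io side */
        return 0;
}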
Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/debug.c | 5 +++-- fs/f2fs/f2fs.h | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 0f721f6a1147..ac2bd8e7eca9 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -40,6 +40,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->ndirty_dirs = sbi->n_dirty_dirs; si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META); si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES); + si->wb_pages = get_pages(sbi, F2FS_WRITEBACK); si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg; si->rsvd_segs = reserved_segments(sbi); si->overp_segs = overprovision_segments(sbi); @@ -266,8 +267,8 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, "\nExtent Hit Ratio: %d / %d\n", si->hit_ext, si->total_ext); seq_puts(s, "\nBalancing F2FS Async:\n"); - seq_printf(s, " - inmem: %4d\n", - si->inmem_pages); + seq_printf(s, " - inmem: %4d, wb: %4d\n", + si->inmem_pages, si->wb_pages); seq_printf(s, " - nodes: %4d in %4d\n", si->ndirty_node, si->node_pages); seq_printf(s, " - dents: %4d in dirs:%4d\n", diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 8231a599a305..964c240c287f 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1549,7 +1549,7 @@ struct f2fs_stat_info { int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta; int nats, dirty_nats, sits, dirty_sits, fnids; int total_count, utilization; - int bg_gc, inline_inode, inline_dir, inmem_pages; + int bg_gc, inline_inode, inline_dir, inmem_pages, wb_pages; unsigned int valid_count, valid_node_count, valid_inode_count; unsigned int bimodal, avg_vblocks; int util_free, util_valid, util_invalid; -- cgit v1.2.3 From 487261f39bcd8983f55c611e299f70f34659674b Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Thu, 5 Feb 2015 17:44:29 +0800 Subject: f2fs: merge {invalidate,release}page for meta/node/data pages This patch merges ->{invalidate,release}page function for meta/node/data pages. After this, duplication of codes could be removed. 
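The shape of the merged callbacks, reduced to a standalone sketch (the page struct and inode numbers are stand-ins, not kernel types): one invalidate/release pair decides which counter to drop from the owning inode instead of keeping three near-identical copies.

#include <stdbool.h>
#include <stdio.h>

/* stand-in inode numbers; in f2fs these come from the superblock */
#define META_INO 2
#define NODE_INO 1

struct page { unsigned long ino; bool dirty; bool has_private; };

static void dec_count(const char *which) { printf("dec %s\n", which); }

/* one invalidatepage for all three mappings: pick the counter to drop
 * from the owning inode instead of keeping three near-identical copies */
static void f2fs_invalidate_page(struct page *page)
{
        if (page->dirty) {
                if (page->ino == META_INO)
                        dec_count("F2FS_DIRTY_META");
                else if (page->ino == NODE_INO)
                        dec_count("F2FS_DIRTY_NODES");
                else
                        dec_count("dirty pages of the data inode");
        }
        page->has_private = false;
}

/* one releasepage as well: a dirty page keeps its private state */
static bool f2fs_release_page(struct page *page)
{
        if (page->dirty)
                return false;
        page->has_private = false;
        return true;
}

int main(void)
{
        struct page meta = { META_INO, true, true };

        f2fs_invalidate_page(&meta);
        printf("releasable while dirty: %d\n", f2fs_release_page(&meta));
        return 0;
}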
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 24 ++---------------------- fs/f2fs/data.c | 24 ++++++++++++++++-------- fs/f2fs/f2fs.h | 2 ++ fs/f2fs/node.c | 23 ++--------------------- 4 files changed, 22 insertions(+), 51 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 470fa58606b2..31a715b5fd5c 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -309,32 +309,12 @@ static int f2fs_set_meta_page_dirty(struct page *page) return 0; } -static void f2fs_invalidate_meta_page(struct page *page, unsigned int offset, - unsigned int length) -{ - struct inode *inode = page->mapping->host; - - if (PageDirty(page)) - dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_META); - ClearPagePrivate(page); -} - -static int f2fs_release_meta_page(struct page *page, gfp_t wait) -{ - /* If this is dirty page, keep PagePrivate */ - if (PageDirty(page)) - return 0; - - ClearPagePrivate(page); - return 1; -} - const struct address_space_operations f2fs_meta_aops = { .writepage = f2fs_write_meta_page, .writepages = f2fs_write_meta_pages, .set_page_dirty = f2fs_set_meta_page_dirty, - .invalidatepage = f2fs_invalidate_meta_page, - .releasepage = f2fs_release_meta_page, + .invalidatepage = f2fs_invalidate_page, + .releasepage = f2fs_release_page, }; static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 27dc3fd9fb60..d166557ca867 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -1115,20 +1115,28 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, return err; } -static void f2fs_invalidate_data_page(struct page *page, unsigned int offset, - unsigned int length) +void f2fs_invalidate_page(struct page *page, unsigned int offset, + unsigned int length) { struct inode *inode = page->mapping->host; + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE) + if (inode->i_ino >= F2FS_ROOT_INO(sbi) && + (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)) return; - if (PageDirty(page)) - inode_dec_dirty_pages(inode); + if (PageDirty(page)) { + if (inode->i_ino == F2FS_META_INO(sbi)) + dec_page_count(sbi, F2FS_DIRTY_META); + else if (inode->i_ino == F2FS_NODE_INO(sbi)) + dec_page_count(sbi, F2FS_DIRTY_NODES); + else + inode_dec_dirty_pages(inode); + } ClearPagePrivate(page); } -static int f2fs_release_data_page(struct page *page, gfp_t wait) +int f2fs_release_page(struct page *page, gfp_t wait) { /* If this is dirty page, keep PagePrivate */ if (PageDirty(page)) @@ -1183,8 +1191,8 @@ const struct address_space_operations f2fs_dblock_aops = { .write_begin = f2fs_write_begin, .write_end = f2fs_write_end, .set_page_dirty = f2fs_set_data_page_dirty, - .invalidatepage = f2fs_invalidate_data_page, - .releasepage = f2fs_release_data_page, + .invalidatepage = f2fs_invalidate_page, + .releasepage = f2fs_release_page, .direct_IO = f2fs_direct_IO, .bmap = f2fs_bmap, }; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 964c240c287f..db1ff55de0bc 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1520,6 +1520,8 @@ struct page *get_lock_data_page(struct inode *, pgoff_t); struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool); int do_write_data_page(struct page *, struct f2fs_io_info *); int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64); +void f2fs_invalidate_page(struct page *, unsigned int, unsigned int); +int f2fs_release_page(struct page *, gfp_t); /* * gc.c diff --git 
a/fs/f2fs/node.c b/fs/f2fs/node.c index 9feda386ed80..ea22c3210c8a 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1370,25 +1370,6 @@ static int f2fs_set_node_page_dirty(struct page *page) return 0; } -static void f2fs_invalidate_node_page(struct page *page, unsigned int offset, - unsigned int length) -{ - struct inode *inode = page->mapping->host; - if (PageDirty(page)) - dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_NODES); - ClearPagePrivate(page); -} - -static int f2fs_release_node_page(struct page *page, gfp_t wait) -{ - /* If this is dirty page, keep PagePrivate */ - if (PageDirty(page)) - return 0; - - ClearPagePrivate(page); - return 1; -} - /* * Structure of the f2fs node operations */ @@ -1396,8 +1377,8 @@ const struct address_space_operations f2fs_node_aops = { .writepage = f2fs_write_node_page, .writepages = f2fs_write_node_pages, .set_page_dirty = f2fs_set_node_page_dirty, - .invalidatepage = f2fs_invalidate_node_page, - .releasepage = f2fs_release_node_page, + .invalidatepage = f2fs_invalidate_page, + .releasepage = f2fs_release_page, }; static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, -- cgit v1.2.3 From bba681cbb231920a786cd7303462fb2632af6f36 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 26 Jan 2015 17:41:23 -0800 Subject: f2fs: introduce a batched trim This patch introduces a batched trimming feature, which submits split discard commands. This is to avoid long latency due to huge trim commands. If fstrim was triggered ranging from 0 to the end of device, we should lock all the checkpoint-related mutexes, resulting in very long latency. Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- Documentation/ABI/testing/sysfs-fs-f2fs | 6 ++++++ Documentation/filesystems/f2fs.txt | 4 ++++ fs/f2fs/f2fs.h | 7 +++++++ fs/f2fs/segment.c | 17 ++++++++++++----- fs/f2fs/super.c | 2 ++ 5 files changed, 31 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 6f9157f16725..2c4cc42006e8 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -74,3 +74,9 @@ Date: March 2014 Contact: "Jaegeuk Kim" Description: Controls the memory footprint used by f2fs. + +What: /sys/fs/f2fs//trim_sections +Date: February 2015 +Contact: "Jaegeuk Kim" +Description: + Controls the trimming rate in batch mode. diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt index 6758aa358475..dac11d7fef27 100644 --- a/Documentation/filesystems/f2fs.txt +++ b/Documentation/filesystems/f2fs.txt @@ -199,6 +199,10 @@ Files in /sys/fs/f2fs/ checkpoint is triggered, and issued during the checkpoint. By default, it is disabled with 0. + trim_sections This parameter controls the number of sections + to be trimmed out in batch mode when FITRIM + conducts. 32 sections is set by default. + ipu_policy This parameter controls the policy of in-place updates in f2fs. There are five policies: 0x01: F2FS_IPU_FORCE, 0x02: F2FS_IPU_SSR, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index db1ff55de0bc..337204dfc5cd 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -105,6 +105,10 @@ enum { CP_DISCARD, }; +#define DEF_BATCHED_TRIM_SECTIONS 32 +#define BATCHED_TRIM_SEGMENTS(sbi) \ + (SM_I(sbi)->trim_sections * (sbi)->segs_per_sec) + struct cp_control { int reason; __u64 trim_start; @@ -448,6 +452,9 @@ struct f2fs_sm_info { int nr_discards; /* # of discards in the list */ int max_discards; /* max. 
discards to be issued */ + /* for batched trimming */ + unsigned int trim_sections; /* # of sections to trim */ + struct list_head sit_entry_set; /* sit entry set list */ unsigned int ipu_policy; /* in-place-update policy */ diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 5ea57ec153d1..9f278d156d88 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1066,14 +1066,19 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 : GET_SEGNO(sbi, end); cpc.reason = CP_DISCARD; - cpc.trim_start = start_segno; - cpc.trim_end = end_segno; cpc.trim_minlen = range->minlen >> sbi->log_blocksize; /* do checkpoint to issue discard commands safely */ - mutex_lock(&sbi->gc_mutex); - write_checkpoint(sbi, &cpc); - mutex_unlock(&sbi->gc_mutex); + for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { + cpc.trim_start = start_segno; + cpc.trim_end = min_t(unsigned int, rounddown(start_segno + + BATCHED_TRIM_SEGMENTS(sbi), + sbi->segs_per_sec) - 1, end_segno); + + mutex_lock(&sbi->gc_mutex); + write_checkpoint(sbi, &cpc); + mutex_unlock(&sbi->gc_mutex); + } out: range->len = cpc.trimmed << sbi->log_blocksize; return 0; @@ -2127,6 +2132,8 @@ int build_segment_manager(struct f2fs_sb_info *sbi) sm_info->nr_discards = 0; sm_info->max_discards = 0; + sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; + INIT_LIST_HEAD(&sm_info->sit_entry_set); if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) { diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 1e92c2ea6bc1..f2fe666a6ea9 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -195,6 +195,7 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time); F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards); +F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks); @@ -210,6 +211,7 @@ static struct attribute *f2fs_attrs[] = { ATTR_LIST(gc_idle), ATTR_LIST(reclaim_segments), ATTR_LIST(max_small_discards), + ATTR_LIST(batched_trim_sections), ATTR_LIST(ipu_policy), ATTR_LIST(min_ipu_util), ATTR_LIST(min_fsync_blocks), -- cgit v1.2.3 From 560d4672e2f9f15aee2a9de1f1e70ef4c50f95d8 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Sat, 7 Feb 2015 17:36:15 +0800 Subject: f2fs: fix to use highmem for pages of newly created directory In commit a78186ebe516 ("f2fs: use highmem for directory pages"), we have set __GFP_HIGHMEM into dir mapping's gfp flag in f2fs_iget, so high address memory could be used for these existing dir's page. But we forgot to set flag for newly created dir, due to this reason, our newly created dir pages could not be allocated from high address memory. Fix it. 
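The pattern behind the one-liner: the gfp mask is a property of the mapping, set when the inode is initialized, so every path that sets up a directory inode has to apply the same mask. The sketch below is purely illustrative (flag values, types and the helper are invented); the real fix is the mapping_set_gfp_mask(..., GFP_F2FS_HIGH_ZERO) call in the diff.

#include <stdio.h>

/* placeholder flag values standing in for the kernel's GFP_NOFS/__GFP_* bits */
#define GFP_NOFS        0x01u
#define GFP_ZERO        0x02u
#define GFP_HIGHMEM     0x04u
#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | GFP_ZERO | GFP_HIGHMEM)

struct mapping { unsigned int gfp_mask; };
struct inode { struct mapping mapping; };

/* both the "load an existing dir" path and the mkdir path must apply the
 * same mask, or only one of them gets highmem-backed page cache pages */
static void init_dir_mapping(struct inode *dir)
{
        dir->mapping.gfp_mask = GFP_F2FS_HIGH_ZERO;
}

int main(void)
{
        struct inode loaded = { { 0 } }, created = { { 0 } };

        init_dir_mapping(&loaded);      /* f2fs_iget() already did this */
        init_dir_mapping(&created);     /* f2fs_mkdir() now does it too */

        printf("both mappings allow highmem: %d\n",
               (loaded.mapping.gfp_mask & created.mapping.gfp_mask &
                GFP_HIGHMEM) != 0);
        return 0;
}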
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 547a2deeb1ac..e79639a9787a 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -299,7 +299,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); + mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO); set_inode_flag(F2FS_I(inode), FI_INC_LINK); f2fs_lock_op(sbi); -- cgit v1.2.3 From 2e023174a88dd14eab30fae3a1f6c97f37eb3bb8 Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Mon, 9 Feb 2015 11:23:58 +0800 Subject: f2fs: avoid data offset overflow when lseeking huge file xfstest generic/285 complains our issue in lseeking huge file. Here is the detail output of generic/285: "./check -f2fs tests/generic/285 Ran: generic/285 Failures: generic/285 Failed 1 of 1 tests 10. Test a huge file for offset overflow 10.01 SEEK_HOLE expected 65536 or 8589934592, got 65536. succ 10.02 SEEK_HOLE expected 65536 or 8589934592, got 65536. succ 10.03 SEEK_DATA expected 0 or 0, got 0. succ 10.04 SEEK_DATA expected 1 or 1, got 1. succ 10.05 SEEK_HOLE expected 8589934592 or 8589934592, got 0. FAIL 10.06 SEEK_DATA expected 8589869056 or 8589869056, got 8589869056. succ 10.07 SEEK_DATA expected 8589869057 or 8589869057, got 8589869057. succ 10.08 SEEK_DATA expected 8589869056 or 8589869056, got 4294901760. FAIL" The reason of this issue is: We will calculate current offset through left shifting page-offset with PAGE_CACHE_SHIFT bits, but our page-offset is a type of unsigned long, its size is 4 bytes in 32-bits machine. So if our page-offset is bigger than (1 << 32 / pagesize - 1), result of left shifting will overflow. Let's fix this issue by casting type of page-offset to type of current offset: loff_t. Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index ec17d05d6553..7188a2ac64b5 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -357,7 +357,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) /* find data/hole in dnode block */ for (; dn.ofs_in_node < end_offset; dn.ofs_in_node++, pgofs++, - data_ofs = pgofs << PAGE_CACHE_SHIFT) { + data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) { block_t blkaddr; blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); -- cgit v1.2.3 From aaf9607516ed38825268515ef4d773289a44f429 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Fri, 6 Feb 2015 18:53:45 -0800 Subject: f2fs: check node page contents all the time In get_node_page, if the page is up-to-date, we assumed that the page was not reclaimed at all. But, sometimes it was reported that its contents was missing. So, just for sure, let's check its mapping and contents. 
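The defensive pattern this introduces, as a standalone sketch rather than the kernel code: after taking the page lock, re-validate the cached page; wrong contents are treated as an I/O error, while a cleared mapping means the page was reclaimed and the lookup should simply be retried.

#include <stdbool.h>
#include <stdio.h>

struct node_page {
        bool uptodate;
        unsigned int nid;       /* node id recorded in the page footer */
        void *mapping;          /* NULL once the page has been truncated/reclaimed */
};

/* the shape of the check get_node_page() does after this patch: even a page
 * that came back "locked and uptodate" is re-validated under the lock */
static int get_node_page(struct node_page *page, unsigned int nid, bool *retry)
{
        /* lock_page(page) would go here */
        if (!page->uptodate || page->nid != nid)
                return -5;      /* -EIO: the cached contents are not what we asked for */
        if (!page->mapping) {
                *retry = true;  /* reclaimed under us: unlock and look it up again */
                return 0;
        }
        *retry = false;
        return 0;
}

int main(void)
{
        struct node_page reclaimed = { true, 7, NULL };
        bool retry = false;
        int ret = get_node_page(&reclaimed, 7, &retry);

        printf("ret=%d, retry=%d\n", ret, retry);
        return 0;
}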
Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index ea22c3210c8a..1e354ff9f7d8 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1036,11 +1036,11 @@ repeat: err = read_node_page(page, READ_SYNC); if (err < 0) return ERR_PTR(err); - else if (err == LOCKED_PAGE) - goto got_it; + else if (err != LOCKED_PAGE) + lock_page(page); - lock_page(page); if (unlikely(!PageUptodate(page) || nid != nid_of_node(page))) { + ClearPageUptodate(page); f2fs_put_page(page, 1); return ERR_PTR(-EIO); } @@ -1048,7 +1048,6 @@ repeat: f2fs_put_page(page, 1); goto repeat; } -got_it: return page; } -- cgit v1.2.3 From da17eece035d72cb50d48529744a490784f29d2f Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 9 Feb 2015 10:34:38 -0800 Subject: f2fs: call set_buffer_new for get_block This patch fixes wrong handling of buffer_new flag in get_block. If f2fs allocates new blocks and mapped buffer_head, it needs to set buffer_new for the bh_result. Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index d166557ca867..26e247697e58 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -274,7 +274,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs, unsigned int blkbits = inode->i_sb->s_blocksize_bits; size_t count; - clear_buffer_new(bh_result); + set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, start_blkaddr + pgofs - start_fofs); count = end_fofs - pgofs + 1; @@ -634,12 +634,14 @@ static int __get_data_block(struct inode *inode, sector_t iblock, goto put_out; if (dn.data_blkaddr != NULL_ADDR) { + set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, dn.data_blkaddr); } else if (create) { err = __allocate_data_block(&dn); if (err) goto put_out; allocated = true; + set_buffer_new(bh_result); map_bh(bh_result, inode->i_sb, dn.data_blkaddr); } else { goto put_out; -- cgit v1.2.3 From f7ef9b83b583640111039b30e13263b71c3a6ed5 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 9 Feb 2015 12:02:44 -0800 Subject: f2fs: introduce macros to convert bytes and blocks in f2fs This patch adds two macros for transition between byte and block offsets. Currently, f2fs only supports 4KB blocks, so use the default size for now. 
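A quick usage sketch of the two helpers; the macro definitions are the ones added by this patch, while the surrounding program is only an illustration:

#include <inttypes.h>
#include <stdio.h>

/* the two helpers added by this patch (f2fs supports only 4KB blocks) */
#define F2FS_BLKSIZE_BITS       12
#define F2FS_BYTES_TO_BLK(bytes)        ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk)          ((blk) << F2FS_BLKSIZE_BITS)

int main(void)
{
        uint64_t from = 8192 + 123;             /* an arbitrary byte offset */
        uint64_t blk = F2FS_BYTES_TO_BLK(from); /* block index 2 */

        printf("byte %" PRIu64 " is in block %" PRIu64 ", which starts at byte %" PRIu64 "\n",
               from, blk, F2FS_BLK_TO_BYTES(blk));
        return 0;
}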
Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 7 +++---- fs/f2fs/file.c | 3 +-- fs/f2fs/segment.c | 8 ++++---- include/linux/f2fs_fs.h | 4 ++++ 4 files changed, 12 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 31a715b5fd5c..58d88df0d632 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -981,15 +981,14 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* write out checkpoint buffer at block 0 */ cp_page = grab_meta_page(sbi, start_blk++); kaddr = page_address(cp_page); - memcpy(kaddr, ckpt, (1 << sbi->log_blocksize)); + memcpy(kaddr, ckpt, F2FS_BLKSIZE); set_page_dirty(cp_page); f2fs_put_page(cp_page, 1); for (i = 1; i < 1 + cp_payload_blks; i++) { cp_page = grab_meta_page(sbi, start_blk++); kaddr = page_address(cp_page); - memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE, - (1 << sbi->log_blocksize)); + memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE, F2FS_BLKSIZE); set_page_dirty(cp_page); f2fs_put_page(cp_page, 1); } @@ -1009,7 +1008,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* writeout checkpoint block */ cp_page = grab_meta_page(sbi, start_blk); kaddr = page_address(cp_page); - memcpy(kaddr, ckpt, (1 << sbi->log_blocksize)); + memcpy(kaddr, ckpt, F2FS_BLKSIZE); set_page_dirty(cp_page); f2fs_put_page(cp_page, 1); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 7188a2ac64b5..f3b007540a48 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -491,8 +491,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) trace_f2fs_truncate_blocks_enter(inode, from); - free_from = (pgoff_t) - ((from + blocksize - 1) >> (sbi->log_blocksize)); + free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1); if (lock) f2fs_lock_op(sbi); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 9f278d156d88..877a272a8146 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -1048,8 +1048,8 @@ static const struct segment_allocation default_salloc_ops = { int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) { - __u64 start = range->start >> sbi->log_blocksize; - __u64 end = start + (range->len >> sbi->log_blocksize) - 1; + __u64 start = F2FS_BYTES_TO_BLK(range->start); + __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; unsigned int start_segno, end_segno; struct cp_control cpc; @@ -1066,7 +1066,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) end_segno = (end >= MAX_BLKADDR(sbi)) ? 
MAIN_SEGS(sbi) - 1 : GET_SEGNO(sbi, end); cpc.reason = CP_DISCARD; - cpc.trim_minlen = range->minlen >> sbi->log_blocksize; + cpc.trim_minlen = F2FS_BYTES_TO_BLK(range->minlen); /* do checkpoint to issue discard commands safely */ for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { @@ -1080,7 +1080,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) mutex_unlock(&sbi->gc_mutex); } out: - range->len = cpc.trimmed << sbi->log_blocksize; + range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); return 0; } diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 09805720f4bf..a23556c32703 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -19,12 +19,16 @@ #define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */ #define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */ #define F2FS_BLKSIZE 4096 /* support only 4KB block */ +#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ #define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE) #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ +#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS) +#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS) + /* 0, 1(node nid), 2(meta nid) are reserved node id */ #define F2FS_RESERVED_NODE_NUM 3 -- cgit v1.2.3 From 59b802e5a453d413e7222d179be405cbadca3a8e Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Mon, 9 Feb 2015 12:09:53 -0800 Subject: f2fs: allocate data blocks in advance for f2fs_direct_IO This patch adds preallocation for data blocks to prepare f2fs_direct_IO. Signed-off-by: Jaegeuk Kim --- fs/f2fs/data.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 26e247697e58..985ed023a750 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -592,6 +592,56 @@ static int __allocate_data_block(struct dnode_of_data *dn) return 0; } +static void __allocate_data_blocks(struct inode *inode, loff_t offset, + size_t count) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct dnode_of_data dn; + u64 start = F2FS_BYTES_TO_BLK(offset); + u64 len = F2FS_BYTES_TO_BLK(count); + bool allocated; + u64 end_offset; + + while (len) { + f2fs_balance_fs(sbi); + f2fs_lock_op(sbi); + + /* When reading holes, we need its node page */ + set_new_dnode(&dn, inode, NULL, NULL, 0); + if (get_dnode_of_data(&dn, start, ALLOC_NODE)) + goto out; + + allocated = false; + end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); + + while (dn.ofs_in_node < end_offset && len) { + if (dn.data_blkaddr == NULL_ADDR) { + if (__allocate_data_block(&dn)) + goto sync_out; + allocated = true; + } + len--; + start++; + dn.ofs_in_node++; + } + + if (allocated) + sync_inode_page(&dn); + + f2fs_put_dnode(&dn); + f2fs_unlock_op(sbi); + } + return; + +sync_out: + if (allocated) + sync_inode_page(&dn); + f2fs_put_dnode(&dn); +out: + f2fs_unlock_op(sbi); + return; +} + /* * get_data_block() now supported readahead/bmap/rw direct_IO with mapped bh. * If original data blocks are allocated, then give them to blockdev. 
@@ -617,10 +667,8 @@ static int __get_data_block(struct inode *inode, sector_t iblock, if (check_extent_cache(inode, pgofs, bh_result)) goto out; - if (create) { - f2fs_balance_fs(F2FS_I_SB(inode)); + if (create) f2fs_lock_op(F2FS_I_SB(inode)); - } /* When reading holes, we need its node page */ set_new_dnode(&dn, inode, NULL, NULL, 0); @@ -1108,6 +1156,9 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb, trace_f2fs_direct_IO_enter(inode, offset, count, rw); + if (rw & WRITE) + __allocate_data_blocks(inode, offset, count); + err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block); if (err < 0 && (rw & WRITE)) f2fs_write_failed(mapping, offset + count); -- cgit v1.2.3 From 29e7043f405c4c4c3a82f61222790f3ea8c0bf13 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Tue, 10 Feb 2015 16:23:12 -0800 Subject: f2fs: fix sparse warnings This patch resolves the following warnings. include/trace/events/f2fs.h:150:1: warning: expression using sizeof bool include/trace/events/f2fs.h:180:1: warning: expression using sizeof bool include/trace/events/f2fs.h:990:1: warning: expression using sizeof bool include/trace/events/f2fs.h:990:1: warning: expression using sizeof bool include/trace/events/f2fs.h:150:1: warning: odd constant _Bool cast (ffffffffffffffff becomes 1) include/trace/events/f2fs.h:180:1: warning: odd constant _Bool cast (ffffffffffffffff becomes 1) include/trace/events/f2fs.h:990:1: warning: odd constant _Bool cast (ffffffffffffffff becomes 1) include/trace/events/f2fs.h:990:1: warning: odd constant _Bool cast (ffffffffffffffff becomes 1) fs/f2fs/checkpoint.c:27:19: warning: symbol 'inode_entry_slab' was not declared. Should it be static? fs/f2fs/checkpoint.c:577:15: warning: cast to restricted __le32 fs/f2fs/checkpoint.c:592:15: warning: cast to restricted __le32 fs/f2fs/trace.c:19:1: warning: symbol 'pids' was not declared. Should it be static? fs/f2fs/trace.c:21:21: warning: symbol 'last_io' was not declared. Should it be static? 
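One of the fixes above replaces a cast through __u32 with __le32 before le32_to_cpu(), which is the annotation sparse's endianness checking expects. A userspace analogue of the same idiom, using le32toh() in place of the kernel helper (illustrative only):

#define _DEFAULT_SOURCE
#include <endian.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* treat the bytes at crc_offset inside the checkpoint block as little-endian
 * on disk and convert explicitly; the kernel version uses __le32 plus
 * le32_to_cpu(), which is exactly what sparse can type-check */
static uint32_t cp_crc(const unsigned char *cp_block, size_t crc_offset)
{
        uint32_t raw;

        memcpy(&raw, cp_block + crc_offset, sizeof(raw));       /* raw LE bytes */
        return le32toh(raw);
}

int main(void)
{
        unsigned char blk[8] = { 0, 0, 0, 0, 0x78, 0x56, 0x34, 0x12 };

        printf("crc = 0x%" PRIx32 "\n", cp_crc(blk, 4));        /* 0x12345678 on any host */
        return 0;
}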
Signed-off-by: Jaegeuk Kim --- fs/f2fs/checkpoint.c | 4 ++-- fs/f2fs/f2fs.h | 1 + fs/f2fs/gc.h | 2 -- fs/f2fs/trace.c | 4 ++-- include/trace/events/f2fs.h | 13 +++++++------ 5 files changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 58d88df0d632..7f794b72b3b7 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -574,7 +574,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (crc_offset >= blk_size) goto invalid_cp1; - crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); + crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset))); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp1; @@ -589,7 +589,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi, if (crc_offset >= blk_size) goto invalid_cp2; - crc = le32_to_cpu(*((__u32 *)((unsigned char *)cp_block + crc_offset))); + crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset))); if (!f2fs_crc_valid(crc, cp_block, crc_offset)) goto invalid_cp2; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 337204dfc5cd..7fa3313ab0e2 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1681,6 +1681,7 @@ extern const struct address_space_operations f2fs_meta_aops; extern const struct inode_operations f2fs_dir_inode_operations; extern const struct inode_operations f2fs_symlink_inode_operations; extern const struct inode_operations f2fs_special_inode_operations; +extern struct kmem_cache *inode_entry_slab; /* * inline.c diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index d5ff97c5e394..b4a65be9f7d3 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -40,8 +40,6 @@ struct gc_inode_list { struct radix_tree_root iroot; }; -extern struct kmem_cache *inode_entry_slab; - /* * inline functions */ diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c index ce01a2c903bd..875aa8179bc1 100644 --- a/fs/f2fs/trace.c +++ b/fs/f2fs/trace.c @@ -16,9 +16,9 @@ #include "f2fs.h" #include "trace.h" -RADIX_TREE(pids, GFP_ATOMIC); +static RADIX_TREE(pids, GFP_ATOMIC); static spinlock_t pids_lock; -struct last_io_info last_io; +static struct last_io_info last_io; static inline void __print_last_io(void) { diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index 69629826c2ba..5422dbfaf97d 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -149,14 +149,14 @@ DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter, TRACE_EVENT(f2fs_sync_file_exit, - TP_PROTO(struct inode *inode, bool need_cp, int datasync, int ret), + TP_PROTO(struct inode *inode, int need_cp, int datasync, int ret), TP_ARGS(inode, need_cp, datasync, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) - __field(bool, need_cp) + __field(int, need_cp) __field(int, datasync) __field(int, ret) ), @@ -185,7 +185,7 @@ TRACE_EVENT(f2fs_sync_fs, TP_STRUCT__entry( __field(dev_t, dev) - __field(bool, dirty) + __field(int, dirty) __field(int, wait) ), @@ -989,14 +989,15 @@ TRACE_EVENT(f2fs_issue_discard, TRACE_EVENT(f2fs_issue_flush, - TP_PROTO(struct super_block *sb, bool nobarrier, bool flush_merge), + TP_PROTO(struct super_block *sb, unsigned int nobarrier, + unsigned int flush_merge), TP_ARGS(sb, nobarrier, flush_merge), TP_STRUCT__entry( __field(dev_t, dev) - __field(bool, nobarrier) - __field(bool, flush_merge) + __field(unsigned int, nobarrier) + __field(unsigned int, flush_merge) ), TP_fast_assign( -- cgit v1.2.3 From 60a3b782b1aaf6e5f8c4f92e99302c48a26d475b Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim 
Date: Tue, 10 Feb 2015 16:44:29 -0800 Subject: f2fs: avoid variable length array Instead of using variable length array, this patch let preallocate memory for them. Signed-off-by: Jaegeuk Kim --- fs/f2fs/debug.c | 1 + fs/f2fs/segment.c | 10 ++++++++-- fs/f2fs/segment.h | 1 + 3 files changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index ac2bd8e7eca9..e671373cc8ab 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -142,6 +142,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi) si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry); si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi)); si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi); + si->base_mem += SIT_VBLOCK_MAP_SIZE; if (sbi->segs_per_sec > 1) si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry); si->base_mem += __bitmap_size(sbi, SIT_BITMAP); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 877a272a8146..c9d314f44568 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -505,7 +505,7 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc) struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); unsigned long *cur_map = (unsigned long *)se->cur_valid_map; unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; - unsigned long dmap[entries]; + unsigned long *dmap = SIT_I(sbi)->tmp_map; unsigned int start = 0, end = -1; bool force = (cpc->reason == CP_DISCARD); int i; @@ -924,7 +924,7 @@ static void __next_free_blkoff(struct f2fs_sb_info *sbi, { struct seg_entry *se = get_seg_entry(sbi, seg->segno); int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); - unsigned long target_map[entries]; + unsigned long *target_map = SIT_I(sbi)->tmp_map; unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; unsigned long *cur_map = (unsigned long *)se->cur_valid_map; int i, pos; @@ -1855,6 +1855,10 @@ static int build_sit_info(struct f2fs_sb_info *sbi) return -ENOMEM; } + sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); + if (!sit_i->tmp_map) + return -ENOMEM; + if (sbi->segs_per_sec > 1) { sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) * sizeof(struct sec_entry)); @@ -2236,6 +2240,8 @@ static void destroy_sit_info(struct f2fs_sb_info *sbi) kfree(sit_i->sentries[start].ckpt_valid_map); } } + kfree(sit_i->tmp_map); + vfree(sit_i->sentries); vfree(sit_i->sec_entries); kfree(sit_i->dirty_sentries_bitmap); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 421d5794b0c8..ba858ca01445 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -189,6 +189,7 @@ struct sit_info { char *sit_bitmap; /* SIT bitmap pointer */ unsigned int bitmap_size; /* SIT bitmap size */ + unsigned long *tmp_map; /* bitmap for temporal use */ unsigned long *dirty_sentries_bitmap; /* bitmap for dirty sentries */ unsigned int dirty_sentries; /* # of dirty sentries */ unsigned int sents_per_block; /* # of SIT entries per block */ -- cgit v1.2.3 From f1a3b98e73a9f811ab4882669043c50c0e0dc7b6 Mon Sep 17 00:00:00 2001 From: Jaegeuk Kim Date: Wed, 11 Feb 2015 11:25:11 -0800 Subject: f2fs: fix accessing wrong indexed data blocks This patch fixes the following test. 
This causes: attempt to access beyond end of device sdb2: rw=16384, want=14413962000, limit=16777216 The reason is: - f2fs_write_begin - f2fs_convert_inline_inode returns -ENOSPC - f2fs_write_failed - truncate_blocks - truncate_partial_data_page - find_data_page - get_dnode_of_data returns wrong data index retrieved from inline_data - f2fs_submit_page_bio(wrong data index) - submit_bio(wrong data index) Signed-off-by: Jaegeuk Kim --- fs/f2fs/node.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 1e354ff9f7d8..97bd9d3db882 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -474,7 +474,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct page *npage[4]; - struct page *parent; + struct page *parent = NULL; int offset[4]; unsigned int noffset[4]; nid_t nids[4]; @@ -491,6 +491,14 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode) if (IS_ERR(npage[0])) return PTR_ERR(npage[0]); } + + /* if inline_data is set, should not report any block indices */ + if (f2fs_has_inline_data(dn->inode) && index) { + err = -EINVAL; + f2fs_put_page(npage[0], 1); + goto release_out; + } + parent = npage[0]; if (level != 0) nids[1] = get_nid(parent, offset[0], true); -- cgit v1.2.3 From 1a118ccfd60fc78e64c0a3ab9e85075545839d6e Mon Sep 17 00:00:00 2001 From: Chao Yu Date: Wed, 11 Feb 2015 18:20:38 +0800 Subject: f2fs: use spinlock for segmap_lock instead of rwlock rwlock can provide better concurrency when there are many more readers than writers, because readers can hold the rwlock simultaneously. But now, for the segmap_lock rwlock in struct free_segmap_info, there is only one reader, 'mount', from the call path below: ->f2fs_fill_super ->build_segment_manager ->build_dirty_segmap ->init_dirty_segmap ->find_next_inuse read_lock ... read_unlock Since there is no other reader for this lock, concurrency cannot be improved, so we do not need the rwlock_t type for segmap_lock; let's replace it with spinlock_t.
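For reference, a minimal sketch of the rwlock-to-spinlock conversion pattern applied here (the struct and function names below are generic placeholders, not f2fs symbols; kernel context assumed):

/* Sketch only: a lock that only ever guards short, non-sleeping
 * bitmap operations gains nothing from reader/writer semantics. */
struct foo_info {
	spinlock_t lock;		/* was: rwlock_t lock; */
	unsigned long *bitmap;
};

static unsigned long foo_find_next(struct foo_info *fi,
				   unsigned long max, unsigned long start)
{
	unsigned long ret;

	spin_lock(&fi->lock);		/* was: read_lock(&fi->lock); */
	ret = find_next_bit(fi->bitmap, max, start);
	spin_unlock(&fi->lock);		/* was: read_unlock(&fi->lock); */
	return ret;
}

At initialization time, spin_lock_init() takes the place of rwlock_init(), as in build_free_segmap() below.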
Signed-off-by: Chao Yu Signed-off-by: Jaegeuk Kim --- fs/f2fs/segment.c | 6 +++--- fs/f2fs/segment.h | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index c9d314f44568..daee4ab913da 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -800,7 +800,7 @@ static void get_new_segment(struct f2fs_sb_info *sbi, int go_left = 0; int i; - write_lock(&free_i->segmap_lock); + spin_lock(&free_i->segmap_lock); if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) { segno = find_next_zero_bit(free_i->free_segmap, @@ -873,7 +873,7 @@ got_it: f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap)); __set_inuse(sbi, segno); *newseg = segno; - write_unlock(&free_i->segmap_lock); + spin_unlock(&free_i->segmap_lock); } static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified) @@ -1923,7 +1923,7 @@ static int build_free_segmap(struct f2fs_sb_info *sbi) free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi)); free_i->free_segments = 0; free_i->free_sections = 0; - rwlock_init(&free_i->segmap_lock); + spin_lock_init(&free_i->segmap_lock); return 0; } diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index ba858ca01445..7fd35111cf62 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -208,7 +208,7 @@ struct free_segmap_info { unsigned int start_segno; /* start segment number logically */ unsigned int free_segments; /* # of free segments */ unsigned int free_sections; /* # of free sections */ - rwlock_t segmap_lock; /* free segmap lock */ + spinlock_t segmap_lock; /* free segmap lock */ unsigned long *free_segmap; /* free segment bitmap */ unsigned long *free_secmap; /* free section bitmap */ }; @@ -319,9 +319,9 @@ static inline unsigned int find_next_inuse(struct free_segmap_info *free_i, unsigned int max, unsigned int segno) { unsigned int ret; - read_lock(&free_i->segmap_lock); + spin_lock(&free_i->segmap_lock); ret = find_next_bit(free_i->free_segmap, max, segno); - read_unlock(&free_i->segmap_lock); + spin_unlock(&free_i->segmap_lock); return ret; } @@ -332,7 +332,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) unsigned int start_segno = secno * sbi->segs_per_sec; unsigned int next; - write_lock(&free_i->segmap_lock); + spin_lock(&free_i->segmap_lock); clear_bit(segno, free_i->free_segmap); free_i->free_segments++; @@ -341,7 +341,7 @@ static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno) clear_bit(secno, free_i->free_secmap); free_i->free_sections++; } - write_unlock(&free_i->segmap_lock); + spin_unlock(&free_i->segmap_lock); } static inline void __set_inuse(struct f2fs_sb_info *sbi, @@ -363,7 +363,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi, unsigned int start_segno = secno * sbi->segs_per_sec; unsigned int next; - write_lock(&free_i->segmap_lock); + spin_lock(&free_i->segmap_lock); if (test_and_clear_bit(segno, free_i->free_segmap)) { free_i->free_segments++; @@ -374,7 +374,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi, free_i->free_sections++; } } - write_unlock(&free_i->segmap_lock); + spin_unlock(&free_i->segmap_lock); } static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi, @@ -382,13 +382,13 @@ static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi, { struct free_segmap_info *free_i = FREE_I(sbi); unsigned int secno = segno / sbi->segs_per_sec; - write_lock(&free_i->segmap_lock); + spin_lock(&free_i->segmap_lock); if (!test_and_set_bit(segno, 
free_i->free_segmap)) { free_i->free_segments--; if (!test_and_set_bit(secno, free_i->free_secmap)) free_i->free_sections--; } - write_unlock(&free_i->segmap_lock); + spin_unlock(&free_i->segmap_lock); } static inline void get_sit_bitmap(struct f2fs_sb_info *sbi, -- cgit v1.2.3 From 56873f43abdcd574b25105867a990f067747b2f4 Mon Sep 17 00:00:00 2001 From: "Wang, Yalin" Date: Wed, 11 Feb 2015 15:24:51 -0800 Subject: mm:add KPF_ZERO_PAGE flag for /proc/kpageflags Add KPF_ZERO_PAGE flag for zero_page, so that userspace processes can detect zero_page in /proc/kpageflags, and then do memory analysis more accurately. Signed-off-by: Yalin Wang Acked-by: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/vm/pagemap.txt | 8 ++++++++ fs/proc/page.c | 16 +++++++++++++--- include/linux/huge_mm.h | 12 ++++++++++++ include/uapi/linux/kernel-page-flags.h | 1 + mm/huge_memory.c | 7 +------ tools/vm/page-types.c | 1 + 6 files changed, 36 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt index 5948e455c4d2..6fbd55ef6b45 100644 --- a/Documentation/vm/pagemap.txt +++ b/Documentation/vm/pagemap.txt @@ -62,6 +62,8 @@ There are three components to pagemap: 20. NOPAGE 21. KSM 22. THP + 23. BALLOON + 24. ZERO_PAGE Short descriptions to the page flags: @@ -102,6 +104,12 @@ Short descriptions to the page flags: 22. THP contiguous pages which construct transparent hugepages +23. BALLOON + balloon compaction page + +24. ZERO_PAGE + zero page for pfn_zero or huge_zero page + [IO related page flags] 1. ERROR IO error occurred 3. UPTODATE page has up-to-date data diff --git a/fs/proc/page.c b/fs/proc/page.c index 1e3187da1fed..7eee2d8b97d9 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -121,9 +122,18 @@ u64 stable_page_flags(struct page *page) * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon * to make sure a given page is a thp, not a non-huge compound page. 
*/ - else if (PageTransCompound(page) && (PageLRU(compound_head(page)) || - PageAnon(compound_head(page)))) - u |= 1 << KPF_THP; + else if (PageTransCompound(page)) { + struct page *head = compound_head(page); + + if (PageLRU(head) || PageAnon(head)) + u |= 1 << KPF_THP; + else if (is_huge_zero_page(head)) { + u |= 1 << KPF_ZERO_PAGE; + u |= 1 << KPF_THP; + } + } else if (is_zero_pfn(page_to_pfn(page))) + u |= 1 << KPF_ZERO_PAGE; + /* * Caveats on high order pages: page->_count will only be set diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ad9051bab267..f10b20f05159 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -157,6 +157,13 @@ static inline int hpage_nr_pages(struct page *page) extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, pmd_t *pmdp); +extern struct page *huge_zero_page; + +static inline bool is_huge_zero_page(struct page *page) +{ + return ACCESS_ONCE(huge_zero_page) == page; +} + #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) @@ -206,6 +213,11 @@ static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_str return 0; } +static inline bool is_huge_zero_page(struct page *page) +{ + return false; +} + #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h index 2f96d233c980..a6c4962e5d46 100644 --- a/include/uapi/linux/kernel-page-flags.h +++ b/include/uapi/linux/kernel-page-flags.h @@ -32,6 +32,7 @@ #define KPF_KSM 21 #define KPF_THP 22 #define KPF_BALLOON 23 +#define KPF_ZERO_PAGE 24 #endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 817a875f2b8c..889713180980 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -171,12 +171,7 @@ static int start_khugepaged(void) } static atomic_t huge_zero_refcount; -static struct page *huge_zero_page __read_mostly; - -static inline bool is_huge_zero_page(struct page *page) -{ - return ACCESS_ONCE(huge_zero_page) == page; -} +struct page *huge_zero_page __read_mostly; static inline bool is_huge_zero_pmd(pmd_t pmd) { diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index 264fbc297e0b..8bdf16b8ba60 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c @@ -133,6 +133,7 @@ static const char * const page_flag_names[] = { [KPF_KSM] = "x:ksm", [KPF_THP] = "t:thp", [KPF_BALLOON] = "o:balloon", + [KPF_ZERO_PAGE] = "z:zero_page", [KPF_RESERVED] = "r:reserved", [KPF_MLOCKED] = "m:mlocked", -- cgit v1.2.3 From dc6c9a35b66b520cf67e05d8ca60ebecad3b0479 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 11 Feb 2015 15:26:50 -0800 Subject: mm: account pmd page tables to the process Dave noticed that unprivileged process can allocate significant amount of memory -- >500 MiB on x86_64 -- and stay unnoticed by oom-killer and memory cgroup. The trick is to allocate a lot of PMD page tables. Linux kernel doesn't account PMD tables to the process, only PTE. The use-cases below use few tricks to allocate a lot of PMD page tables while keeping VmRSS and VmPTE low. oom_score for the process will be 0. 
#include #include #include #include #include #include #define PUD_SIZE (1UL << 30) #define PMD_SIZE (1UL << 21) #define NR_PUD 130000 int main(void) { char *addr = NULL; unsigned long i; prctl(PR_SET_THP_DISABLE); for (i = 0; i < NR_PUD ; i++) { addr = mmap(addr + PUD_SIZE, PUD_SIZE, PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); if (addr == MAP_FAILED) { perror("mmap"); break; } *addr = 'x'; munmap(addr, PMD_SIZE); mmap(addr, PMD_SIZE, PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); if (addr == MAP_FAILED) perror("re-mmap"), exit(1); } printf("PID %d consumed %lu KiB in PMD page tables\n", getpid(), i * 4096 >> 10); return pause(); } The patch addresses the issue by accounting PMD tables to the process the same way we account PTE tables. The main places where PMD tables are accounted are __pmd_alloc() and free_pmd_range(). But there are a few corner cases: - HugeTLB can share PMD page tables. The patch handles this by accounting the table to all processes that share it. - x86 PAE pre-allocates a few PMD tables on fork. - Architectures with FIRST_USER_ADDRESS > 0. We need to adjust the sanity check on exit(2). Accounting only happens on configurations where the PMD page table level is present (PMD is not folded). As with nr_ptes, we use a per-mm counter. The counter value is used to calculate the baseline for the badness score by the oom-killer. Signed-off-by: Kirill A. Shutemov Reported-by: Dave Hansen Cc: Hugh Dickins Reviewed-by: Cyrill Gorcunov Cc: Pavel Emelyanov Cc: David Rientjes Tested-by: Sedat Dilek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/sysctl/vm.txt | 12 ++++++------ arch/x86/mm/pgtable.c | 14 +++++++++-----  fs/proc/task_mmu.c | 9 ++++++--- include/linux/mm.h | 24 ++++++++++++++++++++++++ include/linux/mm_types.h | 3 ++- kernel/fork.c | 3 +++ mm/debug.c | 3 ++- mm/hugetlb.c | 8 ++++++-- mm/memory.c | 15 +++++++++------ mm/mmap.c | 4 +++- mm/oom_kill.c | 9 +++++---- 11 files changed, 75 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 4415aa915681..e9c706e4627a 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -555,12 +555,12 @@ this is causing problems for your system/application. oom_dump_tasks -Enables a system-wide task dump (excluding kernel threads) to be -produced when the kernel performs an OOM-killing and includes such -information as pid, uid, tgid, vm size, rss, nr_ptes, swapents, -oom_score_adj score, and name. This is helpful to determine why the -OOM killer was invoked, to identify the rogue task that caused it, -and to determine why the OOM killer chose the task it did to kill. +Enables a system-wide task dump (excluding kernel threads) to be produced +when the kernel performs an OOM-killing and includes such information as +pid, uid, tgid, vm size, rss, nr_ptes, nr_pmds, swapents, oom_score_adj +score, and name. This is helpful to determine why the OOM killer was +invoked, to identify the rogue task that caused it, and to determine why +the OOM killer chose the task it did to kill. If this is set to zero, this information is suppressed.
On very large systems with thousands of tasks it may not be feasible to dump diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 6fb6927f9e76..7b22adaad4f1 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -190,7 +190,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) #endif /* CONFIG_X86_PAE */ -static void free_pmds(pmd_t *pmds[]) +static void free_pmds(struct mm_struct *mm, pmd_t *pmds[]) { int i; @@ -198,10 +198,11 @@ static void free_pmds(pmd_t *pmds[]) if (pmds[i]) { pgtable_pmd_page_dtor(virt_to_page(pmds[i])); free_page((unsigned long)pmds[i]); + mm_dec_nr_pmds(mm); } } -static int preallocate_pmds(pmd_t *pmds[]) +static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[]) { int i; bool failed = false; @@ -215,11 +216,13 @@ static int preallocate_pmds(pmd_t *pmds[]) pmd = NULL; failed = true; } + if (pmd) + mm_inc_nr_pmds(mm); pmds[i] = pmd; } if (failed) { - free_pmds(pmds); + free_pmds(mm, pmds); return -ENOMEM; } @@ -246,6 +249,7 @@ static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); pmd_free(mm, pmd); + mm_dec_nr_pmds(mm); } } } @@ -283,7 +287,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) mm->pgd = pgd; - if (preallocate_pmds(pmds) != 0) + if (preallocate_pmds(mm, pmds) != 0) goto out_free_pgd; if (paravirt_pgd_alloc(mm) != 0) @@ -304,7 +308,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) return pgd; out_free_pmds: - free_pmds(pmds); + free_pmds(mm, pmds); out_free_pgd: free_page((unsigned long)pgd); out: diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 6396f88c6687..e6e0abeb5d12 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -21,7 +21,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) { - unsigned long data, text, lib, swap; + unsigned long data, text, lib, swap, ptes, pmds; unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; /* @@ -42,6 +42,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; swap = get_mm_counter(mm, MM_SWAPENTS); + ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes); + pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm); seq_printf(m, "VmPeak:\t%8lu kB\n" "VmSize:\t%8lu kB\n" @@ -54,6 +56,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) "VmExe:\t%8lu kB\n" "VmLib:\t%8lu kB\n" "VmPTE:\t%8lu kB\n" + "VmPMD:\t%8lu kB\n" "VmSwap:\t%8lu kB\n", hiwater_vm << (PAGE_SHIFT-10), total_vm << (PAGE_SHIFT-10), @@ -63,8 +66,8 @@ void task_mem(struct seq_file *m, struct mm_struct *mm) total_rss << (PAGE_SHIFT-10), data << (PAGE_SHIFT-10), mm->stack_vm << (PAGE_SHIFT-10), text, lib, - (PTRS_PER_PTE * sizeof(pte_t) * - atomic_long_read(&mm->nr_ptes)) >> 10, + ptes >> 10, + pmds >> 10, swap << (PAGE_SHIFT-10)); } diff --git a/include/linux/mm.h b/include/linux/mm.h index c6bf813a6b3d..644990b83cda 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1438,8 +1438,32 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud, { return 0; } + +static inline unsigned long mm_nr_pmds(struct mm_struct *mm) +{ + return 0; +} + +static inline void mm_inc_nr_pmds(struct mm_struct *mm) {} +static inline void mm_dec_nr_pmds(struct mm_struct *mm) {} + #else int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); + +static inline unsigned long mm_nr_pmds(struct mm_struct *mm) +{ + return atomic_long_read(&mm->nr_pmds); +} + +static inline void 
mm_inc_nr_pmds(struct mm_struct *mm) +{ + atomic_long_inc(&mm->nr_pmds); +} + +static inline void mm_dec_nr_pmds(struct mm_struct *mm) +{ + atomic_long_dec(&mm->nr_pmds); +} #endif int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 20ff2105b564..199a03aab8dc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -363,7 +363,8 @@ struct mm_struct { pgd_t * pgd; atomic_t mm_users; /* How many users with user space? */ atomic_t mm_count; /* How many references to "struct mm_struct" (users count as 1) */ - atomic_long_t nr_ptes; /* Page table pages */ + atomic_long_t nr_ptes; /* PTE page table pages */ + atomic_long_t nr_pmds; /* PMD page table pages */ int map_count; /* number of VMAs */ spinlock_t page_table_lock; /* Protects page tables and some counters */ diff --git a/kernel/fork.c b/kernel/fork.c index b379d9abddc7..c99098c52641 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -555,6 +555,9 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) INIT_LIST_HEAD(&mm->mmlist); mm->core_state = NULL; atomic_long_set(&mm->nr_ptes, 0); +#ifndef __PAGETABLE_PMD_FOLDED + atomic_long_set(&mm->nr_pmds, 0); +#endif mm->map_count = 0; mm->locked_vm = 0; mm->pinned_vm = 0; diff --git a/mm/debug.c b/mm/debug.c index d69cb5a7ba9a..3eb3ac2fcee7 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -173,7 +173,7 @@ void dump_mm(const struct mm_struct *mm) "get_unmapped_area %p\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" - "pgd %p mm_users %d mm_count %d nr_ptes %lu map_count %d\n" + "pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" @@ -206,6 +206,7 @@ void dump_mm(const struct mm_struct *mm) mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), atomic_long_read((atomic_long_t *)&mm->nr_ptes), + mm_nr_pmds((struct mm_struct *)mm), mm->map_count, mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index fd28d6ba5e5d..0a9ac6c26832 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3598,6 +3598,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) if (saddr) { spte = huge_pte_offset(svma->vm_mm, saddr); if (spte) { + mm_inc_nr_pmds(mm); get_page(virt_to_page(spte)); break; } @@ -3609,11 +3610,13 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte); spin_lock(ptl); - if (pud_none(*pud)) + if (pud_none(*pud)) { pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); - else + } else { put_page(virt_to_page(spte)); + mm_inc_nr_pmds(mm); + } spin_unlock(ptl); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); @@ -3644,6 +3647,7 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) pud_clear(pud); put_page(virt_to_page(ptep)); + mm_dec_nr_pmds(mm); *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE; return 1; } diff --git a/mm/memory.c b/mm/memory.c index d63849b5188f..bbe6a73a899d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -428,6 +428,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, pmd = pmd_offset(pud, start); pud_clear(pud); pmd_free_tlb(tlb, pmd, start); + mm_dec_nr_pmds(tlb->mm); } 
static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, @@ -3322,15 +3323,17 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) spin_lock(&mm->page_table_lock); #ifndef __ARCH_HAS_4LEVEL_HACK - if (pud_present(*pud)) /* Another has populated it */ - pmd_free(mm, new); - else + if (!pud_present(*pud)) { + mm_inc_nr_pmds(mm); pud_populate(mm, pud, new); -#else - if (pgd_present(*pud)) /* Another has populated it */ + } else /* Another has populated it */ pmd_free(mm, new); - else +#else + if (!pgd_present(*pud)) { + mm_inc_nr_pmds(mm); pgd_populate(mm, pud, new); + } else /* Another has populated it */ + pmd_free(mm, new); #endif /* __ARCH_HAS_4LEVEL_HACK */ spin_unlock(&mm->page_table_lock); return 0; diff --git a/mm/mmap.c b/mm/mmap.c index 14d84666e8ba..6a7d36d133fb 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2853,7 +2853,9 @@ void exit_mmap(struct mm_struct *mm) vm_unacct_memory(nr_accounted); WARN_ON(atomic_long_read(&mm->nr_ptes) > - (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + round_up(FIRST_USER_ADDRESS, PMD_SIZE) >> PMD_SHIFT); + WARN_ON(mm_nr_pmds(mm) > + round_up(FIRST_USER_ADDRESS, PUD_SIZE) >> PUD_SHIFT); } /* Insert vm structure into process list sorted by address diff --git a/mm/oom_kill.c b/mm/oom_kill.c index b8df76ee2be3..642f38cb175a 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -169,8 +169,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, * The baseline for the badness score is the proportion of RAM that each * task's rss, pagetable and swap space use. */ - points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) + - get_mm_counter(p->mm, MM_SWAPENTS); + points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) + + atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm); task_unlock(p); /* @@ -351,7 +351,7 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) struct task_struct *p; struct task_struct *task; - pr_info("[ pid ] uid tgid total_vm rss nr_ptes swapents oom_score_adj name\n"); + pr_info("[ pid ] uid tgid total_vm rss nr_ptes nr_pmds swapents oom_score_adj name\n"); rcu_read_lock(); for_each_process(p) { if (oom_unkillable_task(p, memcg, nodemask)) @@ -367,10 +367,11 @@ static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) continue; } - pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu %5hd %s\n", + pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu %5hd %s\n", task->pid, from_kuid(&init_user_ns, task_uid(task)), task->tgid, task->mm->total_vm, get_mm_rss(task->mm), atomic_long_read(&task->mm->nr_ptes), + mm_nr_pmds(task->mm), get_mm_counter(task->mm, MM_SWAPENTS), task->signal->oom_score_adj, task->comm); task_unlock(task); -- cgit v1.2.3 From 8d38633c3b4093aca7524945f1e9249d7d3a44da Mon Sep 17 00:00:00 2001 From: Konstantin Khebnikov Date: Wed, 11 Feb 2015 15:26:55 -0800 Subject: page_writeback: put account_page_redirty() after set_page_dirty() Helper account_page_redirty() fixes dirty pages counter for redirtied pages. This patch puts it after dirtying and prevents temporary underflows of dirtied pages counters on zone/bdi and current->nr_dirtied. 
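For reference, a minimal sketch of the ordering this patch establishes (kernel context assumed; the wrapper function name is a placeholder, the two helpers are the ones touched by the patch):

/* Sketch only: mark the page dirty again first, then account the
 * redirty, so the dirtied-page counters never dip transiently. */
static int example_redirty_page(struct writeback_control *wbc,
				struct page *page)
{
	int ret;

	wbc->pages_skipped++;
	ret = __set_page_dirty_nobuffers(page);	/* dirty the page first */
	account_page_redirty(page);		/* then fix up the counters */
	return ret;
}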
Signed-off-by: Konstantin Khebnikov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/btrfs/extent_io.c | 2 +- mm/page-writeback.c | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 790dbae3343c..c73df6a7c9b6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1407,8 +1407,8 @@ int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) while (index <= end_index) { page = find_get_page(inode->i_mapping, index); BUG_ON(!page); /* Pages should be in the extent_io_tree */ - account_page_redirty(page); __set_page_dirty_nobuffers(page); + account_page_redirty(page); page_cache_release(page); index++; } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index fb71e9deca85..6a73e47e81c6 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2168,9 +2168,12 @@ EXPORT_SYMBOL(account_page_redirty); */ int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page) { + int ret; + wbc->pages_skipped++; + ret = __set_page_dirty_nobuffers(page); account_page_redirty(page); - return __set_page_dirty_nobuffers(page); + return ret; } EXPORT_SYMBOL(redirty_page_for_writepage); -- cgit v1.2.3 From 05fbf357d94152171bc50f8a369390f1f16efd89 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 11 Feb 2015 15:27:31 -0800 Subject: proc/pagemap: walk page tables under pte lock Lockless access to pte in pagemap_pte_range() might race with page migration and trigger BUG_ON(!PageLocked()) in migration_entry_to_page(): CPU A (pagemap) CPU B (migration) lock_page() try_to_unmap(page, TTU_MIGRATION...) make_migration_entry() set_pte_at() pte_to_pagemap_entry() remove_migration_ptes() unlock_page() if(is_migration_entry()) migration_entry_to_page() BUG_ON(!PageLocked(page)) Also lockless read might be non-atomic if pte is larger than wordsize. Other pte walkers (smaps, numa_maps, clear_refs) already lock ptes. Fixes: 052fb0d635df ("proc: report file/anon bit in /proc/pid/pagemap") Signed-off-by: Konstantin Khlebnikov Reported-by: Andrey Ryabinin Reviewed-by: Cyrill Gorcunov Acked-by: Naoya Horiguchi Acked-by: Kirill A. Shutemov Cc: [3.5+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index e6e0abeb5d12..eeab30fcffcc 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1056,7 +1056,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct vm_area_struct *vma; struct pagemapread *pm = walk->private; spinlock_t *ptl; - pte_t *pte; + pte_t *pte, *orig_pte; int err = 0; /* find the first VMA at or above 'addr' */ @@ -1117,15 +1117,19 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, BUG_ON(is_vm_hugetlb_page(vma)); /* Addresses in the VMA. 
*/ - for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { + orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; - pte = pte_offset_map(pmd, addr); + pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); - pte_unmap(pte); err = add_to_pagemap(addr, &pme, pm); if (err) - return err; + break; } + pte_unmap_unlock(orig_pte, ptl); + + if (err) + return err; if (addr == end) break; -- cgit v1.2.3 From 14eb6fdd4204d215a14ecd9f84a1ca66faabcc4d Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 11 Feb 2015 15:27:43 -0800 Subject: smaps: remove mem_size_stats->vma and use walk_page_vma() pagewalk.c can handle vma in itself, so we don't have to pass vma via walk->private. And show_smap() walks pages on vma basis, so using walk_page_vma() is preferable. Signed-off-by: Naoya Horiguchi Acked-by: Kirill A. Shutemov Cc: "Kirill A. Shutemov" Cc: Andrea Arcangeli Cc: Cyrill Gorcunov Cc: Dave Hansen Cc: Pavel Emelyanov Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index eeab30fcffcc..73425398b38c 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -436,7 +436,6 @@ const struct file_operations proc_tid_maps_operations = { #ifdef CONFIG_PROC_PAGE_MONITOR struct mem_size_stats { - struct vm_area_struct *vma; unsigned long resident; unsigned long shared_clean; unsigned long shared_dirty; @@ -485,7 +484,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, struct mm_walk *walk) { struct mem_size_stats *mss = walk->private; - struct vm_area_struct *vma = mss->vma; + struct vm_area_struct *vma = walk->vma; struct page *page = NULL; if (pte_present(*pte)) { @@ -509,7 +508,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, struct mm_walk *walk) { struct mem_size_stats *mss = walk->private; - struct vm_area_struct *vma = mss->vma; + struct vm_area_struct *vma = walk->vma; struct page *page; /* FOLL_DUMP will return -EFAULT on huge zero page */ @@ -530,8 +529,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { - struct mem_size_stats *mss = walk->private; - struct vm_area_struct *vma = mss->vma; + struct vm_area_struct *vma = walk->vma; pte_t *pte; spinlock_t *ptl; @@ -623,10 +621,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid) }; memset(&mss, 0, sizeof mss); - mss.vma = vma; /* mmap_sem is held in m_start */ - if (vma->vm_mm && !is_vm_hugetlb_page(vma)) - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); + walk_page_vma(vma, &smaps_walk); show_map_vma(m, vma, is_pid); -- cgit v1.2.3 From 5c64f52acdbc615e3ef58692f42ee00b83d0225d Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 11 Feb 2015 15:27:46 -0800 Subject: clear_refs: remove clear_refs_private->vma and introduce clear_refs_test_walk() clear_refs_write() has some prechecks to determine if we really walk over a given vma. Now we have a test_walk() callback to filter vmas, so let's utilize it. Signed-off-by: Naoya Horiguchi Acked-by: Kirill A. Shutemov Cc: "Kirill A. 
Shutemov" Cc: Andrea Arcangeli Cc: Cyrill Gorcunov Cc: Dave Hansen Cc: Pavel Emelyanov Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 46 ++++++++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 73425398b38c..bed0834715a5 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -736,7 +736,6 @@ enum clear_refs_types { }; struct clear_refs_private { - struct vm_area_struct *vma; enum clear_refs_types type; }; @@ -767,7 +766,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct clear_refs_private *cp = walk->private; - struct vm_area_struct *vma = cp->vma; + struct vm_area_struct *vma = walk->vma; pte_t *pte, ptent; spinlock_t *ptl; struct page *page; @@ -801,6 +800,25 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, return 0; } +static int clear_refs_test_walk(unsigned long start, unsigned long end, + struct mm_walk *walk) +{ + struct clear_refs_private *cp = walk->private; + struct vm_area_struct *vma = walk->vma; + + /* + * Writing 1 to /proc/pid/clear_refs affects all pages. + * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. + * Writing 3 to /proc/pid/clear_refs only affects file mapped pages. + * Writing 4 to /proc/pid/clear_refs affects all pages. + */ + if (cp->type == CLEAR_REFS_ANON && vma->vm_file) + return 1; + if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) + return 1; + return 0; +} + static ssize_t clear_refs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -841,6 +859,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, }; struct mm_walk clear_refs_walk = { .pmd_entry = clear_refs_pte_range, + .test_walk = clear_refs_test_walk, .mm = mm, .private = &cp, }; @@ -860,28 +879,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, } mmu_notifier_invalidate_range_start(mm, 0, -1); } - for (vma = mm->mmap; vma; vma = vma->vm_next) { - cp.vma = vma; - if (is_vm_hugetlb_page(vma)) - continue; - /* - * Writing 1 to /proc/pid/clear_refs affects all pages. - * - * Writing 2 to /proc/pid/clear_refs only affects - * Anonymous pages. - * - * Writing 3 to /proc/pid/clear_refs only affects file - * mapped pages. - * - * Writing 4 to /proc/pid/clear_refs affects all pages. - */ - if (type == CLEAR_REFS_ANON && vma->vm_file) - continue; - if (type == CLEAR_REFS_MAPPED && !vma->vm_file) - continue; - walk_page_range(vma->vm_start, vma->vm_end, - &clear_refs_walk); - } + walk_page_range(0, ~0UL, &clear_refs_walk); if (type == CLEAR_REFS_SOFT_DIRTY) mmu_notifier_invalidate_range_end(mm, 0, -1); flush_tlb_mm(mm); -- cgit v1.2.3 From f995ece24dfecb3614468befbe4e6e777b854cc0 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 11 Feb 2015 15:27:48 -0800 Subject: pagemap: use walk->vma instead of calling find_vma() Page table walker has the information of the current vma in mm_walk, so we don't have to call find_vma() in each pagemap_(pte|hugetlb)_range() call any longer. Currently pagemap_pte_range() does vma loop itself, so this patch reduces many lines of code. NULL-vma check is omitted because we assume that we never run these callbacks on any address outside vma. And even if it were broken, NULL pointer dereference would be detected, so we can get enough information for debugging. Signed-off-by: Naoya Horiguchi Cc: "Kirill A. 
Shutemov" Cc: Andrea Arcangeli Cc: Cyrill Gorcunov Cc: Dave Hansen Cc: Kirill A. Shutemov Cc: Pavel Emelyanov Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 68 +++++++++++------------------------------------------- 1 file changed, 14 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index bed0834715a5..4206706dd92a 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1047,15 +1047,13 @@ static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemap static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { - struct vm_area_struct *vma; + struct vm_area_struct *vma = walk->vma; struct pagemapread *pm = walk->private; spinlock_t *ptl; pte_t *pte, *orig_pte; int err = 0; - /* find the first VMA at or above 'addr' */ - vma = find_vma(walk->mm, addr); - if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { + if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { int pmd_flags2; if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd)) @@ -1081,55 +1079,20 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (pmd_trans_unstable(pmd)) return 0; - while (1) { - /* End of address space hole, which we mark as non-present. */ - unsigned long hole_end; - - if (vma) - hole_end = min(end, vma->vm_start); - else - hole_end = end; - - for (; addr < hole_end; addr += PAGE_SIZE) { - pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); - - err = add_to_pagemap(addr, &pme, pm); - if (err) - return err; - } - - if (!vma || vma->vm_start >= end) - break; - /* - * We can't possibly be in a hugetlb VMA. In general, - * for a mm_walk with a pmd_entry and a hugetlb_entry, - * the pmd_entry can only be called on addresses in a - * hugetlb if the walk starts in a non-hugetlb VMA and - * spans a hugepage VMA. Since pagemap_read walks are - * PMD-sized and PMD-aligned, this will never be true. - */ - BUG_ON(is_vm_hugetlb_page(vma)); - - /* Addresses in the VMA. */ - orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); - for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) { - pagemap_entry_t pme; - - pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); - err = add_to_pagemap(addr, &pme, pm); - if (err) - break; - } - pte_unmap_unlock(orig_pte, ptl); + /* + * We can assume that @vma always points to a valid one and @end never + * goes beyond vma->vm_end. 
+ */ + orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + for (; addr < end; pte++, addr += PAGE_SIZE) { + pagemap_entry_t pme; + pte_to_pagemap_entry(&pme, pm, vma, addr, *pte); + err = add_to_pagemap(addr, &pme, pm); if (err) - return err; - - if (addr == end) break; - - vma = find_vma(walk->mm, addr); } + pte_unmap_unlock(orig_pte, ptl); cond_resched(); @@ -1155,15 +1118,12 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, struct mm_walk *walk) { struct pagemapread *pm = walk->private; - struct vm_area_struct *vma; + struct vm_area_struct *vma = walk->vma; int err = 0; int flags2; pagemap_entry_t pme; - vma = find_vma(walk->mm, addr); - WARN_ON_ONCE(!vma); - - if (vma && (vma->vm_flags & VM_SOFTDIRTY)) + if (vma->vm_flags & VM_SOFTDIRTY) flags2 = __PM_SOFT_DIRTY; else flags2 = 0; -- cgit v1.2.3 From 632fd60fe46f9159f059ed7612eb529e475302a9 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 11 Feb 2015 15:27:51 -0800 Subject: numa_maps: fix typo in gather_hugetbl_stats Just doing s/gather_hugetbl_stats/gather_hugetlb_stats/g, this makes code grep-friendly. Signed-off-by: Naoya Horiguchi Acked-by: Kirill A. Shutemov Cc: "Kirill A. Shutemov" Cc: Andrea Arcangeli Cc: Cyrill Gorcunov Cc: Dave Hansen Cc: Pavel Emelyanov Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 4206706dd92a..ae4bc2960077 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1385,7 +1385,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, return 0; } #ifdef CONFIG_HUGETLB_PAGE -static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, +static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct numa_maps *md; @@ -1404,7 +1404,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, } #else -static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask, +static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { return 0; @@ -1435,7 +1435,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) md->vma = vma; - walk.hugetlb_entry = gather_hugetbl_stats; + walk.hugetlb_entry = gather_hugetlb_stats; walk.pmd_entry = gather_pte_stats; walk.private = md; walk.mm = mm; -- cgit v1.2.3 From d85f4d6d3bfe3b82e2903ac51a2f837eab7115d7 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 11 Feb 2015 15:27:54 -0800 Subject: numa_maps: remove numa_maps->vma pagewalk.c can handle vma in itself, so we don't have to pass vma via walk->private. And show_numa_map() walks pages on vma basis, so using walk_page_vma() is preferable. Signed-off-by: Naoya Horiguchi Acked-by: Kirill A. Shutemov Cc: "Kirill A. 
Shutemov" Cc: Andrea Arcangeli Cc: Cyrill Gorcunov Cc: Dave Hansen Cc: Pavel Emelyanov Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index ae4bc2960077..a36db4ad140b 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1283,7 +1283,6 @@ const struct file_operations proc_pagemap_operations = { #ifdef CONFIG_NUMA struct numa_maps { - struct vm_area_struct *vma; unsigned long pages; unsigned long anon; unsigned long active; @@ -1352,18 +1351,17 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, static int gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { - struct numa_maps *md; + struct numa_maps *md = walk->private; + struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *orig_pte; pte_t *pte; - md = walk->private; - - if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) { + if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { pte_t huge_pte = *(pte_t *)pmd; struct page *page; - page = can_gather_numa_stats(huge_pte, md->vma, addr); + page = can_gather_numa_stats(huge_pte, vma, addr); if (page) gather_stats(page, md, pte_dirty(huge_pte), HPAGE_PMD_SIZE/PAGE_SIZE); @@ -1375,7 +1373,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, return 0; orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { - struct page *page = can_gather_numa_stats(*pte, md->vma, addr); + struct page *page = can_gather_numa_stats(*pte, vma, addr); if (!page) continue; gather_stats(page, md, pte_dirty(*pte), 1); @@ -1422,7 +1420,12 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) struct numa_maps *md = &numa_priv->md; struct file *file = vma->vm_file; struct mm_struct *mm = vma->vm_mm; - struct mm_walk walk = {}; + struct mm_walk walk = { + .hugetlb_entry = gather_hugetlb_stats, + .pmd_entry = gather_pte_stats, + .private = md, + .mm = mm, + }; struct mempolicy *pol; char buffer[64]; int nid; @@ -1433,13 +1436,6 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) /* Ensure we start with an empty set of numa_maps statistics. */ memset(md, 0, sizeof(*md)); - md->vma = vma; - - walk.hugetlb_entry = gather_hugetlb_stats; - walk.pmd_entry = gather_pte_stats; - walk.private = md; - walk.mm = mm; - pol = __get_vma_policy(vma, vma->vm_start); if (pol) { mpol_to_str(buffer, sizeof(buffer), pol); @@ -1473,7 +1469,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) if (is_vm_hugetlb_page(vma)) seq_puts(m, " huge"); - walk_page_range(vma->vm_start, vma->vm_end, &walk); + /* mmap_sem is held by m_start */ + walk_page_vma(vma, &walk); if (!md->pages) goto out; -- cgit v1.2.3 From 48684a65b4e3ff544d62532c1b78962c9677b632 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Wed, 11 Feb 2015 15:28:06 -0800 Subject: mm: pagewalk: fix misbehavior of walk_page_range for vma(VM_PFNMAP) walk_page_range() silently skips vma having VM_PFNMAP set, which leads to undesirable behaviour at client end (who called walk_page_range). For example for pagemap_read(), when no callbacks are called against VM_PFNMAP vma, pagemap_read() may prepare pagemap data for next virtual address range at wrong index. That could confuse and/or break userspace applications. 
This patch avoids this misbehavior caused by vma(VM_PFNMAP) as follows: - for pagemap_read(), which has its own ->pte_hole(), call the ->pte_hole() over vma(VM_PFNMAP), - for clear_refs and queue_pages, which have their own ->test_walk, just return 1 and skip vma(VM_PFNMAP). This is not a problem because these are not interested in hole regions, - for other callers, just skip the vma(VM_PFNMAP) as the default behavior. Signed-off-by: Naoya Horiguchi Signed-off-by: Shiraz Hashim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 3 +++ mm/mempolicy.c | 3 +++ mm/pagewalk.c | 21 +++++++++++++-------- 3 files changed, 19 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index a36db4ad140b..f5ca96524f5f 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -806,6 +806,9 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end, struct clear_refs_private *cp = walk->private; struct vm_area_struct *vma = walk->vma; + if (vma->vm_flags & VM_PFNMAP) + return 1; + /* * Writing 1 to /proc/pid/clear_refs affects all pages. * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b1dcd11d867a..f1bd23803576 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -591,6 +591,9 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, unsigned long endvma = vma->vm_end; unsigned long flags = qp->flags; + if (vma->vm_flags & VM_PFNMAP) + return 1; + if (endvma > end) endvma = end; if (vma->vm_start > start) diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 4c9a653ba563..75c1f2878519 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -35,7 +35,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, do { again: next = pmd_addr_end(addr, end); - if (pmd_none(*pmd)) { + if (pmd_none(*pmd) || !walk->vma) { if (walk->pte_hole) err = walk->pte_hole(addr, next, walk); if (err) @@ -165,9 +165,6 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end, * or skip it via the returned value. Return 0 if we do walk over the * current vma, and return 1 if we skip the vma. Negative values means * error, where we abort the current walk. - * - * Default check (only VM_PFNMAP check for now) is used when the caller - * doesn't define test_walk() callback. */ static int walk_page_test(unsigned long start, unsigned long end, struct mm_walk *walk) @@ -178,11 +175,19 @@ static int walk_page_test(unsigned long start, unsigned long end, return walk->test_walk(start, end, walk); /* - * Do not walk over vma(VM_PFNMAP), because we have no valid struct - * page backing a VM_PFNMAP range. See also commit a9ff785e4437. + * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP + * range, so we don't walk over it as we do for normal vmas. However, + * Some callers are interested in handling hole range and they don't + * want to just ignore any single address range. Such users certainly + * define their ->pte_hole() callbacks, so let's delegate them to handle + * vma(VM_PFNMAP). */ - if (vma->vm_flags & VM_PFNMAP) - return 1; + if (vma->vm_flags & VM_PFNMAP) { + int err = 1; + if (walk->pte_hole) + err = walk->pte_hole(start, end, walk); + return err ? err : 1; + } return 0; } -- cgit v1.2.3 From 7d5b3bfaa2da150ce2dc45546f2125b854f962ef Mon Sep 17 00:00:00 2001 From: "Kirill A.
Shutemov" Date: Wed, 11 Feb 2015 15:28:08 -0800 Subject: mm: /proc/pid/clear_refs: avoid split_huge_page() Currently pagewalker splits all THP pages on any clear_refs request. It's not necessary. We can handle this on PMD level. One side effect is that soft dirty will potentially see more dirty memory, since we will mark whole THP page dirty at once. Sanity checked with CRIU test suite. More testing is required. Signed-off-by: Kirill A. Shutemov Signed-off-by: Naoya Horiguchi Reviewed-by: Cyrill Gorcunov Cc: Pavel Emelyanov Cc: Andrea Arcangeli Cc: Dave Hansen Cc: "Kirill A. Shutemov" Cc: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 47 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 44 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f5ca96524f5f..0e36c1e49fe3 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -739,10 +739,10 @@ struct clear_refs_private { enum clear_refs_types type; }; +#ifdef CONFIG_MEM_SOFT_DIRTY static inline void clear_soft_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *pte) { -#ifdef CONFIG_MEM_SOFT_DIRTY /* * The soft-dirty tracker uses #PF-s to catch writes * to pages, so write-protect the pte as well. See the @@ -759,9 +759,35 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, } set_pte_at(vma->vm_mm, addr, pte, ptent); -#endif } +static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + + pmd = pmd_wrprotect(pmd); + pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY); + + if (vma->vm_flags & VM_SOFTDIRTY) + vma->vm_flags &= ~VM_SOFTDIRTY; + + set_pmd_at(vma->vm_mm, addr, pmdp, pmd); +} + +#else + +static inline void clear_soft_dirty(struct vm_area_struct *vma, + unsigned long addr, pte_t *pte) +{ +} + +static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ +} +#endif + static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { @@ -771,7 +797,22 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, spinlock_t *ptl; struct page *page; - split_huge_page_pmd(vma, addr, pmd); + if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) { + if (cp->type == CLEAR_REFS_SOFT_DIRTY) { + clear_soft_dirty_pmd(vma, addr, pmd); + goto out; + } + + page = pmd_page(*pmd); + + /* Clear accessed and referenced bits. */ + pmdp_test_and_clear_young(vma, addr, pmd); + ClearPageReferenced(page); +out: + spin_unlock(ptl); + return 0; + } + if (pmd_trans_unstable(pmd)) return 0; -- cgit v1.2.3 From 0ab39de6b341189589093777f241dd803b417fc0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 11 Feb 2015 16:08:32 +0300 Subject: nfsd: fix comparison in fh_fsid_match() We're supposed to be testing that the fh_fsid's match but because the parenthesis are in the wrong place, then we only check the first byte. Fixes: 9558f2500a20 ('nfsd: add fh_fsid_match helper') Signed-off-by: Dan Carpenter Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfsfh.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index 84cae2079d21..f22920442172 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h @@ -200,7 +200,7 @@ static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2) { if (fh1->fh_fsid_type != fh2->fh_fsid_type) return false; - if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type) != 0)) + if (memcmp(fh1->fh_fsid, fh2->fh_fsid, key_len(fh1->fh_fsid_type)) != 0) return false; return true; } -- cgit v1.2.3 From 503c358cf1925853195ee39ec437e51138bbb7df Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 12 Feb 2015 14:58:47 -0800 Subject: list_lru: introduce list_lru_shrink_{count,walk} Kmem accounting of memcg is unusable now, because it lacks slab shrinker support. That means when we hit the limit we will get ENOMEM w/o any chance to recover. What we should do then is to call shrink_slab, which would reclaim old inode/dentry caches from this cgroup. This is what this patch set is intended to do. Basically, it does two things. First, it introduces the notion of per-memcg slab shrinker. A shrinker that wants to reclaim objects per cgroup should mark itself as SHRINKER_MEMCG_AWARE. Then it will be passed the memory cgroup to scan from in shrink_control->memcg. For such shrinkers shrink_slab iterates over the whole cgroup subtree under the target cgroup and calls the shrinker for each kmem-active memory cgroup. Secondly, this patch set makes the list_lru structure per-memcg. It's done transparently to list_lru users - everything they have to do is to tell list_lru_init that they want memcg-aware list_lru. Then the list_lru will automatically distribute objects among per-memcg lists basing on which cgroup the object is accounted to. This way to make FS shrinkers (icache, dcache) memcg-aware we only need to make them use memcg-aware list_lru, and this is what this patch set does. As before, this patch set only enables per-memcg kmem reclaim when the pressure goes from memory.limit, not from memory.kmem.limit. Handling memory.kmem.limit is going to be tricky due to GFP_NOFS allocations, and it is still unclear whether we will have this knob in the unified hierarchy. This patch (of 9): NUMA aware slab shrinkers use the list_lru structure to distribute objects coming from different NUMA nodes to different lists. Whenever such a shrinker needs to count or scan objects from a particular node, it issues commands like this: count = list_lru_count_node(lru, sc->nid); freed = list_lru_walk_node(lru, sc->nid, isolate_func, isolate_arg, &sc->nr_to_scan); where sc is an instance of the shrink_control structure passed to it from vmscan. To simplify this, let's add special list_lru functions to be used by shrinkers, list_lru_shrink_count() and list_lru_shrink_walk(), which consolidate the nid and nr_to_scan arguments in the shrink_control structure. This will also allow us to avoid patching shrinkers that use list_lru when we make shrink_slab() per-memcg - all we will have to do is extend the shrink_control structure to include the target memcg and make list_lru_shrink_{count,walk} handle this appropriately. 
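For reference, a minimal sketch of what a list_lru-backed shrinker looks like with the new helpers (the example_* names are placeholders for a shrinker's own lru, isolate callback and dispose routine; kernel context assumed):

/* Sketch only: the shrinker forwards the shrink_control instead of
 * unpacking sc->nid and sc->nr_to_scan by hand. */
static unsigned long example_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return list_lru_shrink_count(&example_lru, sc);
}

static unsigned long example_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_shrink_walk(&example_lru, sc,
				     example_isolate, &dispose);
	example_dispose_list(&dispose);
	return freed;
}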
Signed-off-by: Vladimir Davydov Suggested-by: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Cc: Greg Thelen Cc: Glauber Costa Cc: Alexander Viro Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dcache.c | 14 ++++++-------- fs/gfs2/quota.c | 6 +++--- fs/inode.c | 7 +++---- fs/internal.h | 7 +++---- fs/super.c | 24 +++++++++++------------- fs/xfs/xfs_buf.c | 7 +++---- fs/xfs/xfs_qm.c | 7 +++---- include/linux/list_lru.h | 16 ++++++++++++++++ mm/workingset.c | 6 +++--- 9 files changed, 51 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index e368d4f412f9..56c5da89f58a 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -930,24 +930,22 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) /** * prune_dcache_sb - shrink the dcache * @sb: superblock - * @nr_to_scan : number of entries to try to free - * @nid: which node to scan for freeable entities + * @sc: shrink control, passed to list_lru_shrink_walk() * - * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is - * done when we need more memory an called from the superblock shrinker + * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This + * is done when we need more memory and called from the superblock shrinker * function. * * This function may fail to free any resources if all the dentries are in * use. */ -long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan, - int nid) +long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc) { LIST_HEAD(dispose); long freed; - freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate, - &dispose, &nr_to_scan); + freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc, + dentry_lru_isolate, &dispose); shrink_dentry_list(&dispose); return freed; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 3e193cb36996..c15d6b216d0b 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -171,8 +171,8 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink, if (!(sc->gfp_mask & __GFP_FS)) return SHRINK_STOP; - freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate, - &dispose, &sc->nr_to_scan); + freed = list_lru_shrink_walk(&gfs2_qd_lru, sc, + gfs2_qd_isolate, &dispose); gfs2_qd_dispose(&dispose); @@ -182,7 +182,7 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink, static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { - return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid)); + return vfs_pressure_ratio(list_lru_shrink_count(&gfs2_qd_lru, sc)); } struct shrinker gfs2_qd_shrinker = { diff --git a/fs/inode.c b/fs/inode.c index 3a53b1da3fb8..524a32c2b0c6 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -751,14 +751,13 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) * to trim from the LRU. Inodes to be freed are moved to a temporary list and * then are freed outside inode_lock by dispose_list(). 
*/ -long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan, - int nid) +long prune_icache_sb(struct super_block *sb, struct shrink_control *sc) { LIST_HEAD(freeable); long freed; - freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate, - &freeable, &nr_to_scan); + freed = list_lru_shrink_walk(&sb->s_inode_lru, sc, + inode_lru_isolate, &freeable); dispose_list(&freeable); return freed; } diff --git a/fs/internal.h b/fs/internal.h index e9a61fe67575..d92c346a793d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -14,6 +14,7 @@ struct file_system_type; struct linux_binprm; struct path; struct mount; +struct shrink_control; /* * block_dev.c @@ -111,8 +112,7 @@ extern int open_check_o_direct(struct file *f); * inode.c */ extern spinlock_t inode_sb_list_lock; -extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan, - int nid); +extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc); extern void inode_add_lru(struct inode *inode); /* @@ -129,8 +129,7 @@ extern int invalidate_inodes(struct super_block *, bool); */ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); extern int d_set_mounted(struct dentry *dentry); -extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan, - int nid); +extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc); /* * read_write.c diff --git a/fs/super.c b/fs/super.c index eae088f6aaae..4554ac257647 100644 --- a/fs/super.c +++ b/fs/super.c @@ -77,8 +77,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink, if (sb->s_op->nr_cached_objects) fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid); - inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid); - dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid); + inodes = list_lru_shrink_count(&sb->s_inode_lru, sc); + dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc); total_objects = dentries + inodes + fs_objects + 1; if (!total_objects) total_objects = 1; @@ -86,20 +86,20 @@ static unsigned long super_cache_scan(struct shrinker *shrink, /* proportion the scan between the caches */ dentries = mult_frac(sc->nr_to_scan, dentries, total_objects); inodes = mult_frac(sc->nr_to_scan, inodes, total_objects); + fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects); /* * prune the dcache first as the icache is pinned by it, then * prune the icache, followed by the filesystem specific caches */ - freed = prune_dcache_sb(sb, dentries, sc->nid); - freed += prune_icache_sb(sb, inodes, sc->nid); + sc->nr_to_scan = dentries; + freed = prune_dcache_sb(sb, sc); + sc->nr_to_scan = inodes; + freed += prune_icache_sb(sb, sc); - if (fs_objects) { - fs_objects = mult_frac(sc->nr_to_scan, fs_objects, - total_objects); + if (fs_objects) freed += sb->s_op->free_cached_objects(sb, fs_objects, sc->nid); - } drop_super(sb); return freed; @@ -118,17 +118,15 @@ static unsigned long super_cache_count(struct shrinker *shrink, * scalability bottleneck. The counts could get updated * between super_cache_count and super_cache_scan anyway. * Call to super_cache_count with shrinker_rwsem held - * ensures the safety of call to list_lru_count_node() and + * ensures the safety of call to list_lru_shrink_count() and * s_op->nr_cached_objects(). 
*/ if (sb->s_op && sb->s_op->nr_cached_objects) total_objects = sb->s_op->nr_cached_objects(sb, sc->nid); - total_objects += list_lru_count_node(&sb->s_dentry_lru, - sc->nid); - total_objects += list_lru_count_node(&sb->s_inode_lru, - sc->nid); + total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc); + total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc); total_objects = vfs_pressure_ratio(total_objects); return total_objects; diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index bb502a391792..15c9d224c721 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1583,10 +1583,9 @@ xfs_buftarg_shrink_scan( struct xfs_buftarg, bt_shrinker); LIST_HEAD(dispose); unsigned long freed; - unsigned long nr_to_scan = sc->nr_to_scan; - freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate, - &dispose, &nr_to_scan); + freed = list_lru_shrink_walk(&btp->bt_lru, sc, + xfs_buftarg_isolate, &dispose); while (!list_empty(&dispose)) { struct xfs_buf *bp; @@ -1605,7 +1604,7 @@ xfs_buftarg_shrink_count( { struct xfs_buftarg *btp = container_of(shrink, struct xfs_buftarg, bt_shrinker); - return list_lru_count_node(&btp->bt_lru, sc->nid); + return list_lru_shrink_count(&btp->bt_lru, sc); } void diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 3e8186279541..4f4b1274e144 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -523,7 +523,6 @@ xfs_qm_shrink_scan( struct xfs_qm_isolate isol; unsigned long freed; int error; - unsigned long nr_to_scan = sc->nr_to_scan; if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT)) return 0; @@ -531,8 +530,8 @@ xfs_qm_shrink_scan( INIT_LIST_HEAD(&isol.buffers); INIT_LIST_HEAD(&isol.dispose); - freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol, - &nr_to_scan); + freed = list_lru_shrink_walk(&qi->qi_lru, sc, + xfs_qm_dquot_isolate, &isol); error = xfs_buf_delwri_submit(&isol.buffers); if (error) @@ -557,7 +556,7 @@ xfs_qm_shrink_count( struct xfs_quotainfo *qi = container_of(shrink, struct xfs_quotainfo, qi_shrinker); - return list_lru_count_node(&qi->qi_lru, sc->nid); + return list_lru_shrink_count(&qi->qi_lru, sc); } /* diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index f3434533fbf8..f500a2e39b13 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -9,6 +9,7 @@ #include #include +#include /* list_lru_walk_cb has to always return one of those */ enum lru_status { @@ -81,6 +82,13 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item); * Callers that want such a guarantee need to provide an outer lock. 
*/ unsigned long list_lru_count_node(struct list_lru *lru, int nid); + +static inline unsigned long list_lru_shrink_count(struct list_lru *lru, + struct shrink_control *sc) +{ + return list_lru_count_node(lru, sc->nid); +} + static inline unsigned long list_lru_count(struct list_lru *lru) { long count = 0; @@ -119,6 +127,14 @@ unsigned long list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate, void *cb_arg, unsigned long *nr_to_walk); +static inline unsigned long +list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc, + list_lru_walk_cb isolate, void *cb_arg) +{ + return list_lru_walk_node(lru, sc->nid, isolate, cb_arg, + &sc->nr_to_scan); +} + static inline unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate, void *cb_arg, unsigned long nr_to_walk) diff --git a/mm/workingset.c b/mm/workingset.c index f7216fa7da27..d4fa7fb10a52 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -275,7 +275,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ local_irq_disable(); - shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid); + shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); local_irq_enable(); pages = node_present_pages(sc->nid); @@ -376,8 +376,8 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker, /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ local_irq_disable(); - ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid, - shadow_lru_isolate, NULL, &sc->nr_to_scan); + ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc, + shadow_lru_isolate, NULL); local_irq_enable(); return ret; } -- cgit v1.2.3 From 4101b624352fddb5ed72e7a1b6f8be8cffaa20fa Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 12 Feb 2015 14:58:51 -0800 Subject: fs: consolidate {nr,free}_cached_objects args in shrink_control We are going to make FS shrinkers memcg-aware. To achieve that, we will have to pass the memcg to scan to the nr_cached_objects and free_cached_objects VFS methods, which currently take only the NUMA node to scan. Since the shrink_control structure already holds the node, and the memcg to scan will be added to it when we introduce memcg-aware vmscan, let us consolidate the methods' arguments in this structure to keep things clean. 
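For illustration, a filesystem's cached-object hooks written against the consolidated interface would look roughly like the sketch below; the example_* helpers are hypothetical placeholders, not functions from this series:

static long example_nr_cached_objects(struct super_block *sb,
				      struct shrink_control *sc)
{
	/* only a count is needed here; sc->nid scopes it to a node */
	return example_count_reclaimable(sb, sc->nid);
}

static long example_free_cached_objects(struct super_block *sb,
					struct shrink_control *sc)
{
	/* the scan target now arrives in sc->nr_to_scan */
	return example_reclaim(sb, sc->nr_to_scan);
}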
Signed-off-by: Vladimir Davydov Suggested-by: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Cc: Greg Thelen Cc: Glauber Costa Cc: Alexander Viro Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 12 ++++++------ fs/xfs/xfs_super.c | 7 +++---- include/linux/fs.h | 6 ++++-- 3 files changed, 13 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index 4554ac257647..a2b735a42e74 100644 --- a/fs/super.c +++ b/fs/super.c @@ -75,7 +75,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink, return SHRINK_STOP; if (sb->s_op->nr_cached_objects) - fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid); + fs_objects = sb->s_op->nr_cached_objects(sb, sc); inodes = list_lru_shrink_count(&sb->s_inode_lru, sc); dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc); @@ -97,9 +97,10 @@ static unsigned long super_cache_scan(struct shrinker *shrink, sc->nr_to_scan = inodes; freed += prune_icache_sb(sb, sc); - if (fs_objects) - freed += sb->s_op->free_cached_objects(sb, fs_objects, - sc->nid); + if (fs_objects) { + sc->nr_to_scan = fs_objects; + freed += sb->s_op->free_cached_objects(sb, sc); + } drop_super(sb); return freed; @@ -122,8 +123,7 @@ static unsigned long super_cache_count(struct shrinker *shrink, * s_op->nr_cached_objects(). */ if (sb->s_op && sb->s_op->nr_cached_objects) - total_objects = sb->s_op->nr_cached_objects(sb, - sc->nid); + total_objects = sb->s_op->nr_cached_objects(sb, sc); total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc); total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index f2449fd86926..8fcc4ccc5c79 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -1537,7 +1537,7 @@ xfs_fs_mount( static long xfs_fs_nr_cached_objects( struct super_block *sb, - int nid) + struct shrink_control *sc) { return xfs_reclaim_inodes_count(XFS_M(sb)); } @@ -1545,10 +1545,9 @@ xfs_fs_nr_cached_objects( static long xfs_fs_free_cached_objects( struct super_block *sb, - long nr_to_scan, - int nid) + struct shrink_control *sc) { - return xfs_reclaim_inodes_nr(XFS_M(sb), nr_to_scan); + return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan); } static const struct super_operations xfs_super_operations = { diff --git a/include/linux/fs.h b/include/linux/fs.h index cdcb1e9d9613..a20d6586e517 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1635,8 +1635,10 @@ struct super_operations { struct dquot **(*get_dquots)(struct inode *); #endif int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); - long (*nr_cached_objects)(struct super_block *, int); - long (*free_cached_objects)(struct super_block *, long, int); + long (*nr_cached_objects)(struct super_block *, + struct shrink_control *); + long (*free_cached_objects)(struct super_block *, + struct shrink_control *); }; /* -- cgit v1.2.3 From cb731d6c62bbc2f890b08ea3d0386d5dad887326 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 12 Feb 2015 14:58:54 -0800 Subject: vmscan: per memory cgroup slab shrinkers This patch adds SHRINKER_MEMCG_AWARE flag. If a shrinker has this flag set, it will be called per memory cgroup. The memory cgroup to scan objects from is passed in shrink_control->memcg. If the memory cgroup is NULL, a memcg aware shrinker is supposed to scan objects from the global list. Unaware shrinkers are only called on global pressure with memcg=NULL. 
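As a rough sketch of what opting in looks like (demo_lru, demo_isolate and demo_dispose are hypothetical, standing in for a cache's own list_lru and callbacks), a memcg aware shrinker keeps using the list_lru_shrink_* helpers introduced earlier in the series and just adds the new flag:

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	/* sc->memcg is NULL under global pressure */
	return list_lru_shrink_count(&demo_lru, sc);
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	freed = list_lru_shrink_walk(&demo_lru, sc, demo_isolate, &dispose);
	demo_dispose(&dispose);
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
};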
Signed-off-by: Vladimir Davydov Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Cc: Greg Thelen Cc: Glauber Costa Cc: Alexander Viro Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/drop_caches.c | 14 -------- include/linux/memcontrol.h | 7 ++++ include/linux/mm.h | 5 ++- include/linux/shrinker.h | 6 +++- mm/memcontrol.c | 2 +- mm/memory-failure.c | 11 ++---- mm/vmscan.c | 85 ++++++++++++++++++++++++++++++++++------------ 7 files changed, 80 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 2bc2c87f35e7..5718cb9f7273 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -37,20 +37,6 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) iput(toput_inode); } -static void drop_slab(void) -{ - int nr_objects; - - do { - int nid; - - nr_objects = 0; - for_each_online_node(nid) - nr_objects += shrink_node_slabs(GFP_KERNEL, nid, - 1000, 1000); - } while (nr_objects > 10); -} - int drop_caches_sysctl_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6cfd934c7c9b..54992fe0959f 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -413,6 +413,8 @@ static inline bool memcg_kmem_enabled(void) return static_key_false(&memcg_kmem_enabled_key); } +bool memcg_kmem_is_active(struct mem_cgroup *memcg); + /* * In general, we'll do everything in our power to not incur in any overhead * for non-memcg users for the kmem functions. Not even a function call, if we @@ -542,6 +544,11 @@ static inline bool memcg_kmem_enabled(void) return false; } +static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg) +{ + return false; +} + static inline bool memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) { diff --git a/include/linux/mm.h b/include/linux/mm.h index a4d24f3c5430..af4ff88a11e0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2168,9 +2168,8 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); #endif -unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, - unsigned long nr_scanned, - unsigned long nr_eligible); +void drop_slab(void); +void drop_slab_node(int nid); #ifndef CONFIG_MMU #define randomize_va_space 0 diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index f4aee75f00b1..4fcacd915d45 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -20,6 +20,9 @@ struct shrink_control { /* current node being shrunk (for NUMA aware shrinkers) */ int nid; + + /* current memcg being shrunk (for memcg aware shrinkers) */ + struct mem_cgroup *memcg; }; #define SHRINK_STOP (~0UL) @@ -61,7 +64,8 @@ struct shrinker { #define DEFAULT_SEEKS 2 /* A good number if you don't know better. 
*/ /* Flags */ -#define SHRINKER_NUMA_AWARE (1 << 0) +#define SHRINKER_NUMA_AWARE (1 << 0) +#define SHRINKER_MEMCG_AWARE (1 << 1) extern int register_shrinker(struct shrinker *); extern void unregister_shrinker(struct shrinker *); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 095c1f96fbec..3c2a1a8286ac 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -352,7 +352,7 @@ struct mem_cgroup { }; #ifdef CONFIG_MEMCG_KMEM -static bool memcg_kmem_is_active(struct mem_cgroup *memcg) +bool memcg_kmem_is_active(struct mem_cgroup *memcg) { return memcg->kmemcg_id >= 0; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index feb803bf3443..1a735fad2a13 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -242,15 +242,8 @@ void shake_page(struct page *p, int access) * Only call shrink_node_slabs here (which would also shrink * other caches) if access is not potentially fatal. */ - if (access) { - int nr; - int nid = page_to_nid(p); - do { - nr = shrink_node_slabs(GFP_KERNEL, nid, 1000, 1000); - if (page_count(p) == 1) - break; - } while (nr > 10); - } + if (access) + drop_slab_node(page_to_nid(p)); } EXPORT_SYMBOL_GPL(shake_page); diff --git a/mm/vmscan.c b/mm/vmscan.c index 8e645ee52045..803886b8e353 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -232,10 +232,10 @@ EXPORT_SYMBOL(unregister_shrinker); #define SHRINK_BATCH 128 -static unsigned long shrink_slabs(struct shrink_control *shrinkctl, - struct shrinker *shrinker, - unsigned long nr_scanned, - unsigned long nr_eligible) +static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, + struct shrinker *shrinker, + unsigned long nr_scanned, + unsigned long nr_eligible) { unsigned long freed = 0; unsigned long long delta; @@ -344,9 +344,10 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl, } /** - * shrink_node_slabs - shrink slab caches of a given node + * shrink_slab - shrink slab caches * @gfp_mask: allocation context * @nid: node whose slab caches to target + * @memcg: memory cgroup whose slab caches to target * @nr_scanned: pressure numerator * @nr_eligible: pressure denominator * @@ -355,6 +356,12 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl, * @nid is passed along to shrinkers with SHRINKER_NUMA_AWARE set, * unaware shrinkers will receive a node id of 0 instead. * + * @memcg specifies the memory cgroup to target. If it is not NULL, + * only shrinkers with SHRINKER_MEMCG_AWARE set will be called to scan + * objects from the memory cgroup specified. Otherwise all shrinkers + * are called, and memcg aware shrinkers are supposed to scan the + * global list then. + * * @nr_scanned and @nr_eligible form a ratio that indicate how much of * the available objects should be scanned. Page reclaim for example * passes the number of pages scanned and the number of pages on the @@ -365,13 +372,17 @@ static unsigned long shrink_slabs(struct shrink_control *shrinkctl, * * Returns the number of reclaimed slab objects. 
*/ -unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, - unsigned long nr_scanned, - unsigned long nr_eligible) +static unsigned long shrink_slab(gfp_t gfp_mask, int nid, + struct mem_cgroup *memcg, + unsigned long nr_scanned, + unsigned long nr_eligible) { struct shrinker *shrinker; unsigned long freed = 0; + if (memcg && !memcg_kmem_is_active(memcg)) + return 0; + if (nr_scanned == 0) nr_scanned = SWAP_CLUSTER_MAX; @@ -390,12 +401,16 @@ unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid, struct shrink_control sc = { .gfp_mask = gfp_mask, .nid = nid, + .memcg = memcg, }; + if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE)) + continue; + if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) sc.nid = 0; - freed += shrink_slabs(&sc, shrinker, nr_scanned, nr_eligible); + freed += do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible); } up_read(&shrinker_rwsem); @@ -404,6 +419,29 @@ out: return freed; } +void drop_slab_node(int nid) +{ + unsigned long freed; + + do { + struct mem_cgroup *memcg = NULL; + + freed = 0; + do { + freed += shrink_slab(GFP_KERNEL, nid, memcg, + 1000, 1000); + } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL); + } while (freed > 10); +} + +void drop_slab(void) +{ + int nid; + + for_each_online_node(nid) + drop_slab_node(nid); +} + static inline int is_page_cache_freeable(struct page *page) { /* @@ -2276,6 +2314,7 @@ static inline bool should_continue_reclaim(struct zone *zone, static bool shrink_zone(struct zone *zone, struct scan_control *sc, bool is_classzone) { + struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long nr_reclaimed, nr_scanned; bool reclaimable = false; @@ -2294,6 +2333,7 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc, memcg = mem_cgroup_iter(root, NULL, &reclaim); do { unsigned long lru_pages; + unsigned long scanned; struct lruvec *lruvec; int swappiness; @@ -2305,10 +2345,16 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc, lruvec = mem_cgroup_zone_lruvec(zone, memcg); swappiness = mem_cgroup_swappiness(memcg); + scanned = sc->nr_scanned; shrink_lruvec(lruvec, swappiness, sc, &lru_pages); zone_lru_pages += lru_pages; + if (memcg && is_classzone) + shrink_slab(sc->gfp_mask, zone_to_nid(zone), + memcg, sc->nr_scanned - scanned, + lru_pages); + /* * Direct reclaim and kswapd have to scan all memory * cgroups to fulfill the overall scan target for the @@ -2330,19 +2376,14 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc, * Shrink the slab caches in the same proportion that * the eligible LRU pages were scanned. 
*/ - if (global_reclaim(sc) && is_classzone) { - struct reclaim_state *reclaim_state; - - shrink_node_slabs(sc->gfp_mask, zone_to_nid(zone), - sc->nr_scanned - nr_scanned, - zone_lru_pages); - - reclaim_state = current->reclaim_state; - if (reclaim_state) { - sc->nr_reclaimed += - reclaim_state->reclaimed_slab; - reclaim_state->reclaimed_slab = 0; - } + if (global_reclaim(sc) && is_classzone) + shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL, + sc->nr_scanned - nr_scanned, + zone_lru_pages); + + if (reclaim_state) { + sc->nr_reclaimed += reclaim_state->reclaimed_slab; + reclaim_state->reclaimed_slab = 0; } vmpressure(sc->gfp_mask, sc->target_mem_cgroup, -- cgit v1.2.3 From c0a5b560938a0f2fd2fbf66ddc446c7c2b41383a Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 12 Feb 2015 14:59:07 -0800 Subject: list_lru: organize all list_lrus to list To make list_lru memcg aware, we need all list_lrus to be kept on a list protected by a mutex, so that we could sleep while walking over the list. Therefore after this change list_lru_destroy may sleep. Fortunately, there is only one user that calls it from an atomic context - it's put_super - and we can easily fix it by calling list_lru_destroy before put_super in destroy_locked_super - anyway we no longer need the lrus by that time. Another point that should be noted is that list_lru_destroy is allowed to be called on an uninitialized zeroed-out object, in which case it is a no-op. Before this patch this was guaranteed by kfree, but now we need an explicit check there. Signed-off-by: Vladimir Davydov Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Cc: Greg Thelen Cc: Glauber Costa Cc: Alexander Viro Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 8 ++++++++ include/linux/list_lru.h | 3 +++ mm/list_lru.c | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 45 insertions(+) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index a2b735a42e74..b027849d92d2 100644 --- a/fs/super.c +++ b/fs/super.c @@ -282,6 +282,14 @@ void deactivate_locked_super(struct super_block *s) unregister_shrinker(&s->s_shrink); fs->kill_sb(s); + /* + * Since list_lru_destroy() may sleep, we cannot call it from + * put_super(), where we hold the sb_lock. Therefore we destroy + * the lru lists right now.
+ */ + list_lru_destroy(&s->s_dentry_lru); + list_lru_destroy(&s->s_inode_lru); + put_filesystem(fs); put_super(s); } else { diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 53c1d6b78270..ee9486ac0621 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -31,6 +31,9 @@ struct list_lru_node { struct list_lru { struct list_lru_node *node; +#ifdef CONFIG_MEMCG_KMEM + struct list_head list; +#endif }; void list_lru_destroy(struct list_lru *lru); diff --git a/mm/list_lru.c b/mm/list_lru.c index 07e198c77888..a9021cb3ccde 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -9,6 +9,34 @@ #include #include #include +#include + +#ifdef CONFIG_MEMCG_KMEM +static LIST_HEAD(list_lrus); +static DEFINE_MUTEX(list_lrus_mutex); + +static void list_lru_register(struct list_lru *lru) +{ + mutex_lock(&list_lrus_mutex); + list_add(&lru->list, &list_lrus); + mutex_unlock(&list_lrus_mutex); +} + +static void list_lru_unregister(struct list_lru *lru) +{ + mutex_lock(&list_lrus_mutex); + list_del(&lru->list); + mutex_unlock(&list_lrus_mutex); +} +#else +static void list_lru_register(struct list_lru *lru) +{ +} + +static void list_lru_unregister(struct list_lru *lru) +{ +} +#endif /* CONFIG_MEMCG_KMEM */ bool list_lru_add(struct list_lru *lru, struct list_head *item) { @@ -137,12 +165,18 @@ int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key) INIT_LIST_HEAD(&lru->node[i].list); lru->node[i].nr_items = 0; } + list_lru_register(lru); return 0; } EXPORT_SYMBOL_GPL(list_lru_init_key); void list_lru_destroy(struct list_lru *lru) { + /* Already destroyed or not yet initialized? */ + if (!lru->node) + return; + list_lru_unregister(lru); kfree(lru->node); + lru->node = NULL; } EXPORT_SYMBOL_GPL(list_lru_destroy); -- cgit v1.2.3 From 2acb60a0461fec8a4516686c913a295ce872697f Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 12 Feb 2015 14:59:14 -0800 Subject: fs: make shrinker memcg aware Now, to make any list_lru-based shrinker memcg aware we should only initialize its list_lru as memcg aware. Let's do it for the general FS shrinker (super_block::s_shrink). There are other FS-specific shrinkers that use list_lru for storing objects, such as XFS and GFS2 dquot cache shrinkers, but since they reclaim objects that are shared among different cgroups, there is no point making them memcg aware. It's a big question whether we should account them to memcg at all. 
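A minimal sketch of the same wiring for a hypothetical example_cache (editorial illustration, assuming the cache embeds its own list_lru and shrinker; example_count/example_scan stand in for its callbacks):

static int example_cache_init(struct example_cache *cache)
{
	int err;

	err = list_lru_init_memcg(&cache->lru);	/* memcg aware LRU */
	if (err)
		return err;

	cache->shrinker.count_objects = example_count;
	cache->shrinker.scan_objects = example_scan;
	cache->shrinker.seeks = DEFAULT_SEEKS;
	cache->shrinker.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;

	err = register_shrinker(&cache->shrinker);
	if (err)
		list_lru_destroy(&cache->lru);
	return err;
}

static void example_cache_destroy(struct example_cache *cache)
{
	unregister_shrinker(&cache->shrinker);
	/* may sleep now that list_lrus sit on a mutex-protected list */
	list_lru_destroy(&cache->lru);
}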
Signed-off-by: Vladimir Davydov Cc: Dave Chinner Cc: Johannes Weiner Cc: Michal Hocko Cc: Greg Thelen Cc: Glauber Costa Cc: Alexander Viro Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index b027849d92d2..482b4071f4de 100644 --- a/fs/super.c +++ b/fs/super.c @@ -189,9 +189,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) INIT_HLIST_BL_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); - if (list_lru_init(&s->s_dentry_lru)) + if (list_lru_init_memcg(&s->s_dentry_lru)) goto fail; - if (list_lru_init(&s->s_inode_lru)) + if (list_lru_init_memcg(&s->s_inode_lru)) goto fail; init_rwsem(&s->s_umount); @@ -227,7 +227,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags) s->s_shrink.scan_objects = super_cache_scan; s->s_shrink.count_objects = super_cache_count; s->s_shrink.batch = 1024; - s->s_shrink.flags = SHRINKER_NUMA_AWARE; + s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE; return s; fail: -- cgit v1.2.3 From 49e7e7ff8d551b5b1e2f8da8497b9058cfa25672 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 12 Feb 2015 14:59:17 -0800 Subject: fs: shrinker: always scan at least one object of each type In super_cache_scan() we divide the number of objects of a particular type by the total number of objects in order to distribute pressure among them. As a result, in some corner cases we can get nr_to_scan=0 even if there are some objects to reclaim, e.g. dentries=1, inodes=1, fs_objects=1, nr_to_scan=1/3=0. This is unacceptable for per memcg kmem accounting, because this means that some objects may never get reclaimed after memcg death, preventing it from being freed. This patch therefore ensures that super_cache_scan() will scan at least one object of each type if any. [akpm@linux-foundation.org: add comment] Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Cc: Michal Hocko Cc: Alexander Viro Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index 482b4071f4de..a89d6250bab8 100644 --- a/fs/super.c +++ b/fs/super.c @@ -91,14 +91,17 @@ static unsigned long super_cache_scan(struct shrinker *shrink, /* * prune the dcache first as the icache is pinned by it, then * prune the icache, followed by the filesystem specific caches + * + * Ensure that we always scan at least one object - memcg kmem + * accounting uses this to fully empty the caches. */ - sc->nr_to_scan = dentries; + sc->nr_to_scan = dentries + 1; freed = prune_dcache_sb(sb, sc); - sc->nr_to_scan = inodes; + sc->nr_to_scan = inodes + 1; freed += prune_icache_sb(sb, sc); if (fs_objects) { - sc->nr_to_scan = fs_objects; + sc->nr_to_scan = fs_objects + 1; freed += sb->s_op->free_cached_objects(sb, sc); } -- cgit v1.2.3 From 3f97b163207c67a3b35931494ad3db1de66356f0 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 12 Feb 2015 14:59:35 -0800 Subject: list_lru: add helpers to isolate items Currently, the isolate callback passed to the list_lru_walk family of functions is supposed to just delete an item from the list upon returning LRU_REMOVED or LRU_REMOVED_RETRY, while the nr_items counter is fixed up by __list_lru_walk_one after the callback returns.
Since the callback is allowed to drop the lock after removing an item (it has to return LRU_REMOVED_RETRY then), the nr_items can be less than the actual number of elements on the list even if we check them under the lock. This makes it difficult to move items from one list_lru_one to another, which is required for per-memcg list_lru reparenting - we can't just splice the lists, we have to move entries one by one. This patch therefore introduces helpers that must be used by callback functions to isolate items instead of raw list_del/list_move. These are list_lru_isolate and list_lru_isolate_move. They not only remove the entry from the list, but also fix the nr_items counter, making sure nr_items always reflects the actual number of elements on the list if checked under the appropriate lock. Signed-off-by: Vladimir Davydov Cc: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dcache.c | 21 +++++++++++---------- fs/gfs2/quota.c | 5 +++-- fs/inode.c | 8 ++++---- fs/xfs/xfs_buf.c | 6 ++++-- fs/xfs/xfs_qm.c | 5 +++-- include/linux/list_lru.h | 9 +++++++-- mm/list_lru.c | 19 ++++++++++++++++--- mm/workingset.c | 3 ++- 8 files changed, 50 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 56c5da89f58a..d04be762b216 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -400,19 +400,20 @@ static void d_shrink_add(struct dentry *dentry, struct list_head *list) * LRU lists entirely, while shrink_move moves it to the indicated * private list. */ -static void d_lru_isolate(struct dentry *dentry) +static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry) { D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags &= ~DCACHE_LRU_LIST; this_cpu_dec(nr_dentry_unused); - list_del_init(&dentry->d_lru); + list_lru_isolate(lru, &dentry->d_lru); } -static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list) +static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry, + struct list_head *list) { D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); dentry->d_flags |= DCACHE_SHRINK_LIST; - list_move_tail(&dentry->d_lru, list); + list_lru_isolate_move(lru, &dentry->d_lru, list); } /* @@ -869,8 +870,8 @@ static void shrink_dentry_list(struct list_head *list) } } -static enum lru_status -dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) +static enum lru_status dentry_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct list_head *freeable = arg; struct dentry *dentry = container_of(item, struct dentry, d_lru); @@ -890,7 +891,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) * another pass through the LRU. 
*/ if (dentry->d_lockref.count) { - d_lru_isolate(dentry); + d_lru_isolate(lru, dentry); spin_unlock(&dentry->d_lock); return LRU_REMOVED; } @@ -921,7 +922,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) return LRU_ROTATE; } - d_lru_shrink_move(dentry, freeable); + d_lru_shrink_move(lru, dentry, freeable); spin_unlock(&dentry->d_lock); return LRU_REMOVED; @@ -951,7 +952,7 @@ long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc) } static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, - spinlock_t *lru_lock, void *arg) + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct list_head *freeable = arg; struct dentry *dentry = container_of(item, struct dentry, d_lru); @@ -964,7 +965,7 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, if (!spin_trylock(&dentry->d_lock)) return LRU_SKIP; - d_lru_shrink_move(dentry, freeable); + d_lru_shrink_move(lru, dentry, freeable); spin_unlock(&dentry->d_lock); return LRU_REMOVED; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index c15d6b216d0b..3aa17d4d1cfc 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -145,7 +145,8 @@ static void gfs2_qd_dispose(struct list_head *list) } -static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg) +static enum lru_status gfs2_qd_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct list_head *dispose = arg; struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru); @@ -155,7 +156,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, if (qd->qd_lockref.count == 0) { lockref_mark_dead(&qd->qd_lockref); - list_move(&qd->qd_lru, dispose); + list_lru_isolate_move(lru, &qd->qd_lru, dispose); } spin_unlock(&qd->qd_lockref.lock); diff --git a/fs/inode.c b/fs/inode.c index 524a32c2b0c6..86c612b92c6f 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -685,8 +685,8 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty) * LRU does not have strict ordering. Hence we don't want to reclaim inodes * with this flag set because they are the inodes that are out of order. 
*/ -static enum lru_status -inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) +static enum lru_status inode_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { struct list_head *freeable = arg; struct inode *inode = container_of(item, struct inode, i_lru); @@ -704,7 +704,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) */ if (atomic_read(&inode->i_count) || (inode->i_state & ~I_REFERENCED)) { - list_del_init(&inode->i_lru); + list_lru_isolate(lru, &inode->i_lru); spin_unlock(&inode->i_lock); this_cpu_dec(nr_unused); return LRU_REMOVED; @@ -738,7 +738,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) WARN_ON(inode->i_state & I_NEW); inode->i_state |= I_FREEING; - list_move(&inode->i_lru, freeable); + list_lru_isolate_move(lru, &inode->i_lru, freeable); spin_unlock(&inode->i_lock); this_cpu_dec(nr_unused); diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 15c9d224c721..1790b00bea7a 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1488,6 +1488,7 @@ xfs_buf_iomove( static enum lru_status xfs_buftarg_wait_rele( struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) @@ -1509,7 +1510,7 @@ xfs_buftarg_wait_rele( */ atomic_set(&bp->b_lru_ref, 0); bp->b_state |= XFS_BSTATE_DISPOSE; - list_move(item, dispose); + list_lru_isolate_move(lru, item, dispose); spin_unlock(&bp->b_lock); return LRU_REMOVED; } @@ -1546,6 +1547,7 @@ xfs_wait_buftarg( static enum lru_status xfs_buftarg_isolate( struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { @@ -1569,7 +1571,7 @@ xfs_buftarg_isolate( } bp->b_state |= XFS_BSTATE_DISPOSE; - list_move(item, dispose); + list_lru_isolate_move(lru, item, dispose); spin_unlock(&bp->b_lock); return LRU_REMOVED; } diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 4f4b1274e144..53cc2aaf8d2b 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -430,6 +430,7 @@ struct xfs_qm_isolate { static enum lru_status xfs_qm_dquot_isolate( struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) __releases(lru_lock) __acquires(lru_lock) @@ -450,7 +451,7 @@ xfs_qm_dquot_isolate( XFS_STATS_INC(xs_qm_dqwants); trace_xfs_dqreclaim_want(dqp); - list_del_init(&dqp->q_lru); + list_lru_isolate(lru, &dqp->q_lru); XFS_STATS_DEC(xs_qm_dquot_unused); return LRU_REMOVED; } @@ -494,7 +495,7 @@ xfs_qm_dquot_isolate( xfs_dqunlock(dqp); ASSERT(dqp->q_nrefs == 0); - list_move_tail(&dqp->q_lru, &isol->dispose); + list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose); XFS_STATS_DEC(xs_qm_dquot_unused); trace_xfs_dqreclaim_done(dqp); XFS_STATS_INC(xs_qm_dqreclaims); diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 305b598abac2..7edf9c9ab9eb 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -125,8 +125,13 @@ static inline unsigned long list_lru_count(struct list_lru *lru) return count; } -typedef enum lru_status -(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg); +void list_lru_isolate(struct list_lru_one *list, struct list_head *item); +void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, + struct list_head *head); + +typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, + struct list_lru_one *list, spinlock_t *lock, void *cb_arg); + /** * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items. * @lru: the lru pointer. 
diff --git a/mm/list_lru.c b/mm/list_lru.c index 79aee70c3b9d..8d9d168c6c38 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c @@ -132,6 +132,21 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item) } EXPORT_SYMBOL_GPL(list_lru_del); +void list_lru_isolate(struct list_lru_one *list, struct list_head *item) +{ + list_del_init(item); + list->nr_items--; +} +EXPORT_SYMBOL_GPL(list_lru_isolate); + +void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, + struct list_head *head) +{ + list_move(item, head); + list->nr_items--; +} +EXPORT_SYMBOL_GPL(list_lru_isolate_move); + static unsigned long __list_lru_count_one(struct list_lru *lru, int nid, int memcg_idx) { @@ -194,13 +209,11 @@ restart: break; --*nr_to_walk; - ret = isolate(item, &nlru->lock, cb_arg); + ret = isolate(item, l, &nlru->lock, cb_arg); switch (ret) { case LRU_REMOVED_RETRY: assert_spin_locked(&nlru->lock); case LRU_REMOVED: - l->nr_items--; - WARN_ON_ONCE(l->nr_items < 0); isolated++; /* * If the lru lock has been dropped, our list diff --git a/mm/workingset.c b/mm/workingset.c index d4fa7fb10a52..aa017133744b 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -302,6 +302,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, } static enum lru_status shadow_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) { @@ -332,7 +333,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, goto out; } - list_del_init(item); + list_lru_isolate(lru, item); spin_unlock(lru_lock); /* -- cgit v1.2.3 From 695f055936938c674473ea071ca7359a863551e7 Mon Sep 17 00:00:00 2001 From: Petr Cermak Date: Thu, 12 Feb 2015 15:01:00 -0800 Subject: fs/proc/task_mmu.c: add user-space support for resetting mm->hiwater_rss (peak RSS) Peak resident size of a process can be reset back to the process's current rss value by writing "5" to /proc/pid/clear_refs. The driving use-case for this would be getting the peak RSS value, which can be retrieved from the VmHWM field in /proc/pid/status, per benchmark iteration or test scenario. [akpm@linux-foundation.org: clarify behaviour in documentation] Signed-off-by: Petr Cermak Cc: Bjorn Helgaas Cc: Primiano Tucci Cc: Petr Cermak Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 4 ++++ fs/proc/task_mmu.c | 14 ++++++++++++++ include/linux/mm.h | 5 +++++ 3 files changed, 23 insertions(+) (limited to 'fs') diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index cf8fc2f0b34b..0b3448dba9ec 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -489,6 +489,10 @@ To clear the bits for the file mapped pages associated with the process To clear the soft-dirty bit > echo 4 > /proc/PID/clear_refs +To reset the peak resident set size ("high water mark") to the process's +current value: + > echo 5 > /proc/PID/clear_refs + Any other value written to /proc/PID/clear_refs will have no effect. 
The /proc/pid/pagemap gives the PFN, which can be used to find the pageflags diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 0e36c1e49fe3..1359a911d194 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -732,6 +732,7 @@ enum clear_refs_types { CLEAR_REFS_ANON, CLEAR_REFS_MAPPED, CLEAR_REFS_SOFT_DIRTY, + CLEAR_REFS_MM_HIWATER_RSS, CLEAR_REFS_LAST, }; @@ -907,6 +908,18 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, .mm = mm, .private = &cp, }; + + if (type == CLEAR_REFS_MM_HIWATER_RSS) { + /* + * Writing 5 to /proc/pid/clear_refs resets the peak + * resident set size to this mm's current rss value. + */ + down_write(&mm->mmap_sem); + reset_mm_hiwater_rss(mm); + up_write(&mm->mmap_sem); + goto out_mm; + } + down_read(&mm->mmap_sem); if (type == CLEAR_REFS_SOFT_DIRTY) { for (vma = mm->mmap; vma; vma = vma->vm_next) { @@ -928,6 +941,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, mmu_notifier_invalidate_range_end(mm, 0, -1); flush_tlb_mm(mm); up_read(&mm->mmap_sem); +out_mm: mmput(mm); } put_task_struct(task); diff --git a/include/linux/mm.h b/include/linux/mm.h index bd52e2f14027..9bee7ec0c31f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1408,6 +1408,11 @@ static inline void update_hiwater_vm(struct mm_struct *mm) mm->hiwater_vm = mm->total_vm; } +static inline void reset_mm_hiwater_rss(struct mm_struct *mm) +{ + mm->hiwater_rss = get_mm_rss(mm); +} + static inline void setmax_mm_hiwater_rss(unsigned long *maxrss, struct mm_struct *mm) { -- cgit v1.2.3 From 6bee55f94f971ccf87c8c95c10fa1047ade0f122 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Thu, 12 Feb 2015 15:01:03 -0800 Subject: fs: proc: use PDE() to get proc_dir_entry Use the PDE() helper to get proc_dir_entry instead of coding it directly. Signed-off-by: Alexander Kuleshov Acked-by: Nicolas Dichtel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/generic.c | 2 +- fs/proc/inode.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 7fea13229f33..de14e46fd807 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -122,7 +122,7 @@ static int proc_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode = dentry->d_inode; - struct proc_dir_entry *de = PROC_I(inode)->pde; + struct proc_dir_entry *de = PDE(inode); if (de && de->nlink) set_nlink(inode, de->nlink); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 8420a2f80811..13a50a32652d 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -40,7 +40,7 @@ static void proc_evict_inode(struct inode *inode) put_pid(PROC_I(inode)->pid); /* Let go of any associated proc directory entry */ - de = PROC_I(inode)->pde; + de = PDE(inode); if (de) pde_put(de); head = PROC_I(inode)->sysctl; -- cgit v1.2.3 From 198d1597cc5a12d04af18b69338a5b1d66ee7020 Mon Sep 17 00:00:00 2001 From: Rafael Aquini Date: Thu, 12 Feb 2015 15:01:08 -0800 Subject: fs: proc: task_mmu: show page size in /proc//numa_maps The output of /proc/$pid/numa_maps is in terms of number of pages like anon=22 or dirty=54. Here's some output: 7f4680000000 default file=/hugetlb/bigfile anon=50 dirty=50 N0=50 7f7659600000 default file=/anon_hugepage\040(deleted) anon=50 dirty=50 N0=50 7fff8d425000 default stack anon=50 dirty=50 N0=50 Looks like we have a stack and a couple of anonymous hugetlbfs areas page which both use the same amount of memory. They don't. 
The 'bigfile' uses 1GB pages and takes up ~50GB of space. The anon_hugepage uses 2MB pages and takes up ~100MB of space while the stack uses normal 4k pages. You can go over to smaps to figure out what the page size _really_ is with KernelPageSize or MMUPageSize. But, I think this is a pretty nasty and counterintuitive interface as it stands. This patch introduces 'kernelpagesize_kB' line element to /proc//numa_maps report file in order to help identifying the size of pages that are backing memory areas mapped by a given task. This is specially useful to help differentiating between HUGE and GIGANTIC page backed VMAs. This patch is based on Dave Hansen's proposal and reviewer's follow-ups taken from the following dicussion threads: * https://lkml.org/lkml/2011/9/21/454 * https://lkml.org/lkml/2014/12/20/66 Signed-off-by: Rafael Aquini Cc: Johannes Weiner Cc: Dave Hansen Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/proc.txt | 30 +++++++++++++++--------------- fs/proc/task_mmu.c | 2 ++ 2 files changed, 17 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index ff3eb2380831..a07ba61662ed 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -508,22 +508,22 @@ summarized separated by blank spaces, one mapping per each file line: address policy mapping details -00400000 default file=/usr/local/bin/app kernelpagesize_kB=4 mapped=1 active=0 N3=1 -00600000 default file=/usr/local/bin/app kernelpagesize_kB=4 anon=1 dirty=1 N3=1 -3206000000 default file=/lib64/ld-2.12.so kernelpagesize_kB=4 mapped=26 mapmax=6 N0=24 N3=2 -320621f000 default file=/lib64/ld-2.12.so kernelpagesize_kB=4 anon=1 dirty=1 N3=1 -3206220000 default file=/lib64/ld-2.12.so kernelpagesize_kB=4 anon=1 dirty=1 N3=1 -3206221000 default kernelpagesize_kB=4 anon=1 dirty=1 N3=1 -3206800000 default file=/lib64/libc-2.12.so kernelpagesize_kB=4 mapped=59 mapmax=21 active=55 N0=41 N3=18 +00400000 default file=/usr/local/bin/app mapped=1 active=0 N3=1 kernelpagesize_kB=4 +00600000 default file=/usr/local/bin/app anon=1 dirty=1 N3=1 kernelpagesize_kB=4 +3206000000 default file=/lib64/ld-2.12.so mapped=26 mapmax=6 N0=24 N3=2 kernelpagesize_kB=4 +320621f000 default file=/lib64/ld-2.12.so anon=1 dirty=1 N3=1 kernelpagesize_kB=4 +3206220000 default file=/lib64/ld-2.12.so anon=1 dirty=1 N3=1 kernelpagesize_kB=4 +3206221000 default anon=1 dirty=1 N3=1 kernelpagesize_kB=4 +3206800000 default file=/lib64/libc-2.12.so mapped=59 mapmax=21 active=55 N0=41 N3=18 kernelpagesize_kB=4 320698b000 default file=/lib64/libc-2.12.so -3206b8a000 default file=/lib64/libc-2.12.so kernelpagesize_kB=4 anon=2 dirty=2 N3=2 -3206b8e000 default file=/lib64/libc-2.12.so kernelpagesize_kB=4 anon=1 dirty=1 N3=1 -3206b8f000 default kernelpagesize_kB=4 anon=3 dirty=3 active=1 N3=3 -7f4dc10a2000 default kernelpagesize_kB=4 anon=3 dirty=3 N3=3 -7f4dc10b4000 default kernelpagesize_kB=4 anon=2 dirty=2 active=1 N3=2 -7f4dc1200000 default file=/anon_hugepage\040(deleted) huge kernelpagesize_kB=2048 anon=1 dirty=1 N3=1 -7fff335f0000 default stack kernelpagesize_kB=4 anon=3 dirty=3 N3=3 -7fff3369d000 default kernelpagesize_kB=4 mapped=1 mapmax=35 active=0 N3=1 +3206b8a000 default file=/lib64/libc-2.12.so anon=2 dirty=2 N3=2 kernelpagesize_kB=4 +3206b8e000 default file=/lib64/libc-2.12.so anon=1 dirty=1 N3=1 kernelpagesize_kB=4 +3206b8f000 default anon=3 dirty=3 active=1 N3=3 kernelpagesize_kB=4 
+7f4dc10a2000 default anon=3 dirty=3 N3=3 kernelpagesize_kB=4 +7f4dc10b4000 default anon=2 dirty=2 active=1 N3=2 kernelpagesize_kB=4 +7f4dc1200000 default file=/anon_hugepage\040(deleted) huge anon=1 dirty=1 N3=1 kernelpagesize_kB=2048 +7fff335f0000 default stack anon=3 dirty=3 N3=3 kernelpagesize_kB=4 +7fff3369d000 default mapped=1 mapmax=35 active=0 N3=1 kernelpagesize_kB=4 Where: "address" is the starting address for the mapping; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 1359a911d194..956b75d61809 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1557,6 +1557,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) for_each_node_state(nid, N_MEMORY) if (md->node[nid]) seq_printf(m, " N%d=%lu", nid, md->node[nid]); + + seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); out: seq_putc(m, '\n'); m_cache_vma(m, vma); -- cgit v1.2.3 From edc924e023ae2022fc29f1246e115c610c9abf38 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 12 Feb 2015 15:01:11 -0800 Subject: fs/proc/array.c: convert to use string_escape_str() Instead of custom approach let's use string_escape_str() to escape a given string (task_name in this case). Signed-off-by: Andy Shevchenko Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/array.c | 34 +++++++--------------------------- 1 file changed, 7 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/proc/array.c b/fs/proc/array.c index bd117d065b82..a3ccf4c4ce70 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include @@ -89,39 +90,18 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) { - int i; - char *buf, *end; - char *name; + char *buf; char tcomm[sizeof(p->comm)]; get_task_comm(tcomm, p); seq_puts(m, "Name:\t"); - end = m->buf + m->size; buf = m->buf + m->count; - name = tcomm; - i = sizeof(tcomm); - while (i && (buf < end)) { - unsigned char c = *name; - name++; - i--; - *buf = c; - if (!c) - break; - if (c == '\\') { - buf++; - if (buf < end) - *buf++ = c; - continue; - } - if (c == '\n') { - *buf++ = '\\'; - if (buf < end) - *buf++ = 'n'; - continue; - } - buf++; - } + + /* Ignore error for now */ + string_escape_str(tcomm, &buf, m->size - m->count, + ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\"); + m->count = buf - m->buf; seq_putc(m, '\n'); } -- cgit v1.2.3 From f56141e3e2d9aabf7e6b89680ab572c2cdbb2a24 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 12 Feb 2015 15:01:14 -0800 Subject: all arches, signal: move restart_block to struct task_struct If an attacker can cause a controlled kernel stack overflow, overwriting the restart block is a very juicy exploit target. This is because the restart_block is held in the same memory allocation as the kernel stack. Moving the restart block to struct task_struct prevents this exploit by making the restart_block harder to locate. Note that there are other fields in thread_info that are also easy targets, at least on some architectures. It's also a decent simplification, since the restart code is more or less identical on all architectures. [james.hogan@imgtec.com: metag: align thread_info::supervisor_stack] Signed-off-by: Andy Lutomirski Cc: Thomas Gleixner Cc: Al Viro Cc: "H. 
Peter Anvin" Cc: Ingo Molnar Cc: Kees Cook Cc: David Miller Acked-by: Richard Weinberger Cc: Richard Henderson Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Vineet Gupta Cc: Russell King Cc: Catalin Marinas Cc: Will Deacon Cc: Haavard Skinnemoen Cc: Hans-Christian Egtvedt Cc: Steven Miao Cc: Mark Salter Cc: Aurelien Jacquiot Cc: Mikael Starvik Cc: Jesper Nilsson Cc: David Howells Cc: Richard Kuo Cc: "Luck, Tony" Cc: Geert Uytterhoeven Cc: Michal Simek Cc: Ralf Baechle Cc: Jonas Bonn Cc: "James E.J. Bottomley" Cc: Helge Deller Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Acked-by: Michael Ellerman (powerpc) Tested-by: Michael Ellerman (powerpc) Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Chen Liqin Cc: Lennox Wu Cc: Chris Metcalf Cc: Guan Xuetao Cc: Chris Zankel Cc: Max Filippov Cc: Oleg Nesterov Cc: Guenter Roeck Signed-off-by: James Hogan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/asm/thread_info.h | 5 ----- arch/alpha/kernel/signal.c | 2 +- arch/arc/include/asm/thread_info.h | 4 ---- arch/arc/kernel/signal.c | 2 +- arch/arm/include/asm/thread_info.h | 4 ---- arch/arm/kernel/signal.c | 4 ++-- arch/arm64/include/asm/thread_info.h | 4 ---- arch/arm64/kernel/signal.c | 2 +- arch/arm64/kernel/signal32.c | 4 ++-- arch/avr32/include/asm/thread_info.h | 4 ---- arch/avr32/kernel/asm-offsets.c | 1 - arch/avr32/kernel/signal.c | 2 +- arch/blackfin/include/asm/thread_info.h | 4 ---- arch/blackfin/kernel/signal.c | 2 +- arch/c6x/include/asm/thread_info.h | 4 ---- arch/c6x/kernel/signal.c | 2 +- arch/cris/arch-v10/kernel/signal.c | 2 +- arch/cris/arch-v32/kernel/signal.c | 2 +- arch/cris/include/asm/thread_info.h | 4 ---- arch/frv/include/asm/thread_info.h | 4 ---- arch/frv/kernel/asm-offsets.c | 1 - arch/frv/kernel/signal.c | 2 +- arch/hexagon/include/asm/thread_info.h | 4 ---- arch/hexagon/kernel/signal.c | 2 +- arch/ia64/include/asm/thread_info.h | 4 ---- arch/ia64/kernel/signal.c | 2 +- arch/m32r/include/asm/thread_info.h | 5 ----- arch/m32r/kernel/signal.c | 2 +- arch/m68k/include/asm/thread_info.h | 4 ---- arch/m68k/kernel/signal.c | 4 ++-- arch/metag/include/asm/thread_info.h | 6 +----- arch/metag/kernel/signal.c | 2 +- arch/microblaze/include/asm/thread_info.h | 4 ---- arch/microblaze/kernel/signal.c | 2 +- arch/mips/include/asm/thread_info.h | 4 ---- arch/mips/kernel/asm-offsets.c | 1 - arch/mips/kernel/signal.c | 2 +- arch/mips/kernel/signal32.c | 2 +- arch/mn10300/include/asm/thread_info.h | 4 ---- arch/mn10300/kernel/asm-offsets.c | 1 - arch/mn10300/kernel/signal.c | 2 +- arch/openrisc/include/asm/thread_info.h | 4 ---- arch/openrisc/kernel/signal.c | 2 +- arch/parisc/include/asm/thread_info.h | 4 ---- arch/parisc/kernel/signal.c | 2 +- arch/powerpc/include/asm/thread_info.h | 4 ---- arch/powerpc/kernel/signal_32.c | 4 ++-- arch/powerpc/kernel/signal_64.c | 2 +- arch/s390/include/asm/thread_info.h | 4 ---- arch/s390/kernel/compat_signal.c | 2 +- arch/s390/kernel/signal.c | 2 +- arch/score/include/asm/thread_info.h | 4 ---- arch/score/kernel/asm-offsets.c | 1 - arch/score/kernel/signal.c | 2 +- arch/sh/include/asm/thread_info.h | 4 ---- arch/sh/kernel/asm-offsets.c | 1 - arch/sh/kernel/signal_32.c | 4 ++-- arch/sh/kernel/signal_64.c | 4 ++-- arch/sparc/include/asm/thread_info_32.h | 6 ------ arch/sparc/include/asm/thread_info_64.h | 12 +++--------- arch/sparc/kernel/signal32.c | 4 ++-- arch/sparc/kernel/signal_32.c | 2 +- arch/sparc/kernel/signal_64.c | 2 +- arch/sparc/kernel/traps_64.c | 2 -- arch/tile/include/asm/thread_info.h | 4 ---- 
arch/tile/kernel/signal.c | 2 +- arch/um/include/asm/thread_info.h | 4 ---- arch/unicore32/include/asm/thread_info.h | 4 ---- arch/unicore32/kernel/signal.c | 2 +- arch/x86/ia32/ia32_signal.c | 2 +- arch/x86/include/asm/thread_info.h | 4 ---- arch/x86/kernel/signal.c | 2 +- arch/x86/um/signal.c | 2 +- arch/xtensa/include/asm/thread_info.h | 5 ----- arch/xtensa/kernel/signal.c | 2 +- fs/select.c | 2 +- include/linux/init_task.h | 3 +++ include/linux/sched.h | 2 ++ kernel/compat.c | 5 ++--- kernel/futex.c | 2 +- kernel/signal.c | 2 +- kernel/time/alarmtimer.c | 2 +- kernel/time/hrtimer.c | 2 +- kernel/time/posix-cpu-timers.c | 3 +-- 84 files changed, 62 insertions(+), 194 deletions(-) (limited to 'fs') diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 48bbea6898b3..d5b98ab514bb 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -27,8 +27,6 @@ struct thread_info { int bpt_nsaved; unsigned long bpt_addr[2]; /* breakpoint handling */ unsigned int bpt_insn[2]; - - struct restart_block restart_block; }; /* @@ -40,9 +38,6 @@ struct thread_info { .exec_domain = &default_exec_domain, \ .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c index 6cec2881acbf..8dbfb15f1745 100644 --- a/arch/alpha/kernel/signal.c +++ b/arch/alpha/kernel/signal.c @@ -150,7 +150,7 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) struct switch_stack *sw = (struct switch_stack *)regs - 1; long i, err = __get_user(regs->pc, &sc->sc_pc); - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; sw->r26 = (unsigned long) ret_from_sys_call; diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 02bc5ec0fb2e..1163a1838ac1 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -46,7 +46,6 @@ struct thread_info { struct exec_domain *exec_domain;/* execution domain */ __u32 cpu; /* current CPU */ unsigned long thr_ptr; /* TLS ptr */ - struct restart_block restart_block; }; /* @@ -62,9 +61,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index cb3142a2d40b..114234e83caa 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -104,7 +104,7 @@ SYSCALL_DEFINE0(rt_sigreturn) struct pt_regs *regs = current_pt_regs(); /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* Since we stacked the signal on a word boundary, * then 'sp' should be word aligned here. 
If it's diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index d890e41f5520..72812a1f3d1c 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -68,7 +68,6 @@ struct thread_info { #ifdef CONFIG_ARM_THUMBEE unsigned long thumbee_state; /* ThumbEE Handler Base register */ #endif - struct restart_block restart_block; }; #define INIT_THREAD_INFO(tsk) \ @@ -81,9 +80,6 @@ struct thread_info { .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT), \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 8aa6f1b87c9e..023ac905e4c3 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -191,7 +191,7 @@ asmlinkage int sys_sigreturn(struct pt_regs *regs) struct sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, @@ -221,7 +221,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 459bf8e53208..702e1e6a0d80 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -48,7 +48,6 @@ struct thread_info { mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ struct exec_domain *exec_domain; /* execution domain */ - struct restart_block restart_block; int preempt_count; /* 0 => preemptable, <0 => bug */ int cpu; /* cpu */ }; @@ -60,9 +59,6 @@ struct thread_info { .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 6fa792137eda..660ccf9f7524 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -131,7 +131,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 128-bit boundary, then 'sp' should diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index e299de396e9b..c20a300e2213 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -347,7 +347,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs) struct compat_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, @@ -381,7 +381,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs) struct compat_rt_sigframe __user 
*frame; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a 64-bit boundary, diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h index a978f3fe7c25..d56afa99a514 100644 --- a/arch/avr32/include/asm/thread_info.h +++ b/arch/avr32/include/asm/thread_info.h @@ -30,7 +30,6 @@ struct thread_info { saved by debug handler when setting up trampoline */ - struct restart_block restart_block; __u8 supervisor_stack[0]; }; @@ -41,9 +40,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall \ - } \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c index d6a8193a1d2f..e41c84516e5d 100644 --- a/arch/avr32/kernel/asm-offsets.c +++ b/arch/avr32/kernel/asm-offsets.c @@ -18,7 +18,6 @@ void foo(void) OFFSET(TI_preempt_count, thread_info, preempt_count); OFFSET(TI_rar_saved, thread_info, rar_saved); OFFSET(TI_rsr_saved, thread_info, rsr_saved); - OFFSET(TI_restart_block, thread_info, restart_block); BLANK(); OFFSET(TSK_active_mm, task_struct, active_mm); BLANK(); diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c index d309fbcc3bd6..8f1c63b9b983 100644 --- a/arch/avr32/kernel/signal.c +++ b/arch/avr32/kernel/signal.c @@ -69,7 +69,7 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs) sigset_t set; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; frame = (struct rt_sigframe __user *)regs->sp; pr_debug("SIG return: frame = %p\n", frame); diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index 55f473bdad36..57c3a8bd583d 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h @@ -42,7 +42,6 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* address limit */ - struct restart_block restart_block; #ifndef CONFIG_SMP struct l1_scratch_task_info l1_task_info; #endif @@ -58,9 +57,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index ef275571d885..f2a8b5493bd3 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c @@ -44,7 +44,7 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p int err = 0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; #define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h index d4e9ef87076d..584e253f3217 100644 --- a/arch/c6x/include/asm/thread_info.h +++ b/arch/c6x/include/asm/thread_info.h @@ -45,7 +45,6 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 = preemptable, <0 = BUG */ mm_segment_t addr_limit; /* 
thread address space */ - struct restart_block restart_block; }; /* @@ -61,9 +60,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c index fe68226f6c4d..3c4bb5a5c382 100644 --- a/arch/c6x/kernel/signal.c +++ b/arch/c6x/kernel/signal.c @@ -68,7 +68,7 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs) sigset_t set; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Since we stacked the signal on a dword boundary, diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c index 9b32d338838b..74d7ba35120d 100644 --- a/arch/cris/arch-v10/kernel/signal.c +++ b/arch/cris/arch-v10/kernel/signal.c @@ -67,7 +67,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) unsigned long old_usp; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* restore the regs from &sc->regs (same as sc, since regs is first) * (sc is already checked for VERIFY_READ since the sigframe was diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c index 78ce3b1c9bcb..870e3e069318 100644 --- a/arch/cris/arch-v32/kernel/signal.c +++ b/arch/cris/arch-v32/kernel/signal.c @@ -59,7 +59,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) unsigned long old_usp; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Restore the registers from &sc->regs. 
sc is already checked diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h index 55dede18c032..7286db5ed90e 100644 --- a/arch/cris/include/asm/thread_info.h +++ b/arch/cris/include/asm/thread_info.h @@ -38,7 +38,6 @@ struct thread_info { 0-0xBFFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ - struct restart_block restart_block; __u8 supervisor_stack[0]; }; @@ -56,9 +55,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h index af29e17c0181..6b917f1c2955 100644 --- a/arch/frv/include/asm/thread_info.h +++ b/arch/frv/include/asm/thread_info.h @@ -41,7 +41,6 @@ struct thread_info { * 0-0xBFFFFFFF for user-thead * 0-0xFFFFFFFF for kernel-thread */ - struct restart_block restart_block; __u8 supervisor_stack[0]; }; @@ -65,9 +64,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c index 9de96843a278..446e89d500cc 100644 --- a/arch/frv/kernel/asm-offsets.c +++ b/arch/frv/kernel/asm-offsets.c @@ -40,7 +40,6 @@ void foo(void) OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PREEMPT_COUNT, thread_info, preempt_count); OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); - OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); BLANK(); /* offsets into register file storage */ diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c index dc3d59de0870..336713ab4745 100644 --- a/arch/frv/kernel/signal.c +++ b/arch/frv/kernel/signal.c @@ -62,7 +62,7 @@ static int restore_sigcontext(struct sigcontext __user *sc, int *_gr8) unsigned long tbr, psr; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; tbr = user->i.tbr; psr = user->i.psr; diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h index a59dad3b3695..bacd3d6030c5 100644 --- a/arch/hexagon/include/asm/thread_info.h +++ b/arch/hexagon/include/asm/thread_info.h @@ -56,7 +56,6 @@ struct thread_info { * used for syscalls somehow; * seems to have a function pointer and four arguments */ - struct restart_block restart_block; /* Points to the current pt_regs frame */ struct pt_regs *regs; /* @@ -83,9 +82,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = 1, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ .sp = 0, \ .regs = NULL, \ } diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index eadd70e47e7e..b039a624c170 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c @@ -239,7 +239,7 @@ asmlinkage int sys_rt_sigreturn(void) sigset_t blocked; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; frame = (struct rt_sigframe __user *)pt_psp(regs); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index 5b17418b4223..c16f21a068ff 100644 --- 
a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -27,7 +27,6 @@ struct thread_info { __u32 status; /* Thread synchronous flags */ mm_segment_t addr_limit; /* user-level address space limit */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ - struct restart_block restart_block; #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE __u64 ac_stamp; __u64 ac_leave; @@ -46,9 +45,6 @@ struct thread_info { .cpu = 0, \ .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #ifndef ASM_OFFSETS_C diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 6d92170be457..b3a124da71e5 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -46,7 +46,7 @@ restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) long err; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* restore scratch that always needs gets updated during signal delivery: */ err = __get_user(flags, &sc->sc_flags); diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h index 00171703402f..32422d0211c3 100644 --- a/arch/m32r/include/asm/thread_info.h +++ b/arch/m32r/include/asm/thread_info.h @@ -34,7 +34,6 @@ struct thread_info { 0-0xBFFFFFFF for user-thread 0-0xFFFFFFFF for kernel-thread */ - struct restart_block restart_block; __u8 supervisor_stack[0]; }; @@ -49,7 +48,6 @@ struct thread_info { #define TI_CPU 0x00000010 #define TI_PRE_COUNT 0x00000014 #define TI_ADDR_LIMIT 0x00000018 -#define TI_RESTART_BLOCK 0x000001C #endif @@ -68,9 +66,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c index 95408b8f130a..7736c6660a15 100644 --- a/arch/m32r/kernel/signal.c +++ b/arch/m32r/kernel/signal.c @@ -48,7 +48,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) COPY(r4); diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 21a4784ca5a1..c54256e69e64 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h @@ -31,7 +31,6 @@ struct thread_info { int preempt_count; /* 0 => preemptable, <0 => BUG */ __u32 cpu; /* should always be 0 on m68k */ unsigned long tp_value; /* thread pointer */ - struct restart_block restart_block; }; #endif /* __ASSEMBLY__ */ @@ -41,9 +40,6 @@ struct thread_info { .exec_domain = &default_exec_domain, \ .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_stack (init_thread_union.stack) diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 967a8b7e1527..d7179281e74a 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -655,7 +655,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u int err = 0; /* Always make any pending 
restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* get previous context */ if (copy_from_user(&context, usc, sizeof(context))) @@ -693,7 +693,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw, int err; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; err = __get_user(temp, &uc->uc_mcontext.version); if (temp != MCONTEXT_VERSION) diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h index 47711336119e..afb3ca4776d1 100644 --- a/arch/metag/include/asm/thread_info.h +++ b/arch/metag/include/asm/thread_info.h @@ -35,9 +35,8 @@ struct thread_info { int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space */ - struct restart_block restart_block; - u8 supervisor_stack[0]; + u8 supervisor_stack[0] __aligned(8); }; #else /* !__ASSEMBLY__ */ @@ -74,9 +73,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/metag/kernel/signal.c b/arch/metag/kernel/signal.c index 0d100d5c1407..ce49d429c74a 100644 --- a/arch/metag/kernel/signal.c +++ b/arch/metag/kernel/signal.c @@ -48,7 +48,7 @@ static int restore_sigcontext(struct pt_regs *regs, int err; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; err = metag_gp_regs_copyin(regs, 0, sizeof(struct user_gp_regs), NULL, &sc->regs); diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index 8c9d36591a03..b699fbd7de4a 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -71,7 +71,6 @@ struct thread_info { __u32 cpu; /* current CPU */ __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/ mm_segment_t addr_limit; /* thread address space */ - struct restart_block restart_block; struct cpu_context cpu_context; }; @@ -87,9 +86,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c index 235706055b7f..a1cbaf90e2ea 100644 --- a/arch/microblaze/kernel/signal.c +++ b/arch/microblaze/kernel/signal.c @@ -89,7 +89,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs) int rval; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index e4440f92b366..9e1295f874f0 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -34,7 +34,6 @@ struct thread_info { * 0x7fffffff for user-thead * 0xffffffff for kernel-thread */ - struct restart_block restart_block; struct pt_regs *regs; long syscall; /* syscall number */ }; @@ -50,9 +49,6 @@ struct 
thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index b1d84bd4efb3..3b2dfdb4865f 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -98,7 +98,6 @@ void output_thread_info_defines(void) OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); - OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); OFFSET(TI_REGS, thread_info, regs); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 545bf11bd2ed..6a28c792d862 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -243,7 +243,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) int i; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; err |= __get_user(regs->cp0_epc, &sc->sc_pc); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index d69179c0d49d..19a7705f2a01 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -220,7 +220,7 @@ static int restore_sigcontext32(struct pt_regs *regs, int i; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; err |= __get_user(regs->cp0_epc, &sc->sc_pc); err |= __get_user(regs->hi, &sc->sc_mdhi); diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index bf280eaccd36..c1c374f0ec12 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h @@ -50,7 +50,6 @@ struct thread_info { 0-0xBFFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ - struct restart_block restart_block; __u8 supervisor_stack[0]; }; @@ -80,9 +79,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c index 47b3bb0c04ff..d780670cbaf3 100644 --- a/arch/mn10300/kernel/asm-offsets.c +++ b/arch/mn10300/kernel/asm-offsets.c @@ -28,7 +28,6 @@ void foo(void) OFFSET(TI_cpu, thread_info, cpu); OFFSET(TI_preempt_count, thread_info, preempt_count); OFFSET(TI_addr_limit, thread_info, addr_limit); - OFFSET(TI_restart_block, thread_info, restart_block); BLANK(); OFFSET(REG_D0, pt_regs, d0); diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index a6c0858592c3..8609845f12c5 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c @@ -40,7 +40,7 @@ static int restore_sigcontext(struct pt_regs *regs, unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (is_using_fpu(current)) fpu_kill_state(current); diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h index d797acc901e4..875f0845a707 100644 --- 
a/arch/openrisc/include/asm/thread_info.h +++ b/arch/openrisc/include/asm/thread_info.h @@ -57,7 +57,6 @@ struct thread_info { 0-0x7FFFFFFF for user-thead 0-0xFFFFFFFF for kernel-thread */ - struct restart_block restart_block; __u8 supervisor_stack[0]; /* saved context data */ @@ -79,9 +78,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = 1, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ .ksp = 0, \ } diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c index 7d1b8235bf90..4112175bf803 100644 --- a/arch/openrisc/kernel/signal.c +++ b/arch/openrisc/kernel/signal.c @@ -46,7 +46,7 @@ static int restore_sigcontext(struct pt_regs *regs, int err = 0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Restore the regs from &sc->regs. diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index a84611835549..fb13e3865563 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -14,7 +14,6 @@ struct thread_info { mm_segment_t addr_limit; /* user-level address space limit */ __u32 cpu; /* current CPU */ int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */ - struct restart_block restart_block; }; #define INIT_THREAD_INFO(tsk) \ @@ -25,9 +24,6 @@ struct thread_info { .cpu = 0, \ .addr_limit = KERNEL_DS, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall \ - } \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c index 012d4fa63d97..9b910a0251b8 100644 --- a/arch/parisc/kernel/signal.c +++ b/arch/parisc/kernel/signal.c @@ -99,7 +99,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) sigframe_size = PARISC_RT_SIGFRAME_SIZE32; #endif - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* Unwind the user stack to get the rt_sigframe structure. 
*/ frame = (struct rt_sigframe __user *) diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index e8abc83e699f..72489799cf02 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -43,7 +43,6 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - struct restart_block restart_block; unsigned long local_flags; /* private flags for thread */ /* low level flags - has atomic operations done on it */ @@ -59,9 +58,6 @@ struct thread_info { .exec_domain = &default_exec_domain, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ .flags = 0, \ } diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index b171001698ff..d3a831ac0f92 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c @@ -1231,7 +1231,7 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, int tm_restore = 0; #endif /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; rt_sf = (struct rt_sigframe __user *) (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16); @@ -1504,7 +1504,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, #endif /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); sc = &sf->sctx; diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 2cb0c94cafa5..c7c24d2e2bdb 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c @@ -666,7 +666,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, #endif /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, uc, sizeof(*uc))) goto badframe; diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 4d62fd5b56e5..ef1df718642d 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -39,7 +39,6 @@ struct thread_info { unsigned long sys_call_table; /* System call table address */ unsigned int cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - struct restart_block restart_block; unsigned int system_call; __u64 user_timer; __u64 system_timer; @@ -56,9 +55,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index 34d5fa7b01b5..bc1df12dd4f8 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -209,7 +209,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) int i; /* Alwys make any pending restarted system call return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs))) return -EFAULT; diff --git 
a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 6a2ac257d98f..b3ae6f70c6d6 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -162,7 +162,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) _sigregs user_sregs; /* Alwys make any pending restarted system call return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs))) return -EFAULT; diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h index 656b7ada9326..33864fa2a8d4 100644 --- a/arch/score/include/asm/thread_info.h +++ b/arch/score/include/asm/thread_info.h @@ -42,7 +42,6 @@ struct thread_info { * 0-0xFFFFFFFF for kernel-thread */ mm_segment_t addr_limit; - struct restart_block restart_block; struct pt_regs *regs; }; @@ -58,9 +57,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = 1, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/score/kernel/asm-offsets.c b/arch/score/kernel/asm-offsets.c index 57788f44c6fb..b4d5214a7a7e 100644 --- a/arch/score/kernel/asm-offsets.c +++ b/arch/score/kernel/asm-offsets.c @@ -106,7 +106,6 @@ void output_thread_info_defines(void) OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); - OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); OFFSET(TI_REGS, thread_info, regs); DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE); DEFINE(KERNEL_STACK_MASK, THREAD_MASK); diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c index 1651807774ad..e381c8c4ff65 100644 --- a/arch/score/kernel/signal.c +++ b/arch/score/kernel/signal.c @@ -141,7 +141,7 @@ score_rt_sigreturn(struct pt_regs *regs) int sig; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; frame = (struct rt_sigframe __user *) regs->regs[0]; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index ad27ffa65e2e..657c03919627 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -33,7 +33,6 @@ struct thread_info { __u32 cpu; int preempt_count; /* 0 => preemptable, <0 => BUG */ mm_segment_t addr_limit; /* thread address space */ - struct restart_block restart_block; unsigned long previous_sp; /* sp of previous stack in case of nested IRQ stacks */ __u8 supervisor_stack[0]; @@ -63,9 +62,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c index 08a2be775b6c..542225fedb11 100644 --- a/arch/sh/kernel/asm-offsets.c +++ b/arch/sh/kernel/asm-offsets.c @@ -25,7 +25,6 @@ int main(void) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); - DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block)); DEFINE(TI_SIZE, sizeof(struct thread_info)); #ifdef CONFIG_HIBERNATION diff --git a/arch/sh/kernel/signal_32.c 
b/arch/sh/kernel/signal_32.c index 2f002b24fb92..0b34f2a704fe 100644 --- a/arch/sh/kernel/signal_32.c +++ b/arch/sh/kernel/signal_32.c @@ -156,7 +156,7 @@ asmlinkage int sys_sigreturn(void) int r0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -186,7 +186,7 @@ asmlinkage int sys_rt_sigreturn(void) int r0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c index 897abe7b871e..71993c6a7d94 100644 --- a/arch/sh/kernel/signal_64.c +++ b/arch/sh/kernel/signal_64.c @@ -260,7 +260,7 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3, long long ret; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; @@ -294,7 +294,7 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3, long long ret; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index 025c98446b1e..fd7bd0a440ca 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -47,8 +47,6 @@ struct thread_info { struct reg_window32 reg_window[NSWINS]; /* align for ldd! 
*/ unsigned long rwbuf_stkptrs[NSWINS]; unsigned long w_saved; - - struct restart_block restart_block; }; /* @@ -62,9 +60,6 @@ struct thread_info { .flags = 0, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) @@ -103,7 +98,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TI_REG_WINDOW 0x30 #define TI_RWIN_SPTRS 0x230 #define TI_W_SAVED 0x250 -/* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */ /* * thread information flag bit numbers diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index 798f0279a4b5..ff455164732a 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -58,8 +58,6 @@ struct thread_info { unsigned long gsr[7]; unsigned long xfsr[7]; - struct restart_block restart_block; - struct pt_regs *kern_una_regs; unsigned int kern_una_insn; @@ -92,10 +90,9 @@ struct thread_info { #define TI_RWIN_SPTRS 0x000003c8 #define TI_GSR 0x00000400 #define TI_XFSR 0x00000438 -#define TI_RESTART_BLOCK 0x00000470 -#define TI_KUNA_REGS 0x000004a0 -#define TI_KUNA_INSN 0x000004a8 -#define TI_FPREGS 0x000004c0 +#define TI_KUNA_REGS 0x00000470 +#define TI_KUNA_INSN 0x00000478 +#define TI_FPREGS 0x00000480 /* We embed this in the uppermost byte of thread_info->flags */ #define FAULT_CODE_WRITE 0x01 /* Write access, implies D-TLB */ @@ -124,9 +121,6 @@ struct thread_info { .current_ds = ASI_P, \ .exec_domain = &default_exec_domain, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 62deba7be1a9..4eed773a7735 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -150,7 +150,7 @@ void do_sigreturn32(struct pt_regs *regs) int err, i; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; synchronize_user_stack(); @@ -235,7 +235,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) int err, i; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; synchronize_user_stack(); regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 9ee72fc8e0e4..52aa5e4ce5e7 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -70,7 +70,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) int err; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; synchronize_user_stack(); diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 1a6999868031..d88beff47bab 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -254,7 +254,7 @@ void do_rt_sigreturn(struct pt_regs *regs) int err; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; synchronize_user_stack (); sf = (struct rt_signal_frame 
__user *) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 981a769b9558..a27651e866e7 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2730,8 +2730,6 @@ void __init trap_init(void) TI_NEW_CHILD != offsetof(struct thread_info, new_child) || TI_CURRENT_DS != offsetof(struct thread_info, current_ds) || - TI_RESTART_BLOCK != offsetof(struct thread_info, - restart_block) || TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) || TI_KUNA_INSN != offsetof(struct thread_info, diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 48e4fd0f38e4..96c14c1430d8 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -36,7 +36,6 @@ struct thread_info { mm_segment_t addr_limit; /* thread address space (KERNEL_DS or USER_DS) */ - struct restart_block restart_block; struct single_step_state *step_state; /* single step state (if non-zero) */ int align_ctl; /* controls unaligned access */ @@ -57,9 +56,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ .step_state = NULL, \ .align_ctl = 0, \ } diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index bb0a9ce7ae23..8a524e332c1a 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -48,7 +48,7 @@ int restore_sigcontext(struct pt_regs *regs, int err; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; /* * Enforce that sigcontext is like pt_regs, and doesn't mess diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 1c5b2a83046a..e04114c4fcd9 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -22,7 +22,6 @@ struct thread_info { mm_segment_t addr_limit; /* thread address space: 0-0xBFFFFFFF for user 0-0xFFFFFFFF for kernel */ - struct restart_block restart_block; struct thread_info *real_thread; /* Points to non-IRQ stack */ }; @@ -34,9 +33,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ .real_thread = NULL, \ } diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h index af36d8eabdf1..63e2839dfeb8 100644 --- a/arch/unicore32/include/asm/thread_info.h +++ b/arch/unicore32/include/asm/thread_info.h @@ -79,7 +79,6 @@ struct thread_info { #ifdef CONFIG_UNICORE_FPU_F64 struct fp_state fpstate __attribute__((aligned(8))); #endif - struct restart_block restart_block; }; #define INIT_THREAD_INFO(tsk) \ @@ -89,9 +88,6 @@ struct thread_info { .flags = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c index 7c8fb7018dc6..d329f85766cc 100644 --- a/arch/unicore32/kernel/signal.c +++ b/arch/unicore32/kernel/signal.c @@ -105,7 +105,7 @@ asmlinkage int __sys_rt_sigreturn(struct pt_regs *regs) struct rt_sigframe __user *frame; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; 
/* * Since we stacked the signal on a 64-bit boundary, diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index f9e181aaba97..d0165c9a2932 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -169,7 +169,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, u32 tmp; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; get_user_try { /* diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index e82e95abc92b..1d4e4f279a32 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -31,7 +31,6 @@ struct thread_info { __u32 cpu; /* current CPU */ int saved_preempt_count; mm_segment_t addr_limit; - struct restart_block restart_block; void __user *sysenter_return; unsigned int sig_on_uaccess_error:1; unsigned int uaccess_err:1; /* uaccess failed */ @@ -45,9 +44,6 @@ struct thread_info { .cpu = 0, \ .saved_preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 2a33c8f68319..e5042463c1bc 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -69,7 +69,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; get_user_try { diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c index 79d824551c1a..0c8c32bfd792 100644 --- a/arch/x86/um/signal.c +++ b/arch/x86/um/signal.c @@ -157,7 +157,7 @@ static int copy_sc_from_user(struct pt_regs *regs, int err, pid; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; err = copy_from_user(&sc, from, sizeof(sc)); if (err) diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 470153e8547c..a9b5d3ba196c 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -51,7 +51,6 @@ struct thread_info { __s32 preempt_count; /* 0 => preemptable,< 0 => BUG*/ mm_segment_t addr_limit; /* thread address space */ - struct restart_block restart_block; unsigned long cpenable; @@ -72,7 +71,6 @@ struct thread_info { #define TI_CPU 0x00000010 #define TI_PRE_COUNT 0x00000014 #define TI_ADDR_LIMIT 0x00000018 -#define TI_RESTART_BLOCK 0x000001C #endif @@ -90,9 +88,6 @@ struct thread_info { .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ .addr_limit = KERNEL_DS, \ - .restart_block = { \ - .fn = do_no_restart_syscall, \ - }, \ } #define init_thread_info (init_thread_union.thread_info) diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index 4612321c73cc..3d733ba16f28 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -245,7 +245,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3, int ret; /* Always make any pending restarted system calls return -EINTR */ - current_thread_info()->restart_block.fn = do_no_restart_syscall; + current->restart_block.fn = do_no_restart_syscall; if (regs->depc > 64) 
panic("rt_sigreturn in double exception!\n"); diff --git a/fs/select.c b/fs/select.c index 467bb1cb3ea5..f684c750e08a 100644 --- a/fs/select.c +++ b/fs/select.c @@ -971,7 +971,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, if (ret == -EINTR) { struct restart_block *restart_block; - restart_block = &current_thread_info()->restart_block; + restart_block = &current->restart_block; restart_block->fn = do_restart_poll; restart_block->poll.ufds = ufds; restart_block->poll.nfds = nfds; diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 3037fc085e8e..d3d43ecf148c 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -193,6 +193,9 @@ extern struct task_group root_task_group; .nr_cpus_allowed= NR_CPUS, \ .mm = NULL, \ .active_mm = &init_mm, \ + .restart_block = { \ + .fn = do_no_restart_syscall, \ + }, \ .se = { \ .group_node = LIST_HEAD_INIT(tsk.se.group_node), \ }, \ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8db31ef98d2f..22ee0d5d7f8c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1370,6 +1370,8 @@ struct task_struct { unsigned long atomic_flags; /* Flags needing atomic access. */ + struct restart_block restart_block; + pid_t pid; pid_t tgid; diff --git a/kernel/compat.c b/kernel/compat.c index ebb3c369d03d..24f00610c575 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -276,8 +276,7 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, * core implementation decides to return random nonsense. */ if (ret == -ERESTART_RESTARTBLOCK) { - struct restart_block *restart - = &current_thread_info()->restart_block; + struct restart_block *restart = &current->restart_block; restart->fn = compat_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; @@ -860,7 +859,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags, return -EFAULT; if (err == -ERESTART_RESTARTBLOCK) { - restart = &current_thread_info()->restart_block; + restart = &current->restart_block; restart->fn = compat_clock_nanosleep_restart; restart->nanosleep.compat_rmtp = rmtp; } diff --git a/kernel/futex.c b/kernel/futex.c index 4eeb63de7e54..2a5e3830e953 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2217,7 +2217,7 @@ retry: if (!abs_time) goto out; - restart = &current_thread_info()->restart_block; + restart = &current->restart_block; restart->fn = futex_wait_restart; restart->futex.uaddr = uaddr; restart->futex.val = val; diff --git a/kernel/signal.c b/kernel/signal.c index 16a305295256..33a52759cc0e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2501,7 +2501,7 @@ EXPORT_SYMBOL(unblock_all_signals); */ SYSCALL_DEFINE0(restart_syscall) { - struct restart_block *restart = &current_thread_info()->restart_block; + struct restart_block *restart = &current->restart_block; return restart->fn(restart); } diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index a7077d3ae52f..1b001ed1edb9 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -788,7 +788,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, goto out; } - restart = &current_thread_info()->restart_block; + restart = &current->restart_block; restart->fn = alarm_timer_nsleep_restart; restart->nanosleep.clockid = type; restart->nanosleep.expires = exp.tv64; diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 3f5e183c3d97..bee0c1f78091 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1583,7 +1583,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, goto out; } - restart =
&current_thread_info()->restart_block; + restart = &current->restart_block; restart->fn = hrtimer_nanosleep_restart; restart->nanosleep.clockid = t.timer.base->clockid; restart->nanosleep.rmtp = rmtp; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index a16b67859e2a..0075da74abf0 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1334,8 +1334,7 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block); static int posix_cpu_nsleep(const clockid_t which_clock, int flags, struct timespec *rqtp, struct timespec __user *rmtp) { - struct restart_block *restart_block = - &current_thread_info()->restart_block; + struct restart_block *restart_block = &current->restart_block; struct itimerspec it; int error; -- cgit v1.2.3 From 2cb5cc8b09c939c77826635956c35995b15c9331 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 12 Feb 2015 22:31:21 -0500 Subject: ext4: support read-only images Add a rocompat feature, "readonly" to mark a FS image as read-only. The feature prevents the kernel and e2fsprogs from changing the image; the flag can be toggled by tune2fs. Signed-off-by: Darrick J. Wong Signed-off-by: Theodore Ts'o --- fs/ext4/ext4.h | 1 + fs/ext4/super.c | 10 +++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b7f393df2e4c..7fec2efd8635 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1530,6 +1530,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) * GDT_CSUM bits are mutually exclusive. */ #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400 +#define EXT4_FEATURE_RO_COMPAT_READONLY 0x1000 #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 95b388cae0ab..60db5a11fbea 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2776,6 +2776,12 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly) if (readonly) return 1; + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_READONLY)) { + ext4_msg(sb, KERN_INFO, "filesystem is read-only"); + sb->s_flags |= MS_RDONLY; + return 1; + } + /* Check that feature set is OK for a read-write mount */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) { ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " @@ -4929,7 +4935,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_mark_recovery_complete(sb, es); } else { /* Make sure we can mount this feature set readwrite */ - if (!ext4_feature_set_ok(sb, 0)) { + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_READONLY) || + !ext4_feature_set_ok(sb, 0)) { err = -EROFS; goto restore_opts; } -- cgit v1.2.3 From 0572639ff66dcffe62d37adfe4c4576f9fc398f4 Mon Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Thu, 12 Feb 2015 23:00:17 -0500 Subject: ext4: fix mmap data corruption in nodelalloc mode when blocksize < pagesize Since commit 90a8020 and d6320cb, Jan Kara has fixed this issue partially. This mmap data corruption still exists in nodelalloc mode, fix this.
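
For illustration only (this sketch is not part of the patch): a minimal userspace program showing the access pattern the fix targets, assuming an ext4 filesystem mounted with nodelalloc and a block size smaller than the page size. The file name and offsets below are arbitrary. A write() extends i_size into a page that is already mapped MAP_SHARED, and a later store through the mapping lands between the old and the new end of file; without pagecache_isize_extended() in the write_end paths, that store may never trigger block allocation for the newly exposed part of the page, so the data can be lost. This is a sketch of the scenario, not a guaranteed reproducer.

/* Sketch only: assumes blocksize=1024, pagesize=4096, fs mounted with nodelalloc. */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	char buf[1024];
	char *map;
	int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return 1;

	memset(buf, 'a', sizeof(buf));
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))	/* i_size = 1024 */
		return 1;

	map = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	/* Extend i_size past the old EOF that lies inside the mapped page ... */
	if (pwrite(fd, buf, sizeof(buf), 2048) != (ssize_t)sizeof(buf))
		return 1;

	map[1536] = 'b';	/* ... then store between old and new EOF */
	msync(map, pagesize, MS_SYNC);

	munmap(map, pagesize);
	close(fd);
	return 0;
}
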
Signed-off-by: Xiaoguang Wang Signed-off-by: Theodore Ts'o Reviewed-by: Jan Kara --- fs/ext4/inode.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5653fa42930b..4df6d01b762e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1007,6 +1007,7 @@ static int ext4_write_end(struct file *file, { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; + loff_t old_size = inode->i_size; int ret = 0, ret2; int i_size_changed = 0; @@ -1037,6 +1038,8 @@ static int ext4_write_end(struct file *file, unlock_page(page); page_cache_release(page); + if (old_size < pos) + pagecache_isize_extended(inode, old_size, pos); /* * Don't mark the inode dirty under page lock. First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock @@ -1078,6 +1081,7 @@ static int ext4_journalled_write_end(struct file *file, { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; + loff_t old_size = inode->i_size; int ret = 0, ret2; int partial = 0; unsigned from, to; @@ -1110,6 +1114,9 @@ static int ext4_journalled_write_end(struct file *file, unlock_page(page); page_cache_release(page); + if (old_size < pos) + pagecache_isize_extended(inode, old_size, pos); + if (size_changed) { ret2 = ext4_mark_inode_dirty(handle, inode); if (!ret) -- cgit v1.2.3 From b94a8b36be4e74d8caff387fadf71f9f97c2ea69 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 12 Feb 2015 23:04:27 -0500 Subject: ext4: remove duplicate remount check for JOURNAL_CHECKSUM change rejection of, changing journal_checksum during remount. One suffices. While we're at it, remove old comment about the "check" option which has been deprecated for some time now. Signed-off-by: Eric Sandeen Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 11 ----------- 1 file changed, 11 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 60db5a11fbea..2ecce8644cf8 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4850,9 +4850,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; - /* - * Allow the "check" option to be passed as a remount option. - */ if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -4866,14 +4863,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ - test_opt(sb, JOURNAL_CHECKSUM)) { - ext4_msg(sb, KERN_ERR, "changing journal_checksum " - "during remount not supported"); - err = -EINVAL; - goto restore_opts; - } - if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " -- cgit v1.2.3 From 2d5b86e048780c5efa7f7d9708815555919e7b05 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 12 Feb 2015 23:07:37 -0500 Subject: ext4: ignore journal checksum on remount; don't fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As of v3.18, ext4 started rejecting a remount which changes the journal_checksum option. Prior to that, it was simply ignored; the problem here is that if someone has this in their fstab for the root fs, now the box fails to boot properly, because remount of root with the new options will fail, and the box proceeds with a readonly root. 
I think it is a little nicer behavior to accept the option, but warn that it's being ignored, rather than failing the mount, but that might be a subjective matter... Reported-by: Cónräd Signed-off-by: Eric Sandeen Signed-off-by: Theodore Ts'o --- fs/ext4/super.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2ecce8644cf8..bff3427784ca 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4858,9 +4858,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ test_opt(sb, JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "changing journal_checksum " - "during remount not supported"); - err = -EINVAL; - goto restore_opts; + "during remount not supported; ignoring"); + sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; } if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { -- cgit v1.2.3 From 164c24063a3eadee11b46575c5482b2f1417be49 Mon Sep 17 00:00:00 2001 From: Chen Jie Date: Tue, 10 Feb 2015 12:49:48 -0800 Subject: jffs2: fix handling of corrupted summary length sm->offset maybe wrong but magic maybe right, the offset do not have CRC. Badness at c00c7580 [verbose debug info unavailable] NIP: c00c7580 LR: c00c718c CTR: 00000014 REGS: df07bb40 TRAP: 0700 Not tainted (2.6.34.13-WR4.3.0.0_standard) MSR: 00029000 CR: 22084f84 XER: 00000000 TASK = df84d6e0[908] 'mount' THREAD: df07a000 GPR00: 00000001 df07bbf0 df84d6e0 00000000 00000001 00000000 df07bb58 00000041 GPR08: 00000041 c0638860 00000000 00000010 22084f88 100636c8 df814ff8 00000000 GPR16: df84d6e0 dfa558cc c05adb90 00000048 c0452d30 00000000 000240d0 000040d0 GPR24: 00000014 c05ae734 c05be2e0 00000000 00000001 00000000 00000000 c05ae730 NIP [c00c7580] __alloc_pages_nodemask+0x4d0/0x638 LR [c00c718c] __alloc_pages_nodemask+0xdc/0x638 Call Trace: [df07bbf0] [c00c718c] __alloc_pages_nodemask+0xdc/0x638 (unreliable) [df07bc90] [c00c7708] __get_free_pages+0x20/0x48 [df07bca0] [c00f4a40] __kmalloc+0x15c/0x1ec [df07bcd0] [c01fc880] jffs2_scan_medium+0xa58/0x14d0 [df07bd70] [c01ff38c] jffs2_do_mount_fs+0x1f4/0x6b4 [df07bdb0] [c020144c] jffs2_do_fill_super+0xa8/0x260 [df07bdd0] [c020230c] jffs2_fill_super+0x104/0x184 [df07be00] [c0335814] get_sb_mtd_aux+0x9c/0xec [df07be20] [c033596c] get_sb_mtd+0x84/0x1e8 [df07be60] [c0201ed0] jffs2_get_sb+0x1c/0x2c [df07be70] [c0103898] vfs_kern_mount+0x78/0x1e8 [df07bea0] [c0103a58] do_kern_mount+0x40/0x100 [df07bec0] [c011fe90] do_mount+0x240/0x890 [df07bf10] [c0120570] sys_mount+0x90/0xd8 [df07bf40] [c00110d8] ret_from_syscall+0x0/0x4 === Exception: c01 at 0xff61a34 LR = 0x100135f0 Instruction dump: 38800005 38600000 48010f41 4bfffe1c 4bfc2d15 4bfffe8c 72e90200 4082fc28 3d20c064 39298860 8809000d 68000001 <0f000000> 2f800000 419efc0c 38000001 mount: mounting /dev/mtdblock3 on /common failed: Input/output error Signed-off-by: Chen Jie Cc: Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse --- fs/jffs2/scan.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 7654e87b0428..9ad5ba4b299b 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo sumlen = c->sector_size - je32_to_cpu(sm->offset); sumptr = buf + buf_size - sumlen; + /* sm->offset maybe wrong but MAGIC maybe right */ + if (sumlen > c->sector_size) + goto full_scan; + /* Now, make sure the summary itself is available */ if (sumlen 
> buf_size) { /* Need to kmalloc for this. */ @@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo } } +full_scan: buf_ofs = jeb->offset; if (!buf_size) { -- cgit v1.2.3 From d15bc38df607c893c36f4962dca0f57174c6a5c9 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Fri, 13 Feb 2015 13:19:53 -0800 Subject: nfs: Provide and use helper functions for marking a page as unstable Signed-off-by: Tom Haynes Signed-off-by: Trond Myklebust --- fs/nfs/filelayout/filelayout.c | 9 ++------- fs/nfs/flexfilelayout/flexfilelayout.c | 9 ++------- fs/nfs/internal.h | 13 +++++++++++++ fs/nfs/write.c | 9 ++------- 4 files changed, 19 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 7ae1c263c5cf..e1e5ea262a13 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -1000,13 +1000,8 @@ mds_commit: nfs_list_add_request(req, list); cinfo->mds->ncommit++; spin_unlock(cinfo->lock); - if (!cinfo->dreq) { - inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - __mark_inode_dirty(req->wb_context->dentry->d_inode, - I_DIRTY_DATASYNC); - } + if (!cinfo->dreq) + nfs_mark_page_unstable(req->wb_page); } static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index c22ecaa86c1c..423c2bc371fa 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1364,13 +1364,8 @@ ff_layout_mark_request_commit(struct nfs_page *req, nfs_list_add_request(req, list); cinfo->mds->ncommit++; spin_unlock(cinfo->lock); - if (!cinfo->dreq) { - inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - __mark_inode_dirty(req->wb_context->dentry->d_inode, - I_DIRTY_DATASYNC); - } + if (!cinfo->dreq) + nfs_mark_page_unstable(req->wb_page); } static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 212b8c883d22..b802fb3a2d99 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -597,6 +597,19 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize) sb->s_maxbytes = MAX_LFS_FILESIZE; } +/* + * Record the page as unstable and mark its inode as dirty. 
+ */ +static inline +void nfs_mark_page_unstable(struct page *page) +{ + struct inode *inode = page_file_mapping(page)->host; + + inc_zone_page_state(page, NR_UNSTABLE_NFS); + inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE); + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); +} + /* * Determine the number of bytes of data the page contains */ diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 88a6d2196ece..76c278acaefc 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -789,13 +789,8 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, nfs_list_add_request(req, dst); cinfo->mds->ncommit++; spin_unlock(cinfo->lock); - if (!cinfo->dreq) { - inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - __mark_inode_dirty(req->wb_context->dentry->d_inode, - I_DIRTY_DATASYNC); - } + if (!cinfo->dreq) + nfs_mark_page_unstable(req->wb_page); } EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); -- cgit v1.2.3 From 487b9b8afde60986b606b3ee05169fb893adc153 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Fri, 13 Feb 2015 13:19:54 -0800 Subject: nfs: Can call nfs_clear_page_commit() instead Signed-off-by: Tom Haynes Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 76c278acaefc..595d81e354d1 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1600,11 +1600,8 @@ void nfs_retry_commit(struct list_head *page_list, req = nfs_list_entry(page_list->next); nfs_list_remove_request(req); nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); - if (!cinfo->dreq) { - dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - } + if (!cinfo->dreq) + nfs_clear_page_commit(req->wb_page); nfs_unlock_and_release_request(req); } } -- cgit v1.2.3 From f4086a3d789dbe18949862276d83b8f49fce6d2f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 13 Feb 2015 21:03:16 -0500 Subject: NFS: struct nfs_commit_info.lock must always point to inode->i_lock Commit 411a99adffb4f (nfs: clear_request_commit while holding i_lock) assumes that the nfs_commit_info always points to the inode->i_lock. For historical reasons, that is not the case for O_DIRECT writes. 
Cc: Weston Andros Adamson Fixes: 411a99adffb4f ("nfs: clear_request_commit while holding i_lock") Cc: stable@vger.kernel.org # 3.17.x Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 2 +- include/linux/nfs_xdr.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 7077521acdf4..e907c8cf732e 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -283,7 +283,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages) void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, struct nfs_direct_req *dreq) { - cinfo->lock = &dreq->lock; + cinfo->lock = &dreq->inode->i_lock; cinfo->mds = &dreq->mds_cinfo; cinfo->ds = &dreq->ds_cinfo; cinfo->dreq = dreq; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 38d96ba935c2..9a39132fda49 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1351,7 +1351,7 @@ struct nfs_commit_completion_ops { }; struct nfs_commit_info { - spinlock_t *lock; + spinlock_t *lock; /* inode->i_lock */ struct nfs_mds_commit_info *mds; struct pnfs_ds_commit_info *ds; struct nfs_direct_req *dreq; /* O_DIRECT request */ -- cgit v1.2.3 From bf40e5561fd288a505d5d8d8bf45eef96fe7253d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 13 Feb 2015 21:40:27 -0500 Subject: NFSv4: Kill unused nfs_inode->delegation_state field Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 4 ---- fs/nfs/inode.c | 1 - include/linux/nfs_fs.h | 1 - 3 files changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 16b754ee0d09..4464eb06b0b6 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -175,7 +175,6 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, delegation->cred = get_rpccred(cred); clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); - NFS_I(inode)->delegation_state = delegation->type; spin_unlock(&delegation->lock); put_rpccred(oldcred); rcu_read_unlock(); @@ -270,7 +269,6 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi, set_bit(NFS_DELEGATION_RETURNING, &delegation->flags); list_del_rcu(&delegation->super_list); delegation->inode = NULL; - nfsi->delegation_state = 0; rcu_assign_pointer(nfsi->delegation, NULL); spin_unlock(&delegation->lock); return delegation; @@ -350,7 +348,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct &delegation->stateid)) { nfs_update_inplace_delegation(old_delegation, delegation); - nfsi->delegation_state = old_delegation->type; goto out; } /* @@ -374,7 +371,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct goto out; } list_add_rcu(&delegation->super_list, &server->delegations); - nfsi->delegation_state = delegation->type; rcu_assign_pointer(nfsi->delegation, delegation); delegation = NULL; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d2398c193bda..e211f975a69a 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1776,7 +1776,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) #if IS_ENABLED(CONFIG_NFS_V4) INIT_LIST_HEAD(&nfsi->open_states); nfsi->delegation = NULL; - nfsi->delegation_state = 0; init_rwsem(&nfsi->rwsem); nfsi->layout = NULL; #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6d627b92df53..2f77e0c651c8 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -180,7 +180,6 @@ struct nfs_inode { /* NFSv4 state */ struct list_head open_states; struct nfs_delegation __rcu *delegation; - 
fmode_t delegation_state; struct rw_semaphore rwsem; /* pNFS layout information */ -- cgit v1.2.3 From 75287a677ba1beab7ca0db948468f44eb23a709f Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Fri, 13 Feb 2015 14:36:27 -0800 Subject: kernfs: convert node name allocation to kstrdup_const sysfs frequently performs duplication of strings located in read-only memory section. Replacing kstrdup by kstrdup_const allows to avoid such operations. Signed-off-by: Andrzej Hajda Cc: Marek Szyprowski Cc: Kyungmin Park Cc: Mike Turquette Cc: Alexander Viro Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Acked-by: Tejun Heo Cc: Greg KH Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/kernfs/dir.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 2d881b381d2b..35e40879860a 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -412,7 +412,7 @@ void kernfs_put(struct kernfs_node *kn) if (kernfs_type(kn) == KERNFS_LINK) kernfs_put(kn->symlink.target_kn); if (!(kn->flags & KERNFS_STATIC_NAME)) - kfree(kn->name); + kfree_const(kn->name); if (kn->iattr) { if (kn->iattr->ia_secdata) security_release_secctx(kn->iattr->ia_secdata, @@ -506,12 +506,12 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, const char *name, umode_t mode, unsigned flags) { - char *dup_name = NULL; + const char *dup_name = NULL; struct kernfs_node *kn; int ret; if (!(flags & KERNFS_STATIC_NAME)) { - name = dup_name = kstrdup(name, GFP_KERNEL); + name = dup_name = kstrdup_const(name, GFP_KERNEL); if (!name) return NULL; } @@ -538,7 +538,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, err_out2: kmem_cache_free(kernfs_node_cache, kn); err_out1: - kfree(dup_name); + kfree_const(dup_name); return NULL; } @@ -1264,7 +1264,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, /* rename kernfs_node */ if (strcmp(kn->name, new_name) != 0) { error = -ENOMEM; - new_name = kstrdup(new_name, GFP_KERNEL); + new_name = kstrdup_const(new_name, GFP_KERNEL); if (!new_name) goto out; } else { @@ -1297,7 +1297,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, kernfs_link_sibling(kn); kernfs_put(old_parent); - kfree(old_name); + kfree_const(old_name); error = 0; out: -- cgit v1.2.3 From dfeb0750b630b72b5d4fb2461bc7179eceb54666 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 13 Feb 2015 14:36:31 -0800 Subject: kernfs: remove KERNFS_STATIC_NAME When a new kernfs node is created, KERNFS_STATIC_NAME is used to avoid making a separate copy of its name. It's currently only used for sysfs attributes whose filenames are required to stay accessible and unchanged. There are rare exceptions where these names are allocated and formatted dynamically but for the vast majority of cases they're consts in the rodata section. Now that kernfs is converted to use kstrdup_const() and kfree_const(), there's little point in keeping KERNFS_STATIC_NAME around. Remove it. 
Signed-off-by: Tejun Heo Cc: Andrzej Hajda Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/kernfs/dir.c | 20 ++++++++------------ fs/kernfs/file.c | 4 ---- fs/sysfs/file.c | 2 +- include/linux/kernfs.h | 7 ++----- kernel/cgroup.c | 2 +- 5 files changed, 12 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 35e40879860a..6acc9648f986 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -411,8 +411,9 @@ void kernfs_put(struct kernfs_node *kn) if (kernfs_type(kn) == KERNFS_LINK) kernfs_put(kn->symlink.target_kn); - if (!(kn->flags & KERNFS_STATIC_NAME)) - kfree_const(kn->name); + + kfree_const(kn->name); + if (kn->iattr) { if (kn->iattr->ia_secdata) security_release_secctx(kn->iattr->ia_secdata, @@ -506,15 +507,12 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, const char *name, umode_t mode, unsigned flags) { - const char *dup_name = NULL; struct kernfs_node *kn; int ret; - if (!(flags & KERNFS_STATIC_NAME)) { - name = dup_name = kstrdup_const(name, GFP_KERNEL); - if (!name) - return NULL; - } + name = kstrdup_const(name, GFP_KERNEL); + if (!name) + return NULL; kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL); if (!kn) @@ -538,7 +536,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, err_out2: kmem_cache_free(kernfs_node_cache, kn); err_out1: - kfree_const(dup_name); + kfree_const(name); return NULL; } @@ -1285,9 +1283,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, kn->ns = new_ns; if (new_name) { - if (!(kn->flags & KERNFS_STATIC_NAME)) - old_name = kn->name; - kn->flags &= ~KERNFS_STATIC_NAME; + old_name = kn->name; kn->name = new_name; } diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index ddc9f9612f16..b684e8a132e6 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -901,7 +901,6 @@ const struct file_operations kernfs_file_fops = { * @ops: kernfs operations for the file * @priv: private data for the file * @ns: optional namespace tag of the file - * @name_is_static: don't copy file name * @key: lockdep key for the file's active_ref, %NULL to disable lockdep * * Returns the created node on success, ERR_PTR() value on error. 
@@ -911,7 +910,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, umode_t mode, loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, - bool name_is_static, struct lock_class_key *key) { struct kernfs_node *kn; @@ -919,8 +917,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, int rc; flags = KERNFS_FILE; - if (name_is_static) - flags |= KERNFS_STATIC_NAME; kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags); if (!kn) diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index dfe928a9540f..7c2867b44141 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -295,7 +295,7 @@ int sysfs_add_file_mode_ns(struct kernfs_node *parent, key = attr->key ?: (struct lock_class_key *)&attr->skey; #endif kn = __kernfs_create_file(parent, attr->name, mode & 0777, size, ops, - (void *)attr, ns, true, key); + (void *)attr, ns, key); if (IS_ERR(kn)) { if (PTR_ERR(kn) == -EEXIST) sysfs_warn_dup(parent, attr->name); diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index d4e01b358341..71ecdab1671b 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -43,7 +43,6 @@ enum kernfs_node_flag { KERNFS_HAS_SEQ_SHOW = 0x0040, KERNFS_HAS_MMAP = 0x0080, KERNFS_LOCKDEP = 0x0100, - KERNFS_STATIC_NAME = 0x0200, KERNFS_SUICIDAL = 0x0400, KERNFS_SUICIDED = 0x0800, }; @@ -291,7 +290,6 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, umode_t mode, loff_t size, const struct kernfs_ops *ops, void *priv, const void *ns, - bool name_is_static, struct lock_class_key *key); struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, const char *name, @@ -369,8 +367,7 @@ kernfs_create_dir_ns(struct kernfs_node *parent, const char *name, static inline struct kernfs_node * __kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode, loff_t size, const struct kernfs_ops *ops, - void *priv, const void *ns, bool name_is_static, - struct lock_class_key *key) + void *priv, const void *ns, struct lock_class_key *key) { return ERR_PTR(-ENOSYS); } static inline struct kernfs_node * @@ -439,7 +436,7 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name, key = (struct lock_class_key *)&ops->lockdep_key; #endif return __kernfs_create_file(parent, name, mode, size, ops, priv, ns, - false, key); + key); } static inline struct kernfs_node * diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d5f6ec251fb2..29a7b2cc593e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -3077,7 +3077,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft) #endif kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name), cgroup_file_mode(cft), 0, cft->kf_ops, cft, - NULL, false, key); + NULL, key); if (IS_ERR(kn)) return PTR_ERR(kn); -- cgit v1.2.3 From fcc139ae227b97bd81352e9102d8e79498d1e930 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Fri, 13 Feb 2015 14:36:41 -0800 Subject: fs/namespace: convert devname allocation to kstrdup_const VFS frequently performs duplication of strings located in read-only memory section. Replacing kstrdup by kstrdup_const allows to avoid such operations. 
Signed-off-by: Andrzej Hajda Cc: Marek Szyprowski Cc: Kyungmin Park Cc: Mike Turquette Cc: Alexander Viro Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Tejun Heo Cc: Greg KH Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namespace.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index cd1e9681a0cf..6dae553dd69c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -201,7 +201,7 @@ static struct mount *alloc_vfsmnt(const char *name) goto out_free_cache; if (name) { - mnt->mnt_devname = kstrdup(name, GFP_KERNEL); + mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL); if (!mnt->mnt_devname) goto out_free_id; } @@ -234,7 +234,7 @@ static struct mount *alloc_vfsmnt(const char *name) #ifdef CONFIG_SMP out_free_devname: - kfree(mnt->mnt_devname); + kfree_const(mnt->mnt_devname); #endif out_free_id: mnt_free_id(mnt); @@ -568,7 +568,7 @@ int sb_prepare_remount_readonly(struct super_block *sb) static void free_vfsmnt(struct mount *mnt) { - kfree(mnt->mnt_devname); + kfree_const(mnt->mnt_devname); #ifdef CONFIG_SMP free_percpu(mnt->mnt_pcp); #endif -- cgit v1.2.3 From a0c2e07d6d4fe6f67b057d0f1c961e70ff581eda Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 13 Feb 2015 14:38:07 -0800 Subject: proc: use %*pb[l] to print bitmaps including cpumasks and nodemasks printk and friends can now format bitmaps using '%*pb[l]'. cpumask and nodemask also provide cpumask_pr_args() and nodemask_pr_args() respectively which can be used to generate the two printf arguments necessary to format the specified cpu/nodemask. Signed-off-by: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/array.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/proc/array.c b/fs/proc/array.c index a3ccf4c4ce70..1295a00ca316 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -316,12 +316,10 @@ static inline void task_context_switch_counts(struct seq_file *m, static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) { - seq_puts(m, "Cpus_allowed:\t"); - seq_cpumask(m, &task->cpus_allowed); - seq_putc(m, '\n'); - seq_puts(m, "Cpus_allowed_list:\t"); - seq_cpumask_list(m, &task->cpus_allowed); - seq_putc(m, '\n'); + seq_printf(m, "Cpus_allowed:\t%*pb\n", + cpumask_pr_args(&task->cpus_allowed)); + seq_printf(m, "Cpus_allowed_list:\t%*pbl\n", + cpumask_pr_args(&task->cpus_allowed)); } int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, -- cgit v1.2.3 From 46385326cc1577587ed3e7432c2425cf6d3e4308 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 13 Feb 2015 14:38:15 -0800 Subject: bitmap, cpumask, nodemask: remove dedicated formatting functions Now that all bitmap formatting usages have been converted to '%*pb[l]', the separate formatting functions are unnecessary. The following functions are removed. 
* bitmap_scn[list]printf() * cpumask_scnprintf(), cpulist_scnprintf() * [__]nodemask_scnprintf(), [__]nodelist_scnprintf() * seq_bitmap[_list](), seq_cpumask[_list](), seq_nodemask[_list]() * seq_buf_bitmask() Signed-off-by: Tejun Heo Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/seq_file.c | 32 -------------------------------- include/linux/bitmap.h | 7 ------- include/linux/cpumask.h | 31 ------------------------------- include/linux/nodemask.h | 33 +++++++-------------------------- include/linux/seq_buf.h | 3 --- include/linux/seq_file.h | 25 ------------------------- lib/bitmap.c | 41 ----------------------------------------- lib/seq_buf.c | 36 ------------------------------------ 8 files changed, 7 insertions(+), 201 deletions(-) (limited to 'fs') diff --git a/fs/seq_file.c b/fs/seq_file.c index dbf3a59c86bb..555f82155be8 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -539,38 +539,6 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc) return res; } -int seq_bitmap(struct seq_file *m, const unsigned long *bits, - unsigned int nr_bits) -{ - if (m->count < m->size) { - int len = bitmap_scnprintf(m->buf + m->count, - m->size - m->count, bits, nr_bits); - if (m->count + len < m->size) { - m->count += len; - return 0; - } - } - seq_set_overflow(m); - return -1; -} -EXPORT_SYMBOL(seq_bitmap); - -int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, - unsigned int nr_bits) -{ - if (m->count < m->size) { - int len = bitmap_scnlistprintf(m->buf + m->count, - m->size - m->count, bits, nr_bits); - if (m->count + len < m->size) { - m->count += len; - return 0; - } - } - seq_set_overflow(m); - return -1; -} -EXPORT_SYMBOL(seq_bitmap_list); - static void *single_start(struct seq_file *p, loff_t *pos) { return NULL + (*pos == 0); diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 5e7f75a6d7d0..dbfbf4990005 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -52,16 +52,13 @@ * bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit) * bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap * bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz - * bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf * bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf - * bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf * bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf * bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region - * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex */ /* @@ -147,14 +144,10 @@ bitmap_find_next_zero_area(unsigned long *map, align_mask, 0); } -extern int bitmap_scnprintf(char *buf, unsigned int len, - const unsigned long *src, int nbits); extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, unsigned long *dst, int nbits); extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits); -extern int bitmap_scnlistprintf(char *buf, unsigned int len, - const unsigned long *src, int nbits); extern int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits); extern int 
bitmap_parselist_user(const char __user *ubuf, unsigned int ulen, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index a9b3d00915a0..086549a665e2 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -546,21 +546,6 @@ static inline void cpumask_copy(struct cpumask *dstp, */ #define cpumask_of(cpu) (get_cpu_mask(cpu)) -/** - * cpumask_scnprintf - print a cpumask into a string as comma-separated hex - * @buf: the buffer to sprintf into - * @len: the length of the buffer - * @srcp: the cpumask to print - * - * If len is zero, returns zero. Otherwise returns the length of the - * (nul-terminated) @buf string. - */ -static inline int cpumask_scnprintf(char *buf, int len, - const struct cpumask *srcp) -{ - return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpu_ids); -} - /** * cpumask_parse_user - extract a cpumask from a user string * @buf: the buffer to extract from @@ -590,22 +575,6 @@ static inline int cpumask_parselist_user(const char __user *buf, int len, nr_cpu_ids); } -/** - * cpulist_scnprintf - print a cpumask into a string as comma-separated list - * @buf: the buffer to sprintf into - * @len: the length of the buffer - * @srcp: the cpumask to print - * - * If len is zero, returns zero. Otherwise returns the length of the - * (nul-terminated) @buf string. - */ -static inline int cpulist_scnprintf(char *buf, int len, - const struct cpumask *srcp) -{ - return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), - nr_cpu_ids); -} - /** * cpumask_parse - extract a cpumask from from a string * @buf: the buffer to extract from diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 10f8e556ba07..6e85889cf9ab 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -8,14 +8,13 @@ * See detailed comments in the file linux/bitmap.h describing the * data type on which these nodemasks are based. * - * For details of nodemask_scnprintf() and nodemask_parse_user(), - * see bitmap_scnprintf() and bitmap_parse_user() in lib/bitmap.c. - * For details of nodelist_scnprintf() and nodelist_parse(), see - * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. - * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c. - * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c. - * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c. - * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c. + * For details of nodemask_parse_user(), see bitmap_parse_user() in + * lib/bitmap.c. For details of nodelist_parse(), see bitmap_parselist(), + * also in bitmap.c. For details of node_remap(), see bitmap_bitremap in + * lib/bitmap.c. For details of nodes_remap(), see bitmap_remap in + * lib/bitmap.c. For details of nodes_onto(), see bitmap_onto in + * lib/bitmap.c. For details of nodes_fold(), see bitmap_fold in + * lib/bitmap.c. 
* * The available nodemask operations are: * @@ -52,9 +51,7 @@ * NODE_MASK_NONE Initializer - no bits set * unsigned long *nodes_addr(mask) Array of unsigned long's in mask * - * int nodemask_scnprintf(buf, len, mask) Format nodemask for printing * int nodemask_parse_user(ubuf, ulen, mask) Parse ascii string as nodemask - * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing * int nodelist_parse(buf, map) Parse ascii string as nodelist * int node_remap(oldbit, old, new) newbit = map(old, new)(oldbit) * void nodes_remap(dst, src, old, new) *dst = map(old, new)(src) @@ -312,14 +309,6 @@ static inline int __first_unset_node(const nodemask_t *maskp) #define nodes_addr(src) ((src).bits) -#define nodemask_scnprintf(buf, len, src) \ - __nodemask_scnprintf((buf), (len), &(src), MAX_NUMNODES) -static inline int __nodemask_scnprintf(char *buf, int len, - const nodemask_t *srcp, int nbits) -{ - return bitmap_scnprintf(buf, len, srcp->bits, nbits); -} - #define nodemask_parse_user(ubuf, ulen, dst) \ __nodemask_parse_user((ubuf), (ulen), &(dst), MAX_NUMNODES) static inline int __nodemask_parse_user(const char __user *buf, int len, @@ -328,14 +317,6 @@ static inline int __nodemask_parse_user(const char __user *buf, int len, return bitmap_parse_user(buf, len, dstp->bits, nbits); } -#define nodelist_scnprintf(buf, len, src) \ - __nodelist_scnprintf((buf), (len), &(src), MAX_NUMNODES) -static inline int __nodelist_scnprintf(char *buf, int len, - const nodemask_t *srcp, int nbits) -{ - return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); -} - #define nodelist_parse(buf, dst) __nodelist_parse((buf), &(dst), MAX_NUMNODES) static inline int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) { diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h index 9aafe0e24c68..fb7eb9ccb1cd 100644 --- a/include/linux/seq_buf.h +++ b/include/linux/seq_buf.h @@ -125,9 +125,6 @@ extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem, unsigned int len); extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc); -extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, - int nmaskbits); - #ifdef CONFIG_BINARY_PRINTF extern int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary); diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index cf6a9daaaf6d..afbb1fd77c77 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -126,31 +126,6 @@ int seq_path(struct seq_file *, const struct path *, const char *); int seq_dentry(struct seq_file *, struct dentry *, const char *); int seq_path_root(struct seq_file *m, const struct path *path, const struct path *root, const char *esc); -int seq_bitmap(struct seq_file *m, const unsigned long *bits, - unsigned int nr_bits); -static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) -{ - return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids); -} - -static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) -{ - return seq_bitmap(m, mask->bits, MAX_NUMNODES); -} - -int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, - unsigned int nr_bits); - -static inline int seq_cpumask_list(struct seq_file *m, - const struct cpumask *mask) -{ - return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids); -} - -static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) -{ - return seq_bitmap_list(m, mask->bits, MAX_NUMNODES); -} int single_open(struct file *, int (*)(struct seq_file *, void *), void *); int 
single_open_size(struct file *, int (*)(struct seq_file *, void *), void *, size_t); diff --git a/lib/bitmap.c b/lib/bitmap.c index 088adbdcbad9..d456f4c15a9f 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -369,24 +369,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area_off); #define nbits_to_hold_value(val) fls(val) #define BASEDEC 10 /* fancier cpuset lists input in decimal */ -/** - * bitmap_scnprintf - convert bitmap to an ASCII hex string. - * @buf: byte buffer into which string is placed - * @buflen: reserved size of @buf, in bytes - * @maskp: pointer to bitmap to convert - * @nmaskbits: size of bitmap, in bits - * - * Exactly @nmaskbits bits are displayed. Hex digits are grouped into - * comma-separated sets of eight digits per set. Returns the number of - * characters which were written to *buf, excluding the trailing \0. - */ -int bitmap_scnprintf(char *buf, unsigned int buflen, - const unsigned long *maskp, int nmaskbits) -{ - return scnprintf(buf, buflen, "%*pb", nmaskbits, maskp); -} -EXPORT_SYMBOL(bitmap_scnprintf); - /** * __bitmap_parse - convert an ASCII hex string into a bitmap. * @buf: pointer to buffer containing string. @@ -500,29 +482,6 @@ int bitmap_parse_user(const char __user *ubuf, } EXPORT_SYMBOL(bitmap_parse_user); -/** - * bitmap_scnlistprintf - convert bitmap to list format ASCII string - * @buf: byte buffer into which string is placed - * @buflen: reserved size of @buf, in bytes - * @maskp: pointer to bitmap to convert - * @nmaskbits: size of bitmap, in bits - * - * Output format is a comma-separated list of decimal numbers and - * ranges. Consecutively set bits are shown as two hyphen-separated - * decimal numbers, the smallest and largest bit numbers set in - * the range. Output format is compatible with the format - * accepted as input by bitmap_parselist(). - * - * The return value is the number of characters which were written to *buf - * excluding the trailing '\0', as per ISO C99's scnprintf. - */ -int bitmap_scnlistprintf(char *buf, unsigned int buflen, - const unsigned long *maskp, int nmaskbits) -{ - return scnprintf(buf, buflen, "%*pbl", nmaskbits, maskp); -} -EXPORT_SYMBOL(bitmap_scnlistprintf); - /** * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string * @list: indicates whether the bitmap must be list diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 4eedfedb9e31..88c0854bd752 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c @@ -91,42 +91,6 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) return ret; } -/** - * seq_buf_bitmask - write a bitmask array in its ASCII representation - * @s: seq_buf descriptor - * @maskp: points to an array of unsigned longs that represent a bitmask - * @nmaskbits: The number of bits that are valid in @maskp - * - * Writes a ASCII representation of a bitmask string into @s. - * - * Returns zero on success, -1 on overflow. - */ -int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, - int nmaskbits) -{ - unsigned int len = seq_buf_buffer_left(s); - int ret; - - WARN_ON(s->size == 0); - - /* - * Note, because bitmap_scnprintf() only returns the number of bytes - * written and not the number that would be written, we use the last - * byte of the buffer to let us know if we overflowed. There's a small - * chance that the bitmap could have fit exactly inside the buffer, but - * it's not that critical if that does happen. 
- */ - if (len > 1) { - ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits); - if (ret < len) { - s->len += ret; - return 0; - } - } - seq_buf_set_overflow(s); - return -1; -} - #ifdef CONFIG_BINARY_PRINTF /** * seq_buf_bprintf - Write the printf string from binary arguments -- cgit v1.2.3 From 4d5755b147665912c938504033d958f1115b68ff Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Fri, 13 Feb 2015 14:39:08 -0800 Subject: epoll: optimize setting task running after blocking After waking up a task waiting for an event, we explicitly mark it as TASK_RUNNING (which is necessary as we do the checks for wakeups as TASK_INTERRUPTIBLE). Once running and dealing with actually delivering the events, we're obviously not planning on calling schedule, thus we can relax the implied barrier and simply update the state with __set_current_state(). Signed-off-by: Davidlohr Bueso Cc: Alexander Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index d77f94491352..1e009cad8d5c 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1639,9 +1639,9 @@ fetch_events: spin_lock_irqsave(&ep->lock, flags); } - __remove_wait_queue(&ep->wq, &wait); - set_current_state(TASK_RUNNING); + __remove_wait_queue(&ep->wq, &wait); + __set_current_state(TASK_RUNNING); } check_events: /* Is it worth to try to dig for events ? */ -- cgit v1.2.3 From df4c0e36f1b1782b0611a77c52cc240e5c4752dd Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Fri, 13 Feb 2015 14:39:45 -0800 Subject: fs: dcache: manually unpoison dname after allocation to shut up kasan's reports We need to manually unpoison rounded up allocation size for dname to avoid kasan's reports in dentry_string_cmp(). When CONFIG_DCACHE_WORD_ACCESS=y dentry_string_cmp may access few bytes beyound requested in kmalloc() size. dentry_string_cmp() relates on that fact that dentry allocated using kmalloc and kmalloc internally round up allocation size. So this is not a bug, but this makes kasan to complain about such accesses. To avoid such reports we mark rounded up allocation size in shadow as accessible. Signed-off-by: Andrey Ryabinin Reported-by: Dmitry Vyukov Cc: Konstantin Serebryany Cc: Dmitry Chernenkov Signed-off-by: Andrey Konovalov Cc: Yuri Gribov Cc: Konstantin Khlebnikov Cc: Sasha Levin Cc: Christoph Lameter Cc: Joonsoo Kim Cc: Dave Hansen Cc: Andi Kleen Cc: Ingo Molnar Cc: Thomas Gleixner Cc: "H. 
Peter Anvin" Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dcache.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index d04be762b216..7d34f04ec7aa 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -38,6 +38,8 @@ #include #include #include +#include + #include "internal.h" #include "mount.h" @@ -1429,6 +1431,9 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) } atomic_set(&p->u.count, 1); dname = p->name; + if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS)) + kasan_unpoison_shadow(dname, + round_up(name->len + 1, sizeof(unsigned long))); } else { dname = dentry->d_iname; } -- cgit v1.2.3 From 575849ecf5d103ca9bbf0a6b9e89eba335d4e750 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 11 Feb 2015 11:12:39 +0000 Subject: Btrfs: fix scheduler warning when syncing log We try to lock a mutex while the current task state is not TASK_RUNNING, which results in the following warning when CONFIG_DEBUG_LOCK_ALLOC=y: [30736.772501] ------------[ cut here ]------------ [30736.774545] WARNING: CPU: 9 PID: 19972 at kernel/sched/core.c:7300 __might_sleep+0x8b/0xa8() [30736.783453] do not call blocking ops when !TASK_RUNNING; state=2 set at [] prepare_to_wait+0x43/0x89 [30736.786261] Modules linked in: dm_flakey dm_mod crc32c_generic btrfs xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop parport_pc psmouse parport pcspkr microcode serio_raw evdev processor thermal_sys i2c_piix4 i2c_core button ext4 crc16 jbd2 mbcache sg sr_mod cdrom sd_mod ata_generic virtio_scsi floppy ata_piix libata virtio_pci virtio_ring e1000 virtio scsi_mod [30736.794323] CPU: 9 PID: 19972 Comm: fsstress Not tainted 3.19.0-rc7-btrfs-next-5+ #1 [30736.795821] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 [30736.798788] 0000000000000009 ffff88042743fbd8 ffffffff814248ed ffff88043d32f2d8 [30736.800504] ffff88042743fc28 ffff88042743fc18 ffffffff81045338 0000000000000001 [30736.802131] ffffffff81064514 ffffffff817c52d1 000000000000026d 0000000000000000 [30736.803676] Call Trace: [30736.804256] [] dump_stack+0x4c/0x65 [30736.805245] [] warn_slowpath_common+0xa1/0xbb [30736.806360] [] ? __might_sleep+0x8b/0xa8 [30736.807391] [] warn_slowpath_fmt+0x46/0x48 [30736.808511] [] ? prepare_to_wait+0x43/0x89 [30736.809620] [] ? prepare_to_wait+0x43/0x89 [30736.810691] [] __might_sleep+0x8b/0xa8 [30736.811703] [] mutex_lock_nested+0x2f/0x3a0 [30736.812889] [] ? trace_hardirqs_on_caller+0x18f/0x1ab [30736.814138] [] ? trace_hardirqs_on+0xd/0xf [30736.819878] [] wait_for_writer.isra.12+0x91/0xaa [btrfs] [30736.821260] [] ? signal_pending_state+0x31/0x31 [30736.822410] [] btrfs_sync_log+0x160/0x947 [btrfs] [30736.823574] [] ? trace_hardirqs_on_caller+0x18f/0x1ab [30736.824847] [] ? trace_hardirqs_on+0xd/0xf [30736.825972] [] btrfs_sync_file+0x2b0/0x319 [btrfs] [30736.827684] [] vfs_fsync_range+0x21/0x23 [30736.828932] [] vfs_fsync+0x1c/0x1e [30736.829917] [] do_fsync+0x34/0x4e [30736.830862] [] SyS_fsync+0x10/0x14 [30736.831819] [] system_call_fastpath+0x12/0x17 [30736.832982] ---[ end trace c0b57df60d32ae5c ]--- Fix this my acquiring the mutex after calling finish_wait(), which sets the task's state to TASK_RUNNING. 
Signed-off-by: Filipe Manana Reviewed-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 79f05bc9d6ec..7870bdba26b7 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2448,8 +2448,8 @@ static void wait_for_writer(struct btrfs_trans_handle *trans, mutex_unlock(&root->log_mutex); if (atomic_read(&root->log_writers)) schedule(); - mutex_lock(&root->log_mutex); finish_wait(&root->log_writer_wait, &wait); + mutex_lock(&root->log_mutex); } } -- cgit v1.2.3 From f55985f4dda5cfb6967c17e96237f3c859076eb3 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 9 Feb 2015 21:14:24 +0000 Subject: Btrfs: scrub, fix sleep in atomic context My previous patch "Btrfs: fix scrub race leading to use-after-free" introduced the possibility to sleep in an atomic context, which happens when the scrub_lock mutex is held at the time scrub_pending_bio_dec() is called - this function can be called under an atomic context. Chris ran into this in a debug kernel which gave the following trace: [ 1928.950319] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:621 [ 1928.967334] in_atomic(): 1, irqs_disabled(): 0, pid: 149670, name: fsstress [ 1928.981324] INFO: lockdep is turned off. [ 1928.989244] CPU: 24 PID: 149670 Comm: fsstress Tainted: G W 3.19.0-rc7-mason+ #41 [ 1929.006418] Hardware name: ZTSYSTEMS Echo Ridge T4 /A9DRPF-10D, BIOS 1.07 05/10/2012 [ 1929.022207] ffffffff81a22cf8 ffff881076e03b78 ffffffff816b8dd9 ffff881076e03b78 [ 1929.037267] ffff880d8e828710 ffff881076e03ba8 ffffffff810856c4 ffff881076e03bc8 [ 1929.052315] 0000000000000000 000000000000026d ffffffff81a22cf8 ffff881076e03bd8 [ 1929.067381] Call Trace: [ 1929.072344] [] dump_stack+0x4f/0x6e [ 1929.083968] [] ___might_sleep+0x174/0x230 [ 1929.095352] [] __might_sleep+0x52/0x90 [ 1929.106223] [] mutex_lock_nested+0x2f/0x3b0 [ 1929.117951] [] ? trace_hardirqs_on+0xd/0x10 [ 1929.129708] [] scrub_pending_bio_dec+0x38/0x70 [btrfs] [ 1929.143370] [] scrub_parity_bio_endio+0x50/0x70 [btrfs] [ 1929.157191] [] bio_endio+0x53/0xa0 [ 1929.167382] [] rbio_orig_end_io+0x7c/0xa0 [btrfs] [ 1929.180161] [] raid_write_parity_end_io+0x5a/0x80 [btrfs] [ 1929.194318] [] bio_endio+0x53/0xa0 [ 1929.204496] [] blk_update_request+0x1eb/0x450 [ 1929.216569] [] ? trigger_load_balance+0x78/0x500 [ 1929.229176] [] scsi_end_request+0x3d/0x1f0 [ 1929.240740] [] scsi_io_completion+0xac/0x5b0 [ 1929.252654] [] scsi_finish_command+0xf0/0x150 [ 1929.264725] [] scsi_softirq_done+0x147/0x170 [ 1929.276635] [] blk_done_softirq+0x86/0xa0 [ 1929.288014] [] __do_softirq+0xde/0x600 [ 1929.298885] [] irq_exit+0xbd/0xd0 (...) Fix this by using a reference count on the scrub context structure instead of locking the scrub_lock mutex. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/scrub.c | 39 +++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 6d6e155c8c8b..db21f17df996 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -193,6 +193,15 @@ struct scrub_ctx { */ struct btrfs_scrub_progress stat; spinlock_t stat_lock; + + /* + * Use a ref counter to avoid use-after-free issues. Scrub workers + * decrement bios_in_flight and workers_pending and then do a wakeup + * on the list_wait wait queue. 
We must ensure the main scrub task + * doesn't free the scrub context before or while the workers are + * doing the wakeup() call. + */ + atomic_t refs; }; struct scrub_fixup_nodatasum { @@ -297,26 +306,20 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, static void copy_nocow_pages_worker(struct btrfs_work *work); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); +static void scrub_put_ctx(struct scrub_ctx *sctx); static void scrub_pending_bio_inc(struct scrub_ctx *sctx) { + atomic_inc(&sctx->refs); atomic_inc(&sctx->bios_in_flight); } static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { - struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; - - /* - * Hold the scrub_lock while doing the wakeup to ensure the - * sctx (and its wait queue list_wait) isn't destroyed/freed - * during the wakeup. - */ - mutex_lock(&fs_info->scrub_lock); atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); - mutex_unlock(&fs_info->scrub_lock); + scrub_put_ctx(sctx); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) @@ -350,6 +353,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx) { struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; + atomic_inc(&sctx->refs); /* * increment scrubs_running to prevent cancel requests from * completing as long as a worker is running. we must also @@ -388,15 +392,11 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) mutex_lock(&fs_info->scrub_lock); atomic_dec(&fs_info->scrubs_running); atomic_dec(&fs_info->scrubs_paused); + mutex_unlock(&fs_info->scrub_lock); atomic_dec(&sctx->workers_pending); wake_up(&fs_info->scrub_pause_wait); - /* - * Hold the scrub_lock while doing the wakeup to ensure the - * sctx (and its wait queue list_wait) isn't destroyed/freed - * during the wakeup. - */ wake_up(&sctx->list_wait); - mutex_unlock(&fs_info->scrub_lock); + scrub_put_ctx(sctx); } static void scrub_free_csums(struct scrub_ctx *sctx) @@ -442,6 +442,12 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) kfree(sctx); } +static void scrub_put_ctx(struct scrub_ctx *sctx) +{ + if (atomic_dec_and_test(&sctx->refs)) + scrub_free_ctx(sctx); +} + static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) { @@ -466,6 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) sctx = kzalloc(sizeof(*sctx), GFP_NOFS); if (!sctx) goto nomem; + atomic_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; sctx->pages_per_rd_bio = pages_per_rd_bio; sctx->curr = -1; @@ -3739,7 +3746,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); - scrub_free_ctx(sctx); + scrub_put_ctx(sctx); return ret; } -- cgit v1.2.3 From 13212b54d18d5235fb97fbdcba8ae453fd2a3a51 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Thu, 12 Feb 2015 14:18:17 +0800 Subject: btrfs: Fix out-of-space bug Btrfs will report NO_SPACE when we create and remove files for several times, and we can't write to filesystem until mount it again. 
Steps to reproduce: 1: Create a single-dev btrfs fs with default option 2: Write a file into it to take up most fs space 3: Delete above file 4: Wait about 100s to let chunk removed 5: goto 2 Script is like following: #!/bin/bash # Recommend 1.2G space, too large disk will make test slow DEV="/dev/sda16" MNT="/mnt/tmp" dev_size="$(lsblk -bn -o SIZE "$DEV")" || exit 2 file_size_m=$((dev_size * 75 / 100 / 1024 / 1024)) echo "Loop write ${file_size_m}M file on $((dev_size / 1024 / 1024))M dev" for ((i = 0; i < 10; i++)); do umount "$MNT" 2>/dev/null; done echo "mkfs $DEV" mkfs.btrfs -f "$DEV" >/dev/null || exit 2 echo "mount $DEV $MNT" mount "$DEV" "$MNT" || exit 2 for ((loop_i = 0; loop_i < 20; loop_i++)); do echo echo "loop $loop_i" echo "dd file..." cmd=(dd if=/dev/zero of="$MNT"/file0 bs=1M count="$file_size_m") "${cmd[@]}" 2>/dev/null || { # NO_SPACE error triggered echo "dd failed: ${cmd[*]}" exit 1 } echo "rm file..." rm -f "$MNT"/file0 || exit 2 for ((i = 0; i < 10; i++)); do df "$MNT" | tail -1 sleep 10 done done Reason: It is triggered by commit: 47ab2a6c689913db23ccae38349714edf8365e0a which is used to remove empty block groups automatically, but the reason is not in that patch. Code before works well because btrfs don't need to create and delete chunks so many times with high complexity. Above bug is caused by many reason, any of them can trigger it. Reason1: When we remove some continuous chunks but leave other chunks after, these disk space should be used by chunk-recreating, but in current code, only first create will successed. Fixed by Forrest Liu in: Btrfs: fix find_free_dev_extent() malfunction in case device tree has hole Reason2: contains_pending_extent() return wrong value in calculation. Fixed by Forrest Liu in: Btrfs: fix find_free_dev_extent() malfunction in case device tree has hole Reason3: btrfs_check_data_free_space() try to commit transaction and retry allocating chunk when the first allocating failed, but space_info->full is set in first allocating, and prevent second allocating in retry. Fixed in this patch by clear space_info->full in commit transaction. Tested for severial times by above script. Changelog v3->v4: use light weight int instead of atomic_t to record have_remove_bgs in transaction, suggested by: Josef Bacik Changelog v2->v3: v2 fixed the bug by adding more commit-transaction, but we only need to reclaim space when we are really have no space for new chunk, noticed by: Filipe David Manana Actually, our code already have this type of commit-and-retry, we only need to make it working with removed-bgs. v3 fixed the bug with above way. Changelog v1->v2: v1 will introduce a new bug when delete and create chunk in same disk space in same transaction, noticed by: Filipe David Manana V2 fix this bug by commit transaction after remove block grops. Reported-by: Tsutomu Itoh Suggested-by: Filipe David Manana Suggested-by: Josef Bacik Signed-off-by: Zhao Lei Signed-off-by: Chris Mason --- fs/btrfs/transaction.c | 4 ++++ fs/btrfs/transaction.h | 5 +++++ fs/btrfs/volumes.c | 2 ++ 3 files changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index e0faf803513a..038fcf6051e0 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -220,6 +220,7 @@ loop: * commit the transaction. 
*/ atomic_set(&cur_trans->use_count, 2); + cur_trans->have_free_bgs = 0; cur_trans->start_time = get_seconds(); cur_trans->delayed_refs.href_root = RB_ROOT; @@ -2037,6 +2038,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_finish_extent_commit(trans, root); + if (cur_trans->have_free_bgs) + btrfs_clear_space_info_full(root->fs_info); + root->fs_info->last_trans_committed = cur_trans->transid; /* * We needn't acquire the lock here because there is no other task diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 3305451451ca..937050a2b68e 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -47,6 +47,11 @@ struct btrfs_transaction { atomic_t num_writers; atomic_t use_count; + /* + * true if there is free bgs operations in this transaction + */ + int have_free_bgs; + /* Be protected by fs_info->trans_lock when we want to change it. */ enum btrfs_trans_state state; struct list_head list; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2c4cab2dbd1a..cd4d1315aaa9 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1310,6 +1310,8 @@ again: if (ret) { btrfs_error(root->fs_info, ret, "Failed to remove dev extent item"); + } else { + trans->transaction->have_free_bgs = 1; } out: btrfs_free_path(path); -- cgit v1.2.3 From 3e05bde8c3c2dd761da4d52944a087907955a53c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 Feb 2015 15:08:57 -0500 Subject: Btrfs: only adjust outstanding_extents when we do a short write We have this weird dance where we always inc outstanding_extents when we do a O_DIRECT write, even if we allocate the entire range. To get around this we also drop the metadata space if we successfully write. This is an unnecessary dance, we only need to jack up outstanding_extents if we don't satisfy the entire range request in get_blocks_direct, otherwise we are good using our original reservation. So drop the unconditional inc and the drop of the metadata space that we have for the unconditional inc. 
Thanks, Signed-off-by: Josef Bacik Reviewed-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b0292333fd84..5a5b902e647f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7155,6 +7155,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; u64 len = bh_result->b_size; + u64 orig_len = len; int unlock_bits = EXTENT_LOCKED; int ret = 0; @@ -7290,9 +7291,11 @@ unlock: if (start + len > i_size_read(inode)) i_size_write(inode, start + len); - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents++; - spin_unlock(&BTRFS_I(inode)->lock); + if (len < orig_len) { + spin_lock(&BTRFS_I(inode)->lock); + BTRFS_I(inode)->outstanding_extents++; + spin_unlock(&BTRFS_I(inode)->lock); + } ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockstart + len - 1, EXTENT_DELALLOC, NULL, @@ -8073,8 +8076,6 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, else if (ret >= 0 && (size_t)ret < count) btrfs_delalloc_release_space(inode, count - (size_t)ret); - else - btrfs_delalloc_release_metadata(inode, 0); } out: if (wakeup) -- cgit v1.2.3 From 3266789f9d08b27275bae5ab1dcd27d1bbf15e79 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 Feb 2015 15:08:58 -0500 Subject: Btrfs: don't set and clear delalloc for O_DIRECT writes We do this to get the space accounting, but this is just needless churn on the io_tree, so just drop setting/clearing delalloc and just drop the reserved data space when we have a successfull allocation. Thanks, Signed-off-by: Josef Bacik Reviewed-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5a5b902e647f..3b957921ba59 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7160,7 +7160,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, int ret = 0; if (create) - unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; + unlock_bits |= EXTENT_DIRTY; else len = min_t(u64, len, root->sectorsize); @@ -7296,11 +7296,7 @@ unlock: BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); } - - ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, - lockstart + len - 1, EXTENT_DELALLOC, NULL, - &cached_state, GFP_NOFS); - BUG_ON(ret); + btrfs_free_reserved_data_space(inode, len); } /* -- cgit v1.2.3 From dcab6a3b2ae657a2017637083c28ee303b6b1b8e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 Feb 2015 15:08:59 -0500 Subject: Btrfs: account for large extents with enospc On our gluster boxes we stream large tar balls of backups onto our fses. With 160gb of ram this means we get really large contiguous ranges of dirty data, but the way our ENOSPC stuff works is that as long as it's contiguous we only hold metadata reservation for one extent. The problem is we limit our extents to 128mb, so we'll end up with at least 800 extents so our enospc accounting is quite a bit lower than what we need. To keep track of this make sure we increase outstanding_extents for every multiple of the max extent size so we can be sure to have enough reserved metadata space. 
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/extent-tree.c | 16 +++++++++---- fs/btrfs/extent_io.c | 2 +- fs/btrfs/inode.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 76 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d3562dd43c66..b3dd55f52f71 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -198,6 +198,8 @@ static int btrfs_csum_sizes[] = { 4, 0 }; #define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024) +#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024) + /* * The key defines the order in the tree, and so it also defines (optimal) * block layout. diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 50de1fa6fc9e..0f6737063142 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4963,19 +4963,25 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root, /** * drop_outstanding_extent - drop an outstanding extent * @inode: the inode we're dropping the extent for + * @num_bytes: the number of bytes we're relaseing. * * This is called when we are freeing up an outstanding extent, either called * after an error or after an extent is written. This will return the number of * reserved extents that need to be freed. This must be called with * BTRFS_I(inode)->lock held. */ -static unsigned drop_outstanding_extent(struct inode *inode) +static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) { unsigned drop_inode_space = 0; unsigned dropped_extents = 0; + unsigned num_extents = 0; - BUG_ON(!BTRFS_I(inode)->outstanding_extents); - BTRFS_I(inode)->outstanding_extents--; + num_extents = (unsigned)div64_u64(num_bytes + + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + ASSERT(num_extents); + ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents); + BTRFS_I(inode)->outstanding_extents -= num_extents; if (BTRFS_I(inode)->outstanding_extents == 0 && test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, @@ -5146,7 +5152,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) out_fail: spin_lock(&BTRFS_I(inode)->lock); - dropped = drop_outstanding_extent(inode); + dropped = drop_outstanding_extent(inode, num_bytes); /* * If the inodes csum_bytes is the same as the original * csum_bytes then we know we haven't raced with any free()ers @@ -5225,7 +5231,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); spin_lock(&BTRFS_I(inode)->lock); - dropped = drop_outstanding_extent(inode); + dropped = drop_outstanding_extent(inode, num_bytes); if (num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes, 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a7f66009519a..29850d4a3827 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3242,7 +3242,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, page, &delalloc_start, &delalloc_end, - 128 * 1024 * 1024); + BTRFS_MAX_EXTENT_SIZE); if (nr_delalloc == 0) { delalloc_start = delalloc_end + 1; continue; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3b957921ba59..8564d8ce03de 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1530,10 +1530,45 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, static void btrfs_split_extent_hook(struct inode *inode, struct extent_state *orig, u64 split) { + u64 size; + /* not delalloc, ignore it */ if (!(orig->state & EXTENT_DELALLOC)) return; + 
size = orig->end - orig->start + 1; + if (size > BTRFS_MAX_EXTENT_SIZE) { + u64 num_extents; + u64 new_size; + + /* + * We need the largest size of the remaining extent to see if we + * need to add a new outstanding extent. Think of the following + * case + * + * [MEAX_EXTENT_SIZEx2 - 4k][4k] + * + * The new_size would just be 4k and we'd think we had enough + * outstanding extents for this if we only took one side of the + * split, same goes for the other direction. We need to see if + * the larger size still is the same amount of extents as the + * original size, because if it is we need to add a new + * outstanding extent. But if we split up and the larger size + * is less than the original then we are good to go since we've + * already accounted for the extra extent in our original + * accounting. + */ + new_size = orig->end - split + 1; + if ((split - orig->start) > new_size) + new_size = split - orig->start; + + num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE) < num_extents) + return; + } + spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); @@ -1549,10 +1584,34 @@ static void btrfs_merge_extent_hook(struct inode *inode, struct extent_state *new, struct extent_state *other) { + u64 new_size, old_size; + u64 num_extents; + /* not delalloc, ignore it */ if (!(other->state & EXTENT_DELALLOC)) return; + old_size = other->end - other->start + 1; + new_size = old_size + (new->end - new->start + 1); + + /* we're not bigger than the max, unreserve the space and go */ + if (new_size <= BTRFS_MAX_EXTENT_SIZE) { + spin_lock(&BTRFS_I(inode)->lock); + BTRFS_I(inode)->outstanding_extents--; + spin_unlock(&BTRFS_I(inode)->lock); + return; + } + + /* + * If we grew by another max_extent, just return, we want to keep that + * reserved amount. + */ + num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE) > num_extents) + return; + spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents--; spin_unlock(&BTRFS_I(inode)->lock); @@ -1648,6 +1707,8 @@ static void btrfs_clear_bit_hook(struct inode *inode, unsigned *bits) { u64 len = state->end + 1 - state->start; + u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1, + BTRFS_MAX_EXTENT_SIZE); spin_lock(&BTRFS_I(inode)->lock); if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) @@ -1667,7 +1728,7 @@ static void btrfs_clear_bit_hook(struct inode *inode, *bits &= ~EXTENT_FIRST_DELALLOC; } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents--; + BTRFS_I(inode)->outstanding_extents -= num_extents; spin_unlock(&BTRFS_I(inode)->lock); } -- cgit v1.2.3 From 3d84be799194147e04c0e3129ed44a948773b80a Mon Sep 17 00:00:00 2001 From: Forrest Liu Date: Wed, 11 Feb 2015 14:24:12 +0800 Subject: Btrfs: fix BUG_ON in btrfs_orphan_add() when delete unused block group Removing large amount of block group in a transaction may encounters BUG_ON() in btrfs_orphan_add(). That is because btrfs_orphan_reserve_metadata() will grab metadata reservation from transaction handle, and btrfs_delete_unused_bgs() didn't reserve metadata for trnasaction handle when delete unused block group. 
The problem can be reproduce by following script mntpath=/btrfs loopdev=/dev/loop0 filepath=/home/forrest/image umount $mntpath losetup -d $loopdev truncate --size 1000g $filepath losetup $loopdev $filepath mkfs.btrfs -f $loopdev mount $loopdev $mntpath for j in `seq 1 1 1000`; do fallocate -l 1g $mntpath/$j done # wait cleaner thread remove unused block group sleep 300 The call trace that results from the BUG_ON() is: [ 613.093084] ------------[ cut here ]------------ [ 613.097928] kernel BUG at fs/btrfs/inode.c:3142! [ 613.105855] invalid opcode: 0000 [#1] SMP [ 613.112702] Modules linked in: coretemp(E) crc32_pclmul(E) ghash_clmulni_intel(E) aesni_intel(E) snd_ens1371(E) snd_ac97_codec(E) aes_x86_64(E) lrw(E) gf128mul(E) glue_helper(E) ppdev(E) ac97_bus(E) ablk_helper(E) gameport(E) cryptd(E) snd_rawmidi(E) snd_seq_device(E) snd_pcm(E) vmw_balloon(E) snd_timer(E) snd(E) soundcore(E) serio_raw(E) vmwgfx(E) ttm(E) drm_kms_helper(E) drm(E) vmw_vmci(E) parport_pc(E) shpchp(E) i2c_piix4(E) mac_hid(E) lp(E) parport(E) btrfs(E) xor(E) raid6_pq(E) hid_generic(E) usbhid(E) hid(E) psmouse(E) ahci(E) libahci(E) e1000(E) mptspi(E) mptscsih(E) mptbase(E) floppy(E) vmw_pvscsi(E) vmxnet3(E) [ 613.144196] CPU: 0 PID: 1480 Comm: btrfs-cleaner Tainted: G E 3.19.0-rc7-custom #2 [ 613.148501] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/31/2013 [ 613.152694] task: ffff880035cdb1a0 ti: ffff880039cf4000 task.ti: ffff880039cf4000 [ 613.154969] RIP: 0010:[] [] btrfs_orphan_add+0x1d2/0x1e0 [btrfs] [ 613.157780] RSP: 0018:ffff880039cf7c48 EFLAGS: 00010286 [ 613.159560] RAX: 00000000ffffffe4 RBX: ffff88003bd981a0 RCX: ffff88003c9e4000 [ 613.161904] RDX: 0000000000002244 RSI: 0000000000040000 RDI: ffff88003c9e4138 [ 613.164264] RBP: ffff880039cf7c88 R08: 000060ffc0000850 R09: 0000000000000000 [ 613.166507] R10: ffff88003bc4b7a0 R11: ffffea0000eb6740 R12: ffff88003c9c0000 [ 613.168681] R13: ffff88003c102160 R14: ffff88003c9c0458 R15: 0000000000000001 [ 613.170932] FS: 0000000000000000(0000) GS:ffff88003f600000(0000) knlGS:0000000000000000 [ 613.173316] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 613.175227] CR2: 00007f6343537000 CR3: 0000000036329000 CR4: 00000000000407f0 [ 613.177554] Stack: [ 613.178712] ffff880039cf7c88 ffffffffa0182a54 ffff88003c9e4b04 ffff88003c9c7800 [ 613.181297] ffff88003bc4b7a0 ffff88003bd981a0 ffff88003c8db200 ffff88003c2fcc60 [ 613.183782] ffff880039cf7d18 ffffffffa012da97 ffff88003bc4b7a4 ffff88003bc4b7a0 [ 613.186171] Call Trace: [ 613.187493] [] ? lookup_free_space_inode+0x44/0x100 [btrfs] [ 613.189801] [] btrfs_remove_block_group+0x137/0x740 [btrfs] [ 613.192126] [] btrfs_remove_chunk+0x672/0x780 [btrfs] [ 613.194267] [] btrfs_delete_unused_bgs+0x25f/0x280 [btrfs] [ 613.196567] [] cleaner_kthread+0x12c/0x190 [btrfs] [ 613.198687] [] ? check_leaf+0x350/0x350 [btrfs] [ 613.200758] [] kthread+0xd2/0xf0 [ 613.202616] [] ? kthread_create_on_node+0x180/0x180 [ 613.204738] [] ret_from_fork+0x7c/0xb0 [ 613.206652] [] ? 
kthread_create_on_node+0x180/0x180 [ 613.208741] Code: ff ff 0f 1f 80 00 00 00 00 89 45 c8 3e 80 63 80 fd 48 89 df e8 d0 23 fe ff 8b 45 c8 e9 14 ff ff ff b8 f4 ff ff ff e9 12 ff ff ff <0f> 0b 66 66 66 2e 0f 1f 84 00 00 00 00 00 66 66 66 66 90 55 48 [ 613.216562] RIP [] btrfs_orphan_add+0x1d2/0x1e0 [btrfs] [ 613.218828] RSP [ 613.220382] ---[ end trace 71073106deb8a457 ]--- This patch replaces btrfs_join_transaction() with btrfs_start_transaction() in btrfs_delete_unused_bgs() to prevent the BUG_ON() in btrfs_orphan_add(). Signed-off-by: Forrest Liu Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0f6737063142..28ce5c8004d4 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -9555,7 +9555,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * Want to do this before we do anything else so we can recover * properly if we fail to join the transaction. */ - trans = btrfs_join_transaction(root); + /* 1 for btrfs_orphan_reserve_metadata() */ + trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { btrfs_set_block_group_rw(root, block_group); ret = PTR_ERR(trans); -- cgit v1.2.3 From 1a4bcf470c886b955adf36486f4c86f2441d85cb Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 13 Feb 2015 12:30:56 +0000 Subject: Btrfs: fix fsync data loss after adding hard link to inode We have a scenario where after the fsync log replay we can lose file data that had been previously fsync'ed if we added a hard link for our inode and after that we sync'ed the fsync log (for example by fsync'ing some other file or directory). This is because when adding a hard link we updated the inode item in the log tree with an i_size value of 0. At that point the new inode item was in memory only and a subsequent fsync log replay would not make us lose the file data. However if after adding the hard link we sync the log tree to disk, by fsync'ing some other file or directory for example, we ended up losing the file data after log replay, because the inode item in the persisted log tree had an i_size of zero. This is easy to reproduce, and the following excerpt from my test for xfstests shows this: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create one file with data and fsync it. # This made the btrfs fsync log persist the data and the inode metadata with # a correct inode->i_size (4096 bytes). $XFS_IO_PROG -f -c "pwrite -S 0xaa -b 4K 0 4K" -c "fsync" \ $SCRATCH_MNT/foo | _filter_xfs_io # Now add one hard link to our file. This made the btrfs code update the fsync # log, in memory only, with an inode metadata having a size of 0. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link # Now force persistence of the fsync log to disk, for example, by fsyncing some # other file. touch $SCRATCH_MNT/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/bar # Before a power loss or crash, we could read the 4Kb of data from our file as # expected. echo "File content before:" od -t x1 $SCRATCH_MNT/foo # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # After the fsync log replay, because the fsync log had a value of 0 for our # inode's i_size, we could no longer read the 4Kb of data that we previously # wrote and fsync'ed. The size of the file became 0 after the fsync log replay.
echo "File content after:" od -t x1 $SCRATCH_MNT/foo Another alternative test, that doesn't need to fsync an inode in the same transaction it was created, is: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create our test file with some data. $XFS_IO_PROG -f -c "pwrite -S 0xaa -b 8K 0 8K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Make sure the file is durably persisted. sync # Append some data to our file, to increase its size. $XFS_IO_PROG -f -c "pwrite -S 0xcc -b 4K 8K 4K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Fsync the file, so from this point on if a crash/power failure happens, our # new data is guaranteed to be there next time the fs is mounted. $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Add one hard link to our file. This made btrfs write into the in memory fsync # log a special inode with generation 0 and an i_size of 0 too. Note that this # didn't update the inode in the fsync log on disk. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link # Now make sure the in memory fsync log is durably persisted. # Creating and fsync'ing another file will do it. touch $SCRATCH_MNT/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/bar # As expected, before the crash/power failure, we should be able to read the # 12Kb of file data. echo "File content before:" od -t x1 $SCRATCH_MNT/foo # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # After mounting the fs again, the fsync log was replayed. # The btrfs fsync log replay code didn't update the i_size of the persisted # inode because the inode item in the log had a special generation with a # value of 0 (and it couldn't know the correct i_size, since that inode item # had a 0 i_size too). This made the last 4Kb of file data inaccessible and # effectively lost. echo "File content after:" od -t x1 $SCRATCH_MNT/foo This isn't a new issue/regression. This problem has been around since the log tree code was added in 2008: Btrfs: Add a write ahead tree log to optimize synchronous operations (commit e02119d5a7b4396c5a872582fddc8bd6d305a70a) Test cases for xfstests follow soon. 
CC: Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 73 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7870bdba26b7..5f649bb32bec 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -490,8 +490,20 @@ insert: src_item = (struct btrfs_inode_item *)src_ptr; dst_item = (struct btrfs_inode_item *)dst_ptr; - if (btrfs_inode_generation(eb, src_item) == 0) + if (btrfs_inode_generation(eb, src_item) == 0) { + struct extent_buffer *dst_eb = path->nodes[0]; + + if (S_ISREG(btrfs_inode_mode(eb, src_item)) && + S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) { + struct btrfs_map_token token; + u64 ino_size = btrfs_inode_size(eb, src_item); + + btrfs_init_map_token(&token); + btrfs_set_token_inode_size(dst_eb, dst_item, + ino_size, &token); + } goto no_copy; + } if (overwrite_root && S_ISDIR(btrfs_inode_mode(eb, src_item)) && @@ -3250,7 +3262,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, static void fill_inode_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf, struct btrfs_inode_item *item, - struct inode *inode, int log_inode_only) + struct inode *inode, int log_inode_only, + u64 logged_isize) { struct btrfs_map_token token; @@ -3263,7 +3276,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, * to say 'update this inode with these values' */ btrfs_set_token_inode_generation(leaf, item, 0, &token); - btrfs_set_token_inode_size(leaf, item, 0, &token); + btrfs_set_token_inode_size(leaf, item, logged_isize, &token); } else { btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, @@ -3315,7 +3328,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans, return ret; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); - fill_inode_item(trans, path->nodes[0], inode_item, inode, 0); + fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0); btrfs_release_path(path); return 0; } @@ -3324,7 +3337,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_path *dst_path, struct btrfs_path *src_path, u64 *last_extent, - int start_slot, int nr, int inode_only) + int start_slot, int nr, int inode_only, + u64 logged_isize) { unsigned long src_offset; unsigned long dst_offset; @@ -3381,7 +3395,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, dst_path->slots[0], struct btrfs_inode_item); fill_inode_item(trans, dst_path->nodes[0], inode_item, - inode, inode_only == LOG_INODE_EXISTS); + inode, inode_only == LOG_INODE_EXISTS, + logged_isize); } else { copy_extent_buffer(dst_path->nodes[0], src, dst_offset, src_offset, ins_sizes[i]); @@ -3933,6 +3948,33 @@ process: return ret; } +static int logged_inode_size(struct btrfs_root *log, struct inode *inode, + struct btrfs_path *path, u64 *size_ret) +{ + struct btrfs_key key; + int ret; + + key.objectid = btrfs_ino(inode); + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; + + ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); + if (ret < 0) { + return ret; + } else if (ret > 0) { + *size_ret = i_size_read(inode); + } else { + struct btrfs_inode_item *item; + + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_item); + *size_ret = btrfs_inode_size(path->nodes[0], item); + } + + btrfs_release_path(path); + return 0; +} + /* log a single inode in the tree log. 
* At least one parent directory for this inode must exist in the tree * or be logged already. @@ -3970,6 +4012,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, bool fast_search = false; u64 ino = btrfs_ino(inode); struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + u64 logged_isize = 0; path = btrfs_alloc_path(); if (!path) @@ -4030,6 +4073,25 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key_type = BTRFS_XATTR_ITEM_KEY; ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { + if (inode_only == LOG_INODE_EXISTS) { + /* + * Make sure the new inode item we write to the log has + * the same isize as the current one (if it exists). + * This is necessary to prevent data loss after log + * replay, and also to prevent doing a wrong expanding + * truncate - for e.g. create file, write 4K into offset + * 0, fsync, write 4K into offset 4096, add hard link, + * fsync some other file (to sync log), power fail - if + * we use the inode's current i_size, after log replay + * we get a 8Kb file, with the last 4Kb extent as a hole + * (zeroes), as if an expanding truncate happened, + * instead of getting a file of 4Kb only. + */ + err = logged_inode_size(log, inode, path, + &logged_isize); + if (err) + goto out_unlock; + } if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags)) { clear_bit(BTRFS_INODE_COPY_EVERYTHING, @@ -4085,7 +4147,8 @@ again: } ret = copy_items(trans, inode, dst_path, path, &last_extent, - ins_start_slot, ins_nr, inode_only); + ins_start_slot, ins_nr, inode_only, + logged_isize); if (ret < 0) { err = ret; goto out_unlock; @@ -4109,7 +4172,7 @@ next_slot: if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, &last_extent, ins_start_slot, - ins_nr, inode_only); + ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; @@ -4130,7 +4193,8 @@ next_slot: } if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, &last_extent, - ins_start_slot, ins_nr, inode_only); + ins_start_slot, ins_nr, inode_only, + logged_isize); if (ret < 0) { err = ret; goto out_unlock; -- cgit v1.2.3 From a742994aa2e271eb8cd8e043d276515ec858ed73 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 13 Feb 2015 16:56:14 +0000 Subject: Btrfs: don't remove extents and xattrs when logging new names If we are recording in the tree log that an inode has new names (new hard links were added), we would drop items, belonging to the inode, that we shouldn't: 1) When the flag BTRFS_INODE_COPY_EVERYTHING is set in the inode's runtime flags, we ended up dropping all the extent and xattr items that were previously logged. This was done only in memory, since logging a new name doesn't imply syncing the log; 2) When the flag BTRFS_INODE_COPY_EVERYTHING is set in the inode's runtime flags, we ended up dropping all the xattr items that were previously logged. Like the case before, this was done only in memory because logging a new name doesn't imply syncing the log. 
This led to some surprises in scenarios such as the following: 1) write some extents to an inode; 2) fsync the inode; 3) truncate the inode or delete/modify some of its xattrs; 4) add a new hard link for that inode; 5) fsync some other file, to force the log tree to be durably persisted; 6) power failure happens. The next time the fs is mounted, the fsync log replay code is executed, and the resulting file doesn't have the content it had when the last fsync against it was performed, instead it has content matching what it had when the last transaction commit happened. So change the behaviour such that when a new name is logged, only the inode item and reference items are processed. This is easy to reproduce with the test I just made for xfstests, whose main body is: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create our test file with some data. $XFS_IO_PROG -f -c "pwrite -S 0xaa -b 8K 0 8K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Make sure the file is durably persisted. sync # Append some data to our file, to increase its size. $XFS_IO_PROG -f -c "pwrite -S 0xcc -b 4K 8K 4K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Fsync the file, so from this point on if a crash/power failure happens, our # new data is guaranteed to be there next time the fs is mounted. $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Now shrink our file to 5000 bytes. $XFS_IO_PROG -c "truncate 5000" $SCRATCH_MNT/foo # Now do an expanding truncate to a size larger than what we had when we last # fsync'ed our file. This is just to verify that after power failure and # replaying the fsync log, our file matches what it was when we last fsync'ed # it - 12Kb size, first 8Kb of data had a value of 0xaa and the last 4Kb of # data had a value of 0xcc. $XFS_IO_PROG -c "truncate 32K" $SCRATCH_MNT/foo # Add one hard link to our file. This made btrfs drop all of our file's # metadata from the fsync log, including the metadata relative to the # extent we just wrote and fsync'ed. This change was made only to the fsync # log in memory, so adding the hard link alone doesn't change the persisted # fsync log. This happened because the previous truncates set the runtime # flag BTRFS_INODE_NEEDS_FULL_SYNC in the btrfs inode structure. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link # Now make sure the in memory fsync log is durably persisted. # Creating and fsync'ing another file will do it. # After this our persisted fsync log will no longer have metadata for our file # foo that points to the extent we wrote and fsync'ed before. touch $SCRATCH_MNT/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/bar # As expected, before the crash/power failure, we should be able to see a file # with a size of 32Kb, with its first 5000 bytes having the value 0xaa and all # the remaining bytes with value 0x00. echo "File content before:" od -t x1 $SCRATCH_MNT/foo # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # After mounting the fs again, the fsync log was replayed. # The expected result is to see a file with a size of 12Kb, with its first 8Kb # of data having the value 0xaa and its last 4Kb of data having a value of 0xcc. # The btrfs bug used to leave the file as it used to be as of the last # transaction commit - that is, with a size of 8Kb with all bytes having a # value of 0xaa. echo "File content after:" od -t x1 $SCRATCH_MNT/foo The test case for xfstests follows soon.
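The change below caps the key range dropped from the log at BTRFS_INODE_EXTREF_KEY instead of BTRFS_XATTR_ITEM_KEY when only a new name is being logged. As a reminder of why that preserves the previously logged xattr and extent items (type values as defined in ctree.h, quoted from memory and worth double-checking), item types sort numerically within an objectid, so dropping log items with a type up to BTRFS_INODE_EXTREF_KEY leaves the later types untouched:

BTRFS_INODE_ITEM_KEY     1     the inode item itself
BTRFS_INODE_REF_KEY      12    name back-references
BTRFS_INODE_EXTREF_KEY   13    extended name back-references
BTRFS_XATTR_ITEM_KEY     24    xattrs - now preserved
BTRFS_EXTENT_DATA_KEY    108   file extents - now preserved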
Signed-off-by: Filipe Manana Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 39 +++++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 5f649bb32bec..f96996a1b70c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4069,8 +4069,10 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (S_ISDIR(inode->i_mode)) { int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; - if (inode_only == LOG_INODE_EXISTS) - max_key_type = BTRFS_XATTR_ITEM_KEY; + if (inode_only == LOG_INODE_EXISTS) { + max_key_type = BTRFS_INODE_EXTREF_KEY; + max_key.type = max_key_type; + } ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { if (inode_only == LOG_INODE_EXISTS) { @@ -4092,18 +4094,31 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (err) goto out_unlock; } - if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags)) { - clear_bit(BTRFS_INODE_COPY_EVERYTHING, - &BTRFS_I(inode)->runtime_flags); - ret = btrfs_truncate_inode_items(trans, log, - inode, 0, 0); - } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, - &BTRFS_I(inode)->runtime_flags) || + if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &BTRFS_I(inode)->runtime_flags)) { + if (inode_only == LOG_INODE_EXISTS) { + max_key.type = BTRFS_INODE_EXTREF_KEY; + ret = drop_objectid_items(trans, log, path, ino, + max_key.type); + } else { + clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &BTRFS_I(inode)->runtime_flags); + clear_bit(BTRFS_INODE_COPY_EVERYTHING, + &BTRFS_I(inode)->runtime_flags); + ret = btrfs_truncate_inode_items(trans, log, + inode, 0, 0); + } + } else if (test_bit(BTRFS_INODE_COPY_EVERYTHING, + &BTRFS_I(inode)->runtime_flags) || inode_only == LOG_INODE_EXISTS) { - if (inode_only == LOG_INODE_ALL) + if (inode_only == LOG_INODE_ALL) { + clear_bit(BTRFS_INODE_COPY_EVERYTHING, + &BTRFS_I(inode)->runtime_flags); fast_search = true; - max_key.type = BTRFS_XATTR_ITEM_KEY; + max_key.type = BTRFS_XATTR_ITEM_KEY; + } else { + max_key.type = BTRFS_INODE_EXTREF_KEY; + } ret = drop_objectid_items(trans, log, path, ino, max_key.type); } else { -- cgit v1.2.3 From 6f30b7e37a8239f9d27db626a1d3427bc7951908 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 14 Feb 2015 20:08:51 -0500 Subject: ext4: fix indirect punch hole corruption Commit 4f579ae7de56 (ext4: fix punch hole on files with indirect mapping) rewrote FALLOC_FL_PUNCH_HOLE for ext4 files with indirect mapping. However, there are bugs in several corner cases. This fixes 5 distinct bugs: 1. When there is at least one entire level of indirection between the start and end of the punch range and the end of the punch range is the first block of its level, we can't return early; we have to free the intervening levels. 2. When the end is at a higher level of indirection than the start and ext4_find_shared returns a top branch for the end, we still need to free the rest of the shared branch it returns; we can't decrement partial2. 3. When a punch happens within one level of indirection, we need to converge on an indirect block that contains the start and end. However, because the branches returned from ext4_find_shared do not necessarily start at the same level (e.g., the partial2 chain will be shallower if the last block occurs at the beginning of an indirect group), the walk of the two chains can end up "missing" each other and freeing a bunch of extra blocks in the process. 
This mismatch can be handled by first making sure that the chains are at the same level, then walking them together until they converge. 4. When the punch happens within one level of indirection and ext4_find_shared returns a top branch for the start, we must free it, but only if the end does not occur within that branch. 5. When the punch happens within one level of indirection and ext4_find_shared returns a top branch for the end, then we shouldn't free the block referenced by the end of the returned chain (this mirrors the different levels case). Signed-off-by: Omar Sandoval --- fs/ext4/indirect.c | 105 ++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 71 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 36b369697a13..5e7af1c69577 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -1393,10 +1393,7 @@ end_range: * to free. Everything was covered by the start * of the range. */ - return 0; - } else { - /* Shared branch grows from an indirect block */ - partial2--; + goto do_indirects; } } else { /* @@ -1427,56 +1424,96 @@ end_range: /* Punch happened within the same level (n == n2) */ partial = ext4_find_shared(inode, n, offsets, chain, &nr); partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); - /* - * ext4_find_shared returns Indirect structure which - * points to the last element which should not be - * removed by truncate. But this is end of the range - * in punch_hole so we need to point to the next element - */ - partial2->p++; - while ((partial > chain) || (partial2 > chain2)) { - /* We're at the same block, so we're almost finished */ - if ((partial->bh && partial2->bh) && - (partial->bh->b_blocknr == partial2->bh->b_blocknr)) { - if ((partial > chain) && (partial2 > chain2)) { + + /* Free top, but only if partial2 isn't its subtree. */ + if (nr) { + int level = min(partial - chain, partial2 - chain2); + int i; + int subtree = 1; + + for (i = 0; i <= level; i++) { + if (offsets[i] != offsets2[i]) { + subtree = 0; + break; + } + } + + if (!subtree) { + if (partial == chain) { + /* Shared branch grows from the inode */ + ext4_free_branches(handle, inode, NULL, + &nr, &nr+1, + (chain+n-1) - partial); + *partial->p = 0; + } else { + /* Shared branch grows from an indirect block */ + BUFFER_TRACE(partial->bh, "get_write_access"); ext4_free_branches(handle, inode, partial->bh, - partial->p + 1, - partial2->p, + partial->p, + partial->p+1, (chain+n-1) - partial); - BUFFER_TRACE(partial->bh, "call brelse"); - brelse(partial->bh); - BUFFER_TRACE(partial2->bh, "call brelse"); - brelse(partial2->bh); } - return 0; } + } + + if (!nr2) { /* - * Clear the ends of indirect blocks on the shared branch - * at the start of the range + * ext4_find_shared returns Indirect structure which + * points to the last element which should not be + * removed by truncate. But this is end of the range + * in punch_hole so we need to point to the next element */ - if (partial > chain) { + partial2->p++; + } + + while (partial > chain || partial2 > chain2) { + int depth = (chain+n-1) - partial; + int depth2 = (chain2+n2-1) - partial2; + + if (partial > chain && partial2 > chain2 && + partial->bh->b_blocknr == partial2->bh->b_blocknr) { + /* + * We've converged on the same block. Clear the range, + * then we're done. 
+ */ ext4_free_branches(handle, inode, partial->bh, - partial->p + 1, - (__le32 *)partial->bh->b_data+addr_per_block, - (chain+n-1) - partial); + partial->p + 1, + partial2->p, + (chain+n-1) - partial); BUFFER_TRACE(partial->bh, "call brelse"); brelse(partial->bh); - partial--; + BUFFER_TRACE(partial2->bh, "call brelse"); + brelse(partial2->bh); + return 0; } + /* - * Clear the ends of indirect blocks on the shared branch - * at the end of the range + * The start and end partial branches may not be at the same + * level even though the punch happened within one level. So, we + * give them a chance to arrive at the same level, then walk + * them in step with each other until we converge on the same + * block. */ - if (partial2 > chain2) { + if (partial > chain && depth <= depth2) { + ext4_free_branches(handle, inode, partial->bh, + partial->p + 1, + (__le32 *)partial->bh->b_data+addr_per_block, + (chain+n-1) - partial); + BUFFER_TRACE(partial->bh, "call brelse"); + brelse(partial->bh); + partial--; + } + if (partial2 > chain2 && depth2 <= depth) { ext4_free_branches(handle, inode, partial2->bh, (__le32 *)partial2->bh->b_data, partial2->p, - (chain2+n-1) - partial2); + (chain2+n2-1) - partial2); BUFFER_TRACE(partial2->bh, "call brelse"); brelse(partial2->bh); partial2--; } } + return 0; do_indirects: /* Kill the remaining (whole) subtrees */ -- cgit v1.2.3 From 527851124d10f9c50b1c578e0a56fcd49922422d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Feb 2015 11:49:23 +1100 Subject: xfs: implement pNFS export operations Add operations to export pNFS block layouts from an XFS filesystem. See the previous commit adding the operations for an explanation of them. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner --- fs/xfs/Makefile | 1 + fs/xfs/xfs_export.c | 6 ++ fs/xfs/xfs_fsops.c | 6 ++ fs/xfs/xfs_iops.c | 2 +- fs/xfs/xfs_iops.h | 1 + fs/xfs/xfs_mount.h | 11 ++ fs/xfs/xfs_pnfs.c | 292 ++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_pnfs.h | 11 ++ 8 files changed, 329 insertions(+), 1 deletion(-) create mode 100644 fs/xfs/xfs_pnfs.c create mode 100644 fs/xfs/xfs_pnfs.h (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index d61799949580..df6828570e87 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -121,3 +121,4 @@ xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o xfs-$(CONFIG_PROC_FS) += xfs_stats.o xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o +xfs-$(CONFIG_NFSD_PNFS) += xfs_pnfs.o diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 5eb4a14e0a0f..b97359ba2648 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -30,6 +30,7 @@ #include "xfs_trace.h" #include "xfs_icache.h" #include "xfs_log.h" +#include "xfs_pnfs.h" /* * Note that we only accept fileids which are long enough rather than allow @@ -245,4 +246,9 @@ const struct export_operations xfs_export_operations = { .fh_to_parent = xfs_fs_fh_to_parent, .get_parent = xfs_fs_get_parent, .commit_metadata = xfs_fs_nfs_commit_metadata, +#ifdef CONFIG_NFSD_PNFS + .get_uuid = xfs_fs_get_uuid, + .map_blocks = xfs_fs_map_blocks, + .commit_blocks = xfs_fs_commit_blocks, +#endif }; diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index fba6532efba4..74efe5b760dc 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -602,6 +602,12 @@ xfs_growfs_data( if (!mutex_trylock(&mp->m_growlock)) return -EWOULDBLOCK; error = xfs_growfs_data_private(mp, in); + /* + * Increment the generation unconditionally, the error could be from 
+ * updating the secondary superblocks, in which case the new size + * is live already. + */ + mp->m_generation++; mutex_unlock(&mp->m_growlock); return error; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ce80eeb8faa4..e5e2ea0d0b25 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -505,7 +505,7 @@ xfs_setattr_mode( inode->i_mode |= mode & ~S_IFMT; } -static void +void xfs_setattr_time( struct xfs_inode *ip, struct iattr *iattr) diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h index 1c34e4335920..ea7a98e9cb70 100644 --- a/fs/xfs/xfs_iops.h +++ b/fs/xfs/xfs_iops.h @@ -32,6 +32,7 @@ extern void xfs_setup_inode(struct xfs_inode *); */ #define XFS_ATTR_NOACL 0x01 /* Don't call posix_acl_chmod */ +extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr); extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, int flags); extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index a5b2ff822653..0d8abd6364d9 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -174,6 +174,17 @@ typedef struct xfs_mount { struct workqueue_struct *m_reclaim_workqueue; struct workqueue_struct *m_log_workqueue; struct workqueue_struct *m_eofblocks_workqueue; + + /* + * Generation of the filesysyem layout. This is incremented by each + * growfs, and used by the pNFS server to ensure the client updates + * its view of the block device once it gets a layout that might + * reference the newly added blocks. Does not need to be persistent + * as long as we only allow file system size increments, but if we + * ever support shrinks it would have to be persisted in addition + * to various other kinds of pain inflicted on the pNFS server. + */ + __uint32_t m_generation; } xfs_mount_t; /* diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c new file mode 100644 index 000000000000..89912b34f184 --- /dev/null +++ b/fs/xfs/xfs_pnfs.c @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2014 Christoph Hellwig. + */ +#include "xfs.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_sb.h" +#include "xfs_mount.h" +#include "xfs_inode.h" +#include "xfs_trans.h" +#include "xfs_log.h" +#include "xfs_bmap.h" +#include "xfs_bmap_util.h" +#include "xfs_error.h" +#include "xfs_iomap.h" +#include "xfs_shared.h" +#include "xfs_bit.h" +#include "xfs_pnfs.h" + +/* + * Get a unique ID including its location so that the client can identify + * the exported device. 
+ */ +int +xfs_fs_get_uuid( + struct super_block *sb, + u8 *buf, + u32 *len, + u64 *offset) +{ + struct xfs_mount *mp = XFS_M(sb); + + printk_once(KERN_NOTICE +"XFS (%s): using experimental pNFS feature, use at your own risk!\n", + mp->m_fsname); + + if (*len < sizeof(uuid_t)) + return -EINVAL; + + memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t)); + *len = sizeof(uuid_t); + *offset = offsetof(struct xfs_dsb, sb_uuid); + return 0; +} + +static void +xfs_bmbt_to_iomap( + struct xfs_inode *ip, + struct iomap *iomap, + struct xfs_bmbt_irec *imap) +{ + struct xfs_mount *mp = ip->i_mount; + + if (imap->br_startblock == HOLESTARTBLOCK) { + iomap->blkno = IOMAP_NULL_BLOCK; + iomap->type = IOMAP_HOLE; + } else if (imap->br_startblock == DELAYSTARTBLOCK) { + iomap->blkno = IOMAP_NULL_BLOCK; + iomap->type = IOMAP_DELALLOC; + } else { + iomap->blkno = + XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock); + if (imap->br_state == XFS_EXT_UNWRITTEN) + iomap->type = IOMAP_UNWRITTEN; + else + iomap->type = IOMAP_MAPPED; + } + iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff); + iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); +} + +/* + * Get a layout for the pNFS client. + */ +int +xfs_fs_map_blocks( + struct inode *inode, + loff_t offset, + u64 length, + struct iomap *iomap, + bool write, + u32 *device_generation) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + struct xfs_bmbt_irec imap; + xfs_fileoff_t offset_fsb, end_fsb; + loff_t limit; + int bmapi_flags = XFS_BMAPI_ENTIRE; + int nimaps = 1; + uint lock_flags; + int error = 0; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -EIO; + + /* + * We can't export inodes residing on the realtime device. The realtime + * device doesn't have a UUID to identify it, so the client has no way + * to find it. + */ + if (XFS_IS_REALTIME_INODE(ip)) + return -ENXIO; + + /* + * Lock out any other I/O before we flush and invalidate the pagecache, + * and then hand out a layout to the remote system. This is very + * similar to direct I/O, except that the synchronization is much more + * complicated. See the comment near xfs_break_layouts for a detailed + * explanation. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + error = -EINVAL; + limit = mp->m_super->s_maxbytes; + if (!write) + limit = max(limit, round_up(i_size_read(inode), + inode->i_sb->s_blocksize)); + if (offset > limit) + goto out_unlock; + if (offset > limit - length) + length = limit - offset; + + error = filemap_write_and_wait(inode->i_mapping); + if (error) + goto out_unlock; + error = invalidate_inode_pages2(inode->i_mapping); + if (WARN_ON_ONCE(error)) + return error; + + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length); + offset_fsb = XFS_B_TO_FSBT(mp, offset); + + lock_flags = xfs_ilock_data_map_shared(ip); + error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, + &imap, &nimaps, bmapi_flags); + xfs_iunlock(ip, lock_flags); + + if (error) + goto out_unlock; + + if (write) { + enum xfs_prealloc_flags flags = 0; + + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + + if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) { + error = xfs_iomap_write_direct(ip, offset, length, + &imap, nimaps); + if (error) + goto out_unlock; + + /* + * Ensure the next transaction is committed + * synchronously so that the blocks allocated and + * handed out to the client are guaranteed to be + * present even after a server crash. 
+ */ + flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC; + } + + error = xfs_update_prealloc_flags(ip, flags); + if (error) + goto out_unlock; + } + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + + xfs_bmbt_to_iomap(ip, iomap, &imap); + *device_generation = mp->m_generation; + return error; +out_unlock: + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; +} + +/* + * Ensure the size update falls into a valid allocated block. + */ +static int +xfs_pnfs_validate_isize( + struct xfs_inode *ip, + xfs_off_t isize) +{ + struct xfs_bmbt_irec imap; + int nimaps = 1; + int error = 0; + + xfs_ilock(ip, XFS_ILOCK_SHARED); + error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1, + &imap, &nimaps, 0); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + if (error) + return error; + + if (imap.br_startblock == HOLESTARTBLOCK || + imap.br_startblock == DELAYSTARTBLOCK || + imap.br_state == XFS_EXT_UNWRITTEN) + return -EIO; + return 0; +} + +/* + * Make sure the blocks described by maps are stable on disk. This includes + * converting any unwritten extents, flushing the disk cache and updating the + * time stamps. + * + * Note that we rely on the caller to always send us a timestamp update so that + * we always commit a transaction here. If that stops being true we will have + * to manually flush the cache here similar to what the fsync code path does + * for datasyncs on files that have no dirty metadata. + */ +int +xfs_fs_commit_blocks( + struct inode *inode, + struct iomap *maps, + int nr_maps, + struct iattr *iattr) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + bool update_isize = false; + int error, i; + loff_t size; + + ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)); + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + size = i_size_read(inode); + if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) { + update_isize = true; + size = iattr->ia_size; + } + + for (i = 0; i < nr_maps; i++) { + u64 start, length, end; + + start = maps[i].offset; + if (start > size) + continue; + + end = start + maps[i].length; + if (end > size) + end = size; + + length = end - start; + if (!length) + continue; + + /* + * Make sure reads through the pagecache see the new data. 
+ */ + error = invalidate_inode_pages2_range(inode->i_mapping, + start >> PAGE_CACHE_SHIFT, + (end - 1) >> PAGE_CACHE_SHIFT); + WARN_ON_ONCE(error); + + error = xfs_iomap_write_unwritten(ip, start, length); + if (error) + goto out_drop_iolock; + } + + if (update_isize) { + error = xfs_pnfs_validate_isize(ip, size); + if (error) + goto out_drop_iolock; + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); + if (error) + goto out_drop_iolock; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + xfs_setattr_time(ip, iattr); + if (update_isize) { + i_size_write(inode, iattr->ia_size); + ip->i_d.di_size = iattr->ia_size; + } + + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0); + +out_drop_iolock: + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; +} diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h new file mode 100644 index 000000000000..0d91255a89ae --- /dev/null +++ b/fs/xfs/xfs_pnfs.h @@ -0,0 +1,11 @@ +#ifndef _XFS_PNFS_H +#define _XFS_PNFS_H 1 + +#ifdef CONFIG_NFSD_PNFS +int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset); +int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length, + struct iomap *iomap, bool write, u32 *device_generation); +int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps, + struct iattr *iattr); +#endif /* CONFIG_NFSD_PNFS */ +#endif /* _XFS_PNFS_H */ -- cgit v1.2.3 From 781355c6e5ae87908de27dec3380a34918c33eee Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Feb 2015 11:59:50 +1100 Subject: xfs: recall pNFS layouts on conflicting access Recall all outstanding pNFS layouts and truncates, writes and similar extent list modifying operations. 
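All of the call sites touched below converge on the same pattern; distilled into a minimal sketch (error handling trimmed), it is:

uint iolock = XFS_IOLOCK_EXCL;

xfs_ilock(ip, iolock);
error = xfs_break_layouts(inode, &iolock);  /* may cycle the iolock */
if (error)
        goto out_unlock;

/* ... truncate, hole punch or other extent-removing work ... */

out_unlock:
xfs_iunlock(ip, iolock);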
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner --- fs/xfs/xfs_file.c | 14 ++++++++++++-- fs/xfs/xfs_ioctl.c | 9 +++++++-- fs/xfs/xfs_iops.c | 11 ++++++++--- fs/xfs/xfs_pnfs.c | 30 ++++++++++++++++++++++++++++++ fs/xfs/xfs_pnfs.h | 7 +++++++ 5 files changed, 64 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 712d312d8e3e..56dcfce8d7d6 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -36,6 +36,7 @@ #include "xfs_trace.h" #include "xfs_log.h" #include "xfs_icache.h" +#include "xfs_pnfs.h" #include #include @@ -554,6 +555,10 @@ restart: if (error) return error; + error = xfs_break_layouts(inode, iolock); + if (error) + return error; + /* * If the offset is beyond the size of the file, we need to zero any * blocks that fall between the existing EOF and the start of this @@ -822,6 +827,7 @@ xfs_file_fallocate( struct xfs_inode *ip = XFS_I(inode); long error; enum xfs_prealloc_flags flags = 0; + uint iolock = XFS_IOLOCK_EXCL; loff_t new_size = 0; if (!S_ISREG(inode->i_mode)) @@ -830,7 +836,11 @@ xfs_file_fallocate( FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) return -EOPNOTSUPP; - xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_ilock(ip, iolock); + error = xfs_break_layouts(inode, &iolock); + if (error) + goto out_unlock; + if (mode & FALLOC_FL_PUNCH_HOLE) { error = xfs_free_file_space(ip, offset, len); if (error) @@ -894,7 +904,7 @@ xfs_file_fallocate( } out_unlock: - xfs_iunlock(ip, XFS_IOLOCK_EXCL); + xfs_iunlock(ip, iolock); return error; } diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index f7afb86c9148..bf70a2affb05 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -39,6 +39,7 @@ #include "xfs_icache.h" #include "xfs_symlink.h" #include "xfs_trans.h" +#include "xfs_pnfs.h" #include #include @@ -608,6 +609,7 @@ xfs_ioc_space( { struct iattr iattr; enum xfs_prealloc_flags flags = 0; + uint iolock = XFS_IOLOCK_EXCL; int error; /* @@ -636,7 +638,10 @@ xfs_ioc_space( if (error) return error; - xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_ilock(ip, iolock); + error = xfs_break_layouts(inode, &iolock); + if (error) + goto out_unlock; switch (bf->l_whence) { case 0: /*SEEK_SET*/ @@ -725,7 +730,7 @@ xfs_ioc_space( error = xfs_update_prealloc_flags(ip, flags); out_unlock: - xfs_iunlock(ip, XFS_IOLOCK_EXCL); + xfs_iunlock(ip, iolock); mnt_drop_write_file(filp); return error; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index e5e2ea0d0b25..d919ad7b16bf 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -37,6 +37,7 @@ #include "xfs_da_btree.h" #include "xfs_dir2.h" #include "xfs_trans_space.h" +#include "xfs_pnfs.h" #include #include @@ -979,9 +980,13 @@ xfs_vn_setattr( int error; if (iattr->ia_valid & ATTR_SIZE) { - xfs_ilock(ip, XFS_IOLOCK_EXCL); - error = xfs_setattr_size(ip, iattr); - xfs_iunlock(ip, XFS_IOLOCK_EXCL); + uint iolock = XFS_IOLOCK_EXCL; + + xfs_ilock(ip, iolock); + error = xfs_break_layouts(dentry->d_inode, &iolock); + if (!error) + error = xfs_setattr_size(ip, iattr); + xfs_iunlock(ip, iolock); } else { error = xfs_setattr_nonsize(ip, iattr, 0); } diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 89912b34f184..4b33ef112400 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -18,6 +18,36 @@ #include "xfs_bit.h" #include "xfs_pnfs.h" +/* + * Ensure that we do not have any outstanding pNFS layouts that can be used by + * clients to directly read from or write to this inode. 
This must be called + * before every operation that can remove blocks from the extent map. + * Additionally we call it during the write operation, where aren't concerned + * about exposing unallocated blocks but just want to provide basic + * synchronization between a local writer and pNFS clients. mmap writes would + * also benefit from this sort of synchronization, but due to the tricky locking + * rules in the page fault path we don't bother. + */ +int +xfs_break_layouts( + struct inode *inode, + uint *iolock) +{ + struct xfs_inode *ip = XFS_I(inode); + int error; + + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)); + + while ((error = break_layout(inode, false) == -EWOULDBLOCK)) { + xfs_iunlock(ip, *iolock); + error = break_layout(inode, true); + *iolock = XFS_IOLOCK_EXCL; + xfs_ilock(ip, *iolock); + } + + return error; +} + /* * Get a unique ID including its location so that the client can identify * the exported device. diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h index 0d91255a89ae..b7fbfce660f6 100644 --- a/fs/xfs/xfs_pnfs.h +++ b/fs/xfs/xfs_pnfs.h @@ -7,5 +7,12 @@ int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length, struct iomap *iomap, bool write, u32 *device_generation); int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps, struct iattr *iattr); + +int xfs_break_layouts(struct inode *inode, uint *iolock); +#else +static inline int xfs_break_layouts(struct inode *inode, uint *iolock) +{ + return 0; +} #endif /* CONFIG_NFSD_PNFS */ #endif /* _XFS_PNFS_H */ -- cgit v1.2.3 From a51f25a587e16bf89dce3f821de99b7717782859 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 13 Feb 2015 12:34:25 -0500 Subject: nfsd4: fix v3-less build Includes of pnfs.h in export.c and fcntl.c also bring in xdr4.h, which won't build without CONFIG_NFSD_V3, breaking non-V3 builds. Ifdef-out most of pnfs.h in that case. Reported-by: Bas Peters Reported-by: Jim Davis Tested-by: Guenter Roeck Fixes: 9cf514ccfac "nfsd: implement pNFS operations" Signed-off-by: J. Bruce Fields --- fs/nfsd/pnfs.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h index fedb4d620a81..d4c4453674c6 100644 --- a/fs/nfsd/pnfs.h +++ b/fs/nfsd/pnfs.h @@ -1,6 +1,7 @@ #ifndef _FS_NFSD_PNFS_H #define _FS_NFSD_PNFS_H 1 +#ifdef CONFIG_NFSD_V4 #include #include @@ -50,6 +51,7 @@ __be32 nfsd4_return_client_layouts(struct svc_rqst *rqstp, int nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp, u32 device_generation); struct nfsd4_deviceid_map *nfsd4_find_devid_map(int idx); +#endif /* CONFIG_NFSD_V4 */ #ifdef CONFIG_NFSD_PNFS void nfsd4_setup_layout_type(struct svc_export *exp); @@ -59,6 +61,9 @@ void nfsd4_return_all_file_layouts(struct nfs4_client *clp, int nfsd4_init_pnfs(void); void nfsd4_exit_pnfs(void); #else +struct nfs4_client; +struct nfs4_file; + static inline void nfsd4_setup_layout_type(struct svc_export *exp) { } -- cgit v1.2.3 From e084c1bd40926938ff8d26af3bde34396dd4d06d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 16 Feb 2015 14:32:03 -0500 Subject: Revert "locks: keep a count of locks on the flctx lists" This reverts commit 9bd0f45b7037fcfa8b575c7e27d0431d6e6dc3bb. Linus rightly pointed out that I failed to initialize the counters when adding them, so they don't work as expected. Just revert this patch for now. 
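With the counters reverted, callers that need the numbers go back to counting by walking the per-type lists under flc_lock, as the ceph and cifs hunks below restore; the idiom is simply:

spin_lock(&ctx->flc_lock);
list_for_each_entry(lock, &ctx->flc_posix, fl_list)
        ++(*fcntl_count);
list_for_each_entry(lock, &ctx->flc_flock, fl_list)
        ++(*flock_count);
spin_unlock(&ctx->flc_lock);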
Reported-by: Linus Torvalds Signed-off-by: Jeff Layton --- fs/ceph/locks.c | 9 +++++++-- fs/cifs/file.c | 14 ++++++++++---- fs/locks.c | 45 ++++++++++++++++----------------------------- include/linux/fs.h | 3 --- 4 files changed, 33 insertions(+), 38 deletions(-) (limited to 'fs') diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 06ea5cd05cd9..4347039ecc18 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -245,6 +245,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) */ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) { + struct file_lock *lock; struct file_lock_context *ctx; *fcntl_count = 0; @@ -252,8 +253,12 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) ctx = inode->i_flctx; if (ctx) { - *fcntl_count = ctx->flc_posix_cnt; - *flock_count = ctx->flc_flock_cnt; + spin_lock(&ctx->flc_lock); + list_for_each_entry(lock, &ctx->flc_posix, fl_list) + ++(*fcntl_count); + list_for_each_entry(lock, &ctx->flc_flock, fl_list) + ++(*flock_count); + spin_unlock(&ctx->flc_lock); } dout("counted %d flock locks and %d fcntl locks", *flock_count, *fcntl_count); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8fe1f7a21b3e..a94b3e673182 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1129,7 +1129,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct file_lock *flock; struct file_lock_context *flctx = inode->i_flctx; - unsigned int i; + unsigned int count = 0, i; int rc = 0, xid, type; struct list_head locks_to_send, *el; struct lock_to_push *lck, *tmp; @@ -1140,14 +1140,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) if (!flctx) goto out; + spin_lock(&flctx->flc_lock); + list_for_each(el, &flctx->flc_posix) { + count++; + } + spin_unlock(&flctx->flc_lock); + INIT_LIST_HEAD(&locks_to_send); /* - * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks - * can be added to the list while we are holding cinode->lock_sem that + * Allocating count locks is enough because no FL_POSIX locks can be + * added to the list while we are holding cinode->lock_sem that * protects locking operations of this inode. 
*/ - for (i = 0; i < flctx->flc_posix_cnt; i++) { + for (i = 0; i < count; i++) { lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); if (!lck) { rc = -ENOMEM; diff --git a/fs/locks.c b/fs/locks.c index 4753218f308e..7998f670812c 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -681,21 +681,18 @@ static void locks_wake_up_blocks(struct file_lock *blocker) } static void -locks_insert_lock_ctx(struct file_lock *fl, int *counter, - struct list_head *before) +locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before) { fl->fl_nspid = get_pid(task_tgid(current)); list_add_tail(&fl->fl_list, before); - ++*counter; locks_insert_global_locks(fl); } static void -locks_unlink_lock_ctx(struct file_lock *fl, int *counter) +locks_unlink_lock_ctx(struct file_lock *fl) { locks_delete_global_locks(fl); list_del_init(&fl->fl_list); - --*counter; if (fl->fl_nspid) { put_pid(fl->fl_nspid); fl->fl_nspid = NULL; @@ -704,10 +701,9 @@ locks_unlink_lock_ctx(struct file_lock *fl, int *counter) } static void -locks_delete_lock_ctx(struct file_lock *fl, int *counter, - struct list_head *dispose) +locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) { - locks_unlink_lock_ctx(fl, counter); + locks_unlink_lock_ctx(fl); if (dispose) list_add(&fl->fl_list, dispose); else @@ -895,7 +891,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) if (request->fl_type == fl->fl_type) goto out; found = true; - locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose); + locks_delete_lock_ctx(fl, &dispose); break; } @@ -929,7 +925,7 @@ find_conflict: if (request->fl_flags & FL_ACCESS) goto out; locks_copy_lock(new_fl, request); - locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock); + locks_insert_lock_ctx(new_fl, &ctx->flc_flock); new_fl = NULL; error = 0; @@ -1046,8 +1042,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str else request->fl_end = fl->fl_end; if (added) { - locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt, - &dispose); + locks_delete_lock_ctx(fl, &dispose); continue; } request = fl; @@ -1076,8 +1071,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str * one (This may happen several times). 
*/ if (added) { - locks_delete_lock_ctx(fl, - &ctx->flc_posix_cnt, &dispose); + locks_delete_lock_ctx(fl, &dispose); continue; } /* @@ -1093,10 +1087,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str locks_copy_lock(new_fl, request); request = new_fl; new_fl = NULL; - locks_insert_lock_ctx(request, - &ctx->flc_posix_cnt, &fl->fl_list); - locks_delete_lock_ctx(fl, - &ctx->flc_posix_cnt, &dispose); + locks_insert_lock_ctx(request, &fl->fl_list); + locks_delete_lock_ctx(fl, &dispose); added = true; } } @@ -1124,8 +1116,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str goto out; } locks_copy_lock(new_fl, request); - locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt, - &fl->fl_list); + locks_insert_lock_ctx(new_fl, &fl->fl_list); new_fl = NULL; } if (right) { @@ -1136,8 +1127,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str left = new_fl2; new_fl2 = NULL; locks_copy_lock(left, right); - locks_insert_lock_ctx(left, &ctx->flc_posix_cnt, - &fl->fl_list); + locks_insert_lock_ctx(left, &fl->fl_list); } right->fl_start = request->fl_end + 1; locks_wake_up_blocks(right); @@ -1321,7 +1311,6 @@ static void lease_clear_pending(struct file_lock *fl, int arg) /* We already had a lease on this file; just change its type */ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) { - struct file_lock_context *flctx; int error = assign_type(fl, arg); if (error) @@ -1331,7 +1320,6 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) if (arg == F_UNLCK) { struct file *filp = fl->fl_file; - flctx = file_inode(filp)->i_flctx; f_delown(filp); filp->f_owner.signum = 0; fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); @@ -1339,7 +1327,7 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); fl->fl_fasync = NULL; } - locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose); + locks_delete_lock_ctx(fl, dispose); } return 0; } @@ -1456,8 +1444,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) fl->fl_downgrade_time = break_time; } if (fl->fl_lmops->lm_break(fl)) - locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt, - &dispose); + locks_delete_lock_ctx(fl, &dispose); } if (list_empty(&ctx->flc_lease)) @@ -1697,7 +1684,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr if (!leases_enable) goto out; - locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease); + locks_insert_lock_ctx(lease, &ctx->flc_lease); /* * The check in break_lease() is lockless. 
It's possible for another * open to race in after we did the earlier check for a conflicting @@ -1710,7 +1697,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr smp_mb(); error = check_conflicting_open(dentry, arg, lease->fl_flags); if (error) { - locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt); + locks_unlink_lock_ctx(lease); goto out; } diff --git a/include/linux/fs.h b/include/linux/fs.h index e49f10cc8a73..a5a303e8a33c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -969,9 +969,6 @@ struct file_lock_context { struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; - int flc_flock_cnt; - int flc_posix_cnt; - int flc_lease_cnt; }; /* The following constant reflects the upper bound of the file/locking space */ -- cgit v1.2.3 From fbbbad4bc2101e452b24e6e65d3d5e11314a0b5f Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:58:53 -0800 Subject: vfs,ext2: introduce IS_DAX(inode) Use an inode flag to tag inodes which should avoid using the page cache. Convert ext2 to use it instead of mapping_is_xip(). Prevent I/Os to files tagged with the DAX flag from falling back to buffered I/O. Signed-off-by: Matthew Wilcox Reviewed-by: Jan Kara Reviewed-by: Mathieu Desnoyers Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jens Axboe Cc: Kirill A. Shutemov Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext2/inode.c | 9 ++++++--- fs/ext2/xip.h | 2 -- include/linux/fs.h | 6 ++++++ mm/filemap.c | 19 ++++++++++++------- 4 files changed, 24 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 36d35c36311d..0cb04486577d 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -731,7 +731,7 @@ static int ext2_get_blocks(struct inode *inode, goto cleanup; } - if (ext2_use_xip(inode->i_sb)) { + if (IS_DAX(inode)) { /* * we need to clear the block */ @@ -1201,7 +1201,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize) inode_dio_wait(inode); - if (mapping_is_xip(inode->i_mapping)) + if (IS_DAX(inode)) error = xip_truncate_page(inode->i_mapping, newsize); else if (test_opt(inode->i_sb, NOBH)) error = nobh_truncate_page(inode->i_mapping, @@ -1273,7 +1273,8 @@ void ext2_set_inode_flags(struct inode *inode) { unsigned int flags = EXT2_I(inode)->i_flags; - inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | + S_DIRSYNC | S_DAX); if (flags & EXT2_SYNC_FL) inode->i_flags |= S_SYNC; if (flags & EXT2_APPEND_FL) @@ -1284,6 +1285,8 @@ void ext2_set_inode_flags(struct inode *inode) inode->i_flags |= S_NOATIME; if (flags & EXT2_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; + if (test_opt(inode->i_sb, XIP)) + inode->i_flags |= S_DAX; } /* Propagate flags from i_flags to EXT2_I(inode)->i_flags */ diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h index 18b34d2f31b3..29be73781419 100644 --- a/fs/ext2/xip.h +++ b/fs/ext2/xip.h @@ -16,9 +16,7 @@ static inline int ext2_use_xip (struct super_block *sb) } int ext2_get_xip_mem(struct address_space *, pgoff_t, int, void **, unsigned long *); -#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_mem) #else -#define mapping_is_xip(map) 0 #define ext2_xip_verify_sb(sb) do { } while (0) #define ext2_use_xip(sb) 0 #define ext2_clear_xip_target(inode, chain) 0 diff --git a/include/linux/fs.h b/include/linux/fs.h index e49f10cc8a73..fb373bb5cf03 100644 --- 
a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1677,6 +1677,11 @@ struct super_operations { #define S_IMA 1024 /* Inode has an associated IMA struct */ #define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ #define S_NOSEC 4096 /* no suid or xattr security attributes */ +#ifdef CONFIG_FS_XIP +#define S_DAX 8192 /* Direct Access, avoiding the page cache */ +#else +#define S_DAX 0 /* Make all the DAX code disappear */ +#endif /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -1714,6 +1719,7 @@ struct super_operations { #define IS_IMA(inode) ((inode)->i_flags & S_IMA) #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) #define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) +#define IS_DAX(inode) ((inode)->i_flags & S_DAX) #define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \ (inode)->i_rdev == WHITEOUT_DEV) diff --git a/mm/filemap.c b/mm/filemap.c index d9f5336552d7..1578c224285e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1723,9 +1723,11 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) * we've already read everything we wanted to, or if * there was a short read because we hit EOF, go ahead * and return. Otherwise fallthrough to buffered io for - * the rest of the read. + * the rest of the read. Buffered reads will not work for + * DAX files, so don't bother trying. */ - if (retval < 0 || !iov_iter_count(iter) || *ppos >= size) { + if (retval < 0 || !iov_iter_count(iter) || *ppos >= size || + IS_DAX(inode)) { file_accessed(file); goto out; } @@ -2587,13 +2589,16 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) loff_t endbyte; written = generic_file_direct_write(iocb, from, pos); - if (written < 0 || written == count) - goto out; - /* - * direct-io write to a hole: fall through to buffered I/O - * for completing the rest of the request. + * If the write stopped short of completing, fall back to + * buffered writes. Some filesystems do this for writes to + * holes, for example. For DAX files, a buffered write will + * not succeed (even if it did, DAX does not handle dirty + * page-cache pages correctly). */ + if (written < 0 || written == count || IS_DAX(inode)) + goto out; + pos += written; count -= written; -- cgit v1.2.3 From d475c6346a38aef3058eba96867bfa726a3cc940 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:58:56 -0800 Subject: dax,ext2: replace XIP read and write with DAX I/O Use the generic AIO infrastructure instead of custom read and write methods. In addition to giving us support for AIO, this adds the missing locking between read() and truncate(). Signed-off-by: Matthew Wilcox Reviewed-by: Ross Zwisler Reviewed-by: Jan Kara Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 6 ++ fs/Makefile | 1 + fs/dax.c | 186 ++++++++++++++++++++++++++++++++++++++++++ fs/ext2/file.c | 6 +- fs/ext2/inode.c | 8 +- include/linux/fs.h | 12 ++- mm/filemap.c | 6 +- mm/filemap_xip.c | 234 ----------------------------------------------------- 8 files changed, 214 insertions(+), 245 deletions(-) create mode 100644 fs/dax.c (limited to 'fs') diff --git a/MAINTAINERS b/MAINTAINERS index 348f5c16ef50..8670c224c833 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3151,6 +3151,12 @@ L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-diolan-u2c.c +DIRECT ACCESS (DAX) +M: Matthew Wilcox +L: linux-fsdevel@vger.kernel.org +S: Supported +F: fs/dax.c + DIRECTORY NOTIFICATION (DNOTIFY) M: Eric Paris S: Maintained diff --git a/fs/Makefile b/fs/Makefile index bedff48e8fdc..0534444e257c 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_SIGNALFD) += signalfd.o obj-$(CONFIG_TIMERFD) += timerfd.o obj-$(CONFIG_EVENTFD) += eventfd.o obj-$(CONFIG_AIO) += aio.o +obj-$(CONFIG_FS_XIP) += dax.o obj-$(CONFIG_FILE_LOCKING) += locks.o obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o diff --git a/fs/dax.c b/fs/dax.c new file mode 100644 index 000000000000..1a2bdbfa3ea9 --- /dev/null +++ b/fs/dax.c @@ -0,0 +1,186 @@ +/* + * fs/dax.c - Direct Access filesystem code + * Copyright (c) 2013-2014 Intel Corporation + * Author: Matthew Wilcox + * Author: Ross Zwisler + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits) +{ + unsigned long pfn; + sector_t sector = bh->b_blocknr << (blkbits - 9); + return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size); +} + +static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos, + loff_t end) +{ + loff_t final = end - pos + first; /* The final byte of the buffer */ + + if (first > 0) + memset(addr, 0, first); + if (final < size) + memset(addr + final, 0, size - final); +} + +static bool buffer_written(struct buffer_head *bh) +{ + return buffer_mapped(bh) && !buffer_unwritten(bh); +} + +/* + * When ext4 encounters a hole, it returns without modifying the buffer_head + * which means that we can't trust b_size. To cope with this, we set b_state + * to 0 before calling get_block and, if any bit is set, we know we can trust + * b_size. Unfortunate, really, since ext4 knows precisely how long a hole is + * and would save us time calling get_block repeatedly. 
+ */ +static bool buffer_size_valid(struct buffer_head *bh) +{ + return bh->b_state != 0; +} + +static ssize_t dax_io(int rw, struct inode *inode, struct iov_iter *iter, + loff_t start, loff_t end, get_block_t get_block, + struct buffer_head *bh) +{ + ssize_t retval = 0; + loff_t pos = start; + loff_t max = start; + loff_t bh_max = start; + void *addr; + bool hole = false; + + if (rw != WRITE) + end = min(end, i_size_read(inode)); + + while (pos < end) { + unsigned len; + if (pos == max) { + unsigned blkbits = inode->i_blkbits; + sector_t block = pos >> blkbits; + unsigned first = pos - (block << blkbits); + long size; + + if (pos == bh_max) { + bh->b_size = PAGE_ALIGN(end - pos); + bh->b_state = 0; + retval = get_block(inode, block, bh, + rw == WRITE); + if (retval) + break; + if (!buffer_size_valid(bh)) + bh->b_size = 1 << blkbits; + bh_max = pos - first + bh->b_size; + } else { + unsigned done = bh->b_size - + (bh_max - (pos - first)); + bh->b_blocknr += done >> blkbits; + bh->b_size -= done; + } + + hole = (rw != WRITE) && !buffer_written(bh); + if (hole) { + addr = NULL; + size = bh->b_size - first; + } else { + retval = dax_get_addr(bh, &addr, blkbits); + if (retval < 0) + break; + if (buffer_unwritten(bh) || buffer_new(bh)) + dax_new_buf(addr, retval, first, pos, + end); + addr += first; + size = retval - first; + } + max = min(pos + size, end); + } + + if (rw == WRITE) + len = copy_from_iter(addr, max - pos, iter); + else if (!hole) + len = copy_to_iter(addr, max - pos, iter); + else + len = iov_iter_zero(max - pos, iter); + + if (!len) + break; + + pos += len; + addr += len; + } + + return (pos == start) ? retval : pos - start; +} + +/** + * dax_do_io - Perform I/O to a DAX file + * @rw: READ to read or WRITE to write + * @iocb: The control block for this I/O + * @inode: The file which the I/O is directed at + * @iter: The addresses to do I/O from or to + * @pos: The file offset where the I/O starts + * @get_block: The filesystem method used to translate file offsets to blocks + * @end_io: A filesystem callback for I/O completion + * @flags: See below + * + * This function uses the same locking scheme as do_blockdev_direct_IO: + * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the + * caller for writes. For reads, we take and release the i_mutex ourselves. + * If DIO_LOCKING is not set, the filesystem takes care of its own locking. + * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O + * is in progress. 
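For context, the intended caller of dax_do_io() is a filesystem's ->direct_IO method; the ext2_direct_IO() hunk later in this patch does exactly this. The sketch below restates that wiring in a readable form; the myfs_* names and myfs_get_block are hypothetical placeholders, not kernel API.

static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
			      struct iov_iter *iter, loff_t offset)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (IS_DAX(inode))
		/* DIO_LOCKING: for reads, dax_do_io() takes i_mutex itself */
		return dax_do_io(rw, iocb, inode, iter, offset,
				 myfs_get_block, NULL, DIO_LOCKING);

	/* Non-DAX inodes keep using the ordinary direct I/O path. */
	return blockdev_direct_IO(rw, iocb, inode, iter, offset,
				  myfs_get_block);
}

Together with the io_is_direct() helper added below, this lets generic_file_read_iter() and __generic_file_write_iter() route all I/O on DAX files through ->direct_IO instead of the page cache.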
+ */ +ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, + struct iov_iter *iter, loff_t pos, + get_block_t get_block, dio_iodone_t end_io, int flags) +{ + struct buffer_head bh; + ssize_t retval = -EINVAL; + loff_t end = pos + iov_iter_count(iter); + + memset(&bh, 0, sizeof(bh)); + + if ((flags & DIO_LOCKING) && (rw == READ)) { + struct address_space *mapping = inode->i_mapping; + mutex_lock(&inode->i_mutex); + retval = filemap_write_and_wait_range(mapping, pos, end - 1); + if (retval) { + mutex_unlock(&inode->i_mutex); + goto out; + } + } + + /* Protects against truncate */ + atomic_inc(&inode->i_dio_count); + + retval = dax_io(rw, inode, iter, pos, end, get_block, &bh); + + if ((flags & DIO_LOCKING) && (rw == READ)) + mutex_unlock(&inode->i_mutex); + + if ((retval > 0) && end_io) + end_io(iocb, pos, retval, bh.b_private); + + inode_dio_done(inode); + out: + return retval; +} +EXPORT_SYMBOL_GPL(dax_do_io); diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 7c87b22a7228..a247123fd798 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -81,8 +81,10 @@ const struct file_operations ext2_file_operations = { #ifdef CONFIG_EXT2_FS_XIP const struct file_operations ext2_xip_file_operations = { .llseek = generic_file_llseek, - .read = xip_file_read, - .write = xip_file_write, + .read = new_sync_read, + .write = new_sync_write, + .read_iter = generic_file_read_iter, + .write_iter = generic_file_write_iter, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 0cb04486577d..3ccd5fd47d66 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -859,7 +859,12 @@ ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext2_get_block); + if (IS_DAX(inode)) + ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block, + NULL, DIO_LOCKING); + else + ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, + ext2_get_block); if (ret < 0 && (rw & WRITE)) ext2_write_failed(mapping, offset + count); return ret; @@ -888,6 +893,7 @@ const struct address_space_operations ext2_aops = { const struct address_space_operations ext2_aops_xip = { .bmap = ext2_bmap, .get_xip_mem = ext2_get_xip_mem, + .direct_IO = ext2_direct_IO, }; const struct address_space_operations ext2_nobh_aops = { diff --git a/include/linux/fs.h b/include/linux/fs.h index fb373bb5cf03..241c3c030fb5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2587,12 +2587,11 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset, extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); +ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *, + loff_t, get_block_t, dio_iodone_t, int flags); + #ifdef CONFIG_FS_XIP -extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len, - loff_t *ppos); extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); -extern ssize_t xip_file_write(struct file *filp, const char __user *buf, - size_t len, loff_t *ppos); extern int xip_truncate_page(struct address_space *mapping, loff_t from); #else static inline int xip_truncate_page(struct address_space *mapping, loff_t from) @@ -2756,6 +2755,11 @@ extern int generic_show_options(struct seq_file *m, struct dentry *root); extern void save_mount_options(struct super_block *sb, char *options); extern void 
replace_mount_options(struct super_block *sb, char *options); +static inline bool io_is_direct(struct file *filp) +{ + return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp)); +} + static inline ino_t parent_ino(struct dentry *dentry) { ino_t res; diff --git a/mm/filemap.c b/mm/filemap.c index 1578c224285e..ad7242043bdb 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1695,8 +1695,7 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) loff_t *ppos = &iocb->ki_pos; loff_t pos = *ppos; - /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ - if (file->f_flags & O_DIRECT) { + if (io_is_direct(file)) { struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); @@ -2584,8 +2583,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (err) goto out; - /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ - if (unlikely(file->f_flags & O_DIRECT)) { + if (io_is_direct(file)) { loff_t endbyte; written = generic_file_direct_write(iocb, from, pos); diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 59e1c5585748..9c869f402c07 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -42,119 +42,6 @@ static struct page *xip_sparse_page(void) return __xip_sparse_page; } -/* - * This is a file read routine for execute in place files, and uses - * the mapping->a_ops->get_xip_mem() function for the actual low-level - * stuff. - * - * Note the struct file* is not used at all. It may be NULL. - */ -static ssize_t -do_xip_mapping_read(struct address_space *mapping, - struct file_ra_state *_ra, - struct file *filp, - char __user *buf, - size_t len, - loff_t *ppos) -{ - struct inode *inode = mapping->host; - pgoff_t index, end_index; - unsigned long offset; - loff_t isize, pos; - size_t copied = 0, error = 0; - - BUG_ON(!mapping->a_ops->get_xip_mem); - - pos = *ppos; - index = pos >> PAGE_CACHE_SHIFT; - offset = pos & ~PAGE_CACHE_MASK; - - isize = i_size_read(inode); - if (!isize) - goto out; - - end_index = (isize - 1) >> PAGE_CACHE_SHIFT; - do { - unsigned long nr, left; - void *xip_mem; - unsigned long xip_pfn; - int zero = 0; - - /* nr is the maximum number of bytes to copy from this page */ - nr = PAGE_CACHE_SIZE; - if (index >= end_index) { - if (index > end_index) - goto out; - nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; - if (nr <= offset) { - goto out; - } - } - nr = nr - offset; - if (nr > len - copied) - nr = len - copied; - - error = mapping->a_ops->get_xip_mem(mapping, index, 0, - &xip_mem, &xip_pfn); - if (unlikely(error)) { - if (error == -ENODATA) { - /* sparse */ - zero = 1; - } else - goto out; - } - - /* If users can be writing to this page using arbitrary - * virtual addresses, take care about potential aliasing - * before reading the page on the kernel side. - */ - if (mapping_writably_mapped(mapping)) - /* address based flush */ ; - - /* - * Ok, we have the mem, so now we can copy it to user space... - * - * The actor routine returns how many bytes were actually used.. - * NOTE! This may not be the same as how much of a user buffer - * we filled up (we may be padding etc), so we can only update - * "pos" here (the actor routine has to update the user buffer - * pointers and the remaining count). 
- */ - if (!zero) - left = __copy_to_user(buf+copied, xip_mem+offset, nr); - else - left = __clear_user(buf + copied, nr); - - if (left) { - error = -EFAULT; - goto out; - } - - copied += (nr - left); - offset += (nr - left); - index += offset >> PAGE_CACHE_SHIFT; - offset &= ~PAGE_CACHE_MASK; - } while (copied < len); - -out: - *ppos = pos + copied; - if (filp) - file_accessed(filp); - - return (copied ? copied : error); -} - -ssize_t -xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) -{ - if (!access_ok(VERIFY_WRITE, buf, len)) - return -EFAULT; - - return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp, - buf, len, ppos); -} -EXPORT_SYMBOL_GPL(xip_file_read); - /* * __xip_unmap is invoked from xip_unmap and xip_write * @@ -341,127 +228,6 @@ int xip_file_mmap(struct file * file, struct vm_area_struct * vma) } EXPORT_SYMBOL_GPL(xip_file_mmap); -static ssize_t -__xip_file_write(struct file *filp, const char __user *buf, - size_t count, loff_t pos, loff_t *ppos) -{ - struct address_space * mapping = filp->f_mapping; - const struct address_space_operations *a_ops = mapping->a_ops; - struct inode *inode = mapping->host; - long status = 0; - size_t bytes; - ssize_t written = 0; - - BUG_ON(!mapping->a_ops->get_xip_mem); - - do { - unsigned long index; - unsigned long offset; - size_t copied; - void *xip_mem; - unsigned long xip_pfn; - - offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ - index = pos >> PAGE_CACHE_SHIFT; - bytes = PAGE_CACHE_SIZE - offset; - if (bytes > count) - bytes = count; - - status = a_ops->get_xip_mem(mapping, index, 0, - &xip_mem, &xip_pfn); - if (status == -ENODATA) { - /* we allocate a new page unmap it */ - mutex_lock(&xip_sparse_mutex); - status = a_ops->get_xip_mem(mapping, index, 1, - &xip_mem, &xip_pfn); - mutex_unlock(&xip_sparse_mutex); - if (!status) - /* unmap page at pgoff from all other vmas */ - __xip_unmap(mapping, index); - } - - if (status) - break; - - copied = bytes - - __copy_from_user_nocache(xip_mem + offset, buf, bytes); - - if (likely(copied > 0)) { - status = copied; - - if (status >= 0) { - written += status; - count -= status; - pos += status; - buf += status; - } - } - if (unlikely(copied != bytes)) - if (status >= 0) - status = -EFAULT; - if (status < 0) - break; - } while (count); - *ppos = pos; - /* - * No need to use i_size_read() here, the i_size - * cannot change under us because we hold i_mutex. - */ - if (pos > inode->i_size) { - i_size_write(inode, pos); - mark_inode_dirty(inode); - } - - return written ? 
written : status; -} - -ssize_t -xip_file_write(struct file *filp, const char __user *buf, size_t len, - loff_t *ppos) -{ - struct address_space *mapping = filp->f_mapping; - struct inode *inode = mapping->host; - size_t count; - loff_t pos; - ssize_t ret; - - mutex_lock(&inode->i_mutex); - - if (!access_ok(VERIFY_READ, buf, len)) { - ret=-EFAULT; - goto out_up; - } - - pos = *ppos; - count = len; - - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); - - ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode)); - if (ret) - goto out_backing; - if (count == 0) - goto out_backing; - - ret = file_remove_suid(filp); - if (ret) - goto out_backing; - - ret = file_update_time(filp); - if (ret) - goto out_backing; - - ret = __xip_file_write (filp, buf, count, pos, ppos); - - out_backing: - current->backing_dev_info = NULL; - out_up: - mutex_unlock(&inode->i_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(xip_file_write); - /* * truncate a page used for execute in place * functionality is analog to block_truncate_page but does use get_xip_mem -- cgit v1.2.3 From 289c6aedac981533331428bc933fff21ae332c9e Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:58:59 -0800 Subject: dax,ext2: replace ext2_clear_xip_target with dax_clear_blocks This is practically generic code; other filesystems will want to call it from other places, but there's nothing ext2-specific about it. Make it a little more generic by allowing it to take a count of the number of bytes to zero rather than fixing it to a single page. Thanks to Dave Hansen for suggesting that I need to call cond_resched() if zeroing more than one page. Signed-off-by: Matthew Wilcox Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 37 +++++++++++++++++++++++++++++++++++++ fs/ext2/inode.c | 8 +++++--- fs/ext2/xip.c | 14 -------------- fs/ext2/xip.h | 3 --- include/linux/fs.h | 1 + 5 files changed, 43 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/dax.c b/fs/dax.c index 1a2bdbfa3ea9..69c3126a05b4 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -20,8 +20,45 @@ #include #include #include +#include #include +int dax_clear_blocks(struct inode *inode, sector_t block, long size) +{ + struct block_device *bdev = inode->i_sb->s_bdev; + sector_t sector = block << (inode->i_blkbits - 9); + + might_sleep(); + do { + void *addr; + unsigned long pfn; + long count; + + count = bdev_direct_access(bdev, sector, &addr, &pfn, size); + if (count < 0) + return count; + BUG_ON(size < count); + while (count > 0) { + unsigned pgsz = PAGE_SIZE - offset_in_page(addr); + if (pgsz > count) + pgsz = count; + if (pgsz < PAGE_SIZE) + memset(addr, 0, pgsz); + else + clear_page(addr); + addr += pgsz; + size -= pgsz; + count -= pgsz; + BUG_ON(pgsz & 511); + sector += pgsz / 512; + cond_resched(); + } + } while (size); + + return 0; +} +EXPORT_SYMBOL_GPL(dax_clear_blocks); + static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits) { unsigned long pfn; diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 3ccd5fd47d66..52978b853226 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -733,10 +733,12 @@ static int ext2_get_blocks(struct inode *inode, if (IS_DAX(inode)) { /* - * we need to clear the block + * block must be initialised before we put it in the tree + * so that it's not found by another thread before it's + * initialised */ - err = ext2_clear_xip_target (inode, - le32_to_cpu(chain[depth-1].key)); + err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key), + 1 << inode->i_blkbits); if (err) { mutex_unlock(&ei->truncate_mutex); goto cleanup; diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c index bbc5fec6ff7f..8cfca3a4cd58 100644 --- a/fs/ext2/xip.c +++ b/fs/ext2/xip.c @@ -42,20 +42,6 @@ __ext2_get_block(struct inode *inode, pgoff_t pgoff, int create, return rc; } -int -ext2_clear_xip_target(struct inode *inode, sector_t block) -{ - void *kaddr; - unsigned long pfn; - long size; - - size = __inode_direct_access(inode, block, &kaddr, &pfn, PAGE_SIZE); - if (size < 0) - return size; - clear_page(kaddr); - return 0; -} - void ext2_xip_verify_sb(struct super_block *sb) { struct ext2_sb_info *sbi = EXT2_SB(sb); diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h index 29be73781419..b2592f2f3c9d 100644 --- a/fs/ext2/xip.h +++ b/fs/ext2/xip.h @@ -7,8 +7,6 @@ #ifdef CONFIG_EXT2_FS_XIP extern void ext2_xip_verify_sb (struct super_block *); -extern int ext2_clear_xip_target (struct inode *, sector_t); - static inline int ext2_use_xip (struct super_block *sb) { struct ext2_sb_info *sbi = EXT2_SB(sb); @@ -19,6 +17,5 @@ int ext2_get_xip_mem(struct address_space *, pgoff_t, int, #else #define ext2_xip_verify_sb(sb) do { } while (0) #define ext2_use_xip(sb) 0 -#define ext2_clear_xip_target(inode, chain) 0 #define ext2_get_xip_mem NULL #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index 241c3c030fb5..8084934a5676 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2589,6 +2589,7 @@ extern int nonseekable_open(struct inode * inode, struct file * filp); ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *, loff_t, get_block_t, dio_iodone_t, 
int flags); +int dax_clear_blocks(struct inode *, sector_t block, long size); #ifdef CONFIG_FS_XIP extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); -- cgit v1.2.3 From f7ca90b160307d63aaedab8bd451c24a182db20f Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:02 -0800 Subject: dax,ext2: replace the XIP page fault handler with the DAX page fault handler Instead of calling aops->get_xip_mem from the fault handler, the filesystem passes a get_block_t that is used to find the appropriate blocks. This requires that all architectures implement copy_user_page(). At the time of writing, mips and arm do not. Patches exist and are in progress. [akpm@linux-foundation.org: remap_file_pages went away] Signed-off-by: Matthew Wilcox Reviewed-by: Jan Kara Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jens Axboe Cc: Kirill A. Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Cc: Russell King Cc: Ralf Baechle Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 241 +++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/ext2/file.c | 34 +++++++- include/linux/fs.h | 4 +- mm/filemap_xip.c | 206 --------------------------------------------- 4 files changed, 276 insertions(+), 209 deletions(-) (limited to 'fs') diff --git a/fs/dax.c b/fs/dax.c index 69c3126a05b4..553e55b93495 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -19,9 +19,13 @@ #include #include #include +#include +#include +#include #include #include #include +#include int dax_clear_blocks(struct inode *inode, sector_t block, long size) { @@ -221,3 +225,240 @@ ssize_t dax_do_io(int rw, struct kiocb *iocb, struct inode *inode, return retval; } EXPORT_SYMBOL_GPL(dax_do_io); + +/* + * The user has performed a load from a hole in the file. Allocating + * a new page in the file would cause excessive storage usage for + * workloads with sparse files. We allocate a page cache page instead. + * We'll kick it out of the page cache if it's ever written to, + * otherwise it will simply fall out of the page cache under memory + * pressure without ever having been dirtied. + */ +static int dax_load_hole(struct address_space *mapping, struct page *page, + struct vm_fault *vmf) +{ + unsigned long size; + struct inode *inode = mapping->host; + if (!page) + page = find_or_create_page(mapping, vmf->pgoff, + GFP_KERNEL | __GFP_ZERO); + if (!page) + return VM_FAULT_OOM; + /* Recheck i_size under page lock to avoid truncate race */ + size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (vmf->pgoff >= size) { + unlock_page(page); + page_cache_release(page); + return VM_FAULT_SIGBUS; + } + + vmf->page = page; + return VM_FAULT_LOCKED; +} + +static int copy_user_bh(struct page *to, struct buffer_head *bh, + unsigned blkbits, unsigned long vaddr) +{ + void *vfrom, *vto; + if (dax_get_addr(bh, &vfrom, blkbits) < 0) + return -EIO; + vto = kmap_atomic(to); + copy_user_page(vto, vfrom, vaddr, to); + kunmap_atomic(vto); + return 0; +} + +static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, + struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct address_space *mapping = inode->i_mapping; + sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9); + unsigned long vaddr = (unsigned long)vmf->virtual_address; + void *addr; + unsigned long pfn; + pgoff_t size; + int error; + + i_mmap_lock_read(mapping); + + /* + * Check truncate didn't happen while we were allocating a block. 
+ * If it did, this block may or may not be still allocated to the + * file. We can't tell the filesystem to free it because we can't + * take i_mutex here. In the worst case, the file still has blocks + * allocated past the end of the file. + */ + size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (unlikely(vmf->pgoff >= size)) { + error = -EIO; + goto out; + } + + error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size); + if (error < 0) + goto out; + if (error < PAGE_SIZE) { + error = -EIO; + goto out; + } + + if (buffer_unwritten(bh) || buffer_new(bh)) + clear_page(addr); + + error = vm_insert_mixed(vma, vaddr, pfn); + + out: + i_mmap_unlock_read(mapping); + + if (bh->b_end_io) + bh->b_end_io(bh, 1); + + return error; +} + +static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, + get_block_t get_block) +{ + struct file *file = vma->vm_file; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct page *page; + struct buffer_head bh; + unsigned long vaddr = (unsigned long)vmf->virtual_address; + unsigned blkbits = inode->i_blkbits; + sector_t block; + pgoff_t size; + int error; + int major = 0; + + size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (vmf->pgoff >= size) + return VM_FAULT_SIGBUS; + + memset(&bh, 0, sizeof(bh)); + block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits); + bh.b_size = PAGE_SIZE; + + repeat: + page = find_get_page(mapping, vmf->pgoff); + if (page) { + if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { + page_cache_release(page); + return VM_FAULT_RETRY; + } + if (unlikely(page->mapping != mapping)) { + unlock_page(page); + page_cache_release(page); + goto repeat; + } + size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; + if (unlikely(vmf->pgoff >= size)) { + /* + * We have a struct page covering a hole in the file + * from a read fault and we've raced with a truncate + */ + error = -EIO; + goto unlock_page; + } + } + + error = get_block(inode, block, &bh, 0); + if (!error && (bh.b_size < PAGE_SIZE)) + error = -EIO; /* fs corruption? 
*/ + if (error) + goto unlock_page; + + if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) { + if (vmf->flags & FAULT_FLAG_WRITE) { + error = get_block(inode, block, &bh, 1); + count_vm_event(PGMAJFAULT); + mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); + major = VM_FAULT_MAJOR; + if (!error && (bh.b_size < PAGE_SIZE)) + error = -EIO; + if (error) + goto unlock_page; + } else { + return dax_load_hole(mapping, page, vmf); + } + } + + if (vmf->cow_page) { + struct page *new_page = vmf->cow_page; + if (buffer_written(&bh)) + error = copy_user_bh(new_page, &bh, blkbits, vaddr); + else + clear_user_highpage(new_page, vaddr); + if (error) + goto unlock_page; + vmf->page = page; + if (!page) { + i_mmap_lock_read(mapping); + /* Check we didn't race with truncate */ + size = (i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT; + if (vmf->pgoff >= size) { + i_mmap_unlock_read(mapping); + error = -EIO; + goto out; + } + } + return VM_FAULT_LOCKED; + } + + /* Check we didn't race with a read fault installing a new page */ + if (!page && major) + page = find_lock_page(mapping, vmf->pgoff); + + if (page) { + unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, + PAGE_CACHE_SIZE, 0); + delete_from_page_cache(page); + unlock_page(page); + page_cache_release(page); + } + + error = dax_insert_mapping(inode, &bh, vma, vmf); + + out: + if (error == -ENOMEM) + return VM_FAULT_OOM | major; + /* -EBUSY is fine, somebody else faulted on the same PTE */ + if ((error < 0) && (error != -EBUSY)) + return VM_FAULT_SIGBUS | major; + return VM_FAULT_NOPAGE | major; + + unlock_page: + if (page) { + unlock_page(page); + page_cache_release(page); + } + goto out; +} + +/** + * dax_fault - handle a page fault on a DAX file + * @vma: The virtual memory area where the fault occurred + * @vmf: The description of the fault + * @get_block: The filesystem method used to translate file offsets to blocks + * + * When a page fault occurs, filesystems may call this helper in their + * fault handler for DAX files. + */ +int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, + get_block_t get_block) +{ + int result; + struct super_block *sb = file_inode(vma->vm_file)->i_sb; + + if (vmf->flags & FAULT_FLAG_WRITE) { + sb_start_pagefault(sb); + file_update_time(vma->vm_file); + } + result = do_dax_fault(vma, vmf, get_block); + if (vmf->flags & FAULT_FLAG_WRITE) + sb_end_pagefault(sb); + + return result; +} +EXPORT_SYMBOL_GPL(dax_fault); diff --git a/fs/ext2/file.c b/fs/ext2/file.c index a247123fd798..a61c93fd9dce 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -25,6 +25,36 @@ #include "xattr.h" #include "acl.h" +#ifdef CONFIG_EXT2_FS_XIP +static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + return dax_fault(vma, vmf, ext2_get_block); +} + +static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + return dax_mkwrite(vma, vmf, ext2_get_block); +} + +static const struct vm_operations_struct ext2_dax_vm_ops = { + .fault = ext2_dax_fault, + .page_mkwrite = ext2_dax_mkwrite, +}; + +static int ext2_file_mmap(struct file *file, struct vm_area_struct *vma) +{ + if (!IS_DAX(file_inode(file))) + return generic_file_mmap(file, vma); + + file_accessed(file); + vma->vm_ops = &ext2_dax_vm_ops; + vma->vm_flags |= VM_MIXEDMAP; + return 0; +} +#else +#define ext2_file_mmap generic_file_mmap +#endif + /* * Called when filp is released. This happens when all file descriptors * for a single struct file are closed. 
Note that different open() calls @@ -70,7 +100,7 @@ const struct file_operations ext2_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif - .mmap = generic_file_mmap, + .mmap = ext2_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, @@ -89,7 +119,7 @@ const struct file_operations ext2_xip_file_operations = { #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, #endif - .mmap = xip_file_mmap, + .mmap = ext2_file_mmap, .open = dquot_file_open, .release = ext2_release_file, .fsync = ext2_fsync, diff --git a/include/linux/fs.h b/include/linux/fs.h index 8084934a5676..6bad6d4c579b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -51,6 +51,7 @@ struct swap_info_struct; struct seq_file; struct workqueue_struct; struct iov_iter; +struct vm_fault; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -2590,9 +2591,10 @@ extern int nonseekable_open(struct inode * inode, struct file * filp); ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *, loff_t, get_block_t, dio_iodone_t, int flags); int dax_clear_blocks(struct inode *, sector_t block, long size); +int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); +#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) #ifdef CONFIG_FS_XIP -extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma); extern int xip_truncate_page(struct address_space *mapping, loff_t from); #else static inline int xip_truncate_page(struct address_space *mapping, loff_t from) diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 9c869f402c07..59fb387b2238 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -22,212 +22,6 @@ #include #include -/* - * We do use our own empty page to avoid interference with other users - * of ZERO_PAGE(), such as /dev/zero - */ -static DEFINE_MUTEX(xip_sparse_mutex); -static seqcount_t xip_sparse_seq = SEQCNT_ZERO(xip_sparse_seq); -static struct page *__xip_sparse_page; - -/* called under xip_sparse_mutex */ -static struct page *xip_sparse_page(void) -{ - if (!__xip_sparse_page) { - struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO); - - if (page) - __xip_sparse_page = page; - } - return __xip_sparse_page; -} - -/* - * __xip_unmap is invoked from xip_unmap and xip_write - * - * This function walks all vmas of the address_space and unmaps the - * __xip_sparse_page when found at pgoff. - */ -static void __xip_unmap(struct address_space * mapping, unsigned long pgoff) -{ - struct vm_area_struct *vma; - struct page *page; - unsigned count; - int locked = 0; - - count = read_seqcount_begin(&xip_sparse_seq); - - page = __xip_sparse_page; - if (!page) - return; - -retry: - i_mmap_lock_read(mapping); - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { - pte_t *pte, pteval; - spinlock_t *ptl; - struct mm_struct *mm = vma->vm_mm; - unsigned long address = vma->vm_start + - ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); - - BUG_ON(address < vma->vm_start || address >= vma->vm_end); - pte = page_check_address(page, mm, address, &ptl, 1); - if (pte) { - /* Nuke the page table entry. 
*/ - flush_cache_page(vma, address, pte_pfn(*pte)); - pteval = ptep_clear_flush(vma, address, pte); - page_remove_rmap(page); - dec_mm_counter(mm, MM_FILEPAGES); - BUG_ON(pte_dirty(pteval)); - pte_unmap_unlock(pte, ptl); - /* must invalidate_page _before_ freeing the page */ - mmu_notifier_invalidate_page(mm, address); - page_cache_release(page); - } - } - i_mmap_unlock_read(mapping); - - if (locked) { - mutex_unlock(&xip_sparse_mutex); - } else if (read_seqcount_retry(&xip_sparse_seq, count)) { - mutex_lock(&xip_sparse_mutex); - locked = 1; - goto retry; - } -} - -/* - * xip_fault() is invoked via the vma operations vector for a - * mapped memory region to read in file data during a page fault. - * - * This function is derived from filemap_fault, but used for execute in place - */ -static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - struct file *file = vma->vm_file; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - pgoff_t size; - void *xip_mem; - unsigned long xip_pfn; - struct page *page; - int error; - - /* XXX: are VM_FAULT_ codes OK? */ -again: - size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - if (vmf->pgoff >= size) - return VM_FAULT_SIGBUS; - - error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0, - &xip_mem, &xip_pfn); - if (likely(!error)) - goto found; - if (error != -ENODATA) - return VM_FAULT_OOM; - - /* sparse block */ - if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) && - (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) && - (!(mapping->host->i_sb->s_flags & MS_RDONLY))) { - int err; - - /* maybe shared writable, allocate new block */ - mutex_lock(&xip_sparse_mutex); - error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1, - &xip_mem, &xip_pfn); - mutex_unlock(&xip_sparse_mutex); - if (error) - return VM_FAULT_SIGBUS; - /* unmap sparse mappings at pgoff from all other vmas */ - __xip_unmap(mapping, vmf->pgoff); - -found: - /* - * We must recheck i_size under i_mmap_rwsem to prevent races - * with truncation - */ - i_mmap_lock_read(mapping); - size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - if (unlikely(vmf->pgoff >= size)) { - i_mmap_unlock_read(mapping); - return VM_FAULT_SIGBUS; - } - err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, - xip_pfn); - i_mmap_unlock_read(mapping); - if (err == -ENOMEM) - return VM_FAULT_OOM; - /* - * err == -EBUSY is fine, we've raced against another thread - * that faulted-in the same page - */ - if (err != -EBUSY) - BUG_ON(err); - return VM_FAULT_NOPAGE; - } else { - int err, ret = VM_FAULT_OOM; - - mutex_lock(&xip_sparse_mutex); - write_seqcount_begin(&xip_sparse_seq); - error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0, - &xip_mem, &xip_pfn); - if (unlikely(!error)) { - write_seqcount_end(&xip_sparse_seq); - mutex_unlock(&xip_sparse_mutex); - goto again; - } - if (error != -ENODATA) - goto out; - - /* - * We must recheck i_size under i_mmap_rwsem to prevent races - * with truncation - */ - i_mmap_lock_read(mapping); - size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - if (unlikely(vmf->pgoff >= size)) { - ret = VM_FAULT_SIGBUS; - goto unlock; - } - /* not shared and writable, use xip_sparse_page() */ - page = xip_sparse_page(); - if (!page) - goto unlock; - err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, - page); - if (err == -ENOMEM) - goto unlock; - - ret = VM_FAULT_NOPAGE; -unlock: - i_mmap_unlock_read(mapping); -out: - 
write_seqcount_end(&xip_sparse_seq); - mutex_unlock(&xip_sparse_mutex); - - return ret; - } -} - -static const struct vm_operations_struct xip_file_vm_ops = { - .fault = xip_file_fault, - .page_mkwrite = filemap_page_mkwrite, -}; - -int xip_file_mmap(struct file * file, struct vm_area_struct * vma) -{ - BUG_ON(!file->f_mapping->a_ops->get_xip_mem); - - file_accessed(file); - vma->vm_ops = &xip_file_vm_ops; - vma->vm_flags |= VM_MIXEDMAP; - return 0; -} -EXPORT_SYMBOL_GPL(xip_file_mmap); - /* * truncate a page used for execute in place * functionality is analog to block_truncate_page but does use get_xip_mem -- cgit v1.2.3 From 4c0ccfef2e9f7418a6eb0bf07a2fc8f216365b18 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:06 -0800 Subject: dax,ext2: replace xip_truncate_page with dax_truncate_page It takes a get_block parameter just like nobh_truncate_page() and block_truncate_page() Signed-off-by: Matthew Wilcox Reviewed-by: Mathieu Desnoyers Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. Shutemov Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/dax.c | 44 ++++++++++++++++++++++++++++++++++++++++++++ fs/ext2/inode.c | 2 +- include/linux/fs.h | 10 +--------- mm/filemap_xip.c | 40 ---------------------------------------- 4 files changed, 46 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/dax.c b/fs/dax.c index 553e55b93495..ebf9b2231b0f 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -462,3 +462,47 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, return result; } EXPORT_SYMBOL_GPL(dax_fault); + +/** + * dax_truncate_page - handle a partial page being truncated in a DAX file + * @inode: The file being truncated + * @from: The file offset that is being truncated to + * @get_block: The filesystem method used to translate file offsets to blocks + * + * Similar to block_truncate_page(), this function can be called by a + * filesystem when it is truncating an DAX file to handle the partial page. + * + * We work in terms of PAGE_CACHE_SIZE here for commonality with + * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem + * took care of disposing of the unnecessary blocks. Even if the filesystem + * block size is smaller than PAGE_SIZE, we have to zero the rest of the page + * since the file might be mmaped. + */ +int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) +{ + struct buffer_head bh; + pgoff_t index = from >> PAGE_CACHE_SHIFT; + unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned length = PAGE_CACHE_ALIGN(from) - from; + int err; + + /* Block boundary? 
Nothing to do */ + if (!length) + return 0; + + memset(&bh, 0, sizeof(bh)); + bh.b_size = PAGE_CACHE_SIZE; + err = get_block(inode, index, &bh, 0); + if (err < 0) + return err; + if (buffer_written(&bh)) { + void *addr; + err = dax_get_addr(&bh, &addr, inode->i_blkbits); + if (err < 0) + return err; + memset(addr + offset, 0, length); + } + + return 0; +} +EXPORT_SYMBOL_GPL(dax_truncate_page); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 52978b853226..5ac0a3461b3c 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1210,7 +1210,7 @@ static int ext2_setsize(struct inode *inode, loff_t newsize) inode_dio_wait(inode); if (IS_DAX(inode)) - error = xip_truncate_page(inode->i_mapping, newsize); + error = dax_truncate_page(inode, newsize, ext2_get_block); else if (test_opt(inode->i_sb, NOBH)) error = nobh_truncate_page(inode->i_mapping, newsize, ext2_get_block); diff --git a/include/linux/fs.h b/include/linux/fs.h index 6bad6d4c579b..2c8f9055af38 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2591,18 +2591,10 @@ extern int nonseekable_open(struct inode * inode, struct file * filp); ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *, loff_t, get_block_t, dio_iodone_t, int flags); int dax_clear_blocks(struct inode *, sector_t block, long size); +int dax_truncate_page(struct inode *, loff_t from, get_block_t); int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) -#ifdef CONFIG_FS_XIP -extern int xip_truncate_page(struct address_space *mapping, loff_t from); -#else -static inline int xip_truncate_page(struct address_space *mapping, loff_t from) -{ - return 0; -} -#endif - #ifdef CONFIG_BLOCK typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, loff_t file_offset); diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 59fb387b2238..8e3f99b61959 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -22,43 +22,3 @@ #include #include -/* - * truncate a page used for execute in place - * functionality is analog to block_truncate_page but does use get_xip_mem - * to get the page instead of page cache - */ -int -xip_truncate_page(struct address_space *mapping, loff_t from) -{ - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned blocksize; - unsigned length; - void *xip_mem; - unsigned long xip_pfn; - int err; - - BUG_ON(!mapping->a_ops->get_xip_mem); - - blocksize = 1 << mapping->host->i_blkbits; - length = offset & (blocksize - 1); - - /* Block boundary? Nothing to do */ - if (!length) - return 0; - - length = blocksize - length; - - err = mapping->a_ops->get_xip_mem(mapping, index, 0, - &xip_mem, &xip_pfn); - if (unlikely(err)) { - if (err == -ENODATA) - /* Hole? No need to truncate */ - return 0; - else - return err; - } - memset(xip_mem + offset, 0, length); - return 0; -} -EXPORT_SYMBOL_GPL(xip_truncate_page); -- cgit v1.2.3 From e748dcd095ddee50e7a7deda2e26247715318a2e Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:12 -0800 Subject: vfs: remove get_xip_mem All callers of get_xip_mem() are now gone. Remove checks for it, initialisers of it, documentation of it and the only implementation of it. Also remove mm/filemap_xip.c as it is now empty. Also remove documentation of the long-gone get_xip_page(). Signed-off-by: Matthew Wilcox Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/Locking | 3 --- Documentation/filesystems/vfs.txt | 7 ------ fs/exofs/inode.c | 1 - fs/ext2/inode.c | 1 - fs/ext2/xip.c | 45 --------------------------------------- fs/ext2/xip.h | 3 --- fs/open.c | 5 +---- include/linux/fs.h | 2 -- include/linux/rmap.h | 2 +- mm/Makefile | 1 - mm/fadvise.c | 6 ++++-- mm/filemap_xip.c | 24 --------------------- mm/madvise.c | 2 +- 13 files changed, 7 insertions(+), 95 deletions(-) delete mode 100644 mm/filemap_xip.c (limited to 'fs') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index b30753cbf431..2ca3d17eee56 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -199,8 +199,6 @@ prototypes: int (*releasepage) (struct page *, int); void (*freepage)(struct page *); int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset); - int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **, - unsigned long *); int (*migratepage)(struct address_space *, struct page *, struct page *); int (*launder_page)(struct page *); int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long); @@ -225,7 +223,6 @@ invalidatepage: yes releasepage: yes freepage: yes direct_IO: -get_xip_mem: maybe migratepage: yes (both) launder_page: yes is_partially_uptodate: yes diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index 43ce0507ee25..966b22829f3b 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -591,8 +591,6 @@ struct address_space_operations { int (*releasepage) (struct page *, int); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset); - struct page* (*get_xip_page)(struct address_space *, sector_t, - int); /* migrate the contents of a page to the specified target */ int (*migratepage) (struct page *, struct page *); int (*launder_page) (struct page *); @@ -748,11 +746,6 @@ struct address_space_operations { and transfer data directly between the storage and the application's address space. - get_xip_page: called by the VM to translate a block number to a page. - The page is valid until the corresponding filesystem is unmounted. - Filesystems that want to use execute-in-place (XIP) need to implement - it. An example implementation can be found in fs/ext2/xip.c. - migrate_page: This is used to compact the physical memory usage. 
If the VM wants to relocate a page (maybe off a memory card that is signalling imminent failure) it will pass a new page diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 6fc91df99ff8..a198e94813fe 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -985,7 +985,6 @@ const struct address_space_operations exofs_aops = { .direct_IO = exofs_direct_IO, /* With these NULL has special meaning or default is not exported */ - .get_xip_mem = NULL, .migratepage = NULL, .launder_page = NULL, .is_partially_uptodate = NULL, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 5ac0a3461b3c..59d6c7d43740 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -894,7 +894,6 @@ const struct address_space_operations ext2_aops = { const struct address_space_operations ext2_aops_xip = { .bmap = ext2_bmap, - .get_xip_mem = ext2_get_xip_mem, .direct_IO = ext2_direct_IO, }; diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c index 8cfca3a4cd58..132d4daf98c4 100644 --- a/fs/ext2/xip.c +++ b/fs/ext2/xip.c @@ -13,35 +13,6 @@ #include "ext2.h" #include "xip.h" -static inline long __inode_direct_access(struct inode *inode, sector_t block, - void **kaddr, unsigned long *pfn, long size) -{ - struct block_device *bdev = inode->i_sb->s_bdev; - sector_t sector = block * (PAGE_SIZE / 512); - return bdev_direct_access(bdev, sector, kaddr, pfn, size); -} - -static inline int -__ext2_get_block(struct inode *inode, pgoff_t pgoff, int create, - sector_t *result) -{ - struct buffer_head tmp; - int rc; - - memset(&tmp, 0, sizeof(struct buffer_head)); - tmp.b_size = 1 << inode->i_blkbits; - rc = ext2_get_block(inode, pgoff, &tmp, create); - *result = tmp.b_blocknr; - - /* did we get a sparse block (hole in the file)? */ - if (!tmp.b_blocknr && !rc) { - BUG_ON(create); - rc = -ENODATA; - } - - return rc; -} - void ext2_xip_verify_sb(struct super_block *sb) { struct ext2_sb_info *sbi = EXT2_SB(sb); @@ -54,19 +25,3 @@ void ext2_xip_verify_sb(struct super_block *sb) "not supported by bdev"); } } - -int ext2_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, int create, - void **kmem, unsigned long *pfn) -{ - long rc; - sector_t block; - - /* first, retrieve the sector number */ - rc = __ext2_get_block(mapping->host, pgoff, create, &block); - if (rc) - return rc; - - /* retrieve address of the target data */ - rc = __inode_direct_access(mapping->host, block, kmem, pfn, PAGE_SIZE); - return (rc < 0) ? 
rc : 0; -} diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h index b2592f2f3c9d..e7b9f0a2cc54 100644 --- a/fs/ext2/xip.h +++ b/fs/ext2/xip.h @@ -12,10 +12,7 @@ static inline int ext2_use_xip (struct super_block *sb) struct ext2_sb_info *sbi = EXT2_SB(sb); return (sbi->s_mount_opt & EXT2_MOUNT_XIP); } -int ext2_get_xip_mem(struct address_space *, pgoff_t, int, - void **, unsigned long *); #else #define ext2_xip_verify_sb(sb) do { } while (0) #define ext2_use_xip(sb) 0 -#define ext2_get_xip_mem NULL #endif diff --git a/fs/open.c b/fs/open.c index 813be037b412..a293c2020676 100644 --- a/fs/open.c +++ b/fs/open.c @@ -667,11 +667,8 @@ int open_check_o_direct(struct file *f) { /* NB: we're sure to have correct a_ops only after f_op->open */ if (f->f_flags & O_DIRECT) { - if (!f->f_mapping->a_ops || - ((!f->f_mapping->a_ops->direct_IO) && - (!f->f_mapping->a_ops->get_xip_mem))) { + if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO) return -EINVAL; - } } return 0; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 2c8f9055af38..9772d655f444 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -362,8 +362,6 @@ struct address_space_operations { int (*releasepage) (struct page *, gfp_t); void (*freepage)(struct page *); ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter, loff_t offset); - int (*get_xip_mem)(struct address_space *, pgoff_t, int, - void **, unsigned long *); /* * migrate the contents of a page to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b38f559130d5..c4c559a45dc8 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -198,7 +198,7 @@ int page_referenced(struct page *, int is_locked, int try_to_unmap(struct page *, enum ttu_flags flags); /* - * Called from mm/filemap_xip.c to unmap empty zero page + * Used by uprobes to replace a userspace page safely */ pte_t *__page_check_address(struct page *, struct mm_struct *, unsigned long, spinlock_t **, int); diff --git a/mm/Makefile b/mm/Makefile index 088c68e9ec35..3c1caa2693bd 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -55,7 +55,6 @@ obj-$(CONFIG_KMEMCHECK) += kmemcheck.o obj-$(CONFIG_KASAN) += kasan/ obj-$(CONFIG_FAILSLAB) += failslab.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o -obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o diff --git a/mm/fadvise.c b/mm/fadvise.c index fac23ecf8d72..4a3907cf79f8 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -28,6 +28,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) { struct fd f = fdget(fd); + struct inode *inode; struct address_space *mapping; struct backing_dev_info *bdi; loff_t endbyte; /* inclusive */ @@ -39,7 +40,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) if (!f.file) return -EBADF; - if (S_ISFIFO(file_inode(f.file)->i_mode)) { + inode = file_inode(f.file); + if (S_ISFIFO(inode->i_mode)) { ret = -ESPIPE; goto out; } @@ -50,7 +52,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) goto out; } - if (mapping->a_ops->get_xip_mem) { + if (IS_DAX(inode)) { switch (advice) { case POSIX_FADV_NORMAL: case POSIX_FADV_RANDOM: diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c deleted file mode 100644 index 8e3f99b61959..000000000000 --- a/mm/filemap_xip.c +++ /dev/null @@ -1,24 +0,0 @@ -/* - * linux/mm/filemap_xip.c - * - * Copyright 
(C) 2005 IBM Corporation - * Author: Carsten Otte - * - * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - diff --git a/mm/madvise.c b/mm/madvise.c index 1077cbdc8b52..d551475517bf 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -239,7 +239,7 @@ static long madvise_willneed(struct vm_area_struct *vma, return -EBADF; #endif - if (file->f_mapping->a_ops->get_xip_mem) { + if (IS_DAX(file_inode(file))) { /* no bad return value, but ignore advice */ return 0; } -- cgit v1.2.3 From 0de4830fd49f60d04ba37e8b32f95100f3953c39 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:15 -0800 Subject: ext2: remove ext2_xip_verify_sb() Jan Kara pointed out that calling ext2_xip_verify_sb() in ext2_remount() doesn't make sense, since changing the XIP option on remount isn't allowed. It also doesn't make sense to re-check whether blocksize is supported since it can't change between mounts. Replace the call to ext2_xip_verify_sb() in ext2_fill_super() with the equivalent check and delete the definition. Signed-off-by: Matthew Wilcox Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext2/super.c | 33 ++++++++++++--------------------- fs/ext2/xip.c | 12 ------------ fs/ext2/xip.h | 2 -- 3 files changed, 12 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/ext2/super.c b/fs/ext2/super.c index ae55fddc26a9..83c5d51b8ede 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -877,9 +877,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); - ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset - EXT2_MOUNT_XIP if not */ - if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV && (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) || EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) || @@ -909,11 +906,17 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); - if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) { - if (!silent) + if (sbi->s_mount_opt & EXT2_MOUNT_XIP) { + if (blocksize != PAGE_SIZE) { ext2_msg(sb, KERN_ERR, - "error: unsupported blocksize for xip"); - goto failed_mount; + "error: unsupported blocksize for xip"); + goto failed_mount; + } + if (!sb->s_bdev->bd_disk->fops->direct_access) { + ext2_msg(sb, KERN_ERR, + "error: device does not support xip"); + goto failed_mount; + } } /* If the blocksize doesn't match, re-read the thing.. */ @@ -1259,7 +1262,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) { struct ext2_sb_info * sbi = EXT2_SB(sb); struct ext2_super_block * es; - unsigned long old_mount_opt = sbi->s_mount_opt; struct ext2_mount_options old_opts; unsigned long old_sb_flags; int err; @@ -1284,22 +1286,11 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? 
MS_POSIXACL : 0); - ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset - EXT2_MOUNT_XIP if not */ - - if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) { - ext2_msg(sb, KERN_WARNING, - "warning: unsupported blocksize for xip"); - err = -EINVAL; - goto restore_opts; - } - es = sbi->s_es; - if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) { + if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT2_MOUNT_XIP) { ext2_msg(sb, KERN_WARNING, "warning: refusing change of " "xip flag with busy inodes while remounting"); - sbi->s_mount_opt &= ~EXT2_MOUNT_XIP; - sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP; + sbi->s_mount_opt ^= EXT2_MOUNT_XIP; } if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { spin_unlock(&sbi->s_lock); diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c index 132d4daf98c4..66ca1133827e 100644 --- a/fs/ext2/xip.c +++ b/fs/ext2/xip.c @@ -13,15 +13,3 @@ #include "ext2.h" #include "xip.h" -void ext2_xip_verify_sb(struct super_block *sb) -{ - struct ext2_sb_info *sbi = EXT2_SB(sb); - - if ((sbi->s_mount_opt & EXT2_MOUNT_XIP) && - !sb->s_bdev->bd_disk->fops->direct_access) { - sbi->s_mount_opt &= (~EXT2_MOUNT_XIP); - ext2_msg(sb, KERN_WARNING, - "warning: ignoring xip option - " - "not supported by bdev"); - } -} diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h index e7b9f0a2cc54..87eeb0460e8c 100644 --- a/fs/ext2/xip.h +++ b/fs/ext2/xip.h @@ -6,13 +6,11 @@ */ #ifdef CONFIG_EXT2_FS_XIP -extern void ext2_xip_verify_sb (struct super_block *); static inline int ext2_use_xip (struct super_block *sb) { struct ext2_sb_info *sbi = EXT2_SB(sb); return (sbi->s_mount_opt & EXT2_MOUNT_XIP); } #else -#define ext2_xip_verify_sb(sb) do { } while (0) #define ext2_use_xip(sb) 0 #endif -- cgit v1.2.3 From ed87e9202035c8564472f5d84e7d5e10f1014029 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:18 -0800 Subject: ext2: remove ext2_use_xip Replace ext2_use_xip() with test_opt(XIP) which expands to the same code Signed-off-by: Matthew Wilcox Reviewed-by: Mathieu Desnoyers Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext2/ext2.h | 4 ++++ fs/ext2/inode.c | 2 +- fs/ext2/namei.c | 4 ++-- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index e4279ead4a05..30604c4d70e6 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -380,7 +380,11 @@ struct ext2_inode { #define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */ #define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */ #define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */ +#ifdef CONFIG_FS_XIP #define EXT2_MOUNT_XIP 0x010000 /* Execute in place */ +#else +#define EXT2_MOUNT_XIP 0 +#endif #define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */ #define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */ #define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */ diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 59d6c7d43740..cba38331a124 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1394,7 +1394,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) if (S_ISREG(inode->i_mode)) { inode->i_op = &ext2_file_inode_operations; - if (ext2_use_xip(inode->i_sb)) { + if (test_opt(inode->i_sb, XIP)) { inode->i_mapping->a_ops = &ext2_aops_xip; inode->i_fop = &ext2_xip_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index c268d0af1db9..846c356af7ca 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -105,7 +105,7 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode return PTR_ERR(inode); inode->i_op = &ext2_file_inode_operations; - if (ext2_use_xip(inode->i_sb)) { + if (test_opt(inode->i_sb, XIP)) { inode->i_mapping->a_ops = &ext2_aops_xip; inode->i_fop = &ext2_xip_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { @@ -126,7 +126,7 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) return PTR_ERR(inode); inode->i_op = &ext2_file_inode_operations; - if (ext2_use_xip(inode->i_sb)) { + if (test_opt(inode->i_sb, XIP)) { inode->i_mapping->a_ops = &ext2_aops_xip; inode->i_fop = &ext2_xip_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { -- cgit v1.2.3 From 07642381d5ad800d021bb80842bfebb100b12fc2 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:22 -0800 Subject: ext2: remove xip.c and xip.h These files are now empty, so delete them Signed-off-by: Matthew Wilcox Reviewed-by: Mathieu Desnoyers Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext2/Makefile | 1 - fs/ext2/inode.c | 1 - fs/ext2/namei.c | 1 - fs/ext2/super.c | 1 - fs/ext2/xip.c | 15 --------------- fs/ext2/xip.h | 16 ---------------- 6 files changed, 35 deletions(-) delete mode 100644 fs/ext2/xip.c delete mode 100644 fs/ext2/xip.h (limited to 'fs') diff --git a/fs/ext2/Makefile b/fs/ext2/Makefile index f42af45cfd88..445b0e996a12 100644 --- a/fs/ext2/Makefile +++ b/fs/ext2/Makefile @@ -10,4 +10,3 @@ ext2-y := balloc.o dir.o file.o ialloc.o inode.o \ ext2-$(CONFIG_EXT2_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o ext2-$(CONFIG_EXT2_FS_POSIX_ACL) += acl.o ext2-$(CONFIG_EXT2_FS_SECURITY) += xattr_security.o -ext2-$(CONFIG_EXT2_FS_XIP) += xip.o diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index cba38331a124..154cbcf30f35 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -34,7 +34,6 @@ #include #include "ext2.h" #include "acl.h" -#include "xip.h" #include "xattr.h" static int __ext2_write_inode(struct inode *inode, int do_sync); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 846c356af7ca..7ca803f408b0 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -35,7 +35,6 @@ #include "ext2.h" #include "xattr.h" #include "acl.h" -#include "xip.h" static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode) { diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 83c5d51b8ede..50342583db1f 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -35,7 +35,6 @@ #include "ext2.h" #include "xattr.h" #include "acl.h" -#include "xip.h" static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es, int wait); diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c deleted file mode 100644 index 66ca1133827e..000000000000 --- a/fs/ext2/xip.c +++ /dev/null @@ -1,15 +0,0 @@ -/* - * linux/fs/ext2/xip.c - * - * Copyright (C) 2005 IBM Corporation - * Author: Carsten Otte (cotte@de.ibm.com) - */ - -#include -#include -#include -#include -#include -#include "ext2.h" -#include "xip.h" - diff --git a/fs/ext2/xip.h b/fs/ext2/xip.h deleted file mode 100644 index 87eeb0460e8c..000000000000 --- a/fs/ext2/xip.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * linux/fs/ext2/xip.h - * - * Copyright (C) 2005 IBM Corporation - * Author: Carsten Otte (cotte@de.ibm.com) - */ - -#ifdef CONFIG_EXT2_FS_XIP -static inline int ext2_use_xip (struct super_block *sb) -{ - struct ext2_sb_info *sbi = EXT2_SB(sb); - return (sbi->s_mount_opt & EXT2_MOUNT_XIP); -} -#else -#define ext2_use_xip(sb) 0 -#endif -- cgit v1.2.3 From 6cd176a51e52e5218b1aa97e1ec916bac25a9b7e Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:25 -0800 Subject: vfs,ext2: remove CONFIG_EXT2_FS_XIP and rename CONFIG_FS_XIP to CONFIG_FS_DAX The fewer Kconfig options we have the better. Use the generic CONFIG_FS_DAX to enable XIP support in ext2 as well as in the core. Signed-off-by: Matthew Wilcox Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Kconfig | 21 ++++++++++++++------- fs/Makefile | 2 +- fs/ext2/Kconfig | 11 ----------- fs/ext2/ext2.h | 2 +- fs/ext2/file.c | 4 ++-- fs/ext2/super.c | 4 ++-- include/linux/fs.h | 2 +- scripts/diffconfig | 1 - 8 files changed, 21 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index a6bb530b1ec5..5331497d5b25 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -13,13 +13,6 @@ if BLOCK source "fs/ext2/Kconfig" source "fs/ext3/Kconfig" source "fs/ext4/Kconfig" - -config FS_XIP -# execute in place - bool - depends on EXT2_FS_XIP - default y - source "fs/jbd/Kconfig" source "fs/jbd2/Kconfig" @@ -40,6 +33,20 @@ source "fs/ocfs2/Kconfig" source "fs/btrfs/Kconfig" source "fs/nilfs2/Kconfig" +config FS_DAX + bool "Direct Access (DAX) support" + depends on MMU + help + Direct Access (DAX) can be used on memory-backed block devices. + If the block device supports DAX and the filesystem supports DAX, + then you can avoid using the pagecache to buffer I/Os. Turning + on this option will compile in support for DAX; you will need to + mount the filesystem using the -o dax option. + + If you do not have a block device that is capable of using this, + or if unsure, say N. Saying Y will increase the size of the kernel + by about 5kB. + endif # BLOCK # Posix ACL utility routines diff --git a/fs/Makefile b/fs/Makefile index 0534444e257c..0f4635f7c49c 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -28,7 +28,7 @@ obj-$(CONFIG_SIGNALFD) += signalfd.o obj-$(CONFIG_TIMERFD) += timerfd.o obj-$(CONFIG_EVENTFD) += eventfd.o obj-$(CONFIG_AIO) += aio.o -obj-$(CONFIG_FS_XIP) += dax.o +obj-$(CONFIG_FS_DAX) += dax.o obj-$(CONFIG_FILE_LOCKING) += locks.o obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig index 14a6780fd034..c634874e12d9 100644 --- a/fs/ext2/Kconfig +++ b/fs/ext2/Kconfig @@ -42,14 +42,3 @@ config EXT2_FS_SECURITY If you are not using a security module that requires using extended attributes for file security labels, say N. - -config EXT2_FS_XIP - bool "Ext2 execute in place support" - depends on EXT2_FS && MMU - help - Execute in place can be used on memory-backed block devices. If you - enable this option, you can select to mount block devices which are - capable of this feature without using the page cache. - - If you do not use a block device that is capable of using this, - or if unsure, say N. 
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 30604c4d70e6..6854038c09ae 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -380,7 +380,7 @@ struct ext2_inode { #define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */ #define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */ #define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */ -#ifdef CONFIG_FS_XIP +#ifdef CONFIG_FS_DAX #define EXT2_MOUNT_XIP 0x010000 /* Execute in place */ #else #define EXT2_MOUNT_XIP 0 diff --git a/fs/ext2/file.c b/fs/ext2/file.c index a61c93fd9dce..de8174d1e973 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -25,7 +25,7 @@ #include "xattr.h" #include "acl.h" -#ifdef CONFIG_EXT2_FS_XIP +#ifdef CONFIG_FS_DAX static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { return dax_fault(vma, vmf, ext2_get_block); @@ -108,7 +108,7 @@ const struct file_operations ext2_file_operations = { .splice_write = iter_file_splice_write, }; -#ifdef CONFIG_EXT2_FS_XIP +#ifdef CONFIG_FS_DAX const struct file_operations ext2_xip_file_operations = { .llseek = generic_file_llseek, .read = new_sync_read, diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 50342583db1f..5f029d8c3a02 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -291,7 +291,7 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root) seq_puts(seq, ",grpquota"); #endif -#if defined(CONFIG_EXT2_FS_XIP) +#ifdef CONFIG_FS_DAX if (sbi->s_mount_opt & EXT2_MOUNT_XIP) seq_puts(seq, ",xip"); #endif @@ -558,7 +558,7 @@ static int parse_options(char *options, struct super_block *sb) break; #endif case Opt_xip: -#ifdef CONFIG_EXT2_FS_XIP +#ifdef CONFIG_FS_DAX set_opt (sbi->s_mount_opt, XIP); #else ext2_msg(sb, KERN_INFO, "xip option not supported"); diff --git a/include/linux/fs.h b/include/linux/fs.h index 9772d655f444..d46f8fe6a0ea 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1676,7 +1676,7 @@ struct super_operations { #define S_IMA 1024 /* Inode has an associated IMA struct */ #define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ #define S_NOSEC 4096 /* no suid or xattr security attributes */ -#ifdef CONFIG_FS_XIP +#ifdef CONFIG_FS_DAX #define S_DAX 8192 /* Direct Access, avoiding the page cache */ #else #define S_DAX 0 /* Make all the DAX code disappear */ diff --git a/scripts/diffconfig b/scripts/diffconfig index 6d672836e187..0db267d0adc9 100755 --- a/scripts/diffconfig +++ b/scripts/diffconfig @@ -28,7 +28,6 @@ If no config files are specified, .config and .config.old are used. Example usage: $ diffconfig .config config-with-some-changes -EXT2_FS_XATTR n --EXT2_FS_XIP n CRAMFS n -> y EXT2_FS y -> n LOG_BUF_SHIFT 14 -> 16 -- cgit v1.2.3 From 97443aa809a142b1e6db2ccfb046c3a962907204 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:28 -0800 Subject: ext2: remove ext2_aops_xip We shouldn't need a special address_space_operations any more Signed-off-by: Matthew Wilcox Reviewed-by: Mathieu Desnoyers Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext2/ext2.h | 1 - fs/ext2/inode.c | 7 +------ fs/ext2/namei.c | 4 ++-- 3 files changed, 3 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 6854038c09ae..ab9b3ec3bac9 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -796,7 +796,6 @@ extern const struct file_operations ext2_xip_file_operations; /* inode.c */ extern const struct address_space_operations ext2_aops; -extern const struct address_space_operations ext2_aops_xip; extern const struct address_space_operations ext2_nobh_aops; /* namei.c */ diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 154cbcf30f35..034fd42eade0 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -891,11 +891,6 @@ const struct address_space_operations ext2_aops = { .error_remove_page = generic_error_remove_page, }; -const struct address_space_operations ext2_aops_xip = { - .bmap = ext2_bmap, - .direct_IO = ext2_direct_IO, -}; - const struct address_space_operations ext2_nobh_aops = { .readpage = ext2_readpage, .readpages = ext2_readpages, @@ -1394,7 +1389,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) if (S_ISREG(inode->i_mode)) { inode->i_op = &ext2_file_inode_operations; if (test_opt(inode->i_sb, XIP)) { - inode->i_mapping->a_ops = &ext2_aops_xip; + inode->i_mapping->a_ops = &ext2_aops; inode->i_fop = &ext2_xip_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 7ca803f408b0..0db888c91bec 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -105,7 +105,7 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode inode->i_op = &ext2_file_inode_operations; if (test_opt(inode->i_sb, XIP)) { - inode->i_mapping->a_ops = &ext2_aops_xip; + inode->i_mapping->a_ops = &ext2_aops; inode->i_fop = &ext2_xip_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; @@ -126,7 +126,7 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_op = &ext2_file_inode_operations; if (test_opt(inode->i_sb, XIP)) { - inode->i_mapping->a_ops = &ext2_aops_xip; + inode->i_mapping->a_ops = &ext2_aops; inode->i_fop = &ext2_xip_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; -- cgit v1.2.3 From 9c3ce9ec58716733232b97771b10f31901caf62e Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:31 -0800 Subject: ext2: get rid of most mentions of XIP in ext2 To help people transition, accept the 'xip' mount option (and report it in /proc/mounts), but print a message encouraging people to switch over to the 'dax' option. Signed-off-by: Matthew Wilcox Reviewed-by: Mathieu Desnoyers Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/ext2.txt | 5 +++-- fs/ext2/ext2.h | 13 +++++++------ fs/ext2/file.c | 2 +- fs/ext2/inode.c | 6 +++--- fs/ext2/namei.c | 8 ++++---- fs/ext2/super.c | 25 ++++++++++++++++--------- 6 files changed, 34 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/ext2.txt b/Documentation/filesystems/ext2.txt index 67639f905f10..b9714569e472 100644 --- a/Documentation/filesystems/ext2.txt +++ b/Documentation/filesystems/ext2.txt @@ -20,6 +20,9 @@ minixdf Makes `df' act like Minix. check=none, nocheck (*) Don't do extra checking of bitmaps on mount (check=normal and check=strict options removed) +dax Use direct access (no page cache). See + Documentation/filesystems/dax.txt. + debug Extra debugging information is sent to the kernel syslog. Useful for developers. @@ -56,8 +59,6 @@ noacl Don't support POSIX ACLs. nobh Do not attach buffer_heads to file pagecache. -xip Use execute in place (no caching) if possible - grpquota,noquota,quota,usrquota Quota options are silently ignored by ext2. diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index ab9b3ec3bac9..678f9ab08c48 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -380,14 +380,15 @@ struct ext2_inode { #define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */ #define EXT2_MOUNT_XATTR_USER 0x004000 /* Extended user attributes */ #define EXT2_MOUNT_POSIX_ACL 0x008000 /* POSIX Access Control Lists */ -#ifdef CONFIG_FS_DAX -#define EXT2_MOUNT_XIP 0x010000 /* Execute in place */ -#else -#define EXT2_MOUNT_XIP 0 -#endif +#define EXT2_MOUNT_XIP 0x010000 /* Obsolete, use DAX */ #define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */ #define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */ #define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */ +#ifdef CONFIG_FS_DAX +#define EXT2_MOUNT_DAX 0x100000 /* Direct Access */ +#else +#define EXT2_MOUNT_DAX 0 +#endif #define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt @@ -792,7 +793,7 @@ extern int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync); extern const struct inode_operations ext2_file_inode_operations; extern const struct file_operations ext2_file_operations; -extern const struct file_operations ext2_xip_file_operations; +extern const struct file_operations ext2_dax_file_operations; /* inode.c */ extern const struct address_space_operations ext2_aops; diff --git a/fs/ext2/file.c b/fs/ext2/file.c index de8174d1e973..e31701713516 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -109,7 +109,7 @@ const struct file_operations ext2_file_operations = { }; #ifdef CONFIG_FS_DAX -const struct file_operations ext2_xip_file_operations = { +const struct file_operations ext2_dax_file_operations = { .llseek = generic_file_llseek, .read = new_sync_read, .write = new_sync_write, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 034fd42eade0..6434bc000125 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1286,7 +1286,7 @@ void ext2_set_inode_flags(struct inode *inode) inode->i_flags |= S_NOATIME; if (flags & EXT2_DIRSYNC_FL) inode->i_flags |= S_DIRSYNC; - if (test_opt(inode->i_sb, XIP)) + if (test_opt(inode->i_sb, DAX)) inode->i_flags |= S_DAX; } @@ -1388,9 +1388,9 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino) if (S_ISREG(inode->i_mode)) { inode->i_op = &ext2_file_inode_operations; - if (test_opt(inode->i_sb, XIP)) { + if (test_opt(inode->i_sb, DAX)) { inode->i_mapping->a_ops = &ext2_aops; 
- inode->i_fop = &ext2_xip_file_operations; + inode->i_fop = &ext2_dax_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; inode->i_fop = &ext2_file_operations; diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 0db888c91bec..148f6e3789ea 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -104,9 +104,9 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode return PTR_ERR(inode); inode->i_op = &ext2_file_inode_operations; - if (test_opt(inode->i_sb, XIP)) { + if (test_opt(inode->i_sb, DAX)) { inode->i_mapping->a_ops = &ext2_aops; - inode->i_fop = &ext2_xip_file_operations; + inode->i_fop = &ext2_dax_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; inode->i_fop = &ext2_file_operations; @@ -125,9 +125,9 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) return PTR_ERR(inode); inode->i_op = &ext2_file_inode_operations; - if (test_opt(inode->i_sb, XIP)) { + if (test_opt(inode->i_sb, DAX)) { inode->i_mapping->a_ops = &ext2_aops; - inode->i_fop = &ext2_xip_file_operations; + inode->i_fop = &ext2_dax_file_operations; } else if (test_opt(inode->i_sb, NOBH)) { inode->i_mapping->a_ops = &ext2_nobh_aops; inode->i_fop = &ext2_file_operations; diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 5f029d8c3a02..d0e746e96511 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -294,6 +294,8 @@ static int ext2_show_options(struct seq_file *seq, struct dentry *root) #ifdef CONFIG_FS_DAX if (sbi->s_mount_opt & EXT2_MOUNT_XIP) seq_puts(seq, ",xip"); + if (sbi->s_mount_opt & EXT2_MOUNT_DAX) + seq_puts(seq, ",dax"); #endif if (!test_opt(sb, RESERVATION)) @@ -402,7 +404,7 @@ enum { Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr, - Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota, + Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota, Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation }; @@ -431,6 +433,7 @@ static const match_table_t tokens = { {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_xip, "xip"}, + {Opt_dax, "dax"}, {Opt_grpquota, "grpquota"}, {Opt_ignore, "noquota"}, {Opt_quota, "quota"}, @@ -558,10 +561,14 @@ static int parse_options(char *options, struct super_block *sb) break; #endif case Opt_xip: + ext2_msg(sb, KERN_INFO, "use dax instead of xip"); + set_opt(sbi->s_mount_opt, XIP); + /* Fall through */ + case Opt_dax: #ifdef CONFIG_FS_DAX - set_opt (sbi->s_mount_opt, XIP); + set_opt(sbi->s_mount_opt, DAX); #else - ext2_msg(sb, KERN_INFO, "xip option not supported"); + ext2_msg(sb, KERN_INFO, "dax option not supported"); #endif break; @@ -905,15 +912,15 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); - if (sbi->s_mount_opt & EXT2_MOUNT_XIP) { + if (sbi->s_mount_opt & EXT2_MOUNT_DAX) { if (blocksize != PAGE_SIZE) { ext2_msg(sb, KERN_ERR, - "error: unsupported blocksize for xip"); + "error: unsupported blocksize for dax"); goto failed_mount; } if (!sb->s_bdev->bd_disk->fops->direct_access) { ext2_msg(sb, KERN_ERR, - "error: device does not support xip"); + "error: device does not support dax"); goto failed_mount; } } @@ -1286,10 +1293,10 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data) ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? 
MS_POSIXACL : 0); es = sbi->s_es; - if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT2_MOUNT_XIP) { + if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT2_MOUNT_DAX) { ext2_msg(sb, KERN_WARNING, "warning: refusing change of " - "xip flag with busy inodes while remounting"); - sbi->s_mount_opt ^= EXT2_MOUNT_XIP; + "dax flag with busy inodes while remounting"); + sbi->s_mount_opt ^= EXT2_MOUNT_DAX; } if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { spin_unlock(&sbi->s_lock); -- cgit v1.2.3 From 25726bc15731d42112b579cf73f30edbc43d3973 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:35 -0800 Subject: dax: add dax_zero_page_range This new function allows us to support hole-punch for DAX files by zeroing a partial page, as opposed to the dax_truncate_page() function which can only truncate to the end of the page. Reimplement dax_truncate_page() to call dax_zero_page_range(). [ross.zwisler@linux.intel.com: ported to 3.13-rc2] [akpm@linux-foundation.org: fix typos in comments] Signed-off-by: Matthew Wilcox Signed-off-by: Ross Zwisler Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/dax.txt | 1 + fs/dax.c | 38 ++++++++++++++++++++++++++++++++------ include/linux/fs.h | 1 + 3 files changed, 34 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index 635adaa1e425..ebcd97f03654 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -62,6 +62,7 @@ Filesystem support consists of for fault and page_mkwrite (which should probably call dax_fault() and dax_mkwrite(), passing the appropriate get_block() callback) - calling dax_truncate_page() instead of block_truncate_page() for DAX files +- calling dax_zero_page_range() instead of zero_user() for DAX files - ensuring that there is sufficient locking between reads, writes, truncates and page faults diff --git a/fs/dax.c b/fs/dax.c index ebf9b2231b0f..ed1619ec6537 100644 --- a/fs/dax.c +++ b/fs/dax.c @@ -464,31 +464,35 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, EXPORT_SYMBOL_GPL(dax_fault); /** - * dax_truncate_page - handle a partial page being truncated in a DAX file + * dax_zero_page_range - zero a range within a page of a DAX file * @inode: The file being truncated * @from: The file offset that is being truncated to + * @length: The number of bytes to zero * @get_block: The filesystem method used to translate file offsets to blocks * - * Similar to block_truncate_page(), this function can be called by a - * filesystem when it is truncating an DAX file to handle the partial page. + * This function can be called by a filesystem when it is zeroing part of a + * page in a DAX file. This is intended for hole-punch operations. If + * you are truncating a file, the helper function dax_truncate_page() may be + * more convenient. * * We work in terms of PAGE_CACHE_SIZE here for commonality with * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem * took care of disposing of the unnecessary blocks. Even if the filesystem * block size is smaller than PAGE_SIZE, we have to zero the rest of the page - * since the file might be mmaped. + * since the file might be mmapped. 
*/ -int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) +int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, + get_block_t get_block) { struct buffer_head bh; pgoff_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned length = PAGE_CACHE_ALIGN(from) - from; int err; /* Block boundary? Nothing to do */ if (!length) return 0; + BUG_ON((offset + length) > PAGE_CACHE_SIZE); memset(&bh, 0, sizeof(bh)); bh.b_size = PAGE_CACHE_SIZE; @@ -505,4 +509,26 @@ int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) return 0; } +EXPORT_SYMBOL_GPL(dax_zero_page_range); + +/** + * dax_truncate_page - handle a partial page being truncated in a DAX file + * @inode: The file being truncated + * @from: The file offset that is being truncated to + * @get_block: The filesystem method used to translate file offsets to blocks + * + * Similar to block_truncate_page(), this function can be called by a + * filesystem when it is truncating a DAX file to handle the partial page. + * + * We work in terms of PAGE_CACHE_SIZE here for commonality with + * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem + * took care of disposing of the unnecessary blocks. Even if the filesystem + * block size is smaller than PAGE_SIZE, we have to zero the rest of the page + * since the file might be mmapped. + */ +int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) +{ + unsigned length = PAGE_CACHE_ALIGN(from) - from; + return dax_zero_page_range(inode, from, length, get_block); +} EXPORT_SYMBOL_GPL(dax_truncate_page); diff --git a/include/linux/fs.h b/include/linux/fs.h index d46f8fe6a0ea..ed5a0900b94d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2589,6 +2589,7 @@ extern int nonseekable_open(struct inode * inode, struct file * filp); ssize_t dax_do_io(int rw, struct kiocb *, struct inode *, struct iov_iter *, loff_t, get_block_t, dio_iodone_t, int flags); int dax_clear_blocks(struct inode *, sector_t block, long size); +int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); int dax_truncate_page(struct inode *, loff_t from, get_block_t); int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) -- cgit v1.2.3 From 923ae0ff9250430133b3310fe62c47538cf1cbc1 Mon Sep 17 00:00:00 2001 From: Ross Zwisler Date: Mon, 16 Feb 2015 15:59:38 -0800 Subject: ext4: add DAX functionality This is a port of the DAX functionality found in the current version of ext2. [matthew.r.wilcox@intel.com: heavily tweaked] [akpm@linux-foundation.org: remap_pages went away] Signed-off-by: Ross Zwisler Reviewed-by: Andreas Dilger Signed-off-by: Matthew Wilcox Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. 
Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/dax.txt | 1 + Documentation/filesystems/ext4.txt | 4 ++ fs/ext4/ext4.h | 6 +++ fs/ext4/file.c | 49 ++++++++++++++++++++- fs/ext4/indirect.c | 18 +++++--- fs/ext4/inode.c | 89 ++++++++++++++++++++++++++------------ fs/ext4/namei.c | 10 ++++- fs/ext4/super.c | 39 ++++++++++++++++- 8 files changed, 179 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index ebcd97f03654..be376d91d058 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -73,6 +73,7 @@ or a write()) work correctly. These filesystems may be used for inspiration: - ext2: the second extended filesystem, see Documentation/filesystems/ext2.txt +- ext4: the fourth extended filesystem, see Documentation/filesystems/ext4.txt Shortcomings diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt index 919a3293aaa4..6c0108eb0137 100644 --- a/Documentation/filesystems/ext4.txt +++ b/Documentation/filesystems/ext4.txt @@ -386,6 +386,10 @@ max_dir_size_kb=n This limits the size of directories so that any i_version Enable 64-bit inode version support. This option is off by default. +dax Use direct access (no page cache). See + Documentation/filesystems/dax.txt. Note that + this option is incompatible with data=journal. + Data Mode ========= There are 3 different data modes: diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index a75fba67bb1f..982d934fd9ac 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -965,6 +965,11 @@ struct ext4_inode_info { #define EXT4_MOUNT_ERRORS_MASK 0x00070 #define EXT4_MOUNT_MINIX_DF 0x00080 /* Mimics the Minix statfs */ #define EXT4_MOUNT_NOLOAD 0x00100 /* Don't use existing journal*/ +#ifdef CONFIG_FS_DAX +#define EXT4_MOUNT_DAX 0x00200 /* Direct Access */ +#else +#define EXT4_MOUNT_DAX 0 +#endif #define EXT4_MOUNT_DATA_FLAGS 0x00C00 /* Mode for data writes: */ #define EXT4_MOUNT_JOURNAL_DATA 0x00400 /* Write data to journal */ #define EXT4_MOUNT_ORDERED_DATA 0x00800 /* Flush data before commit */ @@ -2578,6 +2583,7 @@ extern const struct file_operations ext4_dir_operations; /* file.c */ extern const struct inode_operations ext4_file_inode_operations; extern const struct file_operations ext4_file_operations; +extern const struct file_operations ext4_dax_file_operations; extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin); /* inline.c */ diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 7cb592386121..33a09da16c9c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -95,7 +95,7 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(iocb->ki_filp); struct mutex *aio_mutex = NULL; struct blk_plug plug; - int o_direct = file->f_flags & O_DIRECT; + int o_direct = io_is_direct(file); int overwrite = 0; size_t length = iov_iter_count(from); ssize_t ret; @@ -191,6 +191,26 @@ errout: return ret; } +#ifdef CONFIG_FS_DAX +static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + return dax_fault(vma, vmf, ext4_get_block); + /* Is this the right get_block? 
*/ +} + +static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + return dax_mkwrite(vma, vmf, ext4_get_block); +} + +static const struct vm_operations_struct ext4_dax_vm_ops = { + .fault = ext4_dax_fault, + .page_mkwrite = ext4_dax_mkwrite, +}; +#else +#define ext4_dax_vm_ops ext4_file_vm_ops +#endif + static const struct vm_operations_struct ext4_file_vm_ops = { .fault = filemap_fault, .map_pages = filemap_map_pages, @@ -200,7 +220,12 @@ static const struct vm_operations_struct ext4_file_vm_ops = { static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); - vma->vm_ops = &ext4_file_vm_ops; + if (IS_DAX(file_inode(file))) { + vma->vm_ops = &ext4_dax_vm_ops; + vma->vm_flags |= VM_MIXEDMAP; + } else { + vma->vm_ops = &ext4_file_vm_ops; + } return 0; } @@ -599,6 +624,26 @@ const struct file_operations ext4_file_operations = { .fallocate = ext4_fallocate, }; +#ifdef CONFIG_FS_DAX +const struct file_operations ext4_dax_file_operations = { + .llseek = ext4_llseek, + .read = new_sync_read, + .write = new_sync_write, + .read_iter = generic_file_read_iter, + .write_iter = ext4_file_write_iter, + .unlocked_ioctl = ext4_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ext4_compat_ioctl, +#endif + .mmap = ext4_file_mmap, + .open = ext4_file_open, + .release = ext4_release_file, + .fsync = ext4_sync_file, + /* Splice not yet supported with DAX */ + .fallocate = ext4_fallocate, +}; +#endif + const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_getattr, diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 36b369697a13..6b9878a24182 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -689,14 +689,22 @@ retry: inode_dio_done(inode); goto locked; } - ret = __blockdev_direct_IO(rw, iocb, inode, - inode->i_sb->s_bdev, iter, offset, - ext4_get_block, NULL, NULL, 0); + if (IS_DAX(inode)) + ret = dax_do_io(rw, iocb, inode, iter, offset, + ext4_get_block, NULL, 0); + else + ret = __blockdev_direct_IO(rw, iocb, inode, + inode->i_sb->s_bdev, iter, offset, + ext4_get_block, NULL, NULL, 0); inode_dio_done(inode); } else { locked: - ret = blockdev_direct_IO(rw, iocb, inode, iter, - offset, ext4_get_block); + if (IS_DAX(inode)) + ret = dax_do_io(rw, iocb, inode, iter, offset, + ext4_get_block, NULL, DIO_LOCKING); + else + ret = blockdev_direct_IO(rw, iocb, inode, iter, + offset, ext4_get_block); if (unlikely((rw & WRITE) && ret < 0)) { loff_t isize = i_size_read(inode); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5653fa42930b..28555f191b62 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -657,6 +657,18 @@ has_zeroout: return retval; } +static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate) +{ + struct inode *inode = bh->b_assoc_map->host; + /* XXX: breaks on 32-bit > 16GB. Is that even supported? */ + loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits; + int err; + if (!uptodate) + return; + WARN_ON(!buffer_unwritten(bh)); + err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size); +} + /* Maximum number of blocks we map for direct IO at once. 
*/ #define DIO_MAX_BLOCKS 4096 @@ -694,6 +706,11 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock, map_bh(bh, inode->i_sb, map.m_pblk); bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags; + if (IS_DAX(inode) && buffer_unwritten(bh) && !io_end) { + bh->b_assoc_map = inode->i_mapping; + bh->b_private = (void *)(unsigned long)iblock; + bh->b_end_io = ext4_end_io_unwritten; + } if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN) set_buffer_defer_completion(bh); bh->b_size = inode->i_sb->s_blocksize * map.m_len; @@ -3010,13 +3027,14 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, get_block_func = ext4_get_block_write; dio_flags = DIO_LOCKING; } - ret = __blockdev_direct_IO(rw, iocb, inode, - inode->i_sb->s_bdev, iter, - offset, - get_block_func, - ext4_end_io_dio, - NULL, - dio_flags); + if (IS_DAX(inode)) + ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func, + ext4_end_io_dio, dio_flags); + else + ret = __blockdev_direct_IO(rw, iocb, inode, + inode->i_sb->s_bdev, iter, offset, + get_block_func, + ext4_end_io_dio, NULL, dio_flags); /* * Put our reference to io_end. This can free the io_end structure e.g. @@ -3180,19 +3198,12 @@ void ext4_set_aops(struct inode *inode) inode->i_mapping->a_ops = &ext4_aops; } -/* - * ext4_block_zero_page_range() zeros out a mapping of length 'length' - * starting from file offset 'from'. The range to be zero'd must - * be contained with in one block. If the specified range exceeds - * the end of the block it will be shortened to end of the block - * that cooresponds to 'from' - */ -static int ext4_block_zero_page_range(handle_t *handle, +static int __ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) { ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; unsigned offset = from & (PAGE_CACHE_SIZE-1); - unsigned blocksize, max, pos; + unsigned blocksize, pos; ext4_lblk_t iblock; struct inode *inode = mapping->host; struct buffer_head *bh; @@ -3205,14 +3216,6 @@ static int ext4_block_zero_page_range(handle_t *handle, return -ENOMEM; blocksize = inode->i_sb->s_blocksize; - max = blocksize - (offset & (blocksize - 1)); - - /* - * correct length if it does not fall between - * 'from' and the end of the block - */ - if (length > max || length < 0) - length = max; iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); @@ -3277,6 +3280,33 @@ unlock: return err; } +/* + * ext4_block_zero_page_range() zeros out a mapping of length 'length' + * starting from file offset 'from'. The range to be zero'd must + * be contained with in one block. If the specified range exceeds + * the end of the block it will be shortened to end of the block + * that cooresponds to 'from' + */ +static int ext4_block_zero_page_range(handle_t *handle, + struct address_space *mapping, loff_t from, loff_t length) +{ + struct inode *inode = mapping->host; + unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned blocksize = inode->i_sb->s_blocksize; + unsigned max = blocksize - (offset & (blocksize - 1)); + + /* + * correct length if it does not fall between + * 'from' and the end of the block + */ + if (length > max || length < 0) + length = max; + + if (IS_DAX(inode)) + return dax_zero_page_range(inode, from, length, ext4_get_block); + return __ext4_block_zero_page_range(handle, mapping, from, length); +} + /* * ext4_block_truncate_page() zeroes out a mapping from file offset `from' * up to the end of the block which corresponds to `from'. 
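(Illustrative aside, not part of the patch series.) dax_truncate_page() in the earlier dax patch and the ext4_block_zero_page_range() wrapper added above rely on the same arithmetic: when truncating to offset 'from', the tail of the final page that still needs zeroing is PAGE_CACHE_ALIGN(from) - from, which is zero exactly when 'from' already sits on a page boundary. A small userspace sketch of that calculation follows, assuming a 4096-byte page purely for illustration.

/* sketch: bytes to zero in the last partial page when truncating to 'from' */
#include <stdio.h>

#define EX_PAGE_SIZE    4096UL          /* assumed page size for the example */
#define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

static unsigned long tail_to_zero(unsigned long from)
{
        /* mirrors: length = PAGE_CACHE_ALIGN(from) - from */
        return EX_PAGE_ALIGN(from) - from;
}

int main(void)
{
        unsigned long offsets[] = { 0, 100, 4096, 5000, 8191 };
        size_t i;

        for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++)
                printf("truncate to %5lu -> zero %4lu byte(s) of the last page\n",
                       offsets[i], tail_to_zero(offsets[i]));
        /* 0 and 4096 print 0: page-aligned truncation needs no zeroing,
         * matching the "Block boundary? Nothing to do" early return in dax.c. */
        return 0;
}

Pushing the length calculation into dax_truncate_page() is what lets dax_zero_page_range() double as the hole-punch primitive described in its commit message.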
@@ -3798,8 +3828,10 @@ void ext4_set_inode_flags(struct inode *inode) new_fl |= S_NOATIME; if (flags & EXT4_DIRSYNC_FL) new_fl |= S_DIRSYNC; + if (test_opt(inode->i_sb, DAX)) + new_fl |= S_DAX; inode_set_flags(inode, new_fl, - S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX); } /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ @@ -4052,7 +4084,10 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) if (S_ISREG(inode->i_mode)) { inode->i_op = &ext4_file_inode_operations; - inode->i_fop = &ext4_file_operations; + if (test_opt(inode->i_sb, DAX)) + inode->i_fop = &ext4_dax_file_operations; + else + inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ext4_dir_inode_operations; @@ -4534,7 +4569,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) * Truncate pagecache after we've waited for commit * in data=journal mode to make pages freeable. */ - truncate_pagecache(inode, inode->i_size); + truncate_pagecache(inode, inode->i_size); } /* * We want to call ext4_truncate() even if attr->ia_size == diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 2291923dae4e..28fe71a2904c 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2235,7 +2235,10 @@ retry: err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; - inode->i_fop = &ext4_file_operations; + if (test_opt(inode->i_sb, DAX)) + inode->i_fop = &ext4_dax_file_operations; + else + inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); err = ext4_add_nondir(handle, dentry, inode); if (!err && IS_DIRSYNC(dir)) @@ -2299,7 +2302,10 @@ retry: err = PTR_ERR(inode); if (!IS_ERR(inode)) { inode->i_op = &ext4_file_inode_operations; - inode->i_fop = &ext4_file_operations; + if (test_opt(inode->i_sb, DAX)) + inode->i_fop = &ext4_dax_file_operations; + else + inode->i_fop = &ext4_file_operations; ext4_set_aops(inode); d_tmpfile(dentry, inode); err = ext4_orphan_add(handle, inode); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 64c39c7c594f..10e8c6b7ca08 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1124,7 +1124,7 @@ enum { Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, - Opt_usrquota, Opt_grpquota, Opt_i_version, + Opt_usrquota, Opt_grpquota, Opt_i_version, Opt_dax, Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit, Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity, Opt_inode_readahead_blks, Opt_journal_ioprio, @@ -1187,6 +1187,7 @@ static const match_table_t tokens = { {Opt_barrier, "barrier"}, {Opt_nobarrier, "nobarrier"}, {Opt_i_version, "i_version"}, + {Opt_dax, "dax"}, {Opt_stripe, "stripe=%u"}, {Opt_delalloc, "delalloc"}, {Opt_nodelalloc, "nodelalloc"}, @@ -1371,6 +1372,7 @@ static const struct mount_opts { {Opt_min_batch_time, 0, MOPT_GTE0}, {Opt_inode_readahead_blks, 0, MOPT_GTE0}, {Opt_init_itable, 0, MOPT_GTE0}, + {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET}, {Opt_stripe, 0, MOPT_GTE0}, {Opt_resuid, 0, MOPT_GTE0}, {Opt_resgid, 0, MOPT_GTE0}, @@ -1606,6 +1608,11 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, return -1; } sbi->s_jquota_fmt = m->mount_opt; +#endif +#ifndef CONFIG_FS_DAX + } else if (token == Opt_dax) { + ext4_msg(sb, KERN_INFO, "dax option not supported"); + return -1; #endif } else { if (!args->from) @@ -3589,6 +3596,11 @@ static int 
ext4_fill_super(struct super_block *sb, void *data, int silent) "both data=journal and dioread_nolock"); goto failed_mount; } + if (test_opt(sb, DAX)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and dax"); + goto failed_mount; + } if (test_opt(sb, DELALLOC)) clear_opt(sb, DELALLOC); } @@ -3652,6 +3664,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } + if (sbi->s_mount_opt & EXT4_MOUNT_DAX) { + if (blocksize != PAGE_SIZE) { + ext4_msg(sb, KERN_ERR, + "error: unsupported blocksize for dax"); + goto failed_mount; + } + if (!sb->s_bdev->bd_disk->fops->direct_access) { + ext4_msg(sb, KERN_ERR, + "error: device does not support dax"); + goto failed_mount; + } + } + if (sb->s_blocksize != blocksize) { /* Validate the filesystem blocksize */ if (!sb_set_blocksize(sb, blocksize)) { @@ -4869,6 +4894,18 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) err = -EINVAL; goto restore_opts; } + if (test_opt(sb, DAX)) { + ext4_msg(sb, KERN_ERR, "can't mount with " + "both data=journal and dax"); + err = -EINVAL; + goto restore_opts; + } + } + + if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) { + ext4_msg(sb, KERN_WARNING, "warning: refusing change of " + "dax flag with busy inodes while remounting"); + sbi->s_mount_opt ^= EXT4_MOUNT_DAX; } if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) -- cgit v1.2.3 From d92576f1167cacf7844e5993f343eed4a6d8a147 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Mon, 16 Feb 2015 15:59:44 -0800 Subject: dax: does not work correctly with virtual aliasing caches The DAX code accesses the underlying storage through the kernel's linear mapping, which may not be cache-coherent with user mappings on ARM, MIPS or SPARC. Temporarily disable the DAX code until this problem is resolved. The original XIP code also had this problem, but it was never noticed. Signed-off-by: Matthew Wilcox Cc: Andreas Dilger Cc: Boaz Harrosh Cc: Christoph Hellwig Cc: Dave Chinner Cc: Jan Kara Cc: Jens Axboe Cc: Kirill A. Shutemov Cc: Mathieu Desnoyers Cc: Randy Dunlap Cc: Ross Zwisler Cc: Theodore Ts'o Cc: Ralf Baechle Cc: Russell King Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/filesystems/dax.txt | 3 +++ fs/Kconfig | 1 + 2 files changed, 4 insertions(+) (limited to 'fs') diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt index be376d91d058..baf41118660d 100644 --- a/Documentation/filesystems/dax.txt +++ b/Documentation/filesystems/dax.txt @@ -82,6 +82,9 @@ Shortcomings Even if the kernel or its modules are stored on a filesystem that supports DAX on a block device that supports DAX, they will still be copied into RAM. +The DAX code does not work correctly on architectures which have virtually +mapped caches such as ARM, MIPS and SPARC. + Calling get_user_pages() on a range of user memory that has been mmaped from a DAX file will fail as there are no 'struct page' to describe those pages. This problem is being worked on. That means that O_DIRECT diff --git a/fs/Kconfig b/fs/Kconfig index 5331497d5b25..ec35851e5b71 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -36,6 +36,7 @@ source "fs/nilfs2/Kconfig" config FS_DAX bool "Direct Access (DAX) support" depends on MMU + depends on !(ARM || MIPS || SPARC) help Direct Access (DAX) can be used on memory-backed block devices. 
If the block device supports DAX and the filesystem supports DAX, -- cgit v1.2.3 From 026749a86ebff68cb2accdcd29872d36ac148920 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 15:59:50 -0800 Subject: ocfs2: prepare some interfaces used in append direct io Currently in case of append O_DIRECT write (block not allocated yet), ocfs2 will fall back to buffered I/O. This has some disadvantages. Firstly, it is not the behavior as expected. Secondly, it will consume huge page cache, e.g. in mass backup scenario. Thirdly, modern filesystems such as ext4 support this feature. In this patch set, the direct I/O write doesn't fallback to buffer I/O write any more because the allocate blocks are enabled in direct I/O now. This patch (of 9): Prepare some interfaces which will be used in append O_DIRECT write. Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Mark Fasheh Cc: Joel Becker Cc: Xuejiufei Cc: Junxiao Bi Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/file.c | 11 +++++++++-- fs/ocfs2/file.h | 9 +++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index e0f04d55fd05..2fce3c40ad27 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -295,7 +295,7 @@ out: return ret; } -static int ocfs2_set_inode_size(handle_t *handle, +int ocfs2_set_inode_size(handle_t *handle, struct inode *inode, struct buffer_head *fe_bh, u64 new_i_size) @@ -441,7 +441,7 @@ out: return status; } -static int ocfs2_truncate_file(struct inode *inode, +int ocfs2_truncate_file(struct inode *inode, struct buffer_head *di_bh, u64 new_i_size) { @@ -709,6 +709,13 @@ leave: return status; } +int ocfs2_extend_allocation(struct inode *inode, u32 logical_start, + u32 clusters_to_add, int mark_unwritten) +{ + return __ocfs2_extend_allocation(inode, logical_start, + clusters_to_add, mark_unwritten); +} + /* * While a write will already be ordering the data, a truncate will not. * Thus, we need to explicitly order the zeroed pages. diff --git a/fs/ocfs2/file.h b/fs/ocfs2/file.h index 97bf761c9e7c..e8c62f22215c 100644 --- a/fs/ocfs2/file.h +++ b/fs/ocfs2/file.h @@ -51,13 +51,22 @@ int ocfs2_add_inode_data(struct ocfs2_super *osb, struct ocfs2_alloc_context *data_ac, struct ocfs2_alloc_context *meta_ac, enum ocfs2_alloc_restarted *reason_ret); +int ocfs2_set_inode_size(handle_t *handle, + struct inode *inode, + struct buffer_head *fe_bh, + u64 new_i_size); int ocfs2_simple_size_update(struct inode *inode, struct buffer_head *di_bh, u64 new_i_size); +int ocfs2_truncate_file(struct inode *inode, + struct buffer_head *di_bh, + u64 new_i_size); int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh, u64 new_i_size, u64 zero_to); int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh, loff_t zero_to); +int ocfs2_extend_allocation(struct inode *inode, u32 logical_start, + u32 clusters_to_add, int mark_unwritten); int ocfs2_setattr(struct dentry *dentry, struct iattr *attr); int ocfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); -- cgit v1.2.3 From 06ee5c75b57564435933fc0ffa72dc18a2fda0e7 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 15:59:54 -0800 Subject: ocfs2: add functions to add and remove inode in orphan dir Add functions to add inode to orphan dir and remove inode in orphan dir. Here we do not call ocfs2_prepare_orphan_dir and ocfs2_orphan_add directly. 
Because append O_DIRECT will add inode to orphan two and may result in more than one orphan entry for the same inode. [akpm@linux-foundation.org: avoid dynamic stack allocation] Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Junxiao Bi Cc: Joel Becker Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Cc: Fengguang Wu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/inode.c | 2 +- fs/ocfs2/journal.h | 5 ++ fs/ocfs2/namei.c | 247 ++++++++++++++++++++++++++++++++++++++++++++++------ fs/ocfs2/namei.h | 8 +- fs/ocfs2/ocfs2_fs.h | 6 +- 5 files changed, 238 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index c8b25de9efbb..3025c0da6b8a 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -648,7 +648,7 @@ static int ocfs2_remove_inode(struct inode *inode, if (!(OCFS2_I(inode)->ip_flags & OCFS2_INODE_SKIP_ORPHAN_DIR)) { status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode, - orphan_dir_bh); + orphan_dir_bh, false); if (status < 0) { mlog_errno(status); goto bail_commit; diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 7f8cde94abfe..f4cd3c3e9fb7 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -472,6 +472,11 @@ static inline int ocfs2_unlink_credits(struct super_block *sb) * orphan dir index leaf */ #define OCFS2_DELETE_INODE_CREDITS (3 * OCFS2_INODE_UPDATE_CREDITS + 4) +/* dinode + orphan dir dinode + extent tree leaf block + orphan dir entry + + * orphan dir index root + orphan dir index leaf */ +#define OCFS2_INODE_ADD_TO_ORPHAN_CREDITS (2 * OCFS2_INODE_UPDATE_CREDITS + 4) +#define OCFS2_INODE_DEL_FROM_ORPHAN_CREDITS OCFS2_INODE_ADD_TO_ORPHAN_CREDITS + /* dinode update, old dir dinode update, new dir dinode update, old * dir dir entry, new dir dir entry, dir entry update for renaming * directory + target unlink + 3 x dir index leaves */ diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 914c121ec890..7eec45d0d85f 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -79,7 +79,8 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, struct inode **ret_orphan_dir, u64 blkno, char *name, - struct ocfs2_dir_lookup_result *lookup); + struct ocfs2_dir_lookup_result *lookup, + bool dio); static int ocfs2_orphan_add(struct ocfs2_super *osb, handle_t *handle, @@ -87,7 +88,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, struct buffer_head *fe_bh, char *name, struct ocfs2_dir_lookup_result *lookup, - struct inode *orphan_dir_inode); + struct inode *orphan_dir_inode, + bool dio); static int ocfs2_create_symlink_data(struct ocfs2_super *osb, handle_t *handle, @@ -104,6 +106,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); /* An orphan dir name is an 8 byte value, printed as a hex string */ #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) +#define OCFS2_DIO_ORPHAN_PREFIX "dio-" +#define OCFS2_DIO_ORPHAN_PREFIX_LEN 4 static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) @@ -952,7 +956,8 @@ static int ocfs2_unlink(struct inode *dir, if (ocfs2_inode_is_unlinkable(inode)) { status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, OCFS2_I(inode)->ip_blkno, - orphan_name, &orphan_insert); + orphan_name, &orphan_insert, + false); if (status < 0) { mlog_errno(status); goto leave; @@ -1004,7 +1009,7 @@ static int ocfs2_unlink(struct inode *dir, if (is_unlinkable) { status = ocfs2_orphan_add(osb, handle, inode, fe_bh, - orphan_name, &orphan_insert, orphan_dir); + 
orphan_name, &orphan_insert, orphan_dir, false); if (status < 0) mlog_errno(status); } @@ -1440,7 +1445,8 @@ static int ocfs2_rename(struct inode *old_dir, if (S_ISDIR(new_inode->i_mode) || (new_inode->i_nlink == 1)) { status = ocfs2_prepare_orphan_dir(osb, &orphan_dir, OCFS2_I(new_inode)->ip_blkno, - orphan_name, &orphan_insert); + orphan_name, &orphan_insert, + false); if (status < 0) { mlog_errno(status); goto bail; @@ -1507,7 +1513,7 @@ static int ocfs2_rename(struct inode *old_dir, if (should_add_orphan) { status = ocfs2_orphan_add(osb, handle, new_inode, newfe_bh, orphan_name, - &orphan_insert, orphan_dir); + &orphan_insert, orphan_dir, false); if (status < 0) { mlog_errno(status); goto bail; @@ -2088,12 +2094,28 @@ static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode, struct buffer_head *orphan_dir_bh, u64 blkno, char *name, - struct ocfs2_dir_lookup_result *lookup) + struct ocfs2_dir_lookup_result *lookup, + bool dio) { int ret; struct ocfs2_super *osb = OCFS2_SB(orphan_dir_inode->i_sb); + int namelen = dio ? + (OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN) : + OCFS2_ORPHAN_NAMELEN; + + if (dio) { + ret = snprintf(name, OCFS2_DIO_ORPHAN_PREFIX_LEN + 1, "%s", + OCFS2_DIO_ORPHAN_PREFIX); + if (ret != OCFS2_DIO_ORPHAN_PREFIX_LEN) { + ret = -EINVAL; + mlog_errno(ret); + return ret; + } - ret = ocfs2_blkno_stringify(blkno, name); + ret = ocfs2_blkno_stringify(blkno, + name + OCFS2_DIO_ORPHAN_PREFIX_LEN); + } else + ret = ocfs2_blkno_stringify(blkno, name); if (ret < 0) { mlog_errno(ret); return ret; @@ -2101,7 +2123,7 @@ static int __ocfs2_prepare_orphan_dir(struct inode *orphan_dir_inode, ret = ocfs2_prepare_dir_for_insert(osb, orphan_dir_inode, orphan_dir_bh, name, - OCFS2_ORPHAN_NAMELEN, lookup); + namelen, lookup); if (ret < 0) { mlog_errno(ret); return ret; @@ -2128,7 +2150,8 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, struct inode **ret_orphan_dir, u64 blkno, char *name, - struct ocfs2_dir_lookup_result *lookup) + struct ocfs2_dir_lookup_result *lookup, + bool dio) { struct inode *orphan_dir_inode = NULL; struct buffer_head *orphan_dir_bh = NULL; @@ -2142,7 +2165,7 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb, } ret = __ocfs2_prepare_orphan_dir(orphan_dir_inode, orphan_dir_bh, - blkno, name, lookup); + blkno, name, lookup, dio); if (ret < 0) { mlog_errno(ret); goto out; @@ -2170,12 +2193,16 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, struct buffer_head *fe_bh, char *name, struct ocfs2_dir_lookup_result *lookup, - struct inode *orphan_dir_inode) + struct inode *orphan_dir_inode, + bool dio) { struct buffer_head *orphan_dir_bh = NULL; int status = 0; struct ocfs2_dinode *orphan_fe; struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data; + int namelen = dio ? + (OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN) : + OCFS2_ORPHAN_NAMELEN; trace_ocfs2_orphan_add_begin( (unsigned long long)OCFS2_I(inode)->ip_blkno); @@ -2219,7 +2246,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, ocfs2_journal_dirty(handle, orphan_dir_bh); status = __ocfs2_add_entry(handle, orphan_dir_inode, name, - OCFS2_ORPHAN_NAMELEN, inode, + namelen, inode, OCFS2_I(inode)->ip_blkno, orphan_dir_bh, lookup); if (status < 0) { @@ -2227,13 +2254,21 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb, goto rollback; } - fe->i_flags |= cpu_to_le32(OCFS2_ORPHANED_FL); - OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR; + if (dio) { + /* Update flag OCFS2_DIO_ORPHANED_FL and record the orphan + * slot. 
+ */ + fe->i_flags |= cpu_to_le32(OCFS2_DIO_ORPHANED_FL); + fe->i_dio_orphaned_slot = cpu_to_le16(osb->slot_num); + } else { + fe->i_flags |= cpu_to_le32(OCFS2_ORPHANED_FL); + OCFS2_I(inode)->ip_flags &= ~OCFS2_INODE_SKIP_ORPHAN_DIR; - /* Record which orphan dir our inode now resides - * in. delete_inode will use this to determine which orphan - * dir to lock. */ - fe->i_orphaned_slot = cpu_to_le16(osb->slot_num); + /* Record which orphan dir our inode now resides + * in. delete_inode will use this to determine which orphan + * dir to lock. */ + fe->i_orphaned_slot = cpu_to_le16(osb->slot_num); + } ocfs2_journal_dirty(handle, fe_bh); @@ -2258,14 +2293,28 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, handle_t *handle, struct inode *orphan_dir_inode, struct inode *inode, - struct buffer_head *orphan_dir_bh) + struct buffer_head *orphan_dir_bh, + bool dio) { - char name[OCFS2_ORPHAN_NAMELEN + 1]; + const int namelen = OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN; + char name[namelen + 1]; struct ocfs2_dinode *orphan_fe; int status = 0; struct ocfs2_dir_lookup_result lookup = { NULL, }; - status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); + if (dio) { + status = snprintf(name, OCFS2_DIO_ORPHAN_PREFIX_LEN + 1, "%s", + OCFS2_DIO_ORPHAN_PREFIX); + if (status != OCFS2_DIO_ORPHAN_PREFIX_LEN) { + status = -EINVAL; + mlog_errno(status); + return status; + } + + status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, + name + OCFS2_DIO_ORPHAN_PREFIX_LEN); + } else + status = ocfs2_blkno_stringify(OCFS2_I(inode)->ip_blkno, name); if (status < 0) { mlog_errno(status); goto leave; @@ -2273,10 +2322,10 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, trace_ocfs2_orphan_del( (unsigned long long)OCFS2_I(orphan_dir_inode)->ip_blkno, - name, OCFS2_ORPHAN_NAMELEN); + name, namelen); /* find it's spot in the orphan directory */ - status = ocfs2_find_entry(name, OCFS2_ORPHAN_NAMELEN, orphan_dir_inode, + status = ocfs2_find_entry(name, namelen, orphan_dir_inode, &lookup); if (status) { mlog_errno(status); @@ -2376,7 +2425,8 @@ static int ocfs2_prep_new_orphaned_file(struct inode *dir, } ret = __ocfs2_prepare_orphan_dir(orphan_dir, orphan_dir_bh, - di_blkno, orphan_name, orphan_insert); + di_blkno, orphan_name, orphan_insert, + false); if (ret < 0) { mlog_errno(ret); goto out; @@ -2482,7 +2532,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, di = (struct ocfs2_dinode *)new_di_bh->b_data; status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name, - &orphan_insert, orphan_dir); + &orphan_insert, orphan_dir, false); if (status < 0) { mlog_errno(status); goto leave; @@ -2527,6 +2577,149 @@ leave: return status; } +int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb, + struct inode *inode) +{ + char orphan_name[OCFS2_DIO_ORPHAN_PREFIX_LEN + OCFS2_ORPHAN_NAMELEN + 1]; + struct inode *orphan_dir_inode = NULL; + struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; + struct buffer_head *di_bh = NULL; + int status = 0; + handle_t *handle = NULL; + + status = ocfs2_inode_lock(inode, &di_bh, 1); + if (status < 0) { + mlog_errno(status); + goto bail; + } + + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir_inode, + OCFS2_I(inode)->ip_blkno, + orphan_name, + &orphan_insert, + true); + if (status < 0) { + mlog_errno(status); + goto bail_unlock_inode; + } + + handle = ocfs2_start_trans(osb, + OCFS2_INODE_ADD_TO_ORPHAN_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + goto bail_unlock_orphan; + } + + status = ocfs2_orphan_add(osb, handle, inode, di_bh, orphan_name, + 
&orphan_insert, orphan_dir_inode, true); + if (status) + mlog_errno(status); + + ocfs2_commit_trans(osb, handle); + +bail_unlock_orphan: + ocfs2_inode_unlock(orphan_dir_inode, 1); + mutex_unlock(&orphan_dir_inode->i_mutex); + iput(orphan_dir_inode); + + ocfs2_free_dir_lookup_result(&orphan_insert); + +bail_unlock_inode: + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + +bail: + return status; +} + +int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb, + struct inode *inode, int update_isize, + loff_t end) +{ + struct inode *orphan_dir_inode = NULL; + struct buffer_head *orphan_dir_bh = NULL; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di = NULL; + handle_t *handle = NULL; + int status = 0; + + status = ocfs2_inode_lock(inode, &di_bh, 1); + if (status < 0) { + mlog_errno(status); + goto bail; + } + di = (struct ocfs2_dinode *) di_bh->b_data; + + orphan_dir_inode = ocfs2_get_system_file_inode(osb, + ORPHAN_DIR_SYSTEM_INODE, + le16_to_cpu(di->i_dio_orphaned_slot)); + if (!orphan_dir_inode) { + status = -ENOENT; + mlog_errno(status); + goto bail_unlock_inode; + } + + mutex_lock(&orphan_dir_inode->i_mutex); + status = ocfs2_inode_lock(orphan_dir_inode, &orphan_dir_bh, 1); + if (status < 0) { + mutex_unlock(&orphan_dir_inode->i_mutex); + iput(orphan_dir_inode); + mlog_errno(status); + goto bail_unlock_inode; + } + + handle = ocfs2_start_trans(osb, + OCFS2_INODE_DEL_FROM_ORPHAN_CREDITS); + if (IS_ERR(handle)) { + status = PTR_ERR(handle); + goto bail_unlock_orphan; + } + + BUG_ON(!(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))); + + status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, + inode, orphan_dir_bh, true); + if (status < 0) { + mlog_errno(status); + goto bail_commit; + } + + status = ocfs2_journal_access_di(handle, + INODE_CACHE(inode), + di_bh, + OCFS2_JOURNAL_ACCESS_WRITE); + if (status < 0) { + mlog_errno(status); + goto bail_commit; + } + + di->i_flags &= ~cpu_to_le32(OCFS2_DIO_ORPHANED_FL); + di->i_dio_orphaned_slot = 0; + + if (update_isize) { + status = ocfs2_set_inode_size(handle, inode, di_bh, end); + if (status) + mlog_errno(status); + } else + ocfs2_journal_dirty(handle, di_bh); + +bail_commit: + ocfs2_commit_trans(osb, handle); + +bail_unlock_orphan: + ocfs2_inode_unlock(orphan_dir_inode, 1); + mutex_unlock(&orphan_dir_inode->i_mutex); + brelse(orphan_dir_bh); + iput(orphan_dir_inode); + +bail_unlock_inode: + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + +bail: + return status; +} + int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, struct inode *inode, struct dentry *dentry) @@ -2615,7 +2808,7 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, } status = ocfs2_orphan_del(osb, handle, orphan_dir_inode, inode, - orphan_dir_bh); + orphan_dir_bh, false); if (status < 0) { mlog_errno(status); goto out_commit; diff --git a/fs/ocfs2/namei.h b/fs/ocfs2/namei.h index e5d059d4f115..5ddecce172fa 100644 --- a/fs/ocfs2/namei.h +++ b/fs/ocfs2/namei.h @@ -34,10 +34,16 @@ int ocfs2_orphan_del(struct ocfs2_super *osb, handle_t *handle, struct inode *orphan_dir_inode, struct inode *inode, - struct buffer_head *orphan_dir_bh); + struct buffer_head *orphan_dir_bh, + bool dio); int ocfs2_create_inode_in_orphan(struct inode *dir, int mode, struct inode **new_inode); +int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb, + struct inode *inode); +int ocfs2_del_inode_from_orphan(struct ocfs2_super *osb, + struct inode *inode, int update_isize, + loff_t end); int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, struct inode *new_inode, struct dentry 
*new_dentry); diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 938387a10d5d..cf4fa43af78e 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -229,6 +229,8 @@ #define OCFS2_CHAIN_FL (0x00000400) /* Chain allocator */ #define OCFS2_DEALLOC_FL (0x00000800) /* Truncate log */ #define OCFS2_QUOTA_FL (0x00001000) /* Quota file */ +#define OCFS2_DIO_ORPHANED_FL (0X00002000) /* On the orphan list especially + * for dio */ /* * Flags on ocfs2_dinode.i_dyn_features @@ -729,7 +731,9 @@ struct ocfs2_dinode { inode belongs to. Only valid if allocated from a discontiguous block group */ -/*A0*/ __le64 i_reserved2[3]; +/*A0*/ __le16 i_dio_orphaned_slot; /* only used for append dio write */ + __le16 i_reserved1[3]; + __le64 i_reserved2[2]; /*B8*/ union { __le64 i_pad1; /* Generic way to refer to this 64bit union */ -- cgit v1.2.3 From ed460cffc26ba4ec663a89589d81290ca92c5010 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 15:59:57 -0800 Subject: ocfs2: add orphan recovery types in ocfs2_recover_orphans Define two orphan recovery types, which indicates if need truncate file or not. Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Junxiao Bi Cc: Joel Becker Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/journal.c | 108 +++++++++++++++++++++++++++++++++++++++++++---------- fs/ocfs2/ocfs2.h | 5 +++ 2 files changed, 93 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index d10860fde165..9730f5350ef4 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -50,6 +50,8 @@ #include "sysfile.h" #include "uptodate.h" #include "quota.h" +#include "file.h" +#include "namei.h" #include "buffer_head_io.h" #include "ocfs2_trace.h" @@ -69,13 +71,15 @@ static int ocfs2_journal_toggle_dirty(struct ocfs2_super *osb, static int ocfs2_trylock_journal(struct ocfs2_super *osb, int slot_num); static int ocfs2_recover_orphans(struct ocfs2_super *osb, - int slot); + int slot, + enum ocfs2_orphan_reco_type orphan_reco_type); static int ocfs2_commit_thread(void *arg); static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, int slot_num, struct ocfs2_dinode *la_dinode, struct ocfs2_dinode *tl_dinode, - struct ocfs2_quota_recovery *qrec); + struct ocfs2_quota_recovery *qrec, + enum ocfs2_orphan_reco_type orphan_reco_type); static inline int ocfs2_wait_on_mount(struct ocfs2_super *osb) { @@ -149,7 +153,8 @@ int ocfs2_compute_replay_slots(struct ocfs2_super *osb) return 0; } -void ocfs2_queue_replay_slots(struct ocfs2_super *osb) +void ocfs2_queue_replay_slots(struct ocfs2_super *osb, + enum ocfs2_orphan_reco_type orphan_reco_type) { struct ocfs2_replay_map *replay_map = osb->replay_map; int i; @@ -163,7 +168,8 @@ void ocfs2_queue_replay_slots(struct ocfs2_super *osb) for (i = 0; i < replay_map->rm_slots; i++) if (replay_map->rm_replay_slots[i]) ocfs2_queue_recovery_completion(osb->journal, i, NULL, - NULL, NULL); + NULL, NULL, + orphan_reco_type); replay_map->rm_state = REPLAY_DONE; } @@ -1174,6 +1180,7 @@ struct ocfs2_la_recovery_item { struct ocfs2_dinode *lri_la_dinode; struct ocfs2_dinode *lri_tl_dinode; struct ocfs2_quota_recovery *lri_qrec; + enum ocfs2_orphan_reco_type lri_orphan_reco_type; }; /* Does the second half of the recovery process. 
By this point, the @@ -1195,6 +1202,7 @@ void ocfs2_complete_recovery(struct work_struct *work) struct ocfs2_dinode *la_dinode, *tl_dinode; struct ocfs2_la_recovery_item *item, *n; struct ocfs2_quota_recovery *qrec; + enum ocfs2_orphan_reco_type orphan_reco_type; LIST_HEAD(tmp_la_list); trace_ocfs2_complete_recovery( @@ -1212,6 +1220,7 @@ void ocfs2_complete_recovery(struct work_struct *work) la_dinode = item->lri_la_dinode; tl_dinode = item->lri_tl_dinode; qrec = item->lri_qrec; + orphan_reco_type = item->lri_orphan_reco_type; trace_ocfs2_complete_recovery_slot(item->lri_slot, la_dinode ? le64_to_cpu(la_dinode->i_blkno) : 0, @@ -1236,7 +1245,8 @@ void ocfs2_complete_recovery(struct work_struct *work) kfree(tl_dinode); } - ret = ocfs2_recover_orphans(osb, item->lri_slot); + ret = ocfs2_recover_orphans(osb, item->lri_slot, + orphan_reco_type); if (ret < 0) mlog_errno(ret); @@ -1261,7 +1271,8 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, int slot_num, struct ocfs2_dinode *la_dinode, struct ocfs2_dinode *tl_dinode, - struct ocfs2_quota_recovery *qrec) + struct ocfs2_quota_recovery *qrec, + enum ocfs2_orphan_reco_type orphan_reco_type) { struct ocfs2_la_recovery_item *item; @@ -1285,6 +1296,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, item->lri_slot = slot_num; item->lri_tl_dinode = tl_dinode; item->lri_qrec = qrec; + item->lri_orphan_reco_type = orphan_reco_type; spin_lock(&journal->j_lock); list_add_tail(&item->lri_list, &journal->j_la_cleanups); @@ -1304,7 +1316,8 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) /* No need to queue up our truncate_log as regular cleanup will catch * that */ ocfs2_queue_recovery_completion(journal, osb->slot_num, - osb->local_alloc_copy, NULL, NULL); + osb->local_alloc_copy, NULL, NULL, + ORPHAN_NEED_TRUNCATE); ocfs2_schedule_truncate_log_flush(osb, 0); osb->local_alloc_copy = NULL; @@ -1312,7 +1325,7 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb) /* queue to recover orphan slots for all offline slots */ ocfs2_replay_map_set_state(osb, REPLAY_NEEDED); - ocfs2_queue_replay_slots(osb); + ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE); ocfs2_free_replay_slots(osb); } @@ -1323,7 +1336,8 @@ void ocfs2_complete_quota_recovery(struct ocfs2_super *osb) osb->slot_num, NULL, NULL, - osb->quota_rec); + osb->quota_rec, + ORPHAN_NEED_TRUNCATE); osb->quota_rec = NULL; } } @@ -1360,7 +1374,7 @@ restart: /* queue recovery for our own slot */ ocfs2_queue_recovery_completion(osb->journal, osb->slot_num, NULL, - NULL, NULL); + NULL, NULL, ORPHAN_NO_NEED_TRUNCATE); spin_lock(&osb->osb_lock); while (rm->rm_used) { @@ -1419,13 +1433,14 @@ skip_recovery: continue; } ocfs2_queue_recovery_completion(osb->journal, rm_quota[i], - NULL, NULL, qrec); + NULL, NULL, qrec, + ORPHAN_NEED_TRUNCATE); } ocfs2_super_unlock(osb, 1); /* queue recovery for offline slots */ - ocfs2_queue_replay_slots(osb); + ocfs2_queue_replay_slots(osb, ORPHAN_NEED_TRUNCATE); bail: mutex_lock(&osb->recovery_lock); @@ -1711,7 +1726,7 @@ static int ocfs2_recover_node(struct ocfs2_super *osb, /* This will kfree the memory pointed to by la_copy and tl_copy */ ocfs2_queue_recovery_completion(osb->journal, slot_num, la_copy, - tl_copy, NULL); + tl_copy, NULL, ORPHAN_NEED_TRUNCATE); status = 0; done: @@ -1901,7 +1916,7 @@ void ocfs2_queue_orphan_scan(struct ocfs2_super *osb) for (i = 0; i < osb->max_slots; i++) ocfs2_queue_recovery_completion(osb->journal, i, NULL, NULL, - NULL); + NULL, ORPHAN_NO_NEED_TRUNCATE); /* * We 
queued a recovery on orphan slots, increment the sequence * number and update LVB so other node will skip the scan for a while @@ -2000,6 +2015,13 @@ static int ocfs2_orphan_filldir(struct dir_context *ctx, const char *name, if (IS_ERR(iter)) return 0; + /* Skip inodes which are already added to recover list, since dio may + * happen concurrently with unlink/rename */ + if (OCFS2_I(iter)->ip_next_orphan) { + iput(iter); + return 0; + } + trace_ocfs2_orphan_filldir((unsigned long long)OCFS2_I(iter)->ip_blkno); /* No locking is required for the next_orphan queue as there * is only ever a single process doing orphan recovery. */ @@ -2108,7 +2130,8 @@ static void ocfs2_clear_recovering_orphan_dir(struct ocfs2_super *osb, * advertising our state to ocfs2_delete_inode(). */ static int ocfs2_recover_orphans(struct ocfs2_super *osb, - int slot) + int slot, + enum ocfs2_orphan_reco_type orphan_reco_type) { int ret = 0; struct inode *inode = NULL; @@ -2132,13 +2155,58 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb, (unsigned long long)oi->ip_blkno); iter = oi->ip_next_orphan; + oi->ip_next_orphan = NULL; + + /* + * We need to take and drop the inode lock to + * force read inode from disk. + */ + ret = ocfs2_inode_lock(inode, NULL, 0); + if (ret) { + mlog_errno(ret); + goto next; + } + ocfs2_inode_unlock(inode, 0); + + if (inode->i_nlink == 0) { + spin_lock(&oi->ip_lock); + /* Set the proper information to get us going into + * ocfs2_delete_inode. */ + oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; + spin_unlock(&oi->ip_lock); + } else if (orphan_reco_type == ORPHAN_NEED_TRUNCATE) { + struct buffer_head *di_bh = NULL; + + ret = ocfs2_rw_lock(inode, 1); + if (ret) { + mlog_errno(ret); + goto next; + } - spin_lock(&oi->ip_lock); - /* Set the proper information to get us going into - * ocfs2_delete_inode. */ - oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED; - spin_unlock(&oi->ip_lock); + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + ocfs2_rw_unlock(inode, 1); + mlog_errno(ret); + goto next; + } + + ret = ocfs2_truncate_file(inode, di_bh, + i_size_read(inode)); + ocfs2_inode_unlock(inode, 1); + ocfs2_rw_unlock(inode, 1); + brelse(di_bh); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + goto next; + } + + ret = ocfs2_del_inode_from_orphan(osb, inode, 0, 0); + if (ret) + mlog_errno(ret); + } /* else if ORPHAN_NO_NEED_TRUNCATE, do nothing */ +next: iput(inode); inode = iter; diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index fdbcbfed529e..df9a95cbea3a 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -209,6 +209,11 @@ struct ocfs2_lock_res { #endif }; +enum ocfs2_orphan_reco_type { + ORPHAN_NO_NEED_TRUNCATE = 0, + ORPHAN_NEED_TRUNCATE, +}; + enum ocfs2_orphan_scan_state { ORPHAN_SCAN_ACTIVE, ORPHAN_SCAN_INACTIVE -- cgit v1.2.3 From 24c40b329e03dd38a1ca2225c739db67f4441343 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 16:00:00 -0800 Subject: ocfs2: implement ocfs2_direct_IO_write Implement ocfs2_direct_IO_write. Add the inode to orphan dir first, and then delete it once append O_DIRECT finished. This is to make sure block allocation and inode size are consistent. 
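The ordering described above is the crux of the change: the inode is parked in the orphan directory before any blocks are allocated past i_size, and the orphan entry is only dropped (with i_size updated) once the write has settled, so a crash in between leaves a recoverable orphan rather than leaked clusters. The sketch below is illustrative only; the helpers are userspace stand-ins loosely modelled on the ocfs2 calls in the diff that follows, not the real kernel functions.

#include <stdbool.h>
#include <stdio.h>

static long i_size = 4096;	/* pretend current inode size */

/* Stand-in for ocfs2_add_inode_to_orphan() */
static int add_inode_to_orphan(void)
{
	puts("orphan: inode added before allocating past i_size");
	return 0;
}

/* Stand-in for ocfs2_del_inode_from_orphan(); mirrors how the real call
 * updates i_size only when asked to. */
static void del_inode_from_orphan(bool update_isize, long end)
{
	if (update_isize)
		i_size = end;
	puts("orphan: inode removed after the write settled");
}

static long do_direct_write(long offset, long count)
{
	(void)offset;
	return count;		/* pretend the whole direct write succeeded */
}

static long direct_io_write(long offset, long count)
{
	bool orphaned = false;
	long written;

	/* 1. Writing past EOF?  Orphan the inode first. */
	if (offset + count > i_size) {
		if (add_inode_to_orphan() < 0)
			return -1;
		orphaned = true;
	}

	/* 2. Extend/zero as needed, then issue the direct write. */
	written = do_direct_write(offset, count);

	/* 3. Drop the orphan entry, updating i_size only on success. */
	if (orphaned)
		del_inode_from_orphan(written > 0, offset + written);

	return written;
}

int main(void)
{
	long n = direct_io_write(4096, 8192);

	printf("wrote %ld bytes, i_size now %ld\n", n, i_size);
	return 0;
}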
[akpm@linux-foundation.org: fix it for "block: Add discard flag to blkdev_issue_zeroout() function"] Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Junxiao Bi Cc: Joel Becker Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/aops.c | 197 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- fs/ocfs2/ocfs2.h | 10 +++ 2 files changed, 204 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 46d93e941f3d..be5986b7e5c6 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -47,6 +48,9 @@ #include "ocfs2_trace.h" #include "buffer_head_io.h" +#include "dir.h" +#include "namei.h" +#include "sysfile.h" static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) @@ -597,6 +601,184 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait) return try_to_free_buffers(page); } +static int ocfs2_is_overwrite(struct ocfs2_super *osb, + struct inode *inode, loff_t offset) +{ + int ret = 0; + u32 v_cpos = 0; + u32 p_cpos = 0; + unsigned int num_clusters = 0; + unsigned int ext_flags = 0; + + v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset); + ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, + &num_clusters, &ext_flags); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) + return 1; + + return 0; +} + +static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb, + struct iov_iter *iter, + loff_t offset) +{ + ssize_t ret = 0; + ssize_t written = 0; + bool orphaned = false; + int is_overwrite = 0; + struct file *file = iocb->ki_filp; + struct inode *inode = file_inode(file)->i_mapping->host; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *di_bh = NULL; + size_t count = iter->count; + journal_t *journal = osb->journal->j_journal; + u32 zero_len; + int cluster_align; + loff_t final_size = offset + count; + int append_write = offset >= i_size_read(inode) ? 1 : 0; + unsigned int num_clusters = 0; + unsigned int ext_flags = 0; + + { + u64 o = offset; + + zero_len = do_div(o, 1 << osb->s_clustersize_bits); + cluster_align = !zero_len; + } + + /* + * when final_size > inode->i_size, inode->i_size will be + * updated after direct write, so add the inode to orphan + * dir first. 
+ */ + if (final_size > i_size_read(inode)) { + ret = ocfs2_add_inode_to_orphan(osb, inode); + if (ret < 0) { + mlog_errno(ret); + goto out; + } + orphaned = true; + } + + if (append_write) { + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + mlog_errno(ret); + goto clean_orphan; + } + + if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) + ret = ocfs2_zero_extend(inode, di_bh, offset); + else + ret = ocfs2_extend_no_holes(inode, di_bh, offset, + offset); + if (ret < 0) { + mlog_errno(ret); + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + goto clean_orphan; + } + + is_overwrite = ocfs2_is_overwrite(osb, inode, offset); + if (is_overwrite < 0) { + mlog_errno(is_overwrite); + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + goto clean_orphan; + } + + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + di_bh = NULL; + } + + written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev, + iter, offset, + ocfs2_direct_IO_get_blocks, + ocfs2_dio_end_io, NULL, 0); + if (unlikely(written < 0)) { + loff_t i_size = i_size_read(inode); + + if (offset + count > i_size) { + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + mlog_errno(ret); + goto clean_orphan; + } + + if (i_size == i_size_read(inode)) { + ret = ocfs2_truncate_file(inode, di_bh, + i_size); + if (ret < 0) { + if (ret != -ENOSPC) + mlog_errno(ret); + + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + goto clean_orphan; + } + } + + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + + ret = jbd2_journal_force_commit(journal); + if (ret < 0) + mlog_errno(ret); + } + } else if (written < 0 && append_write && !is_overwrite && + !cluster_align) { + u32 p_cpos = 0; + u32 v_cpos = ocfs2_bytes_to_clusters(osb->sb, offset); + + ret = ocfs2_get_clusters(inode, v_cpos, &p_cpos, + &num_clusters, &ext_flags); + if (ret < 0) { + mlog_errno(ret); + goto clean_orphan; + } + + BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); + + ret = blkdev_issue_zeroout(osb->sb->s_bdev, + p_cpos << (osb->s_clustersize_bits - 9), + zero_len >> 9, GFP_KERNEL, false); + if (ret < 0) + mlog_errno(ret); + } + +clean_orphan: + if (orphaned) { + int tmp_ret; + int update_isize = written > 0 ? 1 : 0; + loff_t end = update_isize ? offset + written : 0; + + tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, + update_isize, end); + if (tmp_ret < 0) { + ret = tmp_ret; + goto out; + } + + tmp_ret = jbd2_journal_force_commit(journal); + if (tmp_ret < 0) { + ret = tmp_ret; + mlog_errno(tmp_ret); + } + } + +out: + if (ret >= 0) + ret = written; + return ret; +} + static ssize_t ocfs2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, @@ -604,6 +786,9 @@ static ssize_t ocfs2_direct_IO(int rw, { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file)->i_mapping->host; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int full_coherency = !(osb->s_mount_opt & + OCFS2_MOUNT_COHERENCY_BUFFERED); /* * Fallback to buffered I/O if we see an inode without @@ -612,14 +797,20 @@ static ssize_t ocfs2_direct_IO(int rw, if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) return 0; - /* Fallback to buffered I/O if we are appending. */ - if (i_size_read(inode) <= offset) + /* Fallback to buffered I/O if we are appending and + * concurrent O_DIRECT writes are allowed. 
+ */ + if (i_size_read(inode) <= offset && !full_coherency) return 0; - return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, + if (rw == READ) + return __blockdev_direct_IO(rw, iocb, inode, + inode->i_sb->s_bdev, iter, offset, ocfs2_direct_IO_get_blocks, ocfs2_dio_end_io, NULL, 0); + else + return ocfs2_direct_IO_write(iocb, iter, offset); } static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index df9a95cbea3a..7e39cd654834 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -731,6 +731,16 @@ static inline unsigned int ocfs2_clusters_for_bytes(struct super_block *sb, return clusters; } +static inline unsigned int ocfs2_bytes_to_clusters(struct super_block *sb, + u64 bytes) +{ + int cl_bits = OCFS2_SB(sb)->s_clustersize_bits; + unsigned int clusters; + + clusters = (unsigned int)(bytes >> cl_bits); + return clusters; +} + static inline u64 ocfs2_blocks_for_bytes(struct super_block *sb, u64 bytes) { -- cgit v1.2.3 From 49255dce65dba3d06a0f642fee788f0a50d48d5b Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 16:00:03 -0800 Subject: ocfs2: allocate blocks in ocfs2_direct_IO_get_blocks Allow blocks allocation in ocfs2_direct_IO_get_blocks. Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Junxiao Bi Cc: Joel Becker Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/aops.c | 45 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index be5986b7e5c6..44db1808cdb5 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -510,18 +510,21 @@ bail: * * called like this: dio->get_blocks(dio->inode, fs_startblk, * fs_count, map_bh, dio->rw == WRITE); - * - * Note that we never bother to allocate blocks here, and thus ignore the - * create argument. */ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { int ret; + u32 cpos = 0; + int alloc_locked = 0; u64 p_blkno, inode_blocks, contig_blocks; unsigned int ext_flags; unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits; unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; + unsigned long len = bh_result->b_size; + unsigned int clusters_to_alloc = 0; + + cpos = ocfs2_blocks_to_clusters(inode->i_sb, iblock); /* This function won't even be called if the request isn't all * nicely aligned and of the right size, so there's no need @@ -543,6 +546,40 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, /* We should already CoW the refcounted extent in case of create. 
*/ BUG_ON(create && (ext_flags & OCFS2_EXT_REFCOUNTED)); + /* allocate blocks if no p_blkno is found, and create == 1 */ + if (!p_blkno && create) { + ret = ocfs2_inode_lock(inode, NULL, 1); + if (ret < 0) { + mlog_errno(ret); + goto bail; + } + + alloc_locked = 1; + + /* fill hole, allocate blocks can't be larger than the size + * of the hole */ + clusters_to_alloc = ocfs2_clusters_for_bytes(inode->i_sb, len); + if (clusters_to_alloc > contig_blocks) + clusters_to_alloc = contig_blocks; + + /* allocate extent and insert them into the extent tree */ + ret = ocfs2_extend_allocation(inode, cpos, + clusters_to_alloc, 0); + if (ret < 0) { + mlog_errno(ret); + goto bail; + } + + ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, + &contig_blocks, &ext_flags); + if (ret < 0) { + mlog(ML_ERROR, "get_blocks() failed iblock=%llu\n", + (unsigned long long)iblock); + ret = -EIO; + goto bail; + } + } + /* * get_more_blocks() expects us to describe a hole by clearing * the mapped bit on bh_result(). @@ -560,6 +597,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, contig_blocks = max_blocks; bh_result->b_size = contig_blocks << blocksize_bits; bail: + if (alloc_locked) + ocfs2_inode_unlock(inode, 1); return ret; } -- cgit v1.2.3 From d943d59dd32d33cd8a44a2f9caf373ede11200da Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 16:00:06 -0800 Subject: ocfs2: do not fallback to buffer I/O write if appending Now we can do direct I/O and no longer fall back to buffered I/O in the case of an append O_DIRECT write. Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Joel Becker Cc: Junxiao Bi Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/file.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 2fce3c40ad27..1055a2ece738 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2116,6 +2116,9 @@ static int ocfs2_prepare_inode_for_write(struct file *file, struct dentry *dentry = file->f_path.dentry; struct inode *inode = dentry->d_inode; loff_t saved_pos = 0, end; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + int full_coherency = !(osb->s_mount_opt & + OCFS2_MOUNT_COHERENCY_BUFFERED); /* * We start with a read level meta lock and only jump to an ex @@ -2204,7 +2207,7 @@ static int ocfs2_prepare_inode_for_write(struct file *file, * one node could wind up truncating another * nodes writes. */ - if (end > i_size_read(inode)) { + if (end > i_size_read(inode) && !full_coherency) { *direct_io = 0; break; } -- cgit v1.2.3 From 3a83b342c87e6d21290de8dc76ec20a67821261d Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 16:00:09 -0800 Subject: ocfs2: complete the rest request through buffer io Complete the rest of the request through buffered I/O after the direct write is performed.
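The "rest of the request" handling in the next diff is mostly bookkeeping: advance the file position by whatever the direct path wrote, push the remainder through the page cache, then write back and invalidate that byte range so O_DIRECT semantics are preserved for other readers. The snippet below only walks through that arithmetic with made-up numbers; the names and the 4 KiB page size are assumptions for the example, not taken from the kernel.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for the example */

int main(void)
{
	long pos = 1048576;		/* starting file position */
	long count = 300000;		/* total bytes requested */
	long written_direct = 262144;	/* what the direct path managed */
	long written_buffered;
	long endbyte;

	/* Advance past the directly written part; the remainder goes
	 * through the page cache. */
	pos += written_direct;
	count -= written_direct;
	written_buffered = count;	/* pretend the buffered write got it all */

	/* Range that must be written back and dropped from the page cache
	 * so a later O_DIRECT reader sees the data on disk. */
	endbyte = pos + written_buffered - 1;
	printf("flush+invalidate bytes %ld..%ld (pages %ld..%ld)\n",
	       pos, endbyte, pos >> PAGE_SHIFT, endbyte >> PAGE_SHIFT);
	printf("total written: %ld\n", written_direct + written_buffered);
	return 0;
}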
Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Joel Becker Cc: Junxiao Bi Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/file.c | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 1055a2ece738..784f2c72c992 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2253,6 +2253,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, u32 old_clusters; struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); + struct address_space *mapping = file->f_mapping; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); int full_coherency = !(osb->s_mount_opt & OCFS2_MOUNT_COHERENCY_BUFFERED); @@ -2367,11 +2368,51 @@ relock: iov_iter_truncate(from, count); if (direct_io) { + loff_t endbyte; + ssize_t written_buffered; written = generic_file_direct_write(iocb, from, *ppos); - if (written < 0) { + if (written < 0 || written == count) { ret = written; goto out_dio; } + + /* + * for completing the rest of the request. + */ + *ppos += written; + count -= written; + written_buffered = generic_perform_write(file, from, *ppos); + /* + * If generic_file_buffered_write() returned a synchronous error + * then we want to return the number of bytes which were + * direct-written, or the error code if that was zero. Note + * that this differs from normal direct-io semantics, which + * will return -EFOO even if some bytes were written. + */ + if (written_buffered < 0) { + ret = written_buffered; + goto out_dio; + } + + iocb->ki_pos = *ppos + written_buffered; + /* We need to ensure that the page cache pages are written to + * disk and invalidated to preserve the expected O_DIRECT + * semantics. + */ + endbyte = *ppos + written_buffered - 1; + ret = filemap_write_and_wait_range(file->f_mapping, *ppos, + endbyte); + if (ret == 0) { + written += written_buffered; + invalidate_mapping_pages(mapping, + *ppos >> PAGE_CACHE_SHIFT, + endbyte >> PAGE_CACHE_SHIFT); + } else { + /* + * We don't know how much we wrote, so just return + * the number of bytes which were direct-written + */ + } } else { current->backing_dev_info = inode_to_bdi(inode); written = generic_perform_write(file, from, *ppos); -- cgit v1.2.3 From 4813962beef7586f890a645a1bda77691da4b74a Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 16:00:12 -0800 Subject: ocfs2: wait for orphan recovery first once append O_DIRECT write crash If one node has crashed with orphan entry leftover, another node which do append O_DIRECT write to the same file will override the i_dio_orphaned_slot. Then the old entry won't be cleaned forever. If this case happens, we let it wait for orphan recovery first. 
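Stripped of the ocfs2 specifics, the fix described above is a bounded wait-and-retry loop: take the lock, check whether a crashed writer left the DIO orphan flag set, and if so drop the lock, wait with a timeout so a missed wakeup cannot hang forever, and start over from the top. A minimal userspace model of that loop is sketched below with a mutex and condition variable standing in for the cluster lock and wait queue; every name in it is invented for the illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t recovered = PTHREAD_COND_INITIALIZER;
static bool dio_orphaned;	/* leftover flag from a "crashed" writer */

/* Recovery thread: clears the flag and wakes any waiters. */
static void *recover(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	dio_orphaned = false;
	pthread_cond_broadcast(&recovered);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void add_to_orphan(void)
{
restart:
	pthread_mutex_lock(&lock);
	if (dio_orphaned) {
		/* Another writer's crash left an entry: wait (bounded, like
		 * the timeout in the patch), then retry from the top. */
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;
		pthread_cond_timedwait(&recovered, &lock, &ts);
		pthread_mutex_unlock(&lock);
		goto restart;
	}
	puts("flag clear: safe to add our own orphan entry");
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	dio_orphaned = true;
	pthread_create(&t, NULL, recover, NULL);
	add_to_orphan();
	pthread_join(t, NULL);
	return 0;
}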
Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Joel Becker Cc: Junxiao Bi Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/inode.h | 2 ++ fs/ocfs2/journal.c | 2 ++ fs/ocfs2/namei.c | 37 +++++++++++++++++++++++++++++++++++++ fs/ocfs2/super.c | 2 ++ 4 files changed, 43 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h index ca3431ee7f24..5e86b247c821 100644 --- a/fs/ocfs2/inode.h +++ b/fs/ocfs2/inode.h @@ -81,6 +81,8 @@ struct ocfs2_inode_info tid_t i_sync_tid; tid_t i_datasync_tid; + wait_queue_head_t append_dio_wq; + struct dquot *i_dquot[MAXQUOTAS]; }; diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 9730f5350ef4..ff531928269e 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -2204,6 +2204,8 @@ static int ocfs2_recover_orphans(struct ocfs2_super *osb, ret = ocfs2_del_inode_from_orphan(osb, inode, 0, 0); if (ret) mlog_errno(ret); + + wake_up(&OCFS2_I(inode)->append_dio_wq); } /* else if ORPHAN_NO_NEED_TRUNCATE, do nothing */ next: diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 7eec45d0d85f..b5c3a5ea3ee6 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -2577,6 +2577,27 @@ leave: return status; } +static int ocfs2_dio_orphan_recovered(struct inode *inode) +{ + int ret; + struct buffer_head *di_bh = NULL; + struct ocfs2_dinode *di = NULL; + + ret = ocfs2_inode_lock(inode, &di_bh, 1); + if (ret < 0) { + mlog_errno(ret); + return 0; + } + + di = (struct ocfs2_dinode *) di_bh->b_data; + ret = !(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL)); + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + + return ret; +} + +#define OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL 10000 int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb, struct inode *inode) { @@ -2586,13 +2607,29 @@ int ocfs2_add_inode_to_orphan(struct ocfs2_super *osb, struct buffer_head *di_bh = NULL; int status = 0; handle_t *handle = NULL; + struct ocfs2_dinode *di = NULL; +restart: status = ocfs2_inode_lock(inode, &di_bh, 1); if (status < 0) { mlog_errno(status); goto bail; } + di = (struct ocfs2_dinode *) di_bh->b_data; + /* + * Another append dio crashed? + * If so, wait for recovery first. + */ + if (unlikely(di->i_flags & cpu_to_le32(OCFS2_DIO_ORPHANED_FL))) { + ocfs2_inode_unlock(inode, 1); + brelse(di_bh); + wait_event_interruptible_timeout(OCFS2_I(inode)->append_dio_wq, + ocfs2_dio_orphan_recovered(inode), + msecs_to_jiffies(OCFS2_DIO_ORPHANED_FL_CHECK_INTERVAL)); + goto restart; + } + status = ocfs2_prepare_orphan_dir(osb, &orphan_dir_inode, OCFS2_I(inode)->ip_blkno, orphan_name, diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 87a1f7679d9b..26675185b886 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1746,6 +1746,8 @@ static void ocfs2_inode_init_once(void *data) ocfs2_lock_res_init_once(&oi->ip_inode_lockres); ocfs2_lock_res_init_once(&oi->ip_open_lockres); + init_waitqueue_head(&oi->append_dio_wq); + ocfs2_metadata_cache_init(INODE_CACHE(&oi->vfs_inode), &ocfs2_inode_caching_ops); -- cgit v1.2.3 From 160cc266639d4213c15c103074561c1b44ffe691 Mon Sep 17 00:00:00 2001 From: Joseph Qi Date: Mon, 16 Feb 2015 16:00:15 -0800 Subject: ocfs2: set append dio as a ro compat feature Introduce a bit OCFS2_FEATURE_RO_COMPAT_APPEND_DIO and check it in the write flow. If the bit is not set, fall back to the old way.
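The gate itself is nothing more than a mask test against the superblock's ro-compat feature field, as ocfs2_supports_append_dio() in the following hunk does; because it is a ro-compat bit, an older kernel that does not recognise it can still mount the filesystem read-only but not read-write. A compressed userspace illustration, with invented names and the same 0x0008 value the patch uses:

#include <stdio.h>

#define FEATURE_RO_COMPAT_UNWRITTEN	0x0001
#define FEATURE_RO_COMPAT_APPEND_DIO	0x0008	/* value chosen by the patch */

struct demo_super {
	unsigned int s_feature_ro_compat;
};

static int supports_append_dio(const struct demo_super *sb)
{
	return !!(sb->s_feature_ro_compat & FEATURE_RO_COMPAT_APPEND_DIO);
}

int main(void)
{
	struct demo_super without_bit = {
		.s_feature_ro_compat = FEATURE_RO_COMPAT_UNWRITTEN,
	};
	struct demo_super with_bit = {
		.s_feature_ro_compat = FEATURE_RO_COMPAT_UNWRITTEN |
				       FEATURE_RO_COMPAT_APPEND_DIO,
	};

	/* Without the bit the write path falls back to the old buffered way. */
	printf("old fs: append dio %s\n",
	       supports_append_dio(&without_bit) ? "allowed" : "falls back to buffered");
	printf("new fs: append dio %s\n",
	       supports_append_dio(&with_bit) ? "allowed" : "falls back to buffered");
	return 0;
}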
Signed-off-by: Joseph Qi Cc: Weiwei Wang Cc: Joel Becker Cc: Junxiao Bi Cc: Mark Fasheh Cc: Xuejiufei Cc: alex chen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ocfs2/file.c | 17 ++++++++++++++++- fs/ocfs2/ocfs2.h | 8 ++++++++ fs/ocfs2/ocfs2_fs.h | 8 +++++++- 3 files changed, 31 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 784f2c72c992..46e0d4e857c7 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2212,6 +2212,15 @@ static int ocfs2_prepare_inode_for_write(struct file *file, break; } + /* + * Fallback to old way if the feature bit is not set. + */ + if (end > i_size_read(inode) && + !ocfs2_supports_append_dio(osb)) { + *direct_io = 0; + break; + } + /* * We don't fill holes during direct io, so * check for them here. If any are found, the @@ -2220,7 +2229,13 @@ static int ocfs2_prepare_inode_for_write(struct file *file, */ ret = ocfs2_check_range_for_holes(inode, saved_pos, count); if (ret == 1) { - *direct_io = 0; + /* + * Fallback to old way if the feature bit is not set. + * Otherwise try dio first and then complete the rest + * request through buffer io. + */ + if (!ocfs2_supports_append_dio(osb)) + *direct_io = 0; ret = 0; } else if (ret < 0) mlog_errno(ret); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 7e39cd654834..8490c64d34fe 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -500,6 +500,14 @@ static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb) return 0; } +static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb) +{ + if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) + return 1; + return 0; +} + + static inline int ocfs2_supports_inline_data(struct ocfs2_super *osb) { if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INLINE_DATA) diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index cf4fa43af78e..20e37a3ed26f 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -105,7 +105,8 @@ | OCFS2_FEATURE_INCOMPAT_CLUSTERINFO) #define OCFS2_FEATURE_RO_COMPAT_SUPP (OCFS2_FEATURE_RO_COMPAT_UNWRITTEN \ | OCFS2_FEATURE_RO_COMPAT_USRQUOTA \ - | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA) + | OCFS2_FEATURE_RO_COMPAT_GRPQUOTA \ + | OCFS2_FEATURE_RO_COMPAT_APPEND_DIO) /* * Heartbeat-only devices are missing journals and other files. The @@ -199,6 +200,11 @@ #define OCFS2_FEATURE_RO_COMPAT_USRQUOTA 0x0002 #define OCFS2_FEATURE_RO_COMPAT_GRPQUOTA 0x0004 +/* + * Append Direct IO support + */ +#define OCFS2_FEATURE_RO_COMPAT_APPEND_DIO 0x0008 + /* The byte offset of the first backup block will be 1G. * The following will be 4G, 16G, 64G, 256G and 1T. */ -- cgit v1.2.3 From 35e88d5c22e1916c819b5a8756aed2f09a4aba18 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 17 Feb 2015 15:41:51 +0100 Subject: fs/binfmt_som: Drop kernel support for HP-UX SOM binaries The parisc arch has been the only user of HP-UX SOM binaries. Support for HP-UX executables was never finished and since we now drop support for the HP-UX compat layer anyway, it does not makes sense to keep the BINFMT_SOM support. 
Cc: linux-fsdevel@vger.kernel.org Cc: linux-parisc@vger.kernel.org Signed-off-by: Helge Deller --- fs/Kconfig.binfmt | 7 -- fs/Makefile | 1 - fs/binfmt_som.c | 299 ---------------------------------------------- include/uapi/linux/Kbuild | 1 - include/uapi/linux/som.h | 154 ------------------------ 5 files changed, 462 deletions(-) delete mode 100644 fs/binfmt_som.c delete mode 100644 include/uapi/linux/som.h (limited to 'fs') diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index c055d56ec63d..270c48148f79 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -149,13 +149,6 @@ config BINFMT_EM86 later load the module when you want to use a Linux/Intel binary. The module will be called binfmt_em86. If unsure, say Y. -config BINFMT_SOM - tristate "Kernel support for SOM binaries" - depends on PARISC && HPUX - help - SOM is a binary executable format inherited from HP/UX. Say - Y here to be able to load and execute SOM binaries directly. - config BINFMT_MISC tristate "Kernel support for MISC binaries" ---help--- diff --git a/fs/Makefile b/fs/Makefile index bedff48e8fdc..b7e0cfe3af2f 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -37,7 +37,6 @@ obj-$(CONFIG_BINFMT_SCRIPT) += binfmt_script.o obj-$(CONFIG_BINFMT_ELF) += binfmt_elf.o obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o -obj-$(CONFIG_BINFMT_SOM) += binfmt_som.o obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o obj-$(CONFIG_FS_MBCACHE) += mbcache.o diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c deleted file mode 100644 index 4e00ed68d4a6..000000000000 --- a/fs/binfmt_som.c +++ /dev/null @@ -1,299 +0,0 @@ -/* - * linux/fs/binfmt_som.c - * - * These are the functions used to load SOM format executables as used - * by HP-UX. - * - * Copyright 1999 Matthew Wilcox - * based on binfmt_elf which is - * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com). - */ - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - - -#include - -static int load_som_binary(struct linux_binprm * bprm); -static int load_som_library(struct file *); - -/* - * If we don't support core dumping, then supply a NULL so we - * don't even try. - */ -#if 0 -static int som_core_dump(struct coredump_params *cprm); -#else -#define som_core_dump NULL -#endif - -#define SOM_PAGESTART(_v) ((_v) & ~(unsigned long)(SOM_PAGESIZE-1)) -#define SOM_PAGEOFFSET(_v) ((_v) & (SOM_PAGESIZE-1)) -#define SOM_PAGEALIGN(_v) (((_v) + SOM_PAGESIZE - 1) & ~(SOM_PAGESIZE - 1)) - -static struct linux_binfmt som_format = { - .module = THIS_MODULE, - .load_binary = load_som_binary, - .load_shlib = load_som_library, - .core_dump = som_core_dump, - .min_coredump = SOM_PAGESIZE -}; - -/* - * create_som_tables() parses the env- and arg-strings in new user - * memory and creates the pointer tables from them, and puts their - * addresses on the "stack", returning the new stack pointer value. 
- */ -static void create_som_tables(struct linux_binprm *bprm) -{ - char **argv, **envp; - int argc = bprm->argc; - int envc = bprm->envc; - unsigned long p; - unsigned long *sp; - - /* Word-align the stack pointer */ - sp = (unsigned long *)((bprm->p + 3) & ~3); - - envp = (char **) sp; - sp += envc + 1; - argv = (char **) sp; - sp += argc + 1; - - __put_user((unsigned long) envp,++sp); - __put_user((unsigned long) argv,++sp); - - __put_user(argc, ++sp); - - bprm->p = (unsigned long) sp; - - p = current->mm->arg_start; - while (argc-- > 0) { - __put_user((char *)p,argv++); - p += strlen_user((char *)p); - } - __put_user(NULL, argv); - current->mm->arg_end = current->mm->env_start = p; - while (envc-- > 0) { - __put_user((char *)p,envp++); - p += strlen_user((char *)p); - } - __put_user(NULL, envp); - current->mm->env_end = p; -} - -static int check_som_header(struct som_hdr *som_ex) -{ - int *buf = (int *)som_ex; - int i, ck; - - if (som_ex->system_id != SOM_SID_PARISC_1_0 && - som_ex->system_id != SOM_SID_PARISC_1_1 && - som_ex->system_id != SOM_SID_PARISC_2_0) - return -ENOEXEC; - - if (som_ex->a_magic != SOM_EXEC_NONSHARE && - som_ex->a_magic != SOM_EXEC_SHARE && - som_ex->a_magic != SOM_EXEC_DEMAND) - return -ENOEXEC; - - if (som_ex->version_id != SOM_ID_OLD && - som_ex->version_id != SOM_ID_NEW) - return -ENOEXEC; - - ck = 0; - for (i=0; i<32; i++) - ck ^= buf[i]; - if (ck != 0) - return -ENOEXEC; - - return 0; -} - -static int map_som_binary(struct file *file, - const struct som_exec_auxhdr *hpuxhdr) -{ - unsigned long code_start, code_size, data_start, data_size; - unsigned long bss_start, som_brk; - int retval; - int prot = PROT_READ | PROT_EXEC; - int flags = MAP_FIXED|MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE; - - mm_segment_t old_fs = get_fs(); - set_fs(get_ds()); - - code_start = SOM_PAGESTART(hpuxhdr->exec_tmem); - code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize); - current->mm->start_code = code_start; - current->mm->end_code = code_start + code_size; - retval = vm_mmap(file, code_start, code_size, prot, - flags, SOM_PAGESTART(hpuxhdr->exec_tfile)); - if (retval < 0 && retval > -1024) - goto out; - - data_start = SOM_PAGESTART(hpuxhdr->exec_dmem); - data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize); - current->mm->start_data = data_start; - current->mm->end_data = bss_start = data_start + data_size; - retval = vm_mmap(file, data_start, data_size, - prot | PROT_WRITE, flags, - SOM_PAGESTART(hpuxhdr->exec_dfile)); - if (retval < 0 && retval > -1024) - goto out; - - som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize); - current->mm->start_brk = current->mm->brk = som_brk; - retval = vm_mmap(NULL, bss_start, som_brk - bss_start, - prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0); - if (retval > 0 || retval < -1024) - retval = 0; -out: - set_fs(old_fs); - return retval; -} - - -/* - * These are the functions used to load SOM executables and shared - * libraries. There is no binary dependent code anywhere else. 
- */ - -static int -load_som_binary(struct linux_binprm * bprm) -{ - int retval; - unsigned int size; - unsigned long som_entry; - struct som_hdr *som_ex; - struct som_exec_auxhdr *hpuxhdr; - struct pt_regs *regs = current_pt_regs(); - - /* Get the exec-header */ - som_ex = (struct som_hdr *) bprm->buf; - - retval = check_som_header(som_ex); - if (retval != 0) - goto out; - - /* Now read in the auxiliary header information */ - - retval = -ENOMEM; - size = som_ex->aux_header_size; - if (size > SOM_PAGESIZE) - goto out; - hpuxhdr = kmalloc(size, GFP_KERNEL); - if (!hpuxhdr) - goto out; - - retval = kernel_read(bprm->file, som_ex->aux_header_location, - (char *) hpuxhdr, size); - if (retval != size) { - if (retval >= 0) - retval = -EIO; - goto out_free; - } - - /* Flush all traces of the currently running executable */ - retval = flush_old_exec(bprm); - if (retval) - goto out_free; - - /* OK, This is the point of no return */ - current->personality = PER_HPUX; - setup_new_exec(bprm); - - /* Set the task size for HP-UX processes such that - * the gateway page is outside the address space. - * This can be fixed later, but for now, this is much - * easier. - */ - - current->thread.task_size = 0xc0000000; - - /* Set map base to allow enough room for hp-ux heap growth */ - - current->thread.map_base = 0x80000000; - - retval = map_som_binary(bprm->file, hpuxhdr); - if (retval < 0) - goto out_free; - - som_entry = hpuxhdr->exec_entry; - kfree(hpuxhdr); - - set_binfmt(&som_format); - install_exec_creds(bprm); - setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); - - create_som_tables(bprm); - - current->mm->start_stack = bprm->p; - -#if 0 - printk("(start_brk) %08lx\n" , (unsigned long) current->mm->start_brk); - printk("(end_code) %08lx\n" , (unsigned long) current->mm->end_code); - printk("(start_code) %08lx\n" , (unsigned long) current->mm->start_code); - printk("(end_data) %08lx\n" , (unsigned long) current->mm->end_data); - printk("(start_stack) %08lx\n" , (unsigned long) current->mm->start_stack); - printk("(brk) %08lx\n" , (unsigned long) current->mm->brk); -#endif - - map_hpux_gateway_page(current,current->mm); - - start_thread_som(regs, som_entry, bprm->p); - return 0; - - /* error cleanup */ -out_free: - kfree(hpuxhdr); -out: - return retval; -} - -static int load_som_library(struct file *f) -{ -/* No lib support in SOM yet. gizza chance.. */ - return -ENOEXEC; -} - /* Install the SOM loader. - * N.B. We *rely* on the table being the right size with the - * right number of free slots... - */ - -static int __init init_som_binfmt(void) -{ - register_binfmt(&som_format); - return 0; -} - -static void __exit exit_som_binfmt(void) -{ - /* Remove the SOM loader. 
*/ - unregister_binfmt(&som_format); -} - -core_initcall(init_som_binfmt); -module_exit(exit_som_binfmt); - -MODULE_LICENSE("GPL"); diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 7b8141bf59a7..68ceb97c458c 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild @@ -370,7 +370,6 @@ header-y += snmp.h header-y += sock_diag.h header-y += socket.h header-y += sockios.h -header-y += som.h header-y += sonet.h header-y += sonypi.h header-y += soundcard.h diff --git a/include/uapi/linux/som.h b/include/uapi/linux/som.h deleted file mode 100644 index 166594e4e7be..000000000000 --- a/include/uapi/linux/som.h +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef _LINUX_SOM_H -#define _LINUX_SOM_H - -/* File format definition for SOM executables / shared libraries */ - -/* we need struct timespec */ -#include - -#define SOM_PAGESIZE 4096 - -/* this is the SOM header */ -struct som_hdr { - short system_id; /* magic number - system */ - short a_magic; /* magic number - file type */ - unsigned int version_id; /* versiod ID: YYMMDDHH */ - struct timespec file_time; /* system clock */ - unsigned int entry_space; /* space for entry point */ - unsigned int entry_subspace; /* subspace for entry point */ - unsigned int entry_offset; /* offset of entry point */ - unsigned int aux_header_location; /* auxiliary header location */ - unsigned int aux_header_size; /* auxiliary header size */ - unsigned int som_length; /* length of entire SOM */ - unsigned int presumed_dp; /* compiler's DP value */ - unsigned int space_location; /* space dictionary location */ - unsigned int space_total; /* number of space entries */ - unsigned int subspace_location; /* subspace entries location */ - unsigned int subspace_total; /* number of subspace entries */ - unsigned int loader_fixup_location; /* MPE/iX loader fixup */ - unsigned int loader_fixup_total; /* number of fixup records */ - unsigned int space_strings_location; /* (sub)space names */ - unsigned int space_strings_size; /* size of strings area */ - unsigned int init_array_location; /* reserved */ - unsigned int init_array_total; /* reserved */ - unsigned int compiler_location; /* module dictionary */ - unsigned int compiler_total; /* number of modules */ - unsigned int symbol_location; /* symbol dictionary */ - unsigned int symbol_total; /* number of symbols */ - unsigned int fixup_request_location; /* fixup requests */ - unsigned int fixup_request_total; /* number of fixup requests */ - unsigned int symbol_strings_location;/* module & symbol names area */ - unsigned int symbol_strings_size; /* size of strings area */ - unsigned int unloadable_sp_location; /* unloadable spaces location */ - unsigned int unloadable_sp_size; /* size of data */ - unsigned int checksum; -}; - -/* values for system_id */ - -#define SOM_SID_PARISC_1_0 0x020b -#define SOM_SID_PARISC_1_1 0x0210 -#define SOM_SID_PARISC_2_0 0x0214 - -/* values for a_magic */ - -#define SOM_LIB_EXEC 0x0104 -#define SOM_RELOCATABLE 0x0106 -#define SOM_EXEC_NONSHARE 0x0107 -#define SOM_EXEC_SHARE 0x0108 -#define SOM_EXEC_DEMAND 0x010B -#define SOM_LIB_DYN 0x010D -#define SOM_LIB_SHARE 0x010E -#define SOM_LIB_RELOC 0x0619 - -/* values for version_id. Decimal not hex, yes. Grr. 
*/ - -#define SOM_ID_OLD 85082112 -#define SOM_ID_NEW 87102412 - -struct aux_id { - unsigned int mandatory :1; /* the linker must understand this */ - unsigned int copy :1; /* Must be copied by the linker */ - unsigned int append :1; /* Must be merged by the linker */ - unsigned int ignore :1; /* Discard section if unknown */ - unsigned int reserved :12; - unsigned int type :16; /* Header type */ - unsigned int length; /* length of _following_ data */ -}; - -/* The Exec Auxiliary Header. Called The HP-UX Header within HP apparently. */ -struct som_exec_auxhdr { - struct aux_id som_auxhdr; - int exec_tsize; /* Text size in bytes */ - int exec_tmem; /* Address to load text at */ - int exec_tfile; /* Location of text in file */ - int exec_dsize; /* Data size in bytes */ - int exec_dmem; /* Address to load data at */ - int exec_dfile; /* Location of data in file */ - int exec_bsize; /* Uninitialised data (bss) */ - int exec_entry; /* Address to start executing */ - int exec_flags; /* loader flags */ - int exec_bfill; /* initialisation value for bss */ -}; - -/* Oh, the things people do to avoid casts. Shame it'll break with gcc's - * new aliasing rules really. - */ -union name_pt { - char * n_name; - unsigned int n_strx; -}; - -/* The Space Dictionary */ -struct space_dictionary_record { - union name_pt name; /* index to subspace name */ - unsigned int is_loadable :1; /* loadable */ - unsigned int is_defined :1; /* defined within file */ - unsigned int is_private :1; /* not sharable */ - unsigned int has_intermediate_code :1; /* contains intermediate code */ - unsigned int is_tspecific :1; /* thread specific */ - unsigned int reserved :11; /* for future expansion */ - unsigned int sort_key :8; /* for linker */ - unsigned int reserved2 :8; /* for future expansion */ - - int space_number; /* index */ - int subspace_index; /* index into subspace dict */ - unsigned int subspace_quantity; /* number of subspaces */ - int loader_fix_index; /* for loader */ - unsigned int loader_fix_quantity; /* for loader */ - int init_pointer_index; /* data pointer array index */ - unsigned int init_pointer_quantity; /* number of data pointers */ -}; - -/* The Subspace Dictionary */ -struct subspace_dictionary_record { - int space_index; - unsigned int access_control_bits :7; - unsigned int memory_resident :1; - unsigned int dup_common :1; - unsigned int is_common :1; - unsigned int quadrant :2; - unsigned int initially_frozen :1; - unsigned int is_first :1; - unsigned int code_only :1; - unsigned int sort_key :8; - unsigned int replicate_init :1; - unsigned int continuation :1; - unsigned int is_tspecific :1; - unsigned int is_comdat :1; - unsigned int reserved :4; - - int file_loc_init_value; - unsigned int initialization_length; - unsigned int subspace_start; - unsigned int subspace_length; - - unsigned int reserved2 :5; - unsigned int alignment :27; - - union name_pt name; - int fixup_request_index; - unsigned int fixup_request_quantity; -}; - -#endif /* _LINUX_SOM_H */ -- cgit v1.2.3 From e59b4e9187bd5175b9845dc10fedb0879b7efbfd Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 21 Jan 2015 20:03:40 +0000 Subject: debugfs: Provide a file creation function that also takes an initial size Provide a file creation function that also takes an initial size so that the caller doesn't have to set i_size, thus meaning that we don't have to call deal with ->d_inode in the callers. 
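A caller-side sketch of the new helper follows, assuming a hypothetical module that exposes a fixed-size register dump; apart from debugfs_create_file_size() itself, whose signature is taken from the hunk below, every name here is made up. The point is that the initial size is passed in at creation time, so the caller never has to reach into ->d_inode afterwards. Being kernel code, this builds as a module rather than running standalone.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/fs.h>

static struct dentry *demo_dir;
static char demo_regs[4096];		/* pretend register snapshot */

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, count, ppos, demo_regs,
				       sizeof(demo_regs));
}

static const struct file_operations demo_fops = {
	.owner	= THIS_MODULE,
	.read	= demo_read,
	.llseek	= default_llseek,
};

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("size_demo", NULL);
	if (!demo_dir)
		return -ENODEV;

	/* i_size is set for us; no poking at ->d_inode afterwards. */
	debugfs_create_file_size("regs", 0400, demo_dir, NULL,
				 &demo_fops, sizeof(demo_regs));
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");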
Signed-off-by: David Howells Signed-off-by: Al Viro --- drivers/infiniband/hw/cxgb4/device.c | 35 ++++++------------- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 9 ++--- drivers/scsi/csiostor/csio_init.c | 9 ++--- drivers/usb/gadget/udc/atmel_usba_udc.c | 15 ++++---- fs/debugfs/inode.c | 40 ++++++++++++++++++++++ include/linux/debugfs.h | 13 +++++++ 6 files changed, 79 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index eb5df4e62703..391309a55360 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -700,37 +700,24 @@ static const struct file_operations ep_debugfs_fops = { static int setup_debugfs(struct c4iw_dev *devp) { - struct dentry *de; - if (!devp->debugfs_root) return -1; - de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root, - (void *)devp, &qp_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root, + (void *)devp, &qp_debugfs_fops, 4096); - de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root, - (void *)devp, &stag_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root, + (void *)devp, &stag_debugfs_fops, 4096); - de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root, - (void *)devp, &stats_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root, + (void *)devp, &stats_debugfs_fops, 4096); - de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root, - (void *)devp, &ep_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; + debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root, + (void *)devp, &ep_debugfs_fops, 4096); - if (c4iw_wr_log) { - de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root, - (void *)devp, &wr_log_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = 4096; - } + if (c4iw_wr_log) + debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root, + (void *)devp, &wr_log_debugfs_fops, 4096); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index c98a350d857e..4c7fe447e407 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -91,12 +91,9 @@ static const struct file_operations mem_debugfs_fops = { static void add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) { - struct dentry *de; - - de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, - (void *)adap + idx, &mem_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = size_mb << 20; + debugfs_create_file_size(name, S_IRUSR, adap->debugfs_root, + (void *)adap + idx, &mem_debugfs_fops, + size_mb << 20); } /* Add an array of Debug FS files. 
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 34d20cc3e110..2617f15eaeeb 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -113,12 +113,9 @@ static const struct file_operations csio_mem_debugfs_fops = { void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, unsigned int idx, unsigned int size_mb) { - struct dentry *de; - - de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root, - (void *)hw + idx, &csio_mem_debugfs_fops); - if (de && de->d_inode) - de->d_inode->i_size = size_mb << 20; + debugfs_create_file_size(name, S_IRUSR, hw->debugfs_root, + (void *)hw + idx, &csio_mem_debugfs_fops, + size_mb << 20); } static int csio_setup_debugfs(struct csio_hw *hw) diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 9f93bed42052..b2239a2d4941 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -264,14 +264,17 @@ static void usba_init_debugfs(struct usba_udc *udc) goto err_root; udc->debugfs_root = root; - regs = debugfs_create_file("regs", 0400, root, udc, ®s_dbg_fops); - if (!regs) - goto err_regs; - regs_resource = platform_get_resource(udc->pdev, IORESOURCE_MEM, CTRL_IOMEM_ID); - regs->d_inode->i_size = resource_size(regs_resource); - udc->debugfs_regs = regs; + + if (regs_resource) { + regs = debugfs_create_file_size("regs", 0400, root, udc, + ®s_dbg_fops, + resource_size(regs_resource)); + if (!regs) + goto err_regs; + udc->debugfs_regs = regs; + } usba_ep_init_debugfs(udc, to_usba_ep(udc->gadget.ep0)); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 957c40ce09f6..45b18a5e225c 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -337,6 +337,46 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_file); +/** + * debugfs_create_file_size - create a file in the debugfs filesystem + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is NULL, then the + * file will be created in the root of the debugfs filesystem. + * @data: a pointer to something that the caller will want to get to later + * on. The inode.i_private pointer will point to this value on + * the open() call. + * @fops: a pointer to a struct file_operations that should be used for + * this file. + * @file_size: initial file size + * + * This is the basic "create a file" function for debugfs. It allows for a + * wide range of flexibility in creating a file, or a directory (if you want + * to create a directory, the debugfs_create_dir() function is + * recommended to be used instead.) + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the debugfs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, %NULL will be returned. + * + * If debugfs is not enabled in the kernel, the value -%ENODEV will be + * returned. 
+ */ +struct dentry *debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size) +{ + struct dentry *de = debugfs_create_file(name, mode, parent, data, fops); + + if (de) + de->d_inode->i_size = file_size; + return de; +} +EXPORT_SYMBOL_GPL(debugfs_create_file_size); + /** * debugfs_create_dir - create a directory in the debugfs filesystem * @name: a pointer to a string containing the name of the directory to diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index ea149a24a1f2..cb25af461054 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -51,6 +51,11 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); +struct dentry *debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size); + struct dentry *debugfs_create_dir(const char *name, struct dentry *parent); struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, @@ -129,6 +134,14 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, return ERR_PTR(-ENODEV); } +static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops, + loff_t file_size) +{ + return ERR_PTR(-ENODEV); +} + static inline struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) { -- cgit v1.2.3 From c4e136cda11cb5f87683dd5b154a2d15ea5898b3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 16 Feb 2015 19:37:42 -0500 Subject: locks: only remove leases associated with the file being closed We don't want to remove all leases just because one filp was closed. Signed-off-by: Jeff Layton --- fs/locks.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 7998f670812c..fe8f9f46445b 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2435,7 +2435,8 @@ locks_remove_lease(struct file *filp) spin_lock(&ctx->flc_lock); list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) - lease_modify(fl, F_UNLCK, &dispose); + if (filp == fl->fl_file) + lease_modify(fl, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); locks_dispose_list(&dispose); } -- cgit v1.2.3 From 267f1128583074b575b90a58de4dcb12dd25af96 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 17 Feb 2015 14:44:08 -0500 Subject: locks: remove conditional lock release in middle of flock_lock_file As Linus pointed out: Say we have an existing flock, and now do a new one that conflicts. I see what looks like three separate bugs. - We go through the first loop, find a lock of another type, and delete it in preparation for replacing it - we *drop* the lock context spinlock. - BUG #1? So now there is no lock at all, and somebody can come in and see that unlocked state. Is that really valid? - another thread comes in while the first thread dropped the lock context lock, and wants to add its own lock. It doesn't see the deleted or pending locks, so it just adds it - the first thread gets the context spinlock again, and adds the lock that replaced the original - BUG #2? So now there are *two* locks on the thing, and the next time you do an unlock (or when you close the file), it will only remove/replace the first one. 
...remove the "drop the spinlock" code in the middle of this function as it has always been suspicious. This should eliminate the potential race that can leave two locks for the same struct file on the list. He also pointed out another thing as a bug -- namely that you flock_lock_file removes the lock from the list unconditionally when doing a lock upgrade, without knowing whether it'll be able to set the new lock. Bruce pointed out that this is expected behavior and may help prevent certain deadlock situations. We may want to revisit that at some point, but it's probably best that we do so in the context of a different patchset. Reported-by: Linus Torvalds Signed-off-by: Jeff Layton --- fs/locks.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index fe8f9f46445b..90b652ad306f 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -901,16 +901,6 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) goto out; } - /* - * If a higher-priority process was blocked on the old file lock, - * give it the opportunity to lock the file. - */ - if (found) { - spin_unlock(&ctx->flc_lock); - cond_resched(); - spin_lock(&ctx->flc_lock); - } - find_conflict: list_for_each_entry(fl, &ctx->flc_flock, fl_list) { if (!flock_locks_conflict(request, fl)) -- cgit v1.2.3 From 2e2f756f81edd7c3ba6ed384385ae1d6491652eb Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 17 Feb 2015 17:08:23 -0500 Subject: locks: fix list insertion when lock is split in two In the case where we're splitting a lock in two, the current code the new "left" lock in the incorrect spot. It's inserted just before "right" when it should instead be inserted just before the new lock. When we add a new lock, set "fl" to that value so that we can add "left" before it. Reported-by: Al Viro Signed-off-by: Jeff Layton --- fs/locks.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 90b652ad306f..365c82e1b3a9 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1107,6 +1107,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str } locks_copy_lock(new_fl, request); locks_insert_lock_ctx(new_fl, &fl->fl_list); + fl = new_fl; new_fl = NULL; } if (right) { -- cgit v1.2.3 From 111d639dd659bc1496a63cb8854abab8a15f3728 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:45:23 -0800 Subject: fs/befs/linuxvfs.c: remove unnecessary casting Fix the following coccinelle warning: fs/befs/linuxvfs.c:278:14-36: WARNING: casting value returned by memory allocation function to (struct befs_inode_info *) is useless. 
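The coccinelle warning quoted above is the usual C point that a void pointer returned by an allocator converts implicitly to any object pointer type, so the cast is pure noise and can even hide a missing prototype. A userspace analogue of the change, with an invented struct name:

#include <stdlib.h>
#include <stdio.h>

struct demo_inode_info {
	int magic;
};

int main(void)
{
	/* No cast needed: malloc() returns void *, which converts
	 * implicitly to any object pointer type in C. */
	struct demo_inode_info *bi = malloc(sizeof(*bi));

	if (!bi)
		return 1;
	bi->magic = 0xbef5;
	printf("allocated %zu bytes, magic 0x%x\n", sizeof(*bi), bi->magic);
	free(bi);
	return 0;
}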
[akpm@linux-foundation.org: avoid 80-col ugliness] Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/befs/linuxvfs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index edf47774b03d..e089f1985fca 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -274,9 +274,9 @@ more: static struct inode * befs_alloc_inode(struct super_block *sb) { - struct befs_inode_info *bi; - bi = (struct befs_inode_info *)kmem_cache_alloc(befs_inode_cachep, - GFP_KERNEL); + struct befs_inode_info *bi; + + bi = kmem_cache_alloc(befs_inode_cachep, GFP_KERNEL); if (!bi) return NULL; return &bi->vfs_inode; -- cgit v1.2.3 From b625032b10222c4406979c7604189f2bef29c5d0 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:45:25 -0800 Subject: fs/coda/dir.c: forward declaration clean-up - Move operation structures to avoid forward declarations. - Fix some checkpatch warnings: WARNING: Missing a blank line after declarations + struct inode *host_inode = file_inode(host_file); + mutex_lock(&host_inode->i_mutex); ERROR: that open brace { should be on the previous line +const struct dentry_operations coda_dentry_operations = +{ ERROR: that open brace { should be on the previous line +const struct inode_operations coda_dir_inode_operations = +{ Signed-off-by: Fabian Frederick Cc: Jan Harkes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/coda/dir.c | 138 ++++++++++++++++++++++++---------------------------------- 1 file changed, 56 insertions(+), 82 deletions(-) (limited to 'fs') diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 86c893884eb9..281ee011bb6a 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -28,29 +28,6 @@ #include "coda_int.h" -/* dir inode-ops */ -static int coda_create(struct inode *dir, struct dentry *new, umode_t mode, bool excl); -static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, unsigned int flags); -static int coda_link(struct dentry *old_dentry, struct inode *dir_inode, - struct dentry *entry); -static int coda_unlink(struct inode *dir_inode, struct dentry *entry); -static int coda_symlink(struct inode *dir_inode, struct dentry *entry, - const char *symname); -static int coda_mkdir(struct inode *dir_inode, struct dentry *entry, umode_t mode); -static int coda_rmdir(struct inode *dir_inode, struct dentry *entry); -static int coda_rename(struct inode *old_inode, struct dentry *old_dentry, - struct inode *new_inode, struct dentry *new_dentry); - -/* dir file-ops */ -static int coda_readdir(struct file *file, struct dir_context *ctx); - -/* dentry ops */ -static int coda_dentry_revalidate(struct dentry *de, unsigned int flags); -static int coda_dentry_delete(const struct dentry *); - -/* support routines */ -static int coda_venus_readdir(struct file *, struct dir_context *); - /* same as fs/bad_inode.c */ static int coda_return_EIO(void) { @@ -58,38 +35,6 @@ static int coda_return_EIO(void) } #define CODA_EIO_ERROR ((void *) (coda_return_EIO)) -const struct dentry_operations coda_dentry_operations = -{ - .d_revalidate = coda_dentry_revalidate, - .d_delete = coda_dentry_delete, -}; - -const struct inode_operations coda_dir_inode_operations = -{ - .create = coda_create, - .lookup = coda_lookup, - .link = coda_link, - .unlink = coda_unlink, - .symlink = coda_symlink, - .mkdir = coda_mkdir, - .rmdir = coda_rmdir, - .mknod = CODA_EIO_ERROR, - .rename = coda_rename, - .permission = coda_permission, - .getattr = 
coda_getattr, - .setattr = coda_setattr, -}; - -const struct file_operations coda_dir_operations = { - .llseek = generic_file_llseek, - .read = generic_read_dir, - .iterate = coda_readdir, - .open = coda_open, - .release = coda_release, - .fsync = coda_fsync, -}; - - /* inode operations for directories */ /* access routines: lookup, readlink, permission */ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsigned int flags) @@ -374,33 +319,6 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, return error; } - -/* file operations for directories */ -static int coda_readdir(struct file *coda_file, struct dir_context *ctx) -{ - struct coda_file_info *cfi; - struct file *host_file; - int ret; - - cfi = CODA_FTOC(coda_file); - BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); - host_file = cfi->cfi_container; - - if (host_file->f_op->iterate) { - struct inode *host_inode = file_inode(host_file); - mutex_lock(&host_inode->i_mutex); - ret = -ENOENT; - if (!IS_DEADDIR(host_inode)) { - ret = host_file->f_op->iterate(host_file, ctx); - file_accessed(host_file); - } - mutex_unlock(&host_inode->i_mutex); - return ret; - } - /* Venus: we must read Venus dirents from a file */ - return coda_venus_readdir(coda_file, ctx); -} - static inline unsigned int CDT2DT(unsigned char cdt) { unsigned int dt; @@ -495,6 +413,33 @@ out: return 0; } +/* file operations for directories */ +static int coda_readdir(struct file *coda_file, struct dir_context *ctx) +{ + struct coda_file_info *cfi; + struct file *host_file; + int ret; + + cfi = CODA_FTOC(coda_file); + BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); + host_file = cfi->cfi_container; + + if (host_file->f_op->iterate) { + struct inode *host_inode = file_inode(host_file); + + mutex_lock(&host_inode->i_mutex); + ret = -ENOENT; + if (!IS_DEADDIR(host_inode)) { + ret = host_file->f_op->iterate(host_file, ctx); + file_accessed(host_file); + } + mutex_unlock(&host_inode->i_mutex); + return ret; + } + /* Venus: we must read Venus dirents from a file */ + return coda_venus_readdir(coda_file, ctx); +} + /* called when a cache lookup succeeds */ static int coda_dentry_revalidate(struct dentry *de, unsigned int flags) { @@ -603,3 +548,32 @@ int coda_revalidate_inode(struct inode *inode) } return 0; } + +const struct dentry_operations coda_dentry_operations = { + .d_revalidate = coda_dentry_revalidate, + .d_delete = coda_dentry_delete, +}; + +const struct inode_operations coda_dir_inode_operations = { + .create = coda_create, + .lookup = coda_lookup, + .link = coda_link, + .unlink = coda_unlink, + .symlink = coda_symlink, + .mkdir = coda_mkdir, + .rmdir = coda_rmdir, + .mknod = CODA_EIO_ERROR, + .rename = coda_rename, + .permission = coda_permission, + .getattr = coda_getattr, + .setattr = coda_setattr, +}; + +const struct file_operations coda_dir_operations = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .iterate = coda_readdir, + .open = coda_open, + .release = coda_release, + .fsync = coda_fsync, +}; -- cgit v1.2.3 From 61da3ae241f4382e30beb6de06c4dacada37f520 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:45:28 -0800 Subject: fs/ufs/super.c: remove unnecessary casting Fix the following coccinelle warning: fs/ufs/super.c:1418:7-28: WARNING: casting value returned by memory allocation function to (struct ufs_inode_info *) is useless. 
Signed-off-by: Fabian Frederick Cc: Evgeniy Dushistov Cc: Joe Perches Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ufs/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ufs/super.c b/fs/ufs/super.c index da73801301d5..e515e99a02f9 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1415,9 +1415,11 @@ static struct kmem_cache * ufs_inode_cachep; static struct inode *ufs_alloc_inode(struct super_block *sb) { struct ufs_inode_info *ei; - ei = (struct ufs_inode_info *)kmem_cache_alloc(ufs_inode_cachep, GFP_NOFS); + + ei = kmem_cache_alloc(ufs_inode_cachep, GFP_NOFS); if (!ei) return NULL; + ei->vfs_inode.i_version = 1; return &ei->vfs_inode; } -- cgit v1.2.3 From ed3ad79f87f31beed64778af0b29aff3074f700e Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:45:31 -0800 Subject: fs/ufs/super.c: fix potential race condition Let locking subsystem decide on mutex management. As reported by Andrew Morton this patch fixes a bug: : lock_ufs() is assuming that on non-preempt uniprocessor, the calling : code will run atomically up to the matching unlock_ufs(). : : But that isn't true. The very first site I looked at (ufs_frag_map) : does sb_bread() under lock_ufs(). And sb_bread() will call schedule(), : very commonly. : : The ->mutex_owner stuff is a bit hacky but should work OK. Signed-off-by: Fabian Frederick Cc: Evgeniy Dushistov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ufs/super.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/ufs/super.c b/fs/ufs/super.c index e515e99a02f9..8092d3759a5e 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -95,22 +95,18 @@ void lock_ufs(struct super_block *sb) { -#if defined(CONFIG_SMP) || defined (CONFIG_PREEMPT) struct ufs_sb_info *sbi = UFS_SB(sb); mutex_lock(&sbi->mutex); sbi->mutex_owner = current; -#endif } void unlock_ufs(struct super_block *sb) { -#if defined(CONFIG_SMP) || defined (CONFIG_PREEMPT) struct ufs_sb_info *sbi = UFS_SB(sb); sbi->mutex_owner = NULL; mutex_unlock(&sbi->mutex); -#endif } static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) -- cgit v1.2.3 From 714b71a3a91f63e0852ad9a07edc3820800c681f Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:45:33 -0800 Subject: fs/reiserfs/inode.c: replace 0 by NULL for pointers Fix sparse warning: fs/reiserfs/inode.c:2769:19: warning: Using plain integer as NULL pointer Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/reiserfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index a7eec9888f10..e72401e1f995 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -2766,7 +2766,7 @@ static int reiserfs_write_begin(struct file *file, int old_ref = 0; inode = mapping->host; - *fsdata = 0; + *fsdata = NULL; if (flags & AOP_FLAG_CONT_EXPAND && (pos & (inode->i_sb->s_blocksize - 1)) == 0) { pos ++; -- cgit v1.2.3 From d6bd428275f3f470fc7cf6624b737c6d7805b44b Mon Sep 17 00:00:00 2001 From: Fred Chou Date: Tue, 17 Feb 2015 13:45:36 -0800 Subject: fs: fat: use MSDOS_SB macro to get msdos_sb_info Use the MSDOS_SB macro to get msdos_sb_info, instead of coding it directly. 
Signed-off-by: Fred Chou Acked-by: OGAWA Hirofumi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fat/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 7b41a2dcdd76..497c7c5263c7 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -580,7 +580,7 @@ static void fat_set_state(struct super_block *sb, { struct buffer_head *bh; struct fat_boot_sector *b; - struct msdos_sb_info *sbi = sb->s_fs_info; + struct msdos_sb_info *sbi = MSDOS_SB(sb); /* do not change any thing if mounted read only */ if ((sb->s_flags & MS_RDONLY) && !force) -- cgit v1.2.3 From 34b47764297130b21aaeb4cc6119bb811814b8e3 Mon Sep 17 00:00:00 2001 From: WANG Chao Date: Tue, 17 Feb 2015 13:46:01 -0800 Subject: vmcore: fix PT_NOTE n_namesz, n_descsz overflow issue When updating PT_NOTE header size (ie. p_memsz), an overflow issue happens with the following bogus note entry: n_namesz = 0xFFFFFFFF n_descsz = 0x0 n_type = 0x0 This kind of note entry should be dropped during updating p_memsz. But because n_namesz is 32bit, after (n_namesz + 3) & (~3), it's overflow to 0x0, the note entry size looks sane and reserved. When userspace (eg. crash utility) is trying to access such bogus note, it could lead to an unexpected behavior (eg. crash utility segment fault because it's reading bogus address). The source of bogus note hasn't been identified yet. At least we could drop the bogus note so user space wouldn't be surprised. Signed-off-by: WANG Chao Cc: Dave Anderson Cc: Baoquan He Cc: Randy Wright Cc: Vivek Goyal Cc: Paul Gortmaker Cc: Fabian Frederick Cc: Vitaly Kuznetsov Cc: Rashika Kheria Cc: Greg Pearson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/vmcore.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index a90d6d354199..4e61388ec03d 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -546,8 +546,8 @@ static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr) nhdr_ptr = notes_section; while (nhdr_ptr->n_namesz != 0) { sz = sizeof(Elf64_Nhdr) + - ((nhdr_ptr->n_namesz + 3) & ~3) + - ((nhdr_ptr->n_descsz + 3) & ~3); + (((u64)nhdr_ptr->n_namesz + 3) & ~3) + + (((u64)nhdr_ptr->n_descsz + 3) & ~3); if ((real_sz + sz) > max_sz) { pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n", nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); @@ -732,8 +732,8 @@ static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr) nhdr_ptr = notes_section; while (nhdr_ptr->n_namesz != 0) { sz = sizeof(Elf32_Nhdr) + - ((nhdr_ptr->n_namesz + 3) & ~3) + - ((nhdr_ptr->n_descsz + 3) & ~3); + (((u64)nhdr_ptr->n_namesz + 3) & ~3) + + (((u64)nhdr_ptr->n_descsz + 3) & ~3); if ((real_sz + sz) > max_sz) { pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n", nhdr_ptr->n_namesz, nhdr_ptr->n_descsz); -- cgit v1.2.3 From e22553e2a25ed3f2a9c874088e0f20cdcd97c7b0 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Tue, 17 Feb 2015 13:46:07 -0800 Subject: eventfd: don't take the spinlock in eventfd_poll The spinlock in eventfd_poll is trying to protect the count of events so it can decide if it should return POLLIN, POLLERR, or POLLOUT. But, because of the way we drop the lock after calling poll_wait, and drop it again before returning, we have the same pile of races with the lock as we do with a single read of ctx->count(). 
This replaces the lock with a read barrier and single read. eventfd_write does a single bump of ctx->count, so this should not add new races with adding events. eventfd_read is similar, it will do a single decrement with the lock held, and so we're making the race with concurrent readers slightly larger. This spinlock is the top CPU user in kernel code during one of our workloads. Removing it gives us a ~2% boost. [arnd@arndb.de: avoid unused variable warning] [dan.carpenter@oracle.com: type bug in eventfd_poll()] Signed-off-by: Chris Mason Cc: Davide Libenzi Signed-off-by: Arnd Bergmann Signed-off-by: Dan Carpenter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventfd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/eventfd.c b/fs/eventfd.c index 4b0a226024fa..8d0c0df01854 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -118,18 +118,18 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait) { struct eventfd_ctx *ctx = file->private_data; unsigned int events = 0; - unsigned long flags; + u64 count; poll_wait(file, &ctx->wqh, wait); + smp_rmb(); + count = ctx->count; - spin_lock_irqsave(&ctx->wqh.lock, flags); - if (ctx->count > 0) + if (count > 0) events |= POLLIN; - if (ctx->count == ULLONG_MAX) + if (count == ULLONG_MAX) events |= POLLERR; - if (ULLONG_MAX - 1 > ctx->count) + if (ULLONG_MAX - 1 > count) events |= POLLOUT; - spin_unlock_irqrestore(&ctx->wqh.lock, flags); return events; } -- cgit v1.2.3 From 08fe100d91bc09baca9eb22206f6b050286bd43c Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 17 Feb 2015 13:46:10 -0800 Subject: fs/affs: fix casting in printed messages - "inode.i_ino" is "unsigned long", - "loff_t" is always "unsigned long long", - "sector_t" should be cast to "unsigned long long" for printing, - "u32" should not be cast to "unsigned int" for printing. 
Signed-off-by: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/amigaffs.c | 6 +++--- fs/affs/dir.c | 8 +++----- fs/affs/file.c | 36 ++++++++++++++++++------------------ fs/affs/inode.c | 5 ++--- fs/affs/namei.c | 18 +++++++----------- 5 files changed, 33 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index c852f2fa1710..511ab6b12618 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -30,7 +30,7 @@ affs_insert_hash(struct inode *dir, struct buffer_head *bh) ino = bh->b_blocknr; offset = affs_hash_name(sb, AFFS_TAIL(sb, bh)->name + 1, AFFS_TAIL(sb, bh)->name[0]); - pr_debug("%s(dir=%u, ino=%d)\n", __func__, (u32)dir->i_ino, ino); + pr_debug("%s(dir=%lu, ino=%d)\n", __func__, dir->i_ino, ino); dir_bh = affs_bread(sb, dir->i_ino); if (!dir_bh) @@ -80,8 +80,8 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh) sb = dir->i_sb; rem_ino = rem_bh->b_blocknr; offset = affs_hash_name(sb, AFFS_TAIL(sb, rem_bh)->name+1, AFFS_TAIL(sb, rem_bh)->name[0]); - pr_debug("%s(dir=%d, ino=%d, hashval=%d)\n", - __func__, (u32)dir->i_ino, rem_ino, offset); + pr_debug("%s(dir=%lu, ino=%d, hashval=%d)\n", __func__, dir->i_ino, + rem_ino, offset); bh = affs_bread(sb, dir->i_ino); if (!bh) diff --git a/fs/affs/dir.c b/fs/affs/dir.c index 59f07bec92a6..a682892878a8 100644 --- a/fs/affs/dir.c +++ b/fs/affs/dir.c @@ -54,8 +54,7 @@ affs_readdir(struct file *file, struct dir_context *ctx) u32 ino; int error = 0; - pr_debug("%s(ino=%lu,f_pos=%lx)\n", - __func__, inode->i_ino, (unsigned long)ctx->pos); + pr_debug("%s(ino=%lu,f_pos=%llx)\n", __func__, inode->i_ino, ctx->pos); if (ctx->pos < 2) { file->private_data = (void *)0; @@ -117,9 +116,8 @@ inside: namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)30); name = AFFS_TAIL(sb, fh_bh)->name + 1; - pr_debug("readdir(): dir_emit(\"%.*s\", " - "ino=%u), hash=%d, f_pos=%x\n", - namelen, name, ino, hash_pos, (u32)ctx->pos); + pr_debug("readdir(): dir_emit(\"%.*s\", ino=%u), hash=%d, f_pos=%llx\n", + namelen, name, ino, hash_pos, ctx->pos); if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN)) goto done; diff --git a/fs/affs/file.c b/fs/affs/file.c index 8faa6593ca6d..40a024a9b41d 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -299,8 +299,8 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul struct buffer_head *ext_bh; u32 ext; - pr_debug("%s(%u, %lu)\n", - __func__, (u32)inode->i_ino, (unsigned long)block); + pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino, + (unsigned long long)block); BUG_ON(block > (sector_t)0x7fffffffUL); @@ -330,8 +330,9 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul /* store new block */ if (bh_result->b_blocknr) - affs_warning(sb, "get_block", "block already set (%lx)", - (unsigned long)bh_result->b_blocknr); + affs_warning(sb, "get_block", + "block already set (%llx)", + (unsigned long long)bh_result->b_blocknr); AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr); AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1); affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1); @@ -353,8 +354,8 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul return 0; err_big: - affs_error(inode->i_sb, "get_block", "strange block request %d", - (int)block); + affs_error(inode->i_sb, "get_block", "strange block request %llu", + (unsigned long long)block); return -EIO; err_ext: // unlock cache @@ -503,7 +504,7 @@ 
affs_do_readpage_ofs(struct page *page, unsigned to) u32 bidx, boff, bsize; u32 tmp; - pr_debug("%s(%u, %ld, 0, %d)\n", __func__, (u32)inode->i_ino, + pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino, page->index, to); BUG_ON(to > PAGE_CACHE_SIZE); kmap(page); @@ -539,7 +540,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize) u32 size, bsize; u32 tmp; - pr_debug("%s(%u, %d)\n", __func__, (u32)inode->i_ino, newsize); + pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize); bsize = AFFS_SB(sb)->s_data_blksize; bh = NULL; size = AFFS_I(inode)->mmu_private; @@ -608,7 +609,7 @@ affs_readpage_ofs(struct file *file, struct page *page) u32 to; int err; - pr_debug("%s(%u, %ld)\n", __func__, (u32)inode->i_ino, page->index); + pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index); to = PAGE_CACHE_SIZE; if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) { to = inode->i_size & ~PAGE_CACHE_MASK; @@ -631,8 +632,8 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping pgoff_t index; int err = 0; - pr_debug("%s(%u, %llu, %llu)\n", __func__, (u32)inode->i_ino, - (unsigned long long)pos, (unsigned long long)pos + len); + pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos, + pos + len); if (pos > AFFS_I(inode)->mmu_private) { /* XXX: this probably leaves a too-big i_size in case of * failure. Should really be updating i_size at write_end time @@ -681,9 +682,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, * due to write_begin. */ - pr_debug("%s(%u, %llu, %llu)\n", - __func__, (u32)inode->i_ino, (unsigned long long)pos, - (unsigned long long)pos + len); + pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos, + pos + len); bsize = AFFS_SB(sb)->s_data_blksize; data = page_address(page); @@ -831,8 +831,8 @@ affs_truncate(struct inode *inode) struct buffer_head *ext_bh; int i; - pr_debug("truncate(inode=%d, oldsize=%u, newsize=%u)\n", - (u32)inode->i_ino, (u32)AFFS_I(inode)->mmu_private, (u32)inode->i_size); + pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n", + inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size); last_blk = 0; ext = 0; @@ -863,7 +863,7 @@ affs_truncate(struct inode *inode) if (IS_ERR(ext_bh)) { affs_warning(sb, "truncate", "unexpected read error for ext block %u (%ld)", - (unsigned int)ext, PTR_ERR(ext_bh)); + ext, PTR_ERR(ext_bh)); return; } if (AFFS_I(inode)->i_lc) { @@ -911,7 +911,7 @@ affs_truncate(struct inode *inode) if (IS_ERR(bh)) { affs_warning(sb, "truncate", "unexpected read error for last block %u (%ld)", - (unsigned int)ext, PTR_ERR(bh)); + ext, PTR_ERR(bh)); return; } tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next); diff --git a/fs/affs/inode.c b/fs/affs/inode.c index d0609a282e1d..25cb4b43f2f1 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -348,9 +348,8 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3 u32 block = 0; int retval; - pr_debug("%s(dir=%u, inode=%u, \"%pd\", type=%d)\n", - __func__, (u32)dir->i_ino, - (u32)inode->i_ino, dentry, type); + pr_debug("%s(dir=%lu, inode=%lu, \"%pd\", type=%d)\n", __func__, + dir->i_ino, inode->i_ino, dentry, type); retval = -EIO; bh = affs_bread(sb, inode->i_ino); diff --git a/fs/affs/namei.c b/fs/affs/namei.c index bbc38530e924..de84f4d3e9ec 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -248,9 +248,8 @@ affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) int affs_unlink(struct inode *dir, struct dentry *dentry) { - pr_debug("%s(dir=%d, 
%lu \"%pd\")\n", - __func__, (u32)dir->i_ino, dentry->d_inode->i_ino, - dentry); + pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, + dentry->d_inode->i_ino, dentry); return affs_remove_header(dentry); } @@ -317,9 +316,8 @@ affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) int affs_rmdir(struct inode *dir, struct dentry *dentry) { - pr_debug("%s(dir=%u, %lu \"%pd\")\n", - __func__, (u32)dir->i_ino, dentry->d_inode->i_ino, - dentry); + pr_debug("%s(dir=%lu, %lu \"%pd\")\n", __func__, dir->i_ino, + dentry->d_inode->i_ino, dentry); return affs_remove_header(dentry); } @@ -404,8 +402,7 @@ affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; - pr_debug("%s(%u, %u, \"%pd\")\n", - __func__, (u32)inode->i_ino, (u32)dir->i_ino, + pr_debug("%s(%lu, %lu, \"%pd\")\n", __func__, inode->i_ino, dir->i_ino, dentry); return affs_add_entry(dir, inode, dentry, ST_LINKFILE); @@ -419,9 +416,8 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry, struct buffer_head *bh = NULL; int retval; - pr_debug("%s(old=%u,\"%pd\" to new=%u,\"%pd\")\n", - __func__, (u32)old_dir->i_ino, old_dentry, - (u32)new_dir->i_ino, new_dentry); + pr_debug("%s(old=%lu,\"%pd\" to new=%lu,\"%pd\")\n", __func__, + old_dir->i_ino, old_dentry, new_dir->i_ino, new_dentry); retval = affs_check_name(new_dentry->d_name.name, new_dentry->d_name.len, -- cgit v1.2.3 From afe305dcc96edc06aec923a0f5fa07ff654b2489 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:12 -0800 Subject: fs/affs/file.c: replace if/BUG by BUG_ON Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/file.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/affs/file.c b/fs/affs/file.c index 40a024a9b41d..7e83ba22bed4 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -180,8 +180,7 @@ affs_get_extblock_slow(struct inode *inode, u32 ext) ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension); if (ext < AFFS_I(inode)->i_extcnt) goto read_ext; - if (ext > AFFS_I(inode)->i_extcnt) - BUG(); + BUG_ON(ext > AFFS_I(inode)->i_extcnt); bh = affs_alloc_extblock(inode, bh, ext); if (IS_ERR(bh)) return bh; @@ -198,8 +197,7 @@ affs_get_extblock_slow(struct inode *inode, u32 ext) struct buffer_head *prev_bh; /* allocate a new extended block */ - if (ext > AFFS_I(inode)->i_extcnt) - BUG(); + BUG_ON(ext > AFFS_I(inode)->i_extcnt); /* get previous extended block */ prev_bh = affs_get_extblock(inode, ext - 1); -- cgit v1.2.3 From 92b20708f9f0c6429b3b6865de567e721f509c75 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:15 -0800 Subject: fs/affs/file.c: fix direct IO writes beyond EOF Use the same fallback to normal IO in case of write operations beyond EOF as fat direct IO. This patch fixes fsx file -d -Z -r 4096 -w 4096 Report: 129(129 mod 256): TRUNCATE DOWN from 0x3ff01 to 0xb3f6 130(130 mod 256): WRITE 0x22000 thru 0x2dfff (0xc000 bytes) HOLE Thanks to Jan for helping me on this problem. The ideal solution suggested by Jan Kara would be to use cont_write_begin() but affs direct_IO shouldn't be used a lot anyway... 
Signed-off-by: Fabian Frederick Reviewed-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/file.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/affs/file.c b/fs/affs/file.c index 7e83ba22bed4..d2468bf95669 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -398,6 +398,13 @@ affs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, size_t count = iov_iter_count(iter); ssize_t ret; + if (rw == WRITE) { + loff_t size = offset + count; + + if (AFFS_I(inode)->mmu_private < size) + return 0; + } + ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block); if (ret < 0 && (rw & WRITE)) affs_write_failed(mapping, offset + count); -- cgit v1.2.3 From 4d29e571e1942f8f418bf776af0134a9cb5a35c9 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:17 -0800 Subject: fs/affs/super.c: destroy sbi mutex in affs_kill_sb() Call mutex_destroy() on superblock mutex in affs_kill_sb() otherwise mutex debugging code isn't able to detect that mutex is used after being freed. (thanks to Jan Kara for complete definition). Signed-off-by: Fabian Frederick Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/affs/super.c b/fs/affs/super.c index f754ab68a840..ee8eca7add0e 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -602,6 +602,7 @@ static void affs_kill_sb(struct super_block *sb) affs_free_bitmap(sb); affs_brelse(sbi->s_root_bh); kfree(sbi->s_prefix); + mutex_destroy(&sbi->s_bmlock); kfree(sbi); } } -- cgit v1.2.3 From eeb36f8e938d151fc5e12e96ae13d0e283be357e Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:20 -0800 Subject: fs/affs: use unsigned int for string lengths - Some min() were used with different types. 
- Create a new variable in __affs_hash_dentry() to process affs_check_name()/min() return Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/amigaffs.c | 2 +- fs/affs/namei.c | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 511ab6b12618..0836f6fff641 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -508,7 +508,7 @@ affs_check_name(const unsigned char *name, int len, bool notruncate) int affs_copy_name(unsigned char *bstr, struct dentry *dentry) { - int len = min(dentry->d_name.len, 30u); + u32 len = min(dentry->d_name.len, 30u); *bstr++ = len; memcpy(bstr, dentry->d_name.name, len); diff --git a/fs/affs/namei.c b/fs/affs/namei.c index de84f4d3e9ec..66c6cb349bf6 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -64,15 +64,16 @@ __affs_hash_dentry(struct qstr *qstr, toupper_t toupper, bool notruncate) { const u8 *name = qstr->name; unsigned long hash; - int i; + int retval; + u32 len; - i = affs_check_name(qstr->name, qstr->len, notruncate); - if (i) - return i; + retval = affs_check_name(qstr->name, qstr->len, notruncate); + if (retval) + return retval; hash = init_name_hash(); - i = min(qstr->len, 30u); - for (; i > 0; name++, i--) + len = min(qstr->len, 30u); + for (; len > 0; name++, len--) hash = partial_name_hash(toupper(*name), hash); qstr->hash = end_name_hash(hash); @@ -173,7 +174,7 @@ int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len) { toupper_t toupper = affs_get_toupper(sb); - int hash; + u32 hash; hash = len = min(len, 30u); for (; len > 0; len--) -- cgit v1.2.3 From f157853e407c0611cd6acbc400fa6c7be420b1bd Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:23 -0800 Subject: fs/affs: define AFFSNAMEMAX to replace constant use 30 was used all over the place to compare name length against AFFS maximum name length. 
Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/affs.h | 2 ++ fs/affs/amigaffs.c | 6 +++--- fs/affs/dir.c | 3 ++- fs/affs/namei.c | 16 ++++++++-------- fs/affs/super.c | 2 +- 5 files changed, 16 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/affs/affs.h b/fs/affs/affs.h index ff44ff3ff015..c8764bd7497d 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -30,6 +30,8 @@ #define AFFS_AC_SIZE (AFFS_CACHE_SIZE/sizeof(struct affs_ext_key)/2) #define AFFS_AC_MASK (AFFS_AC_SIZE-1) +#define AFFSNAMEMAX 30U + struct affs_ext_key { u32 ext; /* idx of the extended block */ u32 key; /* block number */ diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 0836f6fff641..118d782b041f 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -483,11 +483,11 @@ affs_check_name(const unsigned char *name, int len, bool notruncate) { int i; - if (len > 30) { + if (len > AFFSNAMEMAX) { if (notruncate) return -ENAMETOOLONG; else - len = 30; + len = AFFSNAMEMAX; } for (i = 0; i < len; i++) { if (name[i] < ' ' || name[i] == ':' @@ -508,7 +508,7 @@ affs_check_name(const unsigned char *name, int len, bool notruncate) int affs_copy_name(unsigned char *bstr, struct dentry *dentry) { - u32 len = min(dentry->d_name.len, 30u); + u32 len = min(dentry->d_name.len, AFFSNAMEMAX); *bstr++ = len; memcpy(bstr, dentry->d_name.name, len); diff --git a/fs/affs/dir.c b/fs/affs/dir.c index a682892878a8..ac4f318aafba 100644 --- a/fs/affs/dir.c +++ b/fs/affs/dir.c @@ -114,7 +114,8 @@ inside: break; } - namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], (u8)30); + namelen = min(AFFS_TAIL(sb, fh_bh)->name[0], + (u8)AFFSNAMEMAX); name = AFFS_TAIL(sb, fh_bh)->name + 1; pr_debug("readdir(): dir_emit(\"%.*s\", ino=%u), hash=%d, f_pos=%llx\n", namelen, name, ino, hash_pos, ctx->pos); diff --git a/fs/affs/namei.c b/fs/affs/namei.c index 66c6cb349bf6..ffb7bd82c2a5 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -72,7 +72,7 @@ __affs_hash_dentry(struct qstr *qstr, toupper_t toupper, bool notruncate) return retval; hash = init_name_hash(); - len = min(qstr->len, 30u); + len = min(qstr->len, AFFSNAMEMAX); for (; len > 0; name++, len--) hash = partial_name_hash(toupper(*name), hash); qstr->hash = end_name_hash(hash); @@ -115,10 +115,10 @@ static inline int __affs_compare_dentry(unsigned int len, * If the names are longer than the allowed 30 chars, * the excess is ignored, so their length may differ. 
*/ - if (len >= 30) { - if (name->len < 30) + if (len >= AFFSNAMEMAX) { + if (name->len < AFFSNAMEMAX) return 1; - len = 30; + len = AFFSNAMEMAX; } else if (len != name->len) return 1; @@ -157,10 +157,10 @@ affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper) const u8 *name = dentry->d_name.name; int len = dentry->d_name.len; - if (len >= 30) { - if (*name2 < 30) + if (len >= AFFSNAMEMAX) { + if (*name2 < AFFSNAMEMAX) return 0; - len = 30; + len = AFFSNAMEMAX; } else if (len != *name2) return 0; @@ -176,7 +176,7 @@ affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len) toupper_t toupper = affs_get_toupper(sb); u32 hash; - hash = len = min(len, 30u); + hash = len = min(len, AFFSNAMEMAX); for (; len > 0; len--) hash = (hash * 13 + toupper(*name++)) & 0x7ff; diff --git a/fs/affs/super.c b/fs/affs/super.c index ee8eca7add0e..c3524bfdfe04 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -584,7 +584,7 @@ affs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_bavail = free; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); - buf->f_namelen = 30; + buf->f_namelen = AFFSNAMEMAX; return 0; } -- cgit v1.2.3 From b4478e3530288503704e1cc701c426174e4550f0 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:25 -0800 Subject: fs/affs/amigaffs.c: remove else after return else is unnecessary after return -ENAMETOOLONG Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/amigaffs.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 118d782b041f..388da1ea815d 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -486,8 +486,7 @@ affs_check_name(const unsigned char *name, int len, bool notruncate) if (len > AFFSNAMEMAX) { if (notruncate) return -ENAMETOOLONG; - else - len = AFFSNAMEMAX; + len = AFFSNAMEMAX; } for (i = 0; i < len; i++) { if (name[i] < ' ' || name[i] == ':' -- cgit v1.2.3 From 211c2af014d2c41752a13b652ae8b9815e07802c Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:28 -0800 Subject: fs/affs/bitmap.c: remove unnecessary return return is not needed at the end of function. 
Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/bitmap.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c index c8de51185c23..675148950fed 100644 --- a/fs/affs/bitmap.c +++ b/fs/affs/bitmap.c @@ -99,7 +99,6 @@ err_bh_read: err_range: affs_error(sb, "affs_free_block","Block %u outside partition", block); - return; } /* -- cgit v1.2.3 From 0cdfe18ad5ae0fbb9417ac2b5808189aaaa54230 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:30 -0800 Subject: fs/affs/inode.c: remove double extern affs_symlink_inode_operations affs_symlink_inode_operations was already declared extern in affs.h Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/inode.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/affs/inode.c b/fs/affs/inode.c index 25cb4b43f2f1..6f34510449e8 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -13,8 +13,6 @@ #include #include "affs.h" -extern const struct inode_operations affs_symlink_inode_operations; - struct inode *affs_iget(struct super_block *sb, unsigned long ino) { struct affs_sb_info *sbi = AFFS_SB(sb); -- cgit v1.2.3 From 0445f01a53ad53ef6b23307b4630ab92423994ab Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Tue, 17 Feb 2015 13:46:33 -0800 Subject: fs/affs/super.c: fix switch indentation Fix checkpatch error: ERROR: switch and case should be at the same indent Signed-off-by: Fabian Frederick Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/affs/super.c | 66 ++++++++++++++++++++++++++++----------------------------- 1 file changed, 33 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/affs/super.c b/fs/affs/super.c index c3524bfdfe04..4cf0e9113fb6 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -432,39 +432,39 @@ got_root: sb->s_flags |= MS_RDONLY; } switch (chksum) { - case MUFS_FS: - case MUFS_INTLFFS: - case MUFS_DCFFS: - sbi->s_flags |= SF_MUFS; - /* fall thru */ - case FS_INTLFFS: - case FS_DCFFS: - sbi->s_flags |= SF_INTL; - break; - case MUFS_FFS: - sbi->s_flags |= SF_MUFS; - break; - case FS_FFS: - break; - case MUFS_OFS: - sbi->s_flags |= SF_MUFS; - /* fall thru */ - case FS_OFS: - sbi->s_flags |= SF_OFS; - sb->s_flags |= MS_NOEXEC; - break; - case MUFS_DCOFS: - case MUFS_INTLOFS: - sbi->s_flags |= SF_MUFS; - case FS_DCOFS: - case FS_INTLOFS: - sbi->s_flags |= SF_INTL | SF_OFS; - sb->s_flags |= MS_NOEXEC; - break; - default: - pr_err("Unknown filesystem on device %s: %08X\n", - sb->s_id, chksum); - return -EINVAL; + case MUFS_FS: + case MUFS_INTLFFS: + case MUFS_DCFFS: + sbi->s_flags |= SF_MUFS; + /* fall thru */ + case FS_INTLFFS: + case FS_DCFFS: + sbi->s_flags |= SF_INTL; + break; + case MUFS_FFS: + sbi->s_flags |= SF_MUFS; + break; + case FS_FFS: + break; + case MUFS_OFS: + sbi->s_flags |= SF_MUFS; + /* fall thru */ + case FS_OFS: + sbi->s_flags |= SF_OFS; + sb->s_flags |= MS_NOEXEC; + break; + case MUFS_DCOFS: + case MUFS_INTLOFS: + sbi->s_flags |= SF_MUFS; + case FS_DCOFS: + case FS_INTLOFS: + sbi->s_flags |= SF_INTL | SF_OFS; + sb->s_flags |= MS_NOEXEC; + break; + default: + pr_err("Unknown filesystem on device %s: %08X\n", + sb->s_id, chksum); + return -EINVAL; } if (mount_flags & SF_VERBOSE) { -- cgit v1.2.3 From c88b1e70aeaa38aa20e67e436f28c4d36c0b9f4b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 29 Jan 2015 00:17:57 -0500 Subject: configfs: configfs_create() init callback is never NULL and it never 
fails ... so make it return void and drop the check for it being non-NULL Signed-off-by: Al Viro --- fs/configfs/configfs_internal.h | 2 +- fs/configfs/dir.c | 9 +++------ fs/configfs/inode.c | 10 ++-------- 3 files changed, 6 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index a315677e44d3..ed0fdd156880 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -69,7 +69,7 @@ extern struct kmem_cache *configfs_dir_cachep; extern int configfs_is_root(struct config_item *item); extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); -extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); +extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *)); extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); extern int configfs_make_dirent(struct configfs_dirent *, diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index c9c298bd3058..6371ba19ee43 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -240,27 +240,24 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd, return 0; } -static int init_dir(struct inode * inode) +static void init_dir(struct inode * inode) { inode->i_op = &configfs_dir_inode_operations; inode->i_fop = &configfs_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); - return 0; } -static int configfs_init_file(struct inode * inode) +static void configfs_init_file(struct inode * inode) { inode->i_size = PAGE_SIZE; inode->i_fop = &configfs_file_operations; - return 0; } -static int init_symlink(struct inode * inode) +static void init_symlink(struct inode * inode) { inode->i_op = &configfs_symlink_inode_operations; - return 0; } static int create_dir(struct config_item *k, struct dentry *d) diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 65af86147154..60727db7b0a3 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -176,7 +176,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd, #endif /* CONFIG_LOCKDEP */ -int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *)) +int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *)) { int error = 0; struct inode *inode = NULL; @@ -198,13 +198,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct ino p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; configfs_set_inode_lock_class(sd, inode); - if (init) { - error = init(inode); - if (error) { - iput(inode); - return error; - } - } + init(inode); d_instantiate(dentry, inode); if (S_ISDIR(mode) || S_ISLNK(mode)) dget(dentry); /* pin link and directory dentries in core */ -- cgit v1.2.3 From 1cf97d0d3a1b0232a3fde25deac3b3fd288627e2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 29 Jan 2015 00:20:49 -0500 Subject: configfs: fold create_dir() into its only caller Signed-off-by: Al Viro --- fs/configfs/dir.c | 63 +++++++++++++++++++++++++------------------------------ 1 file changed, 29 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 6371ba19ee43..cf0db005d2f5 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -260,37 +260,6 @@ static void init_symlink(struct inode * inode) inode->i_op = &configfs_symlink_inode_operations; } -static int create_dir(struct config_item *k, struct dentry *d) 
-{ - int error; - umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; - struct dentry *p = d->d_parent; - - BUG_ON(!k); - - error = configfs_dirent_exists(p->d_fsdata, d->d_name.name); - if (!error) - error = configfs_make_dirent(p->d_fsdata, d, k, mode, - CONFIGFS_DIR | CONFIGFS_USET_CREATING); - if (!error) { - configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata); - error = configfs_create(d, mode, init_dir); - if (!error) { - inc_nlink(p->d_inode); - } else { - struct configfs_dirent *sd = d->d_fsdata; - if (sd) { - spin_lock(&configfs_dirent_lock); - list_del_init(&sd->s_sibling); - spin_unlock(&configfs_dirent_lock); - configfs_put(sd); - } - } - } - return error; -} - - /** * configfs_create_dir - create a directory for an config_item. * @item: config_itemwe're creating directory for. @@ -300,11 +269,37 @@ static int create_dir(struct config_item *k, struct dentry *d) * until it is validated by configfs_dir_set_ready() */ -static int configfs_create_dir(struct config_item * item, struct dentry *dentry) +static int configfs_create_dir(struct config_item *item, struct dentry *dentry) { - int error = create_dir(item, dentry); - if (!error) + int error; + umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; + struct dentry *p = dentry->d_parent; + + BUG_ON(!item); + + error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name); + if (unlikely(error)) + return error; + + error = configfs_make_dirent(p->d_fsdata, dentry, item, mode, + CONFIGFS_DIR | CONFIGFS_USET_CREATING); + if (unlikely(error)) + return error; + + configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata); + error = configfs_create(dentry, mode, init_dir); + if (!error) { + inc_nlink(p->d_inode); item->ci_dentry = dentry; + } else { + struct configfs_dirent *sd = dentry->d_fsdata; + if (sd) { + spin_lock(&configfs_dirent_lock); + list_del_init(&sd->s_sibling); + spin_unlock(&configfs_dirent_lock); + configfs_put(sd); + } + } return error; } -- cgit v1.2.3 From 28444a2bde8d1695447eb51362b46cf1e49b9c21 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 29 Jan 2015 00:27:57 -0500 Subject: configfs_add_file: fold into its sole caller Signed-off-by: Al Viro --- fs/configfs/configfs_internal.h | 1 - fs/configfs/file.c | 28 ++++++++++------------------ 2 files changed, 10 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index ed0fdd156880..b65d1ef532d5 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -76,7 +76,6 @@ extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *, void *, umode_t, int); extern int configfs_dirent_is_ready(struct configfs_dirent *); -extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int); extern void configfs_hash_and_remove(struct dentry * dir, const char * name); extern const unsigned char * configfs_get_name(struct configfs_dirent *sd); diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 1d1c41f1014d..56d2cdc9ae0a 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -313,21 +313,6 @@ const struct file_operations configfs_file_operations = { .release = configfs_release, }; - -int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type) -{ - struct configfs_dirent * parent_sd = dir->d_fsdata; - umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG; - int error = 0; - - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL); - error = configfs_make_dirent(parent_sd, NULL, (void *) attr, 
mode, type); - mutex_unlock(&dir->d_inode->i_mutex); - - return error; -} - - /** * configfs_create_file - create an attribute file for an item. * @item: item we're creating for. @@ -336,9 +321,16 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr) { - BUG_ON(!item || !item->ci_dentry || !attr); + struct dentry *dir = item->ci_dentry; + struct configfs_dirent *parent_sd = dir->d_fsdata; + umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG; + int error = 0; - return configfs_add_file(item->ci_dentry, attr, - CONFIGFS_ITEM_ATTR); + mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL); + error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, + CONFIGFS_ITEM_ATTR); + mutex_unlock(&dir->d_inode->i_mutex); + + return error; } -- cgit v1.2.3 From 338d00cfef07d74a072f96821c64b20f98517d72 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Tue, 17 Feb 2015 14:58:15 -0800 Subject: pnfs: Refactor the *_layout_mark_request_commit to use pnfs_layout_mark_request_commit The File Layout's filelayout_mark_request_commit() is almost the Flex File Layout's ff_layout_mark_request_commit(). And that can be reduced by calling into nfs_request_add_commit_list(). Signed-off-by: Tom Haynes Signed-off-by: Trond Myklebust --- fs/nfs/filelayout/filelayout.c | 48 +++++++--------------------------- fs/nfs/flexfilelayout/flexfilelayout.c | 38 +-------------------------- fs/nfs/pnfs.h | 4 +++ fs/nfs/pnfs_nfs.c | 30 +++++++++++++++++++++ 4 files changed, 45 insertions(+), 75 deletions(-) (limited to 'fs') diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index e1e5ea262a13..91e88a7ecef0 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -960,48 +960,20 @@ filelayout_mark_request_commit(struct nfs_page *req, { struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); u32 i, j; - struct list_head *list; - struct pnfs_commit_bucket *buckets; if (fl->commit_through_mds) { - list = &cinfo->mds->list; - spin_lock(cinfo->lock); - goto mds_commit; - } - - /* Note that we are calling nfs4_fl_calc_j_index on each page - * that ends up being committed to a data server. An attractive - * alternative is to add a field to nfs_write_data and nfs_page - * to store the value calculated in filelayout_write_pagelist - * and just use that here. - */ - j = nfs4_fl_calc_j_index(lseg, req_offset(req)); - i = select_bucket_index(fl, j); - spin_lock(cinfo->lock); - buckets = cinfo->ds->buckets; - list = &buckets[i].written; - if (list_empty(list)) { - /* Non-empty buckets hold a reference on the lseg. That ref - * is normally transferred to the COMMIT call and released - * there. It could also be released if the last req is pulled - * off due to a rewrite, in which case it will be done in - * pnfs_generic_clear_request_commit + nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo); + } else { + /* Note that we are calling nfs4_fl_calc_j_index on each page + * that ends up being committed to a data server. An attractive + * alternative is to add a field to nfs_write_data and nfs_page + * to store the value calculated in filelayout_write_pagelist + * and just use that here. 
*/ - buckets[i].wlseg = pnfs_get_lseg(lseg); + j = nfs4_fl_calc_j_index(lseg, req_offset(req)); + i = select_bucket_index(fl, j); + pnfs_layout_mark_request_commit(req, lseg, cinfo, i); } - set_bit(PG_COMMIT_TO_DS, &req->wb_flags); - cinfo->ds->nwritten++; - -mds_commit: - /* nfs_request_add_commit_list(). We need to add req to list without - * dropping cinfo lock. - */ - set_bit(PG_CLEAN, &(req)->wb_flags); - nfs_list_add_request(req, list); - cinfo->mds->ncommit++; - spin_unlock(cinfo->lock); - if (!cinfo->dreq) - nfs_mark_page_unstable(req->wb_page); } static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 423c2bc371fa..315cc68945b9 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1332,42 +1332,6 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) return PNFS_ATTEMPTED; } -static void -ff_layout_mark_request_commit(struct nfs_page *req, - struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo, - u32 ds_commit_idx) -{ - struct list_head *list; - struct pnfs_commit_bucket *buckets; - - spin_lock(cinfo->lock); - buckets = cinfo->ds->buckets; - list = &buckets[ds_commit_idx].written; - if (list_empty(list)) { - /* Non-empty buckets hold a reference on the lseg. That ref - * is normally transferred to the COMMIT call and released - * there. It could also be released if the last req is pulled - * off due to a rewrite, in which case it will be done in - * pnfs_common_clear_request_commit - */ - WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); - buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); - } - set_bit(PG_COMMIT_TO_DS, &req->wb_flags); - cinfo->ds->nwritten++; - - /* nfs_request_add_commit_list(). We need to add req to list without - * dropping cinfo lock. 
- */ - set_bit(PG_CLEAN, &(req)->wb_flags); - nfs_list_add_request(req, list); - cinfo->mds->ncommit++; - spin_unlock(cinfo->lock); - if (!cinfo->dreq) - nfs_mark_page_unstable(req->wb_page); -} - static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) { return i; @@ -1535,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .pg_write_ops = &ff_layout_pg_write_ops, .get_ds_info = ff_layout_get_ds_info, .free_deviceid_node = ff_layout_free_deveiceid_node, - .mark_request_commit = ff_layout_mark_request_commit, + .mark_request_commit = pnfs_layout_mark_request_commit, .clear_request_commit = pnfs_generic_clear_request_commit, .scan_commit_lists = pnfs_generic_scan_commit_lists, .recover_commit_reqs = pnfs_generic_recover_commit_reqs, diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 797cd6253adf..635f0865671c 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -344,6 +344,10 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags); +void pnfs_layout_mark_request_commit(struct nfs_page *req, + struct pnfs_layout_segment *lseg, + struct nfs_commit_info *cinfo, + u32 ds_commit_idx); static inline bool nfs_have_layout(struct inode *inode) { diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index fdc4f6562bb7..54e36b38fb5f 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -838,3 +838,33 @@ out_err: return NULL; } EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr); + +void +pnfs_layout_mark_request_commit(struct nfs_page *req, + struct pnfs_layout_segment *lseg, + struct nfs_commit_info *cinfo, + u32 ds_commit_idx) +{ + struct list_head *list; + struct pnfs_commit_bucket *buckets; + + spin_lock(cinfo->lock); + buckets = cinfo->ds->buckets; + list = &buckets[ds_commit_idx].written; + if (list_empty(list)) { + /* Non-empty buckets hold a reference on the lseg. That ref + * is normally transferred to the COMMIT call and released + * there. 
It could also be released if the last req is pulled + * off due to a rewrite, in which case it will be done in + * pnfs_common_clear_request_commit + */ + WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); + buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); + } + set_bit(PG_COMMIT_TO_DS, &req->wb_flags); + cinfo->ds->nwritten++; + spin_unlock(cinfo->lock); + + nfs_request_add_commit_list(req, list, cinfo); +} +EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit); -- cgit v1.2.3 From 79969dd12e8756f64a999992c0536ccd91bf6e54 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 11:30:18 -0800 Subject: NFSv4.1: Clean up create_session Don't decode directly into the shared struct session Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 42 +++++++++++++++++++++++++++++------------- fs/nfs/nfs4session.h | 6 ++++++ fs/nfs/nfs4xdr.c | 16 +++++++--------- include/linux/nfs_xdr.h | 8 +++++++- 4 files changed, 49 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 2e7c9f7a6f7c..006bfa3da55b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7166,10 +7166,11 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) args->bc_attrs.max_reqs); } -static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) +static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, + struct nfs41_create_session_res *res) { struct nfs4_channel_attrs *sent = &args->fc_attrs; - struct nfs4_channel_attrs *rcvd = &session->fc_attrs; + struct nfs4_channel_attrs *rcvd = &res->fc_attrs; if (rcvd->max_resp_sz > sent->max_resp_sz) return -EINVAL; @@ -7188,10 +7189,11 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args return 0; } -static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) +static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, + struct nfs41_create_session_res *res) { struct nfs4_channel_attrs *sent = &args->bc_attrs; - struct nfs4_channel_attrs *rcvd = &session->bc_attrs; + struct nfs4_channel_attrs *rcvd = &res->bc_attrs; if (rcvd->max_rqst_sz > sent->max_rqst_sz) return -EINVAL; @@ -7208,14 +7210,23 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args } static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, - struct nfs4_session *session) + struct nfs41_create_session_res *res) { int ret; - ret = nfs4_verify_fore_channel_attrs(args, session); + ret = nfs4_verify_fore_channel_attrs(args, res); if (ret) return ret; - return nfs4_verify_back_channel_attrs(args, session); + return nfs4_verify_back_channel_attrs(args, res); +} + +static void nfs4_update_session(struct nfs4_session *session, + struct nfs41_create_session_res *res) +{ + nfs4_copy_sessionid(&session->sess_id, &res->sessionid); + session->flags = res->flags; + memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); + memcpy(&session->bc_attrs, &res->bc_attrs, sizeof(session->bc_attrs)); } static int _nfs4_proc_create_session(struct nfs_client *clp, @@ -7224,11 +7235,12 @@ static int _nfs4_proc_create_session(struct nfs_client *clp, struct nfs4_session *session = clp->cl_session; struct nfs41_create_session_args args = { .client = clp, + .clientid = clp->cl_clientid, + .seqid = clp->cl_seqid, .cb_program = NFS4_CALLBACK, }; - struct nfs41_create_session_res res = { - .client = clp, - }; + struct 
nfs41_create_session_res res; + struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], .rpc_argp = &args, @@ -7245,11 +7257,15 @@ static int _nfs4_proc_create_session(struct nfs_client *clp, if (!status) { /* Verify the session's negotiated channel_attrs values */ - status = nfs4_verify_channel_attrs(&args, session); + status = nfs4_verify_channel_attrs(&args, &res); /* Increment the clientid slot sequence id */ - clp->cl_seqid++; + if (clp->cl_seqid == res.seqid) + clp->cl_seqid++; + if (status) + goto out; + nfs4_update_session(session, &res); } - +out: return status; } diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index b34ada9bc6a2..fc46c7455898 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -118,6 +118,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp) return 0; } +static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst, + const struct nfs4_sessionid *src) +{ + memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN); +} + #ifdef CONFIG_CRC32 /* * nfs_session_id_hash - calculate the crc32 hash for the session id diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e23a0a664e12..248903b138a8 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1806,8 +1806,8 @@ static void encode_create_session(struct xdr_stream *xdr, encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); - p = xdr_encode_hyper(p, clp->cl_clientid); - *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ + p = xdr_encode_hyper(p, args->clientid); + *p++ = cpu_to_be32(args->seqid); /*Sequence id */ *p++ = cpu_to_be32(args->flags); /*flags */ /* Fore Channel */ @@ -5641,12 +5641,10 @@ static int decode_create_session(struct xdr_stream *xdr, { __be32 *p; int status; - struct nfs_client *clp = res->client; - struct nfs4_session *session = clp->cl_session; status = decode_op_hdr(xdr, OP_CREATE_SESSION); if (!status) - status = decode_sessionid(xdr, &session->sess_id); + status = decode_sessionid(xdr, &res->sessionid); if (unlikely(status)) return status; @@ -5654,13 +5652,13 @@ static int decode_create_session(struct xdr_stream *xdr, p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; - clp->cl_seqid = be32_to_cpup(p++); - session->flags = be32_to_cpup(p); + res->seqid = be32_to_cpup(p++); + res->flags = be32_to_cpup(p); /* Channel attributes */ - status = decode_chan_attrs(xdr, &session->fc_attrs); + status = decode_chan_attrs(xdr, &res->fc_attrs); if (!status) - status = decode_chan_attrs(xdr, &session->bc_attrs); + status = decode_chan_attrs(xdr, &res->bc_attrs); return status; out_overflow: print_overflow_msg(__func__, xdr); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9a39132fda49..1af12fc16e98 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1185,6 +1185,8 @@ struct nfs41_exchange_id_res { struct nfs41_create_session_args { struct nfs_client *client; + u64 clientid; + uint32_t seqid; uint32_t flags; uint32_t cb_program; struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ @@ -1192,7 +1194,11 @@ struct nfs41_create_session_args { }; struct nfs41_create_session_res { - struct nfs_client *client; + struct nfs4_sessionid sessionid; + uint32_t seqid; + uint32_t flags; + struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ + struct nfs4_channel_attrs bc_attrs; /* Back Channel */ }; struct nfs41_reclaim_complete_args { -- cgit v1.2.3 From b1c0df5fadc917ba3724ae9fdfcc6f97db34736a 
Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 11:34:58 -0800 Subject: NFSv4.1: Don't set up a backchannel if the server didn't agree to do so If the server doesn't agree to out backchannel setup request, then don't set one up. Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 2 ++ fs/nfs/nfs4proc.c | 7 ++++++- fs/nfs/nfs4session.c | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index e36a9d78ea49..197806fb87ff 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -427,6 +427,8 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, if (clp == NULL) goto out; + if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) + goto out; tbl = &clp->cl_session->bc_slot_table; spin_lock(&tbl->slot_tbl_lock); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 006bfa3da55b..59853797825c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7195,6 +7195,8 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args struct nfs4_channel_attrs *sent = &args->bc_attrs; struct nfs4_channel_attrs *rcvd = &res->bc_attrs; + if (!(res->flags & SESSION4_BACK_CHAN)) + goto out; if (rcvd->max_rqst_sz > sent->max_rqst_sz) return -EINVAL; if (rcvd->max_resp_sz < sent->max_resp_sz) @@ -7206,6 +7208,7 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args return -EINVAL; if (rcvd->max_reqs != sent->max_reqs) return -EINVAL; +out: return 0; } @@ -7226,7 +7229,9 @@ static void nfs4_update_session(struct nfs4_session *session, nfs4_copy_sessionid(&session->sess_id, &res->sessionid); session->flags = res->flags; memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); - memcpy(&session->bc_attrs, &res->bc_attrs, sizeof(session->bc_attrs)); + if (res->flags & SESSION4_BACK_CHAN) + memcpy(&session->bc_attrs, &res->bc_attrs, + sizeof(session->bc_attrs)); } static int _nfs4_proc_create_session(struct nfs_client *clp, diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c index e799dc3c3b1d..e23366effcfb 100644 --- a/fs/nfs/nfs4session.c +++ b/fs/nfs/nfs4session.c @@ -450,7 +450,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses) tbl = &ses->fc_slot_table; tbl->session = ses; status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); - if (status) /* -ENOMEM */ + if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */ return status; /* Back channel */ tbl = &ses->bc_slot_table; -- cgit v1.2.3 From 7e9f07388779ccc5067f206357d9791aeef38864 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 12:07:19 -0800 Subject: NFSv4.1: Always set up a forward channel when binding the session Currently, the client requests a back channel or a bidirectional connection when binding a new TCP channel to an existing session. Fix that to ask for a forward channel or bidirectional. 
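For reference (a sketch of the direction values as defined by RFC 5661's channel_dir_from_client4 enum, assumed here rather than quoted from this series; the kernel mirrors them as the NFS4_CDFC4_* constants):

    CDFC4_FORE          = 0x1   /* bind the connection to the fore channel only */
    CDFC4_BACK          = 0x2   /* bind the connection to the back channel only */
    CDFC4_FORE_OR_BOTH  = 0x3   /* fore channel, or both if the server agrees */
    CDFC4_BACK_OR_BOTH  = 0x7   /* back channel, or both if the server agrees */

Asking for CDFC4_BACK_OR_BOTH therefore never guarantees a usable fore channel on the newly bound connection, which is what the client actually needs.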
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 248903b138a8..97d4bdf53541 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1724,7 +1724,7 @@ static void encode_bind_conn_to_session(struct xdr_stream *xdr, decode_bind_conn_to_session_maxsz, hdr); encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); p = xdr_reserve_space(xdr, 8); - *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH); + *p++ = cpu_to_be32(NFS4_CDFC4_FORE_OR_BOTH); *p = 0; /* use_conn_in_rdma_mode = False */ } -- cgit v1.2.3 From 71a097c6de9a49afd0f96b3ecef70c4eb04efde7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 09:27:18 -0800 Subject: NFSv4.1: Clean up bind_conn_to_session We don't need to fake up an entire session in order retrieve the arguments. Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 28 ++++++++++++++-------------- fs/nfs/nfs4xdr.c | 16 ++++++++-------- include/linux/nfs_xdr.h | 9 ++++++++- 3 files changed, 30 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 59853797825c..88180ac5ea0e 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6648,47 +6648,47 @@ nfs41_same_server_scope(struct nfs41_server_scope *a, int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) { int status; + struct nfs41_bind_conn_to_session_args args = { + .client = clp, + .dir = NFS4_CDFC4_FORE_OR_BOTH, + }; struct nfs41_bind_conn_to_session_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], - .rpc_argp = clp, + .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = cred, }; dprintk("--> %s\n", __func__); - res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); - if (unlikely(res.session == NULL)) { - status = -ENOMEM; - goto out; - } + nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); + if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) + args.dir = NFS4_CDFC4_FORE; status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); trace_nfs4_bind_conn_to_session(clp, status); if (status == 0) { - if (memcmp(res.session->sess_id.data, + if (memcmp(res.sessionid.data, clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { dprintk("NFS: %s: Session ID mismatch\n", __func__); status = -EIO; - goto out_session; + goto out; } - if (res.dir != NFS4_CDFS4_BOTH) { + if ((res.dir & args.dir) != res.dir || res.dir == 0) { dprintk("NFS: %s: Unexpected direction from server\n", __func__); status = -EIO; - goto out_session; + goto out; } - if (res.use_conn_in_rdma_mode) { + if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { dprintk("NFS: %s: Server returned RDMA mode = true\n", __func__); status = -EIO; - goto out_session; + goto out; } } -out_session: - kfree(res.session); out: dprintk("<-- %s status= %d\n", __func__, status); return status; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 97d4bdf53541..5c399ec41079 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1715,17 +1715,17 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru #if defined(CONFIG_NFS_V4_1) /* NFSv4.1 operations */ static void encode_bind_conn_to_session(struct xdr_stream *xdr, - struct nfs4_session *session, + struct nfs41_bind_conn_to_session_args *args, struct compound_hdr *hdr) { __be32 *p; encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION, decode_bind_conn_to_session_maxsz, hdr); - 
encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); + encode_opaque_fixed(xdr, args->sessionid.data, NFS4_MAX_SESSIONID_LEN); p = xdr_reserve_space(xdr, 8); - *p++ = cpu_to_be32(NFS4_CDFC4_FORE_OR_BOTH); - *p = 0; /* use_conn_in_rdma_mode = False */ + *p++ = cpu_to_be32(args->dir); + *p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0); } static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map) @@ -2734,14 +2734,14 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req, */ static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req, struct xdr_stream *xdr, - struct nfs_client *clp) + struct nfs41_bind_conn_to_session_args *args) { struct compound_hdr hdr = { - .minorversion = clp->cl_mvops->minor_version, + .minorversion = args->client->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); - encode_bind_conn_to_session(xdr, clp->cl_session, &hdr); + encode_bind_conn_to_session(xdr, args, &hdr); encode_nops(&hdr); } @@ -5613,7 +5613,7 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr, status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION); if (!status) - status = decode_sessionid(xdr, &res->session->sess_id); + status = decode_sessionid(xdr, &res->sessionid); if (unlikely(status)) return status; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 1af12fc16e98..4cb3eaa89cf7 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1167,8 +1167,15 @@ struct nfs41_impl_id { struct nfstime4 date; }; +struct nfs41_bind_conn_to_session_args { + struct nfs_client *client; + struct nfs4_sessionid sessionid; + u32 dir; + bool use_conn_in_rdma_mode; +}; + struct nfs41_bind_conn_to_session_res { - struct nfs4_session *session; + struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; }; -- cgit v1.2.3 From 03f4fcb02884859b584c709652bb48f8125ceb45 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 5 Jan 2015 11:04:04 +0800 Subject: ceph: handle SESSION_FORCE_RO message mark session as readonly and wake up all cap waiters. 
Signed-off-by: Yan, Zheng --- fs/ceph/caps.c | 15 +++++++++++++++ fs/ceph/mds_client.c | 10 ++++++++++ fs/ceph/mds_client.h | 1 + include/linux/ceph/ceph_fs.h | 1 + 4 files changed, 27 insertions(+) (limited to 'fs') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index b93c631c6c87..d0618e8412fd 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2143,6 +2143,21 @@ again: ret = 1; } } else { + int session_readonly = false; + if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) { + struct ceph_mds_session *s = ci->i_auth_cap->session; + spin_lock(&s->s_cap_lock); + session_readonly = s->s_readonly; + spin_unlock(&s->s_cap_lock); + } + if (session_readonly) { + dout("get_cap_refs %p needed %s but mds%d readonly\n", + inode, ceph_cap_string(need), ci->i_auth_cap->mds); + *err = -EROFS; + ret = 1; + goto out_unlock; + } + dout("get_cap_refs %p have %s needed %s\n", inode, ceph_cap_string(have), ceph_cap_string(need)); } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index d2171f4a6980..c6c33b411a2f 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2580,6 +2580,14 @@ static void handle_session(struct ceph_mds_session *session, send_flushmsg_ack(mdsc, session, seq); break; + case CEPH_SESSION_FORCE_RO: + dout("force_session_readonly %p\n", session); + spin_lock(&session->s_cap_lock); + session->s_readonly = true; + spin_unlock(&session->s_cap_lock); + wake_up_session_caps(session, 0); + break; + default: pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); WARN_ON(1); @@ -2791,6 +2799,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, spin_unlock(&session->s_gen_ttl_lock); spin_lock(&session->s_cap_lock); + /* don't know if session is readonly */ + session->s_readonly = 0; /* * notify __ceph_remove_cap() that we are composing cap reconnect. * If a cap get released before being added to the cap reconnect, diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index e2817d00f7d9..a87b92f500bb 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -137,6 +137,7 @@ struct ceph_mds_session { int s_nr_caps, s_trim_caps; int s_num_cap_releases; int s_cap_reconnect; + int s_readonly; struct list_head s_cap_releases; /* waiting cap_release messages */ struct list_head s_cap_releases_done; /* ready to send */ struct ceph_cap *s_cap_iterator; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 69e2c9e2305b..31eb03d0c766 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -271,6 +271,7 @@ enum { CEPH_SESSION_RECALL_STATE, CEPH_SESSION_FLUSHMSG, CEPH_SESSION_FLUSHMSG_ACK, + CEPH_SESSION_FORCE_RO, }; extern const char *ceph_session_op_name(int op); -- cgit v1.2.3 From 671762f8071563847e50f45c6fb0b329e6e8cf9a Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sun, 4 Jan 2015 00:44:54 +0100 Subject: ceph: acl: Remove unused function Remove the function ceph_get_cached_acl() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. 
Signed-off-by: Rickard Strandqvist Reviewed-by: Yan, Zheng --- fs/ceph/acl.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'fs') diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 5bd853ba44ff..64fa248343f6 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -40,20 +40,6 @@ static inline void ceph_set_cached_acl(struct inode *inode, spin_unlock(&ci->i_ceph_lock); } -static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode, - int type) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - struct posix_acl *acl = ACL_NOT_CACHED; - - spin_lock(&ci->i_ceph_lock); - if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0)) - acl = get_cached_acl(inode, type); - spin_unlock(&ci->i_ceph_lock); - - return acl; -} - struct posix_acl *ceph_get_acl(struct inode *inode, int type) { int size; -- cgit v1.2.3 From 1487a688d8ea596e6710b0d256300ab10ce99284 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 6 Jan 2015 15:29:14 +0800 Subject: ceph: properly zero data pages for file holes. A bug was found in striped_read() of fs/ceph/file.c. striped_read() calls ceph_zero_page_vector_range(). The first argument, page_align + read + ret, passed to ceph_zero_page_vector_range() is wrong. When a file has holes, this wrong parameter may cause memory corruption in either kernel space or user space. Kernel space memory may be corrupted in the case of non-direct IO; user space memory may be corrupted in the case of direct IO. In the latter case, the application doing direct IO may crash due to memory corruption, as we have experienced. The correct value should be initial_align + read + ret, where initial_align = o_direct ? buf_align : io_align. Unlike page_align, which is the current page offset, initial_align is the initial page offset, and it is what should be used to calculate the page and offset in ceph_zero_page_vector_range(). Reported-by: caifeng zhu Signed-off-by: Yan, Zheng --- fs/ceph/file.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ce74b394b49d..663da44c06b6 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -392,13 +392,14 @@ more: if (ret >= 0) { int didpages; if (was_short && (pos + ret < inode->i_size)) { - u64 tmp = min(this_len - ret, - inode->i_size - pos - ret); + int zlen = min(this_len - ret, + inode->i_size - pos - ret); + int zoff = (o_direct ? buf_align : io_align) + + read + ret; dout(" zero gap %llu to %llu\n", - pos + ret, pos + ret + tmp); - ceph_zero_page_vector_range(page_align + read + ret, - tmp, pages); - ret += tmp; + pos + ret, pos + ret + zlen); + ceph_zero_page_vector_range(zoff, zlen, pages); + ret += zlen; } didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; -- cgit v1.2.3 From 982d6011bc30a26e8a3d546e0e7fc7db2c255d85 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 23 Dec 2014 15:30:54 +0800 Subject: ceph: improve reference tracking for snaprealm When a snaprealm is created, its initial reference count is zero. But in some rare cases the newly created snaprealm is not referenced by anyone, so a snaprealm with a zero reference count is never freed. The fix is to set the reference count of a newly created snaprealm to 1. The reference is returned to the function that requested creation of the snaprealm; when that function finishes its job, it releases the reference.
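Roughly, the resulting lifetime rule is that whoever obtains a realm owns a reference and must drop it; a minimal sketch of the intended usage (illustrative only, not literal code from the patch):

    struct ceph_snap_realm *realm;

    realm = ceph_lookup_snap_realm(mdsc, ino);         /* lookup now takes a reference */
    if (!realm) {
        realm = ceph_create_snap_realm(mdsc, ino);     /* created with nref == 1 for the caller */
        if (IS_ERR(realm))
            return PTR_ERR(realm);
    }
    /* ... use the realm ... */
    ceph_put_snap_realm(mdsc, realm);                  /* caller releases its reference */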
Signed-off-by: Yan, Zheng --- fs/ceph/caps.c | 24 +++++++++++++++-------- fs/ceph/mds_client.c | 9 +++++++-- fs/ceph/snap.c | 54 ++++++++++++++++++++++++++++++++++++---------------- fs/ceph/super.h | 3 ++- 4 files changed, 63 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index d0618e8412fd..8ed1192606d9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -577,7 +577,6 @@ void ceph_add_cap(struct inode *inode, struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc, realmino); if (realm) { - ceph_get_snap_realm(mdsc, realm); spin_lock(&realm->inodes_with_caps_lock); ci->i_snap_realm = realm; list_add(&ci->i_snap_realm_item, @@ -2447,13 +2446,13 @@ static void invalidate_aliases(struct inode *inode) */ static void handle_cap_grant(struct ceph_mds_client *mdsc, struct inode *inode, struct ceph_mds_caps *grant, - void *snaptrace, int snaptrace_len, u64 inline_version, void *inline_data, int inline_len, struct ceph_buffer *xattr_buf, struct ceph_mds_session *session, struct ceph_cap *cap, int issued) __releases(ci->i_ceph_lock) + __releases(mdsc->snap_rwsem) { struct ceph_inode_info *ci = ceph_inode(inode); int mds = session->s_mds; @@ -2654,10 +2653,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, spin_unlock(&ci->i_ceph_lock); if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) { - down_write(&mdsc->snap_rwsem); - ceph_update_snap_trace(mdsc, snaptrace, - snaptrace + snaptrace_len, false); - downgrade_write(&mdsc->snap_rwsem); kick_flushing_inode_caps(mdsc, session, inode); up_read(&mdsc->snap_rwsem); if (newcaps & ~issued) @@ -3067,6 +3062,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, struct ceph_cap *cap; struct ceph_mds_caps *h; struct ceph_mds_cap_peer *peer = NULL; + struct ceph_snap_realm *realm; int mds = session->s_mds; int op, issued; u32 seq, mseq; @@ -3168,11 +3164,23 @@ void ceph_handle_caps(struct ceph_mds_session *session, goto done_unlocked; case CEPH_CAP_OP_IMPORT: + realm = NULL; + if (snaptrace_len) { + down_write(&mdsc->snap_rwsem); + ceph_update_snap_trace(mdsc, snaptrace, + snaptrace + snaptrace_len, + false, &realm); + downgrade_write(&mdsc->snap_rwsem); + } else { + down_read(&mdsc->snap_rwsem); + } handle_cap_import(mdsc, inode, h, peer, session, &cap, &issued); - handle_cap_grant(mdsc, inode, h, snaptrace, snaptrace_len, + handle_cap_grant(mdsc, inode, h, inline_version, inline_data, inline_len, msg->middle, session, cap, issued); + if (realm) + ceph_put_snap_realm(mdsc, realm); goto done_unlocked; } @@ -3192,7 +3200,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, case CEPH_CAP_OP_GRANT: __ceph_caps_issued(ci, &issued); issued |= __ceph_caps_dirty(ci); - handle_cap_grant(mdsc, inode, h, NULL, 0, + handle_cap_grant(mdsc, inode, h, inline_version, inline_data, inline_len, msg->middle, session, cap, issued); goto done_unlocked; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index c6c33b411a2f..85c67ae03e46 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2286,6 +2286,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) struct ceph_mds_request *req; struct ceph_mds_reply_head *head = msg->front.iov_base; struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ + struct ceph_snap_realm *realm; u64 tid; int err, result; int mds = session->s_mds; @@ -2401,11 +2402,13 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) } /* snap trace */ + realm = NULL; if (rinfo->snapblob_len) { 
down_write(&mdsc->snap_rwsem); ceph_update_snap_trace(mdsc, rinfo->snapblob, - rinfo->snapblob + rinfo->snapblob_len, - le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP); + rinfo->snapblob + rinfo->snapblob_len, + le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, + &realm); downgrade_write(&mdsc->snap_rwsem); } else { down_read(&mdsc->snap_rwsem); @@ -2423,6 +2426,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) mutex_unlock(&req->r_fill_mutex); up_read(&mdsc->snap_rwsem); + if (realm) + ceph_put_snap_realm(mdsc, realm); out_err: mutex_lock(&mdsc->mutex); if (!req->r_aborted) { diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index ce35fbd4ba5d..a97e39f09ba6 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -70,13 +70,11 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc, * safe. we do need to protect against concurrent empty list * additions, however. */ - if (atomic_read(&realm->nref) == 0) { + if (atomic_inc_return(&realm->nref) == 1) { spin_lock(&mdsc->snap_empty_lock); list_del_init(&realm->empty_item); spin_unlock(&mdsc->snap_empty_lock); } - - atomic_inc(&realm->nref); } static void __insert_snap_realm(struct rb_root *root, @@ -116,7 +114,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm( if (!realm) return ERR_PTR(-ENOMEM); - atomic_set(&realm->nref, 0); /* tree does not take a ref */ + atomic_set(&realm->nref, 1); /* for caller */ realm->ino = ino; INIT_LIST_HEAD(&realm->children); INIT_LIST_HEAD(&realm->child_item); @@ -134,8 +132,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm( * * caller must hold snap_rwsem for write. */ -struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, - u64 ino) +static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc, + u64 ino) { struct rb_node *n = mdsc->snap_realms.rb_node; struct ceph_snap_realm *r; @@ -154,6 +152,16 @@ struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, return NULL; } +struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, + u64 ino) +{ + struct ceph_snap_realm *r; + r = __lookup_snap_realm(mdsc, ino); + if (r) + ceph_get_snap_realm(mdsc, r); + return r; +} + static void __put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm); @@ -273,7 +281,6 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc, } realm->parent_ino = parentino; realm->parent = parent; - ceph_get_snap_realm(mdsc, parent); list_add(&realm->child_item, &parent->children); return 1; } @@ -631,12 +638,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) * Caller must hold snap_rwsem for write. 
*/ int ceph_update_snap_trace(struct ceph_mds_client *mdsc, - void *p, void *e, bool deletion) + void *p, void *e, bool deletion, + struct ceph_snap_realm **realm_ret) { struct ceph_mds_snap_realm *ri; /* encoded */ __le64 *snaps; /* encoded */ __le64 *prior_parent_snaps; /* encoded */ - struct ceph_snap_realm *realm; + struct ceph_snap_realm *realm = NULL; + struct ceph_snap_realm *first_realm = NULL; int invalidate = 0; int err = -ENOMEM; LIST_HEAD(dirty_realms); @@ -704,13 +713,18 @@ more: dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, realm, invalidate, p, e); - if (p < e) - goto more; - /* invalidate when we reach the _end_ (root) of the trace */ - if (invalidate) + if (invalidate && p >= e) rebuild_snap_realms(realm); + if (!first_realm) + first_realm = realm; + else + ceph_put_snap_realm(mdsc, realm); + + if (p < e) + goto more; + /* * queue cap snaps _after_ we've built the new snap contexts, * so that i_head_snapc can be set appropriately. @@ -721,12 +735,21 @@ more: queue_realm_cap_snaps(realm); } + if (realm_ret) + *realm_ret = first_realm; + else + ceph_put_snap_realm(mdsc, first_realm); + __cleanup_empty_realms(mdsc); return 0; bad: err = -EINVAL; fail: + if (realm && !IS_ERR(realm)) + ceph_put_snap_realm(mdsc, realm); + if (first_realm) + ceph_put_snap_realm(mdsc, first_realm); pr_err("update_snap_trace error %d\n", err); return err; } @@ -844,7 +867,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, if (IS_ERR(realm)) goto out; } - ceph_get_snap_realm(mdsc, realm); dout("splitting snap_realm %llx %p\n", realm->ino, realm); for (i = 0; i < num_split_inos; i++) { @@ -905,7 +927,7 @@ skip_inode: /* we may have taken some of the old realm's children. */ for (i = 0; i < num_split_realms; i++) { struct ceph_snap_realm *child = - ceph_lookup_snap_realm(mdsc, + __lookup_snap_realm(mdsc, le64_to_cpu(split_realms[i])); if (!child) continue; @@ -918,7 +940,7 @@ skip_inode: * snap, we can avoid queueing cap_snaps. */ ceph_update_snap_trace(mdsc, p, e, - op == CEPH_SNAP_OP_DESTROY); + op == CEPH_SNAP_OP_DESTROY, NULL); if (op == CEPH_SNAP_OP_SPLIT) /* we took a reference when we created the realm, above */ diff --git a/fs/ceph/super.h b/fs/ceph/super.h index e1aa32d0759d..72bc05a73b69 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -693,7 +693,8 @@ extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc, extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm); extern int ceph_update_snap_trace(struct ceph_mds_client *m, - void *p, void *e, bool deletion); + void *p, void *e, bool deletion, + struct ceph_snap_realm **realm_ret); extern void ceph_handle_snap(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg); -- cgit v1.2.3 From d3383a8e37f802818cde4cb489bb0735db637cf0 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 8 Jan 2015 21:30:12 +0800 Subject: ceph: avoid block operation when !TASK_RUNNING (ceph_mdsc_sync) check_cap_flush() calls mutex_lock(), which may block. 
So we can't use it as condition check function for wait_event(); Signed-off-by: Yan, Zheng --- fs/ceph/caps.c | 2 +- fs/ceph/mds_client.c | 51 ++++++++++++++++++++++++++++++++++----------------- 2 files changed, 35 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 8ed1192606d9..844b57cb52bd 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1450,8 +1450,8 @@ static int __mark_caps_flushing(struct inode *inode, spin_lock(&mdsc->cap_dirty_lock); list_del_init(&ci->i_dirty_item); - ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; if (list_empty(&ci->i_flushing_item)) { + ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); mdsc->num_cap_flushing++; dout(" inode %p now flushing seq %lld\n", inode, diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 85c67ae03e46..fdf5cc8737ee 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1464,19 +1464,33 @@ out_unlocked: return err; } +static int check_cap_flush(struct inode *inode, u64 want_flush_seq) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int ret; + spin_lock(&ci->i_ceph_lock); + if (ci->i_flushing_caps) + ret = ci->i_cap_flush_seq >= want_flush_seq; + else + ret = 1; + spin_unlock(&ci->i_ceph_lock); + return ret; +} + /* * flush all dirty inode data to disk. * * returns true if we've flushed through want_flush_seq */ -static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) +static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) { - int mds, ret = 1; + int mds; dout("check_cap_flush want %lld\n", want_flush_seq); mutex_lock(&mdsc->mutex); - for (mds = 0; ret && mds < mdsc->max_sessions; mds++) { + for (mds = 0; mds < mdsc->max_sessions; mds++) { struct ceph_mds_session *session = mdsc->sessions[mds]; + struct inode *inode = NULL; if (!session) continue; @@ -1489,29 +1503,29 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) list_entry(session->s_cap_flushing.next, struct ceph_inode_info, i_flushing_item); - struct inode *inode = &ci->vfs_inode; - spin_lock(&ci->i_ceph_lock); - if (ci->i_cap_flush_seq <= want_flush_seq) { + if (!check_cap_flush(&ci->vfs_inode, want_flush_seq)) { dout("check_cap_flush still flushing %p " - "seq %lld <= %lld to mds%d\n", inode, - ci->i_cap_flush_seq, want_flush_seq, - session->s_mds); - ret = 0; + "seq %lld <= %lld to mds%d\n", + &ci->vfs_inode, ci->i_cap_flush_seq, + want_flush_seq, session->s_mds); + inode = igrab(&ci->vfs_inode); } - spin_unlock(&ci->i_ceph_lock); } mutex_unlock(&session->s_mutex); ceph_put_mds_session(session); - if (!ret) - return ret; + if (inode) { + wait_event(mdsc->cap_flushing_wq, + check_cap_flush(inode, want_flush_seq)); + iput(inode); + } + mutex_lock(&mdsc->mutex); } mutex_unlock(&mdsc->mutex); dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq); - return ret; } /* @@ -3447,14 +3461,17 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) dout("sync\n"); mutex_lock(&mdsc->mutex); want_tid = mdsc->last_tid; - want_flush = mdsc->cap_flush_seq; mutex_unlock(&mdsc->mutex); - dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); ceph_flush_dirty_caps(mdsc); + spin_lock(&mdsc->cap_dirty_lock); + want_flush = mdsc->cap_flush_seq; + spin_unlock(&mdsc->cap_dirty_lock); + + dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); wait_unsafe_requests(mdsc, want_tid); - wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush)); + 
wait_caps_flush(mdsc, want_flush); } /* -- cgit v1.2.3 From c4d4a582c538e890f09c338bc3063c28dfdc9ae5 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 9 Jan 2015 15:56:18 +0800 Subject: ceph: avoid block operation when !TASK_RUNNING (ceph_get_caps) we should not do block operation in wait_event_interruptible()'s condition check function, but reading inline data can block. so move the read inline data code to ceph_get_caps() Signed-off-by: Yan, Zheng --- fs/ceph/caps.c | 86 ++++++++++++++++++++++++++++------------------------------ 1 file changed, 42 insertions(+), 44 deletions(-) (limited to 'fs') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 844b57cb52bd..8172775428a0 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2072,17 +2072,16 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) * requested from the MDS. */ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, - loff_t endoff, int *got, struct page **pinned_page, - int *check_max, int *err) + loff_t endoff, int *got, int *check_max, int *err) { struct inode *inode = &ci->vfs_inode; int ret = 0; - int have, implemented, _got = 0; + int have, implemented; int file_wanted; dout("get_cap_refs %p need %s want %s\n", inode, ceph_cap_string(need), ceph_cap_string(want)); -again: + spin_lock(&ci->i_ceph_lock); /* make sure file is actually open */ @@ -2137,8 +2136,8 @@ again: inode, ceph_cap_string(have), ceph_cap_string(not), ceph_cap_string(revoking)); if ((revoking & not) == 0) { - _got = need | (have & want); - __take_cap_refs(ci, _got); + *got = need | (have & want); + __take_cap_refs(ci, *got); ret = 1; } } else { @@ -2163,39 +2162,8 @@ again: out_unlock: spin_unlock(&ci->i_ceph_lock); - if (ci->i_inline_version != CEPH_INLINE_NONE && - (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && - i_size_read(inode) > 0) { - int ret1; - struct page *page = find_get_page(inode->i_mapping, 0); - if (page) { - if (PageUptodate(page)) { - *pinned_page = page; - goto out; - } - page_cache_release(page); - } - /* - * drop cap refs first because getattr while holding - * caps refs can cause deadlock. 
- */ - ceph_put_cap_refs(ci, _got); - _got = 0; - - /* getattr request will bring inline data into page cache */ - ret1 = __ceph_do_getattr(inode, NULL, - CEPH_STAT_CAP_INLINE_DATA, true); - if (ret1 >= 0) { - ret = 0; - goto again; - } - *err = ret1; - ret = 1; - } -out: dout("get_cap_refs %p ret %d got %s\n", inode, - ret, ceph_cap_string(_got)); - *got = _got; + ret, ceph_cap_string(*got)); return ret; } @@ -2235,22 +2203,52 @@ static void check_max_size(struct inode *inode, loff_t endoff) int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, loff_t endoff, int *got, struct page **pinned_page) { - int check_max, ret, err; + int _got, check_max, ret, err = 0; retry: if (endoff > 0) check_max_size(&ci->vfs_inode, endoff); + _got = 0; check_max = 0; - err = 0; ret = wait_event_interruptible(ci->i_cap_wq, - try_get_cap_refs(ci, need, want, endoff, - got, pinned_page, - &check_max, &err)); + try_get_cap_refs(ci, need, want, endoff, + &_got, &check_max, &err)); if (err) ret = err; + if (ret < 0) + return ret; + if (check_max) goto retry; - return ret; + + if (ci->i_inline_version != CEPH_INLINE_NONE && + (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && + i_size_read(&ci->vfs_inode) > 0) { + struct page *page = find_get_page(ci->vfs_inode.i_mapping, 0); + if (page) { + if (PageUptodate(page)) { + *pinned_page = page; + goto out; + } + page_cache_release(page); + } + /* + * drop cap refs first because getattr while holding + * caps refs can cause deadlock. + */ + ceph_put_cap_refs(ci, _got); + _got = 0; + + /* getattr request will bring inline data into page cache */ + ret = __ceph_do_getattr(&ci->vfs_inode, NULL, + CEPH_STAT_CAP_INLINE_DATA, true); + if (ret < 0) + return ret; + goto retry; + } +out: + *got = _got; + return 0; } /* -- cgit v1.2.3 From 86d8f67b26a8b30228b5177b7e594bbc89798a23 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 9 Jan 2015 17:00:42 +0800 Subject: ceph: avoid block operation when !TASK_RUNNING (ceph_mdsc_close_sessions) use an atomic variable to track number of sessions, this can avoid block operation inside wait loops. 
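For background, wait_event() sets the task state to TASK_UNINTERRUPTIBLE before evaluating its condition, so the condition expression must not sleep; a condition built on a plain atomic read is safe, while one that takes mdsc->mutex is not. A minimal sketch of the difference (illustrative only; count_sessions_locked() is a made-up name standing in for the old helper that walked mdsc->sessions[] under the mutex):

    /* unsafe: mutex_lock() inside the condition may sleep while !TASK_RUNNING */
    wait_event(mdsc->session_close_wq, count_sessions_locked(mdsc) == 0);

    /* safe: atomic_read() never sleeps */
    wait_event(mdsc->session_close_wq, atomic_read(&mdsc->num_sessions) == 0);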
Signed-off-by: Yan, Zheng --- fs/ceph/mds_client.c | 13 ++++--------- fs/ceph/mds_client.h | 1 + 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fdf5cc8737ee..c90ca99331be 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -480,6 +480,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, mdsc->max_sessions = newmax; } mdsc->sessions[mds] = s; + atomic_inc(&mdsc->num_sessions); atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, @@ -503,6 +504,7 @@ static void __unregister_session(struct ceph_mds_client *mdsc, mdsc->sessions[s->s_mds] = NULL; ceph_con_close(&s->s_con); ceph_put_mds_session(s); + atomic_dec(&mdsc->num_sessions); } /* @@ -3328,6 +3330,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) init_waitqueue_head(&mdsc->session_close_wq); INIT_LIST_HEAD(&mdsc->waiting_for_map); mdsc->sessions = NULL; + atomic_set(&mdsc->num_sessions, 0); mdsc->max_sessions = 0; mdsc->stopping = 0; init_rwsem(&mdsc->snap_rwsem); @@ -3479,17 +3482,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) */ static bool done_closing_sessions(struct ceph_mds_client *mdsc) { - int i, n = 0; - if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) return true; - - mutex_lock(&mdsc->mutex); - for (i = 0; i < mdsc->max_sessions; i++) - if (mdsc->sessions[i]) - n++; - mutex_unlock(&mdsc->mutex); - return n == 0; + return atomic_read(&mdsc->num_sessions) == 0; } /* diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index a87b92f500bb..1875b5d985c6 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -273,6 +273,7 @@ struct ceph_mds_client { struct list_head waiting_for_map; struct ceph_mds_session **sessions; /* NULL for mds if no session */ + atomic_t num_sessions; int max_sessions; /* len of s_mds_sessions */ int stopping; /* true if shutting down */ -- cgit v1.2.3 From fcc02d2a03fc629b82d1ca1006fbd06570385264 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Sat, 10 Jan 2015 11:43:12 +0800 Subject: ceph: fix reading inline data when i_size > PAGE_SIZE when inode has inline data but its size > PAGE_SIZE (it was truncated to larger size), previous direct read code return -EIO. This patch adds code to return zeros for data whose offset > PAGE_SIZE. Signed-off-by: Yan, Zheng --- fs/ceph/addr.c | 19 ++++++++++++------- fs/ceph/file.c | 22 ++++++++++++++-------- 2 files changed, 26 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index c81c0e004588..7d05e37874d4 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -196,17 +196,22 @@ static int readpage_nounlock(struct file *filp, struct page *page) u64 len = PAGE_CACHE_SIZE; if (off >= i_size_read(inode)) { - zero_user_segment(page, err, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_CACHE_SIZE); SetPageUptodate(page); return 0; } - /* - * Uptodate inline data should have been added into page cache - * while getting Fcr caps. - */ - if (ci->i_inline_version != CEPH_INLINE_NONE) - return -EINVAL; + if (ci->i_inline_version != CEPH_INLINE_NONE) { + /* + * Uptodate inline data should have been added + * into page cache while getting Fcr caps. 
+ */ + if (off == 0) + return -EINVAL; + zero_user_segment(page, 0, PAGE_CACHE_SIZE); + SetPageUptodate(page); + return 0; + } err = ceph_readpage_from_fscache(inode, page); if (err == 0) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 663da44c06b6..c407abb52b7b 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -879,28 +879,34 @@ again: i_size = i_size_read(inode); if (retry_op == READ_INLINE) { - /* does not support inline data > PAGE_SIZE */ - if (i_size > PAGE_CACHE_SIZE) { - ret = -EIO; - } else if (iocb->ki_pos < i_size) { + BUG_ON(ret > 0 || read > 0); + if (iocb->ki_pos < i_size && + iocb->ki_pos < PAGE_CACHE_SIZE) { loff_t end = min_t(loff_t, i_size, iocb->ki_pos + len); + end = min_t(loff_t, end, PAGE_CACHE_SIZE); if (statret < end) zero_user_segment(page, statret, end); ret = copy_page_to_iter(page, iocb->ki_pos & ~PAGE_MASK, end - iocb->ki_pos, to); iocb->ki_pos += ret; - } else { - ret = 0; + read += ret; + } + if (iocb->ki_pos < i_size && read < len) { + size_t zlen = min_t(size_t, len - read, + i_size - iocb->ki_pos); + ret = iov_iter_zero(zlen, to); + iocb->ki_pos += ret; + read += ret; } __free_pages(page, 0); - return ret; + return read; } /* hit EOF or hole? */ if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && - ret < len) { + ret < len) { dout("sync_read hit hole, ppos %lld < size %lld" ", reading more\n", iocb->ki_pos, inode->i_size); -- cgit v1.2.3 From 1f041a89b4f22cf2e701514f4b8f73a8b1e06a3e Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 13 Jan 2015 15:20:52 +0800 Subject: ceph: fix request time stamp encoding struct timespec uses 'long' to present second and nanosecond. 'long' is 64 bits on 64bits machine. ceph MDS expects time stamp to be encoded as struct ceph_timespec, which uses 'u32' to present second and nanosecond. Signed-off-by: Yan, Zheng --- fs/ceph/mds_client.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index c90ca99331be..03720fe3f531 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1939,7 +1939,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, head->num_releases = cpu_to_le16(releases); /* time stamp */ - ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp)); + { + struct ceph_timespec ts; + ceph_encode_timespec(&ts, &req->r_stamp); + ceph_encode_copy(&p, &ts, sizeof(ts)); + } BUG_ON(p > end); msg->front.iov_len = p - msg->front.iov_base; @@ -2028,7 +2032,11 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, /* time stamp */ p = msg->front.iov_base + req->r_request_release_offset; - ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp)); + { + struct ceph_timespec ts; + ceph_encode_timespec(&ts, &req->r_stamp); + ceph_encode_copy(&p, &ts, sizeof(ts)); + } msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); -- cgit v1.2.3 From 38c48b5f0a7fd5ed9fdab6da4d208aa23cc5391a Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 14 Jan 2015 13:46:04 +0800 Subject: ceph: provide seperate {inode,file}_operations for snapdir remove all unsupported operations from {inode,file}_operations. Signed-off-by: Yan, Zheng --- fs/ceph/dir.c | 17 +++++++++++++++-- fs/ceph/inode.c | 4 ++-- fs/ceph/super.h | 2 ++ 3 files changed, 19 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index c241603764fd..709f3b98ca5c 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -26,8 +26,6 @@ * point by name. 
*/ -const struct inode_operations ceph_dir_iops; -const struct file_operations ceph_dir_fops; const struct dentry_operations ceph_dentry_ops; /* @@ -1335,6 +1333,13 @@ const struct file_operations ceph_dir_fops = { .fsync = ceph_dir_fsync, }; +const struct file_operations ceph_snapdir_fops = { + .iterate = ceph_readdir, + .llseek = ceph_dir_llseek, + .open = ceph_open, + .release = ceph_release, +}; + const struct inode_operations ceph_dir_iops = { .lookup = ceph_lookup, .permission = ceph_permission, @@ -1357,6 +1362,14 @@ const struct inode_operations ceph_dir_iops = { .atomic_open = ceph_atomic_open, }; +const struct inode_operations ceph_snapdir_iops = { + .lookup = ceph_lookup, + .permission = ceph_permission, + .getattr = ceph_getattr, + .mkdir = ceph_mkdir, + .rmdir = ceph_unlink, +}; + const struct dentry_operations ceph_dentry_ops = { .d_revalidate = ceph_d_revalidate, .d_release = ceph_d_release, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index f61a74115beb..d0fe2f437fbb 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -82,8 +82,8 @@ struct inode *ceph_get_snapdir(struct inode *parent) inode->i_mode = parent->i_mode; inode->i_uid = parent->i_uid; inode->i_gid = parent->i_gid; - inode->i_op = &ceph_dir_iops; - inode->i_fop = &ceph_dir_fops; + inode->i_op = &ceph_snapdir_iops; + inode->i_fop = &ceph_snapdir_fops; ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ ci->i_rbytes = 0; return inode; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 72bc05a73b69..04c8124ed30e 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -893,7 +893,9 @@ extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, int ceph_uninline_data(struct file *filp, struct page *locked_page); /* dir.c */ extern const struct file_operations ceph_dir_fops; +extern const struct file_operations ceph_snapdir_fops; extern const struct inode_operations ceph_dir_iops; +extern const struct inode_operations ceph_snapdir_iops; extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops, ceph_snapdir_dentry_ops; -- cgit v1.2.3 From a6a5ce4f0df9146ba8cb61121b80aa191fbb1f04 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 16 Jan 2015 10:54:43 +0800 Subject: client: include kernel version in client metadata Signed-off-by: Yan, Zheng --- fs/ceph/mds_client.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 03720fe3f531..03482c0974b6 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -844,8 +844,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 struct ceph_options *opt = mdsc->fsc->client->options; void *p; - const char* metadata[3][2] = { + const char* metadata[][2] = { {"hostname", utsname()->nodename}, + {"kernel_version", utsname()->release}, {"entity_id", opt->name ? opt->name : ""}, {NULL, NULL} }; -- cgit v1.2.3 From 2f92b3d0a9a583a5a4dd786a84fc42e6f1aa40fa Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 19 Jan 2015 13:12:24 +0800 Subject: ceph: properly mark empty directory as complete ceph_add_cap() calls __check_cap_issue(), which clears directory inode' complete flag. so we should set the complete flag for empty directory should be set after calling ceph_add_cap(). 
Signed-off-by: Yan, Zheng --- fs/ceph/inode.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index d0fe2f437fbb..f88a0f059dc8 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -840,30 +840,31 @@ static int fill_inode(struct inode *inode, struct page *locked_page, ceph_vinop(inode), inode->i_mode); } - /* set dir completion flag? */ - if (S_ISDIR(inode->i_mode) && - ci->i_files == 0 && ci->i_subdirs == 0 && - ceph_snap(inode) == CEPH_NOSNAP && - (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && - (issued & CEPH_CAP_FILE_EXCL) == 0 && - !__ceph_dir_is_complete(ci)) { - dout(" marking %p complete (empty)\n", inode); - __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count), - ci->i_ordered_count); - } - /* were we issued a capability? */ if (info->cap.caps) { if (ceph_snap(inode) == CEPH_NOSNAP) { + unsigned caps = le32_to_cpu(info->cap.caps); ceph_add_cap(inode, session, le64_to_cpu(info->cap.cap_id), - cap_fmode, - le32_to_cpu(info->cap.caps), + cap_fmode, caps, le32_to_cpu(info->cap.wanted), le32_to_cpu(info->cap.seq), le32_to_cpu(info->cap.mseq), le64_to_cpu(info->cap.realm), info->cap.flags, &new_cap); + + /* set dir completion flag? */ + if (S_ISDIR(inode->i_mode) && + ci->i_files == 0 && ci->i_subdirs == 0 && + (caps & CEPH_CAP_FILE_SHARED) && + (issued & CEPH_CAP_FILE_EXCL) == 0 && + !__ceph_dir_is_complete(ci)) { + dout(" marking %p complete (empty)\n", inode); + __ceph_dir_set_complete(ci, + atomic_read(&ci->i_release_count), + ci->i_ordered_count); + } + wake = true; } else { dout(" %p got snap_caps %s\n", inode, -- cgit v1.2.3 From bf91c3150880ed6304f578cf00bd408d642fe6a0 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 19 Jan 2015 13:23:20 +0800 Subject: ceph: fix atomic_open snapdir ceph_handle_snapdir() checks ceph_mdsc_do_request()'s return value and creates snapdir inode if it's -ENOENT Signed-off-by: Yan, Zheng --- fs/ceph/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index c407abb52b7b..848969ee24db 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -275,10 +275,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? 
dir : NULL, req); + err = ceph_handle_snapdir(req, dentry, err); if (err) goto out_req; - err = ceph_handle_snapdir(req, dentry, err); if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); -- cgit v1.2.3 From 2a0b61cefcd52ad63ff03aacae6d4113cdf46812 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 2 Feb 2015 17:54:17 +0300 Subject: ceph: show nocephx_require_signatures and notcp_nodelay options Signed-off-by: Ilya Dryomov --- fs/ceph/super.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 50f06cddc94b..8f8983f38b82 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -425,6 +425,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_puts(m, ",noshare"); if (opt->flags & CEPH_OPT_NOCRC) seq_puts(m, ",nocrc"); + if (opt->flags & CEPH_OPT_NOMSGAUTH) + seq_puts(m, ",nocephx_require_signatures"); + if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) + seq_puts(m, ",notcp_nodelay"); if (opt->name) seq_printf(m, ",name=%s", opt->name); -- cgit v1.2.3 From 3de22be6771353241eaec237fe594dfea3daf30f Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 4 Feb 2015 14:26:22 +0800 Subject: ceph: re-send requests when MDS enters reconnecting stage So that MDS can check if any request is already completed and process completed requests in clientreplay stage. When completed requests are processed in clientreplay stage, MDS can avoid sending traceless replies. Signed-off-by: Yan, Zheng --- fs/ceph/mds_client.c | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 03482c0974b6..4c1e36a171af 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2184,6 +2184,8 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds) p = rb_next(p); if (req->r_got_unsafe) continue; + if (req->r_attempts > 0) + continue; /* only new requests */ if (req->r_session && req->r_session->s_mds == mds) { dout(" kicking tid %llu\n", req->r_tid); @@ -2517,6 +2519,7 @@ static void handle_forward(struct ceph_mds_client *mdsc, dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); BUG_ON(req->r_err); BUG_ON(req->r_got_result); + req->r_attempts = 0; req->r_num_fwd = fwd_seq; req->r_resend_mds = next_mds; put_request_session(req); @@ -2648,6 +2651,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) { struct ceph_mds_request *req, *nreq; + struct rb_node *p; int err; dout("replay_unsafe_requests mds%d\n", session->s_mds); @@ -2660,6 +2664,28 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, ceph_con_send(&session->s_con, req->r_request); } } + + /* + * also re-send old requests when MDS enters reconnect stage. So that MDS + * can process completed request in clientreplay stage. 
+ */ + p = rb_first(&mdsc->request_tree); + while (p) { + req = rb_entry(p, struct ceph_mds_request, r_node); + p = rb_next(p); + if (req->r_got_unsafe) + continue; + if (req->r_attempts == 0) + continue; /* only old requests */ + if (req->r_session && + req->r_session->s_mds == session->s_mds) { + err = __prepare_send_request(mdsc, req, session->s_mds); + if (!err) { + ceph_msg_get(req->r_request); + ceph_con_send(&session->s_con, req->r_request); + } + } + } mutex_unlock(&mdsc->mutex); } @@ -2977,9 +3003,6 @@ static void check_new_map(struct ceph_mds_client *mdsc, mutex_unlock(&s->s_mutex); s->s_state = CEPH_MDS_SESSION_RESTARTING; } - - /* kick any requests waiting on the recovering mds */ - kick_requests(mdsc, i); } else if (oldstate == newstate) { continue; /* nothing new with this mds */ } -- cgit v1.2.3 From 5cba372c0fe78d24e83d9e0556ecbeb219625c15 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 2 Feb 2015 11:27:56 +0800 Subject: ceph: fix dentry leaks Signed-off-by: Yan, Zheng --- fs/ceph/dir.c | 1 + fs/ceph/inode.c | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 709f3b98ca5c..77eeb768f95a 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -676,6 +676,7 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) */ BUG_ON(!result->d_inode); d_instantiate(dentry, result->d_inode); + d_drop(result); return 0; } return PTR_ERR(result); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index f88a0f059dc8..be3af18e4cf1 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1449,12 +1449,14 @@ retry_lookup: } if (!dn->d_inode) { - dn = splice_dentry(dn, in, NULL); - if (IS_ERR(dn)) { - err = PTR_ERR(dn); + struct dentry *realdn = splice_dentry(dn, in, NULL); + if (IS_ERR(realdn)) { + err = PTR_ERR(realdn); + d_drop(dn); dn = NULL; goto next_item; } + dn = realdn; } di = dn->d_fsdata; -- cgit v1.2.3 From 4d41cef279f72f3965140fffa6b48f2a7d51408c Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 4 Feb 2015 15:10:48 +0800 Subject: ceph: return error for traceless reply race When we receive a traceless reply for a request that created a new inode, we re-send a lookup request to the MDS to get information about the newly created inode. (The VFS expects the filesystem's callback to return an inode in the create case.) This breaks one request into two requests, and another client may modify or move the new inode in between. When the race happens, ceph_handle_notrace_create() unconditionally links the dentry for the 'create' operation to the inode returned by the lookup. This may confuse the VFS when the inode is a directory (the VFS does not allow multiple links to a directory inode). This patch makes ceph_handle_notrace_create() return an error (-ESTALE) when it detects such a race. This event should be rare, and it happens only when we talk to an old MDS; a recent MDS does not send a traceless reply for a request that creates a new inode. Signed-off-by: Yan, Zheng --- fs/ceph/dir.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 77eeb768f95a..0411dbb15815 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -670,14 +670,17 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) /* * We created the item, then did a lookup, and found * it was already linked to another inode we already - * had in our cache (and thus got spliced). 
To not + * confuse VFS (especially when inode is a directory), + * we don't link our dentry to that inode, return an + * error instead. + * + * This event should be rare and it happens only when + * we talk to old MDS. Recent MDS does not send traceless + * reply for request that creates new inode. */ - BUG_ON(!result->d_inode); - d_instantiate(dentry, result->d_inode); d_drop(result); - return 0; + return -ESTALE; } return PTR_ERR(result); } -- cgit v1.2.3 From 4e7c22d447bb6d7e37bfe39ff658486ae78e8d77 Mon Sep 17 00:00:00 2001 From: Hector Marco-Gisbert Date: Sat, 14 Feb 2015 09:33:50 -0800 Subject: x86, mm/ASLR: Fix stack randomization on 64-bit systems The issue is that the stack for processes is not properly randomized on 64-bit architectures due to an integer overflow. The affected function is randomize_stack_top() in file "fs/binfmt_elf.c": static unsigned long randomize_stack_top(unsigned long stack_top) { unsigned int random_variable = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { random_variable = get_random_int() & STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } #ifdef CONFIG_STACK_GROWSUP return PAGE_ALIGN(stack_top) + random_variable; #else return PAGE_ALIGN(stack_top) - random_variable; #endif } Note that it declares the "random_variable" variable as "unsigned int". Since the result of the shift operation between STACK_RND_MASK (which is 0x3fffff on x86_64, 22 bits) and PAGE_SHIFT (which is 12 on x86_64): random_variable <<= PAGE_SHIFT; is wider than 32 bits, the two leftmost bits are dropped when the result is stored in "random_variable". This variable should be at least 34 bits long to hold the (22+12)-bit result. These two dropped bits reduce the entropy of the process stack. Concretely, the total stack entropy is reduced by a factor of four: from 2^30 to 2^28 (one fourth of the expected entropy). This patch restores the entropy by correcting the types involved in the operations in the functions randomize_stack_top() and stack_maxrandom_size(). The successful fix can be tested with: $ for i in `seq 1 10`; do cat /proc/self/maps | grep stack; done 7ffeda566000-7ffeda587000 rw-p 00000000 00:00 0 [stack] 7fff5a332000-7fff5a353000 rw-p 00000000 00:00 0 [stack] 7ffcdb7a1000-7ffcdb7c2000 rw-p 00000000 00:00 0 [stack] 7ffd5e2c4000-7ffd5e2e5000 rw-p 00000000 00:00 0 [stack] ... Once corrected, the leading bytes should be between 7ffc and 7fff, rather than always being 7fff. 
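The overflow is easy to reproduce in user space; the following stand-alone program (an illustration added here, not part of the patch; it hard-codes x86_64's STACK_RND_MASK of 0x3fffff and PAGE_SHIFT of 12) shows the two high bits being dropped when the shifted mask is stored in a 32-bit variable:

    #include <stdio.h>

    int main(void)
    {
        unsigned long mask   = 0x3fffff; /* STACK_RND_MASK on x86_64: 22 bits */
        unsigned int  narrow = mask;     /* the old 32-bit random_variable */
        unsigned long wide   = mask;     /* the corrected 64-bit variable */

        narrow <<= 12;                   /* PAGE_SHIFT: the top two bits are lost */
        wide   <<= 12;                   /* the full 34-bit value is kept */

        printf("32-bit: %#lx\n", (unsigned long)narrow); /* prints 0xfffff000 */
        printf("64-bit: %#lx\n", wide);                  /* prints 0x3fffff000 */
        return 0;
    }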
Signed-off-by: Hector Marco-Gisbert Signed-off-by: Ismael Ripoll [ Rebased, fixed 80 char bugs, cleaned up commit message, added test example and CVE ] Signed-off-by: Kees Cook Cc: Cc: Linus Torvalds Cc: Andrew Morton Cc: Al Viro Fixes: CVE-2015-1593 Link: http://lkml.kernel.org/r/20150214173350.GA18393@www.outflux.net Signed-off-by: Borislav Petkov --- arch/x86/mm/mmap.c | 6 +++--- fs/binfmt_elf.c | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 919b91205cd4..df4552bd239e 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = { .flags = -1, }; -static unsigned int stack_maxrandom_size(void) +static unsigned long stack_maxrandom_size(void) { - unsigned int max = 0; + unsigned long max = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { - max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; + max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; } return max; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 02b16910f4c9..995986b8e36b 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -645,11 +645,12 @@ out: static unsigned long randomize_stack_top(unsigned long stack_top) { - unsigned int random_variable = 0; + unsigned long random_variable = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { - random_variable = get_random_int() & STACK_RND_MASK; + random_variable = (unsigned long) get_random_int(); + random_variable &= STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } #ifdef CONFIG_STACK_GROWSUP -- cgit v1.2.3 From db671a8ecd764baf76a698b8366603a147880734 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 Feb 2015 16:02:09 -0500 Subject: don't bother with most of the bad_file_ops methods Only ->open() should be there (always failing, of course). We never replace ->f_op of an already opened struct file, so there's no way for any of those methods to be called. 
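For context, the file's operations pointer is copied out of the inode exactly once, when the file is opened, which is why a failing .open is the only method that can ever run on a bad inode; roughly (a simplified paraphrase of do_dentry_open(), not part of the patch):

    f->f_op = fops_get(inode->i_fop);   /* for a bad inode this is bad_file_ops */
    ...
    error = f->f_op->open(inode, f);    /* bad_file_open() fails with -EIO */
    /* the open never succeeds, so no other method is reachable */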
Signed-off-by: Al Viro --- fs/bad_inode.c | 147 --------------------------------------------------------- 1 file changed, 147 deletions(-) (limited to 'fs') diff --git a/fs/bad_inode.c b/fs/bad_inode.c index afd2b4408adf..861b1e1c4777 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -15,161 +15,14 @@ #include #include - -static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence) -{ - return -EIO; -} - -static ssize_t bad_file_read(struct file *filp, char __user *buf, - size_t size, loff_t *ppos) -{ - return -EIO; -} - -static ssize_t bad_file_write(struct file *filp, const char __user *buf, - size_t siz, loff_t *ppos) -{ - return -EIO; -} - -static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) -{ - return -EIO; -} - -static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) -{ - return -EIO; -} - -static int bad_file_readdir(struct file *file, struct dir_context *ctx) -{ - return -EIO; -} - -static unsigned int bad_file_poll(struct file *filp, poll_table *wait) -{ - return POLLERR; -} - -static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd, - unsigned long arg) -{ - return -EIO; -} - -static long bad_file_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - return -EIO; -} - -static int bad_file_mmap(struct file *file, struct vm_area_struct *vma) -{ - return -EIO; -} - static int bad_file_open(struct inode *inode, struct file *filp) { return -EIO; } -static int bad_file_flush(struct file *file, fl_owner_t id) -{ - return -EIO; -} - -static int bad_file_release(struct inode *inode, struct file *filp) -{ - return -EIO; -} - -static int bad_file_fsync(struct file *file, loff_t start, loff_t end, - int datasync) -{ - return -EIO; -} - -static int bad_file_aio_fsync(struct kiocb *iocb, int datasync) -{ - return -EIO; -} - -static int bad_file_fasync(int fd, struct file *filp, int on) -{ - return -EIO; -} - -static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl) -{ - return -EIO; -} - -static ssize_t bad_file_sendpage(struct file *file, struct page *page, - int off, size_t len, loff_t *pos, int more) -{ - return -EIO; -} - -static unsigned long bad_file_get_unmapped_area(struct file *file, - unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags) -{ - return -EIO; -} - -static int bad_file_check_flags(int flags) -{ - return -EIO; -} - -static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl) -{ - return -EIO; -} - -static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe, - struct file *out, loff_t *ppos, size_t len, - unsigned int flags) -{ - return -EIO; -} - -static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags) -{ - return -EIO; -} - static const struct file_operations bad_file_ops = { - .llseek = bad_file_llseek, - .read = bad_file_read, - .write = bad_file_write, - .aio_read = bad_file_aio_read, - .aio_write = bad_file_aio_write, - .iterate = bad_file_readdir, - .poll = bad_file_poll, - .unlocked_ioctl = bad_file_unlocked_ioctl, - .compat_ioctl = bad_file_compat_ioctl, - .mmap = bad_file_mmap, .open = bad_file_open, - .flush = bad_file_flush, - .release = bad_file_release, - .fsync = bad_file_fsync, - .aio_fsync = bad_file_aio_fsync, - .fasync = bad_file_fasync, - .lock = bad_file_lock, - .sendpage = bad_file_sendpage, - .get_unmapped_area = 
bad_file_get_unmapped_area, - .check_flags = bad_file_check_flags, - .flock = bad_file_flock, - .splice_write = bad_file_splice_write, - .splice_read = bad_file_splice_read, }; static int bad_inode_create (struct inode *dir, struct dentry *dentry, -- cgit v1.2.3 From 112fc894a7c49e6435f91faa1cebfd425e6f3ace Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Jan 2015 15:18:39 +0000 Subject: configfs: Fix potential NULL d_inode dereference Code that does this: if (!(d_unhashed(dentry) && dentry->d_inode)) { ... simple_unlink(parent->d_inode, dentry); } is broken because: !(d_unhashed(dentry) && dentry->d_inode) is equivalent to: !d_unhashed(dentry) || !dentry->d_inode so it is possible to get into simple_unlink() with dentry->d_inode == NULL. simple_unlink(), however, assumes dentry->d_inode cannot be NULL. I think that what was meant is this: !d_unhashed(dentry) && dentry->d_inode and that the logical-not operator or the final close-bracket was misplaced. Signed-off-by: David Howells cc: Joel Becker Signed-off-by: Al Viro --- fs/configfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 60727db7b0a3..5423a6a6ecc8 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -236,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent) if (dentry) { spin_lock(&dentry->d_lock); - if (!(d_unhashed(dentry) && dentry->d_inode)) { + if (!d_unhashed(dentry) && dentry->d_inode) { dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); -- cgit v1.2.3 From acd88d4e1af3c6c55f679c202cd517dff7ea9c6f Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Wed, 4 Feb 2015 21:15:59 +0800 Subject: fs/aio.c: Remove duplicate function name in pr_debug messages Have defined pr_fmt as below in fs/aio.c, so remove duplicate function name in pr_debug message. 
#define pr_fmt(fmt) "%s: " fmt, __func__ Signed-off-by: Kinglong Mee Signed-off-by: Al Viro --- fs/aio.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 118a2e0088d8..f8e52a1854c1 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1285,7 +1285,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) ret = -EINVAL; if (unlikely(ctx || nr_events == 0)) { - pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", + pr_debug("EINVAL: ctx %lu nr_events %u\n", ctx, nr_events); goto out; } @@ -1333,7 +1333,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) return ret; } - pr_debug("EINVAL: io_destroy: invalid context id\n"); + pr_debug("EINVAL: invalid context id\n"); return -EINVAL; } @@ -1515,7 +1515,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || ((ssize_t)iocb->aio_nbytes < 0) )) { - pr_debug("EINVAL: io_submit: overflow check\n"); + pr_debug("EINVAL: overflow check\n"); return -EINVAL; } -- cgit v1.2.3 From fcbc32bc6cb59cae8528dadbdc4958c9c814bba4 Mon Sep 17 00:00:00 2001 From: Bastien Nocera Date: Thu, 5 Feb 2015 14:35:05 +0100 Subject: coredump: Fix typo in comment Signed-off-by: Bastien Nocera Signed-off-by: Al Viro --- fs/coredump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/coredump.c b/fs/coredump.c index b5c86ffd5033..f319926ddf8c 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -572,7 +572,7 @@ void do_coredump(const siginfo_t *siginfo) * * Normally core limits are irrelevant to pipes, since * we're not writing to the file system, but we use - * cprm.limit of 1 here as a speacial value, this is a + * cprm.limit of 1 here as a special value, this is a * consistent way to catch recursive crashes. * We can still crash if the core_pattern binary sets * RLIM_CORE = !1, but it runs as root, and can do -- cgit v1.2.3 From 76bf3f6b1d6ac4c770bb121b0461c460aa068e64 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Fri, 6 Feb 2015 16:28:17 +0100 Subject: autofs4: Wrong format for printing dentry %pD for struct file*, %pd for struct dentry*. Fixes: a455589f181e ("assorted conversions to %p[dD]") Signed-off-by: Rasmus Villemoes Signed-off-by: Al Viro --- fs/autofs4/root.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index dbb5b7212ce1..7ba355b8d4ac 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -108,7 +108,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) struct dentry *dentry = file->f_path.dentry; struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); - DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry); + DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry); if (autofs4_oz_mode(sbi)) goto out; -- cgit v1.2.3 From fed0b588be2f55822013808a2968c228258d921b Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sun, 8 Feb 2015 21:45:25 -0800 Subject: posix_acl: fix reference leaks in posix_acl_create get_acl gets a reference which we must release in the error cases. 
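The fix (diffed just below) converts the error paths to the usual acquire/goto-unwind discipline. A small stand-alone sketch of that idiom, with malloc() standing in for the acquired references; the names are illustrative, not the posix_acl code itself.

#include <stdlib.h>

/* Everything acquired before a failure point is released exactly once, in
 * reverse order, via labels; the old code returned early and leaked the
 * first acquisition on the clone/masq failure paths. */
static int create_pair(void **out_a, void **out_b)
{
    void *a, *b;

    a = malloc(16);         /* stands in for the get_acl() reference */
    if (!a)
        return -1;
    b = malloc(16);         /* stands in for posix_acl_clone() */
    if (!b)
        goto free_a;

    *out_a = a;
    *out_b = b;
    return 0;

free_a:
    free(a);                /* the release the early returns skipped */
    return -1;
}

int main(void)
{
    void *a, *b;

    return create_pair(&a, &b);
}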
Reviewed-by: Christoph Hellwig Signed-off-by: Omar Sandoval Signed-off-by: Al Viro --- fs/posix_acl.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 0855f772cd41..515d31511d0d 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -564,13 +564,11 @@ posix_acl_create(struct inode *dir, umode_t *mode, *acl = posix_acl_clone(p, GFP_NOFS); if (!*acl) - return -ENOMEM; + goto no_mem; ret = posix_acl_create_masq(*acl, mode); - if (ret < 0) { - posix_acl_release(*acl); - return -ENOMEM; - } + if (ret < 0) + goto no_mem_clone; if (ret == 0) { posix_acl_release(*acl); @@ -591,6 +589,12 @@ no_acl: *default_acl = NULL; *acl = NULL; return 0; + +no_mem_clone: + posix_acl_release(*acl); +no_mem: + posix_acl_release(p); + return -ENOMEM; } EXPORT_SYMBOL_GPL(posix_acl_create); -- cgit v1.2.3 From df1a085af1f652a02238168c4f2b730c8c90dd4a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:28 +0000 Subject: VFS: Add a fallthrough flag for marking virtual dentries Add a DCACHE_FALLTHRU flag to indicate that, in a layered filesystem, this is a virtual dentry that covers another one in a lower layer that should be used instead. This may be recorded on medium if directory integration is stored there. The flag can be set with d_set_fallthru() and tested with d_is_fallthru(). Original-author: Valerie Aurora Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/dcache.c | 19 ++++++++++++++++++- include/linux/dcache.h | 9 +++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index dc400fd29f4d..e33a0934efd7 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1659,6 +1659,22 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) } EXPORT_SYMBOL(d_set_d_op); + +/* + * d_set_fallthru - Mark a dentry as falling through to a lower layer + * @dentry - The dentry to mark + * + * Mark a dentry as falling through to the lower layer (as set with + * d_pin_lower()). This flag may be recorded on the medium. 
+ */ +void d_set_fallthru(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_FALLTHRU; + spin_unlock(&dentry->d_lock); +} +EXPORT_SYMBOL(d_set_fallthru); + static unsigned d_flags_for_inode(struct inode *inode) { unsigned add_flags = DCACHE_FILE_TYPE; @@ -1691,7 +1707,8 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) unsigned add_flags = d_flags_for_inode(inode); spin_lock(&dentry->d_lock); - __d_set_type(dentry, add_flags); + dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + dentry->d_flags |= add_flags; if (inode) hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); dentry->d_inode = inode; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 98d2a948a08e..728f5d31b5e6 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -223,6 +223,7 @@ struct dentry_operations { #define DCACHE_FILE_TYPE 0x00500000 /* Other file type (or fallthru to such) */ #define DCACHE_MAY_FREE 0x00800000 +#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ extern seqlock_t rename_lock; @@ -470,6 +471,14 @@ static inline bool d_is_positive(const struct dentry *dentry) return !d_is_negative(dentry); } +extern void d_set_fallthru(struct dentry *dentry); + +static inline bool d_is_fallthru(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_FALLTHRU; +} + + extern int sysctl_vfs_cache_pressure; static inline unsigned long vfs_pressure_ratio(unsigned long val) -- cgit v1.2.3 From 44bdb5e5f6382ba88f7678d6f535f879324522ae Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:29 +0000 Subject: VFS: Split DCACHE_FILE_TYPE into regular and special types Split DCACHE_FILE_TYPE into DCACHE_REGULAR_TYPE (dentries representing regular files) and DCACHE_SPECIAL_TYPE (representing blockdev, chardev, FIFO and socket files). d_is_reg() and d_is_special() are added to detect these subtypes and d_is_file() is left as the union of the two. This allows a number of places that use S_ISREG(dentry->d_inode->i_mode) to use d_is_reg(dentry) instead. 
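One point worth spelling out about this split: the DCACHE_*_TYPE values are not independent bits but an enumeration packed into a small field of d_flags, which is why adding two new types forces DCACHE_SYMLINK_TYPE to be renumbered. A stand-alone sketch follows; the type constants mirror the patch, while the DCACHE_ENTRY_TYPE mask value is assumed here for illustration.

#include <stdio.h>

#define DCACHE_ENTRY_TYPE       0x00700000  /* assumed mask covering the type field */
#define DCACHE_DIRECTORY_TYPE   0x00200000
#define DCACHE_AUTODIR_TYPE     0x00300000
#define DCACHE_REGULAR_TYPE     0x00400000
#define DCACHE_SPECIAL_TYPE     0x00500000
#define DCACHE_SYMLINK_TYPE     0x00600000  /* renumbered from 0x00400000 */

static unsigned int entry_type(unsigned int d_flags)
{
    return d_flags & DCACHE_ENTRY_TYPE;
}

int main(void)
{
    /* Setting a type replaces the whole field rather than OR-ing in a bit. */
    unsigned int d_flags = DCACHE_REGULAR_TYPE;

    printf("is_reg=%d is_special=%d is_file=%d\n",
           entry_type(d_flags) == DCACHE_REGULAR_TYPE,
           entry_type(d_flags) == DCACHE_SPECIAL_TYPE,
           entry_type(d_flags) == DCACHE_REGULAR_TYPE ||
           entry_type(d_flags) == DCACHE_SPECIAL_TYPE);
    return 0;
}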
Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/dcache.c | 18 +++++++++++++----- include/linux/dcache.h | 17 ++++++++++++++--- 2 files changed, 27 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index e33a0934efd7..c71e3732e53b 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1677,7 +1677,7 @@ EXPORT_SYMBOL(d_set_fallthru); static unsigned d_flags_for_inode(struct inode *inode) { - unsigned add_flags = DCACHE_FILE_TYPE; + unsigned add_flags = DCACHE_REGULAR_TYPE; if (!inode) return DCACHE_MISS_TYPE; @@ -1690,13 +1690,21 @@ static unsigned d_flags_for_inode(struct inode *inode) else inode->i_opflags |= IOP_LOOKUP; } - } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { - if (unlikely(inode->i_op->follow_link)) + goto type_determined; + } + + if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { + if (unlikely(inode->i_op->follow_link)) { add_flags = DCACHE_SYMLINK_TYPE; - else - inode->i_opflags |= IOP_NOFOLLOW; + goto type_determined; + } + inode->i_opflags |= IOP_NOFOLLOW; } + if (unlikely(!S_ISREG(inode->i_mode))) + add_flags = DCACHE_SPECIAL_TYPE; + +type_determined: if (unlikely(IS_AUTOMOUNT(inode))) add_flags |= DCACHE_NEED_AUTOMOUNT; return add_flags; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 728f5d31b5e6..d8358799c594 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -219,8 +219,9 @@ struct dentry_operations { #define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */ #define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */ #define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */ -#define DCACHE_SYMLINK_TYPE 0x00400000 /* Symlink (or fallthru to such) */ -#define DCACHE_FILE_TYPE 0x00500000 /* Other file type (or fallthru to such) */ +#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */ +#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */ +#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */ #define DCACHE_MAY_FREE 0x00800000 #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ @@ -455,9 +456,19 @@ static inline bool d_is_symlink(const struct dentry *dentry) return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE; } +static inline bool d_is_reg(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE; +} + +static inline bool d_is_special(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE; +} + static inline bool d_is_file(const struct dentry *dentry) { - return __d_entry_type(dentry) == DCACHE_FILE_TYPE; + return d_is_reg(dentry) || d_is_special(dentry); } static inline bool d_is_negative(const struct dentry *dentry) -- cgit v1.2.3 From e36cb0b89ce20b4f8786a57e8a6bc8476f577650 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:35 +0000 Subject: VFS: (Scripted) Convert S_ISLNK/DIR/REG(dentry->d_inode) to d_is_*(dentry) Convert the following where appropriate: (1) S_ISLNK(dentry->d_inode) to d_is_symlink(dentry). (2) S_ISREG(dentry->d_inode) to d_is_reg(dentry). (3) S_ISDIR(dentry->d_inode) to d_is_dir(dentry). This is actually more complicated than it appears as some calls should be converted to d_can_lookup() instead. The difference is whether the directory in question is a real dir with a ->lookup op or whether it's a fake dir with a ->d_automount op. 
In some circumstances, we can subsume checks for dentry->d_inode not being NULL into this, provided we the code isn't in a filesystem that expects d_inode to be NULL if the dirent really *is* negative (ie. if we're going to use d_inode() rather than d_backing_inode() to get the inode pointer). Note that the dentry type field may be set to something other than DCACHE_MISS_TYPE when d_inode is NULL in the case of unionmount, where the VFS manages the fall-through from a negative dentry to a lower layer. In such a case, the dentry type of the negative union dentry is set to the same as the type of the lower dentry. However, if you know d_inode is not NULL at the call site, then you can use the d_is_xxx() functions even in a filesystem. There is one further complication: a 0,0 chardev dentry may be labelled DCACHE_WHITEOUT_TYPE rather than DCACHE_SPECIAL_TYPE. Strictly, this was intended for special directory entry types that don't have attached inodes. The following perl+coccinelle script was used: use strict; my @callers; open($fd, 'git grep -l \'S_IS[A-Z].*->d_inode\' |') || die "Can't grep for S_ISDIR and co. callers"; @callers = <$fd>; close($fd); unless (@callers) { print "No matches\n"; exit(0); } my @cocci = ( '@@', 'expression E;', '@@', '', '- S_ISLNK(E->d_inode->i_mode)', '+ d_is_symlink(E)', '', '@@', 'expression E;', '@@', '', '- S_ISDIR(E->d_inode->i_mode)', '+ d_is_dir(E)', '', '@@', 'expression E;', '@@', '', '- S_ISREG(E->d_inode->i_mode)', '+ d_is_reg(E)' ); my $coccifile = "tmp.sp.cocci"; open($fd, ">$coccifile") || die $coccifile; print($fd "$_\n") || die $coccifile foreach (@cocci); close($fd); foreach my $file (@callers) { chomp $file; print "Processing ", $file, "\n"; system("spatch", "--sp-file", $coccifile, $file, "--in-place", "--no-show-diff") == 0 || die "spatch failed"; } [AV: overlayfs parts skipped] Signed-off-by: David Howells Signed-off-by: Al Viro --- arch/s390/hypfs/inode.c | 2 +- fs/9p/vfs_inode.c | 2 +- fs/autofs4/expire.c | 2 +- fs/autofs4/root.c | 4 ++-- fs/btrfs/ioctl.c | 4 ++-- fs/cachefiles/daemon.c | 4 ++-- fs/cachefiles/namei.c | 16 ++++++++-------- fs/ceph/dir.c | 2 +- fs/ceph/file.c | 2 +- fs/coda/dir.c | 2 +- fs/debugfs/inode.c | 2 +- fs/ecryptfs/file.c | 2 +- fs/ecryptfs/inode.c | 4 ++-- fs/exportfs/expfs.c | 2 +- fs/fuse/dir.c | 2 +- fs/gfs2/dir.c | 2 +- fs/hfsplus/dir.c | 2 +- fs/hppfs/hppfs.c | 4 ++-- fs/jffs2/dir.c | 14 +++++++------- fs/jffs2/super.c | 2 +- fs/libfs.c | 2 +- fs/namei.c | 2 +- fs/namespace.c | 10 +++++----- fs/nfsd/nfs4recover.c | 4 ++-- fs/nfsd/nfsfh.c | 8 ++++---- fs/nfsd/vfs.c | 8 ++++---- fs/notify/fanotify/fanotify.c | 6 +++--- fs/overlayfs/dir.c | 6 +++--- fs/posix_acl.c | 4 ++-- fs/reiserfs/xattr.c | 4 ++-- fs/xfs/xfs_ioctl.c | 2 +- mm/shmem.c | 4 ++-- security/inode.c | 2 +- security/selinux/hooks.c | 4 ++-- 34 files changed, 71 insertions(+), 71 deletions(-) (limited to 'fs') diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 67a0014ddb63..99824ff8dd35 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry) parent = dentry->d_parent; mutex_lock(&parent->d_inode->i_mutex); if (hypfs_positive(dentry)) { - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) simple_rmdir(parent->d_inode, dentry); else simple_unlink(parent->d_inode, dentry); diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 9ee5343d4884..3662f1d1d9cf 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1127,7 +1127,7 @@ static int 
v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) } /* Write all dirty data */ - if (S_ISREG(dentry->d_inode->i_mode)) + if (d_is_reg(dentry)) filemap_write_and_wait(dentry->d_inode->i_mapping); retval = p9_client_wstat(fid, &wstat); diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index bfdbaba9c2ba..11dd118f75e2 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry, return NULL; } - if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { + if (dentry->d_inode && d_is_symlink(dentry)) { DPRINTK("checking symlink %p %pd", dentry, dentry); /* * A symlink can't be "busy" in the usual sense so diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 7ba355b8d4ac..7e44fdd03e2d 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path) * having d_mountpoint() true, so there's no need to call back * to the daemon. */ - if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { + if (dentry->d_inode && d_is_symlink(dentry)) { spin_unlock(&sbi->fs_lock); goto done; } @@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk) * an incorrect ELOOP error return. */ if ((!d_mountpoint(dentry) && !simple_empty(dentry)) || - (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))) + (dentry->d_inode && d_is_symlink(dentry))) status = -EISDIR; } spin_unlock(&sbi->fs_lock); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d49fe8a0f6b5..74609b931ba5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -776,11 +776,11 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) return -EPERM; if (isdir) { - if (!S_ISDIR(victim->d_inode->i_mode)) + if (!d_is_dir(victim)) return -ENOTDIR; if (IS_ROOT(victim)) return -EBUSY; - } else if (S_ISDIR(victim->d_inode->i_mode)) + } else if (d_is_dir(victim)) return -EISDIR; if (IS_DEADDIR(dir)) return -ENOENT; diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index ce1b115dcc28..d92840209863 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!S_ISDIR(path.dentry->d_inode->i_mode)) + if (!d_is_dir(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); @@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!S_ISDIR(path.dentry->d_inode->i_mode)) + if (!d_is_dir(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 7f8e83f9d74e..d750e8cc0ab6 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -277,7 +277,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache, _debug("remove %p from %p", rep, dir); /* non-directories can just be unlinked */ - if (!S_ISDIR(rep->d_inode->i_mode)) { + if (!d_is_dir(rep)) { _debug("unlink stale object"); path.mnt = cache->mnt; @@ -323,7 +323,7 @@ try_again: return 0; } - if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) { + if (!d_is_dir(cache->graveyard)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Graveyard no longer a directory"); return -EIO; @@ -475,7 +475,7 
@@ int cachefiles_walk_to_object(struct cachefiles_object *parent, ASSERT(parent->dentry); ASSERT(parent->dentry->d_inode); - if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) { + if (!(d_is_dir(parent->dentry))) { // TODO: convert file to dir _leave("looking up in none directory"); return -ENOBUFS; @@ -539,7 +539,7 @@ lookup_again: _debug("mkdir -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!S_ISDIR(next->d_inode->i_mode)) { + } else if (!d_is_dir(next)) { pr_err("inode %lu is not a directory\n", next->d_inode->i_ino); ret = -ENOBUFS; @@ -568,8 +568,8 @@ lookup_again: _debug("create -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!S_ISDIR(next->d_inode->i_mode) && - !S_ISREG(next->d_inode->i_mode) + } else if (!d_is_dir(next) && + !d_is_reg(next) ) { pr_err("inode %lu is not a file or directory\n", next->d_inode->i_ino); @@ -642,7 +642,7 @@ lookup_again: /* open a file interface onto a data file */ if (object->type != FSCACHE_COOKIE_TYPE_INDEX) { - if (S_ISREG(object->dentry->d_inode->i_mode)) { + if (d_is_reg(object->dentry)) { const struct address_space_operations *aops; ret = -EPERM; @@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, /* we need to make sure the subdir is a directory */ ASSERT(subdir->d_inode); - if (!S_ISDIR(subdir->d_inode->i_mode)) { + if (!d_is_dir(subdir)) { pr_err("%s is not a directory\n", dirname); ret = -EIO; goto check_error; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index c241603764fd..f099aefb0d19 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -902,7 +902,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry) } else if (ceph_snap(dir) == CEPH_NOSNAP) { dout("unlink/rmdir dir %p dn %p inode %p\n", dir, dentry, inode); - op = S_ISDIR(dentry->d_inode->i_mode) ? + op = d_is_dir(dentry) ? 
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; } else goto out; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 905986dd4c3c..851939c666f8 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -292,7 +292,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, } if (err) goto out_req; - if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) { + if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) { /* make vfs retry on splice, ENOENT, or symlink */ dout("atomic_open finish_no_open on dn %p\n", dn); err = finish_no_open(file, dn); diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 281ee011bb6a..60cb88c1dd2b 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -304,7 +304,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, (const char *) old_name, (const char *)new_name); if (!error) { if (new_dentry->d_inode) { - if (S_ISDIR(new_dentry->d_inode->i_mode)) { + if (d_is_dir(new_dentry)) { coda_dir_drop_nlink(old_dir); coda_dir_inc_nlink(new_dir); } diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 45b18a5e225c..90933645298c 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -690,7 +690,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, } d_move(old_dentry, dentry); fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, - S_ISDIR(old_dentry->d_inode->i_mode), + d_is_dir(old_dentry), NULL, old_dentry); fsnotify_oldname_free(old_name); unlock_rename(new_dir, old_dir); diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 6f4e659f508f..b07731e68c0b 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -230,7 +230,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file) } ecryptfs_set_file_lower( file, ecryptfs_inode_to_private(inode)->lower_file); - if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { + if (d_is_dir(ecryptfs_dentry)) { ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); mutex_lock(&crypt_stat->cs_mutex); crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 34b36a504059..b08b5187f662 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -907,9 +907,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) lower_inode = ecryptfs_inode_to_lower(inode); lower_dentry = ecryptfs_dentry_to_lower(dentry); mutex_lock(&crypt_stat->cs_mutex); - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); - else if (S_ISREG(dentry->d_inode->i_mode) + else if (d_is_reg(dentry) && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat; diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index fdfd206c737a..714cd37a6ba3 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -429,7 +429,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, if (IS_ERR(result)) return result; - if (S_ISDIR(result->d_inode->i_mode)) { + if (d_is_dir(result)) { /* * This request is for a directory. 
* diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 08e7b1a9d5d0..1545b711ddcf 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -971,7 +971,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, err = -EBUSY; goto badentry; } - if (S_ISDIR(entry->d_inode->i_mode)) { + if (d_is_dir(entry)) { shrink_dcache_parent(entry); if (!simple_empty(entry)) { err = -ENOTEMPTY; diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 6371192961e2..487527b42d94 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -1809,7 +1809,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry) gfs2_consist_inode(dip); dip->i_entries--; dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv; - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) drop_nlink(&dip->i_inode); mark_inode_dirty(&dip->i_inode); diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 435bea231cc6..f0235c1640af 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -530,7 +530,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, /* Unlink destination if it already exists */ if (new_dentry->d_inode) { - if (S_ISDIR(new_dentry->d_inode->i_mode)) + if (d_is_dir(new_dentry)) res = hfsplus_rmdir(new_dir, new_dentry); else res = hfsplus_unlink(new_dir, new_dentry); diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 5f2755117ce7..043ac9d77262 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -678,10 +678,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) return NULL; } - if (S_ISDIR(dentry->d_inode->i_mode)) { + if (d_is_dir(dentry)) { inode->i_op = &hppfs_dir_iops; inode->i_fop = &hppfs_dir_fops; - } else if (S_ISLNK(dentry->d_inode->i_mode)) { + } else if (d_is_symlink(dentry)) { inode->i_op = &hppfs_link_iops; inode->i_fop = &hppfs_file_fops; } else { diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 938556025d64..f21b6fb5e4c4 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -252,7 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de if (!f->inocache) return -EIO; - if (S_ISDIR(old_dentry->d_inode->i_mode)) + if (d_is_dir(old_dentry)) return -EPERM; /* XXX: This is ugly */ @@ -772,7 +772,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, */ if (new_dentry->d_inode) { victim_f = JFFS2_INODE_INFO(new_dentry->d_inode); - if (S_ISDIR(new_dentry->d_inode->i_mode)) { + if (d_is_dir(new_dentry)) { struct jffs2_full_dirent *fd; mutex_lock(&victim_f->sem); @@ -807,7 +807,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, if (victim_f) { /* There was a victim. Kill it off nicely */ - if (S_ISDIR(new_dentry->d_inode->i_mode)) + if (d_is_dir(new_dentry)) clear_nlink(new_dentry->d_inode); else drop_nlink(new_dentry->d_inode); @@ -815,7 +815,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, inode which didn't exist. 
*/ if (victim_f->inocache) { mutex_lock(&victim_f->sem); - if (S_ISDIR(new_dentry->d_inode->i_mode)) + if (d_is_dir(new_dentry)) victim_f->inocache->pino_nlink = 0; else victim_f->inocache->pino_nlink--; @@ -825,7 +825,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, /* If it was a directory we moved, and there was no victim, increase i_nlink on its new parent */ - if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) + if (d_is_dir(old_dentry) && !victim_f) inc_nlink(new_dir_i); /* Unlink the original */ @@ -839,7 +839,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); mutex_lock(&f->sem); inc_nlink(old_dentry->d_inode); - if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode)) + if (f->inocache && !d_is_dir(old_dentry)) f->inocache->pino_nlink++; mutex_unlock(&f->sem); @@ -852,7 +852,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, return ret; } - if (S_ISDIR(old_dentry->d_inode->i_mode)) + if (d_is_dir(old_dentry)) drop_nlink(old_dir_i); new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 0918f0e2e266..3d76f28a2ba9 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -138,7 +138,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child) struct jffs2_inode_info *f; uint32_t pino; - BUG_ON(!S_ISDIR(child->d_inode->i_mode)); + BUG_ON(!d_is_dir(child)); f = JFFS2_INODE_INFO(child->d_inode); diff --git a/fs/libfs.c b/fs/libfs.c index b2ffdb045be4..0ab65122ee45 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -329,7 +329,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *inode = old_dentry->d_inode; - int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode); + int they_are_dirs = d_is_dir(old_dentry); if (!simple_empty(new_dentry)) return -ENOTEMPTY; diff --git a/fs/namei.c b/fs/namei.c index 96ca11dea4a2..c83145af4bfc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2814,7 +2814,7 @@ no_open: } else if (!dentry->d_inode) { goto out; } else if ((open_flag & O_TRUNC) && - S_ISREG(dentry->d_inode->i_mode)) { + d_is_reg(dentry)) { goto out; } /* will fail later, go on to get the right error */ diff --git a/fs/namespace.c b/fs/namespace.c index 72a286e0d33e..82ef1405260e 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1907,8 +1907,8 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) return -EINVAL; - if (S_ISDIR(mp->m_dentry->d_inode->i_mode) != - S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode)) + if (d_is_dir(mp->m_dentry) != + d_is_dir(mnt->mnt.mnt_root)) return -ENOTDIR; return attach_recursive_mnt(mnt, p, mp, NULL); @@ -2180,8 +2180,8 @@ static int do_move_mount(struct path *path, const char *old_name) if (!mnt_has_parent(old)) goto out1; - if (S_ISDIR(path->dentry->d_inode->i_mode) != - S_ISDIR(old_path.dentry->d_inode->i_mode)) + if (d_is_dir(path->dentry) != + d_is_dir(old_path.dentry)) goto out1; /* * Don't move a mount residing in a shared parent. 
@@ -2271,7 +2271,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) goto unlock; err = -EINVAL; - if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode)) + if (d_is_symlink(newmnt->mnt.mnt_root)) goto unlock; newmnt->mnt.mnt_flags = mnt_flags; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index cc6a76072009..1c307f02baa8 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -583,7 +583,7 @@ nfs4_reset_recoverydir(char *recdir) if (status) return status; status = -ENOTDIR; - if (S_ISDIR(path.dentry->d_inode->i_mode)) { + if (d_is_dir(path.dentry)) { strcpy(user_recovery_dirname, recdir); status = 0; } @@ -1426,7 +1426,7 @@ nfsd4_client_tracking_init(struct net *net) nn->client_tracking_ops = &nfsd4_legacy_tracking_ops; status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path); if (!status) { - status = S_ISDIR(path.dentry->d_inode->i_mode); + status = d_is_dir(path.dentry); path_put(&path); if (status) goto do_init; diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 965b478d50fc..e9fa966fc37f 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -114,8 +114,8 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp, * We're exposing only the directories and symlinks that have to be * traversed on the way to real exports: */ - if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) && - !S_ISLNK(dentry->d_inode->i_mode))) + if (unlikely(!d_is_dir(dentry) && + !d_is_symlink(dentry))) return nfserr_stale; /* * A pseudoroot export gives permission to access only one @@ -259,7 +259,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp) goto out; } - if (S_ISDIR(dentry->d_inode->i_mode) && + if (d_is_dir(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED)) { printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n", dentry); @@ -414,7 +414,7 @@ static inline void _fh_update_old(struct dentry *dentry, { fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino); fh->ofh_generation = dentry->d_inode->i_generation; - if (S_ISDIR(dentry->d_inode->i_mode) || + if (d_is_dir(dentry) || (exp->ex_flags & NFSEXP_NOSUBTREECHECK)) fh->ofh_dirino = 0; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 5685c679dd93..368526582429 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -615,9 +615,9 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor export = fhp->fh_export; dentry = fhp->fh_dentry; - if (S_ISREG(dentry->d_inode->i_mode)) + if (d_is_reg(dentry)) map = nfs3_regaccess; - else if (S_ISDIR(dentry->d_inode->i_mode)) + else if (d_is_dir(dentry)) map = nfs3_diraccess; else map = nfs3_anyaccess; @@ -1402,7 +1402,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, switch (createmode) { case NFS3_CREATE_UNCHECKED: - if (! S_ISREG(dchild->d_inode->i_mode)) + if (! 
d_is_reg(dchild)) goto out; else if (truncp) { /* in nfsv4, we need to treat this case a little @@ -1615,7 +1615,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, if (err) goto out; err = nfserr_isdir; - if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode)) + if (d_is_dir(tfhp->fh_dentry)) goto out; err = nfserr_perm; if (!len) diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 51ceb8107284..61fdbb826324 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -115,8 +115,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, return false; /* sorry, fanotify only gives a damn about files and dirs */ - if (!S_ISREG(path->dentry->d_inode->i_mode) && - !S_ISDIR(path->dentry->d_inode->i_mode)) + if (!d_is_reg(path->dentry) && + !d_is_dir(path->dentry)) return false; if (inode_mark && vfsmnt_mark) { @@ -139,7 +139,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, BUG(); } - if (S_ISDIR(path->dentry->d_inode->i_mode) && + if (d_is_dir(path->dentry) && !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) return false; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 0dc4c33a0a1b..d139405d2bfa 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -19,7 +19,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) int err; dget(wdentry); - if (S_ISDIR(wdentry->d_inode->i_mode)) + if (d_is_dir(wdentry)) err = ovl_do_rmdir(wdir, wdentry); else err = ovl_do_unlink(wdir, wdentry); @@ -693,7 +693,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, bool new_create = false; bool cleanup_whiteout = false; bool overwrite = !(flags & RENAME_EXCHANGE); - bool is_dir = S_ISDIR(old->d_inode->i_mode); + bool is_dir = d_is_dir(old); bool new_is_dir = false; struct dentry *opaquedir = NULL; const struct cred *old_cred = NULL; @@ -720,7 +720,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, if (err) goto out; - if (S_ISDIR(new->d_inode->i_mode)) + if (d_is_dir(new)) new_is_dir = true; new_type = ovl_path_type(new); diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 515d31511d0d..3a48bb789c9f 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -776,7 +776,7 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name, if (!IS_POSIXACL(dentry->d_inode)) return -EOPNOTSUPP; - if (S_ISLNK(dentry->d_inode->i_mode)) + if (d_is_symlink(dentry)) return -EOPNOTSUPP; acl = get_acl(dentry->d_inode, type); @@ -836,7 +836,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size, if (!IS_POSIXACL(dentry->d_inode)) return -EOPNOTSUPP; - if (S_ISLNK(dentry->d_inode->i_mode)) + if (d_is_symlink(dentry)) return -EOPNOTSUPP; if (type == ACL_TYPE_ACCESS) diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 04b06146bae2..4e781e697c90 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -266,7 +266,7 @@ static int reiserfs_for_each_xattr(struct inode *inode, for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) { struct dentry *dentry = buf.dentries[i]; - if (!S_ISDIR(dentry->d_inode->i_mode)) + if (!d_is_dir(dentry)) err = action(dentry, data); dput(dentry); @@ -322,7 +322,7 @@ static int delete_one_xattr(struct dentry *dentry, void *data) struct inode *dir = dentry->d_parent->d_inode; /* This is the xattr dir, handle specially. 
*/ - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) return xattr_rmdir(dir, dentry); return xattr_unlink(dir, dentry); diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index f7afb86c9148..fe3c0fe71e64 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -286,7 +286,7 @@ xfs_readlink_by_handle( return PTR_ERR(dentry); /* Restrict this handle operation to symlinks only. */ - if (!S_ISLNK(dentry->d_inode->i_mode)) { + if (!d_is_symlink(dentry)) { error = -EINVAL; goto out_dput; } diff --git a/mm/shmem.c b/mm/shmem.c index a63031fa3e0c..2f17cb5f00a4 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2319,8 +2319,8 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry) static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); - bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); + bool old_is_dir = d_is_dir(old_dentry); + bool new_is_dir = d_is_dir(new_dentry); if (old_dir != new_dir && old_is_dir != new_is_dir) { if (old_is_dir) { diff --git a/security/inode.c b/security/inode.c index 8e7ca62078ab..131a3c49f766 100644 --- a/security/inode.c +++ b/security/inode.c @@ -203,7 +203,7 @@ void securityfs_remove(struct dentry *dentry) mutex_lock(&parent->d_inode->i_mutex); if (positive(dentry)) { if (dentry->d_inode) { - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) simple_rmdir(parent->d_inode, dentry); else simple_unlink(parent->d_inode, dentry); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 79f2c2cb68ad..4d1a54190388 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1799,7 +1799,7 @@ static inline int may_rename(struct inode *old_dir, old_dsec = old_dir->i_security; old_isec = old_dentry->d_inode->i_security; - old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); + old_is_dir = d_is_dir(old_dentry); new_dsec = new_dir->i_security; ad.type = LSM_AUDIT_DATA_DENTRY; @@ -1829,7 +1829,7 @@ static inline int may_rename(struct inode *old_dir, return rc; if (d_is_positive(new_dentry)) { new_isec = new_dentry->d_inode->i_security; - new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); + new_is_dir = d_is_dir(new_dentry); rc = avc_has_perm(sid, new_isec->sid, new_isec->sclass, (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad); -- cgit v1.2.3 From ce40fa78ef8f0e813392903c96de65b947298d16 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:36 +0000 Subject: Cachefiles: Fix up scripted S_ISDIR/S_ISREG/S_ISLNK conversions Fix up the following scripted S_ISDIR/S_ISREG/S_ISLNK conversions (or lack thereof) in cachefiles: (1) Cachefiles mostly wants to use d_can_lookup() rather than d_is_dir() as it doesn't want to deal with automounts in its cache. (2) Coccinelle didn't find S_IS* expressions in ASSERT() statements in cachefiles. 
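The distinction in point (1) above is easy to miss because d_is_dir() and d_can_lookup() agree for ordinary directories; they differ only for automount points. A stand-alone sketch, with the type values taken from the dcache patches earlier in this series and the helpers simplified to take a plain flags word (the real ones take a struct dentry):

#include <stdbool.h>
#include <stdio.h>

#define DCACHE_ENTRY_TYPE       0x00700000  /* assumed mask covering the type field */
#define DCACHE_DIRECTORY_TYPE   0x00200000  /* real directory with a ->lookup op */
#define DCACHE_AUTODIR_TYPE     0x00300000  /* fake directory, ->d_automount */

static bool d_can_lookup(unsigned int d_flags)
{
    return (d_flags & DCACHE_ENTRY_TYPE) == DCACHE_DIRECTORY_TYPE;
}

static bool d_is_dir(unsigned int d_flags)
{
    unsigned int type = d_flags & DCACHE_ENTRY_TYPE;

    return type == DCACHE_DIRECTORY_TYPE || type == DCACHE_AUTODIR_TYPE;
}

int main(void)
{
    unsigned int automount_point = DCACHE_AUTODIR_TYPE;

    /* An automount point still counts as a directory, but it cannot be
     * looked up locally, which is why the cache backend rejects it. */
    printf("d_is_dir=%d d_can_lookup=%d\n",
           d_is_dir(automount_point), d_can_lookup(automount_point));
    return 0;
}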
Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/cachefiles/daemon.c | 4 ++-- fs/cachefiles/interface.c | 4 ++-- fs/cachefiles/namei.c | 8 ++++---- fs/cachefiles/rdwr.c | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index d92840209863..f601def05bdf 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!d_is_dir(path.dentry)) + if (!d_can_lookup(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); @@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!d_is_dir(path.dentry)) + if (!d_can_lookup(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 1c7293c3a93a..232426214fdd 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -437,7 +437,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object) if (!object->backer) return -ENOBUFS; - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); + ASSERT(d_is_reg(object->backer)); fscache_set_store_limit(&object->fscache, ni_size); @@ -501,7 +501,7 @@ static void cachefiles_invalidate_object(struct fscache_operation *op) op->object->debug_id, (unsigned long long)ni_size); if (object->backer) { - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); + ASSERT(d_is_reg(object->backer)); fscache_set_store_limit(&object->fscache, ni_size); diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index d750e8cc0ab6..1e51714eb33e 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -323,7 +323,7 @@ try_again: return 0; } - if (!d_is_dir(cache->graveyard)) { + if (!d_can_lookup(cache->graveyard)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Graveyard no longer a directory"); return -EIO; @@ -539,7 +539,7 @@ lookup_again: _debug("mkdir -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!d_is_dir(next)) { + } else if (!d_can_lookup(next)) { pr_err("inode %lu is not a directory\n", next->d_inode->i_ino); ret = -ENOBUFS; @@ -568,7 +568,7 @@ lookup_again: _debug("create -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!d_is_dir(next) && + } else if (!d_can_lookup(next) && !d_is_reg(next) ) { pr_err("inode %lu is not a file or directory\n", @@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, /* we need to make sure the subdir is a directory */ ASSERT(subdir->d_inode); - if (!d_is_dir(subdir)) { + if (!d_can_lookup(subdir)) { pr_err("%s is not a directory\n", dirname); ret = -EIO; goto check_error; diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 616db0e77b44..c6cd8d7a4eef 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -900,7 +900,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) return -ENOBUFS; } - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); + ASSERT(d_is_reg(object->backer)); cache = container_of(object->fscache.cache, struct cachefiles_cache, cache); -- cgit v1.2.3 From 54f2a2f42759b11ada761013a12f0e743702219a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:36 +0000 Subject: fanotify: Fix 
up scripted S_ISDIR/S_ISREG/S_ISLNK conversions Fanotify probably doesn't want to watch autodirs so make it use d_can_lookup() rather than d_is_dir() when checking a dir watch and give an error on fake directories. Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/notify/fanotify/fanotify.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 61fdbb826324..9a66ff79ff27 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -116,7 +116,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, /* sorry, fanotify only gives a damn about files and dirs */ if (!d_is_reg(path->dentry) && - !d_is_dir(path->dentry)) + !d_can_lookup(path->dentry)) return false; if (inode_mark && vfsmnt_mark) { -- cgit v1.2.3 From eb6ef3df4faa5424cf2a24b4e4f3eeceb1482a8e Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 19 Feb 2015 20:19:35 +0300 Subject: trylock_super(): replacement for grab_super_passive() I've noticed significant locking contention in memory reclaimer around sb_lock inside grab_super_passive(). Grab_super_passive() is called from two places: in icache/dcache shrinkers (function super_cache_scan) and from writeback (function __writeback_inodes_wb). Both are required for progress in memory allocator. Grab_super_passive() acquires sb_lock to increment sb->s_count and check sb->s_instances. It seems sb->s_umount locked for read is enough here: super-block deactivation always runs under sb->s_umount locked for write. Protecting super-block itself isn't a problem: in super_cache_scan() sb is protected by shrinker_rwsem: it cannot be freed if its slab shrinkers are still active. Inside writeback super-block comes from inode from bdi writeback list under wb->list_lock. This patch removes locking sb_lock and checks s_instances under s_umount: generic_shutdown_super() unlinks it under sb->s_umount locked for write. New variant is called trylock_super() and since it only locks semaphore, callers must call up_read(&sb->s_umount) instead of drop_super(sb) when they're done. Signed-off-by: Konstantin Khlebnikov Signed-off-by: Al Viro --- fs/fs-writeback.c | 6 +++--- fs/internal.h | 2 +- fs/super.c | 40 ++++++++++++++++++---------------------- 3 files changed, 22 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 073657f755d4..e907052eeadb 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -769,9 +769,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb, struct inode *inode = wb_inode(wb->b_io.prev); struct super_block *sb = inode->i_sb; - if (!grab_super_passive(sb)) { + if (!trylock_super(sb)) { /* - * grab_super_passive() may fail consistently due to + * trylock_super() may fail consistently due to * s_umount being grabbed by someone else. Don't use * requeue_io() to avoid busy retrying the inode/sb. 
*/ @@ -779,7 +779,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb, continue; } wrote += writeback_sb_inodes(sb, wb, work); - drop_super(sb); + up_read(&sb->s_umount); /* refer to the same tests at the end of writeback_sb_inodes */ if (wrote) { diff --git a/fs/internal.h b/fs/internal.h index 30459dab409d..01dce1d1476b 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -84,7 +84,7 @@ extern struct file *get_empty_filp(void); * super.c */ extern int do_remount_sb(struct super_block *, int, void *, int); -extern bool grab_super_passive(struct super_block *sb); +extern bool trylock_super(struct super_block *sb); extern struct dentry *mount_fs(struct file_system_type *, int, const char *, void *); extern struct super_block *user_get_super(dev_t); diff --git a/fs/super.c b/fs/super.c index 65a53efc1cf4..2b7dc90ccdbb 100644 --- a/fs/super.c +++ b/fs/super.c @@ -71,7 +71,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink, if (!(sc->gfp_mask & __GFP_FS)) return SHRINK_STOP; - if (!grab_super_passive(sb)) + if (!trylock_super(sb)) return SHRINK_STOP; if (sb->s_op->nr_cached_objects) @@ -105,7 +105,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink, freed += sb->s_op->free_cached_objects(sb, sc); } - drop_super(sb); + up_read(&sb->s_umount); return freed; } @@ -118,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink, sb = container_of(shrink, struct super_block, s_shrink); /* - * Don't call grab_super_passive as it is a potential + * Don't call trylock_super as it is a potential * scalability bottleneck. The counts could get updated * between super_cache_count and super_cache_scan anyway. * Call to super_cache_count with shrinker_rwsem held @@ -348,35 +348,31 @@ static int grab_super(struct super_block *s) __releases(sb_lock) } /* - * grab_super_passive - acquire a passive reference + * trylock_super - try to grab ->s_umount shared * @sb: reference we are trying to grab * - * Tries to acquire a passive reference. This is used in places where we + * Try to prevent fs shutdown. This is used in places where we * cannot take an active reference but we need to ensure that the - * superblock does not go away while we are working on it. It returns - * false if a reference was not gained, and returns true with the s_umount - * lock held in read mode if a reference is gained. On successful return, - * the caller must drop the s_umount lock and the passive reference when - * done. + * filesystem is not shut down while we are working on it. It returns + * false if we cannot acquire s_umount or if we lose the race and + * filesystem already got into shutdown, and returns true with the s_umount + * lock held in read mode in case of success. On successful return, + * the caller must drop the s_umount lock when done. + * + * Note that unlike get_super() et.al. this one does *not* bump ->s_count. + * The reason why it's safe is that we are OK with doing trylock instead + * of down_read(). There's a couple of places that are OK with that, but + * it's very much not a general-purpose interface. 
*/ -bool grab_super_passive(struct super_block *sb) +bool trylock_super(struct super_block *sb) { - spin_lock(&sb_lock); - if (hlist_unhashed(&sb->s_instances)) { - spin_unlock(&sb_lock); - return false; - } - - sb->s_count++; - spin_unlock(&sb_lock); - if (down_read_trylock(&sb->s_umount)) { - if (sb->s_root && (sb->s_flags & MS_BORN)) + if (!hlist_unhashed(&sb->s_instances) && + sb->s_root && (sb->s_flags & MS_BORN)) return true; up_read(&sb->s_umount); } - put_super(sb); return false; } -- cgit v1.2.3 From 0db59e59299f0b67450c5db21f7f316c8fb04e84 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 21 Feb 2015 22:05:11 -0500 Subject: debugfs: leave freeing a symlink body until inode eviction As it is, we have debugfs_remove() racing with symlink traversals. Supply ->evict_inode() and do freeing there - inode will remain pinned until we are done with the symlink body. And rip the idiocy with checking if dentry is positive right after we'd verified debugfs_positive(), which is a stronger check... Cc: stable@vger.kernel.org Signed-off-by: Al Viro --- fs/debugfs/inode.c | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 90933645298c..96400ab42d13 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -169,10 +169,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) return 0; } +static void debugfs_evict_inode(struct inode *inode) +{ + truncate_inode_pages_final(&inode->i_data); + clear_inode(inode); + if (S_ISLNK(inode->i_mode)) + kfree(inode->i_private); +} + static const struct super_operations debugfs_super_operations = { .statfs = simple_statfs, .remount_fs = debugfs_remount, .show_options = debugfs_show_options, + .evict_inode = debugfs_evict_inode, }; static struct vfsmount *debugfs_automount(struct path *path) @@ -511,23 +520,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) int ret = 0; if (debugfs_positive(dentry)) { - if (dentry->d_inode) { - dget(dentry); - switch (dentry->d_inode->i_mode & S_IFMT) { - case S_IFDIR: - ret = simple_rmdir(parent->d_inode, dentry); - break; - case S_IFLNK: - kfree(dentry->d_inode->i_private); - /* fall through */ - default: - simple_unlink(parent->d_inode, dentry); - break; - } - if (!ret) - d_delete(dentry); - dput(dentry); - } + dget(dentry); + if (S_ISDIR(dentry->d_inode->i_mode)) + ret = simple_rmdir(parent->d_inode, dentry); + else + simple_unlink(parent->d_inode, dentry); + if (!ret) + d_delete(dentry); + dput(dentry); } return ret; } -- cgit v1.2.3 From 7e0e953bb0cf649f93277ac8fb67ecbb7f7b04a9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 21 Feb 2015 22:16:11 -0500 Subject: procfs: fix race between symlink removals and traversals use_pde()/unuse_pde() in ->follow_link()/->put_link() resp. 
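Both this fix and the debugfs one just above enforce the same lifetime rule: whatever owns the symlink body must stay pinned from ->follow_link() until ->put_link(), so a racing removal can only free the string after the last walker is done. A stand-alone, single-threaded sketch of that rule; the names are illustrative, not the procfs API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct owner {
    int users;      /* walkers between follow_link() and put_link() */
    int removed;    /* removal requested */
    char *body;     /* symlink target string */
};

static const char *follow_link(struct owner *o)
{
    if (o->removed)
        return NULL;    /* cf. use_pde() failing on a dying entry */
    o->users++;
    return o->body;
}

static void maybe_free(struct owner *o)
{
    if (o->removed && o->users == 0) {
        free(o->body);  /* deferred free, cf. debugfs ->evict_inode() */
        o->body = NULL;
    }
}

static void put_link(struct owner *o)
{
    o->users--;         /* cf. unuse_pde() */
    maybe_free(o);
}

static void remove_link(struct owner *o)
{
    o->removed = 1;
    maybe_free(o);
}

int main(void)
{
    struct owner o = { .body = strdup("/some/target") };
    const char *target = follow_link(&o);

    remove_link(&o);            /* concurrent removal must not free yet */
    printf("still readable: %s\n", target);
    put_link(&o);               /* last user gone, body freed here */
    return 0;
}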
Cc: stable@vger.kernel.org Signed-off-by: Al Viro --- fs/proc/generic.c | 12 ------------ fs/proc/inode.c | 21 +++++++++++++++++++++ fs/proc/internal.h | 1 + 3 files changed, 22 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 3309f59d421b..be65b2082135 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -223,17 +222,6 @@ void proc_free_inum(unsigned int inum) spin_unlock_irqrestore(&proc_inum_lock, flags); } -static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) -{ - nd_set_link(nd, __PDE_DATA(dentry->d_inode)); - return NULL; -} - -static const struct inode_operations proc_link_inode_operations = { - .readlink = generic_readlink, - .follow_link = proc_follow_link, -}; - /* * Don't create negative dentries here, return -ENOENT by hand * instead. diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 13a50a32652d..7697b6621cfd 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = { }; #endif +static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + struct proc_dir_entry *pde = PDE(dentry->d_inode); + if (unlikely(!use_pde(pde))) + return ERR_PTR(-EINVAL); + nd_set_link(nd, pde->data); + return pde; +} + +static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p) +{ + unuse_pde(p); +} + +const struct inode_operations proc_link_inode_operations = { + .readlink = generic_readlink, + .follow_link = proc_follow_link, + .put_link = proc_put_link, +}; + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { struct inode *inode = new_inode_pseudo(sb); diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 6fcdba573e0f..c835b94c0cd3 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -200,6 +200,7 @@ struct pde_opener { int closing; struct completion *c; }; +extern const struct inode_operations proc_link_inode_operations; extern const struct inode_operations proc_pid_link_inode_operations; -- cgit v1.2.3 From 0a280962dc6e117e0e4baa668453f753579265d9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 21 Feb 2015 22:19:57 -0500 Subject: autofs4 copy_dev_ioctl(): keep the value of ->size we'd used for allocation X-Coverup: just ask spender Cc: stable@vger.kernel.org Signed-off-by: Al Viro --- fs/autofs4/dev-ioctl.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index aaf96cb25452..ac7d921ed984 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param) */ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) { - struct autofs_dev_ioctl tmp; + struct autofs_dev_ioctl tmp, *res; if (copy_from_user(&tmp, in, sizeof(tmp))) return ERR_PTR(-EFAULT); @@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i if (tmp.size > (PATH_MAX + sizeof(tmp))) return ERR_PTR(-ENAMETOOLONG); - return memdup_user(in, tmp.size); + res = memdup_user(in, tmp.size); + if (!IS_ERR(res)) + res->size = tmp.size; + + return res; } static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) -- cgit v1.2.3
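The summary above is terse; the point of re-setting ->size is that the value used for the memdup_user() allocation must remain the value the rest of the code trusts, even if userspace rewrites the field between the two copies. A stand-alone sketch of that double-fetch hazard; the structure and sizes are made up, only the pattern matters.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hdr {
    unsigned int size;  /* total length, validated against the allocation */
    char data[64];
};

int main(void)
{
    struct hdr user = { .size = sizeof(struct hdr) };
    unsigned int validated = user.size;     /* first fetch, range-checked */
    struct hdr *copy;

    user.size = 4096;                       /* racing writer enlarges the field */

    copy = malloc(validated);               /* allocation sized by the first fetch */
    if (!copy)
        return 1;
    memcpy(copy, &user, validated);         /* second fetch copies the new size */

    printf("allocated %u bytes, copy->size claims %u\n", validated, copy->size);

    copy->size = validated;                 /* the fix: keep the size the allocation used */
    free(copy);
    return 0;
}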