author     Linus Torvalds <torvalds@linux-foundation.org>  2022-04-01 00:49:36 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-04-01 00:49:36 +0200
commit     f008b1d6e1e06bb61e9402aa8a1cfa681510e375 (patch)
tree       5c21a4ed2333592eec1ab4d25a6189e4bfe62866 /fs/ceph
parent     Merge tag 'random-5.18-rc1-for-linus' of git://git.kernel.org/pub/scm/linux/k... (diff)
parent     afs: Maintain netfs_i_context::remote_i_size (diff)
Merge tag 'netfs-prep-20220318' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs
Pull netfs updates from David Howells:
"Netfs prep for write helpers.
Having had a go at implementing write helpers and content encryption
support in netfslib, it seems that the netfs_read_{,sub}request
structs and the equivalent write request structs were almost the same
and so should be merged, thereby requiring only one set of
alloc/get/put functions and a common set of tracepoints.
Merging the structs also has the advantage that if a bounce buffer is
added to the request struct, a read operation can be performed to fill
the bounce buffer, the contents of the buffer can be modified and then
a write operation can be performed on it to send the data wherever it
needs to go using the same request structure all the way through. The
I/O handlers would then transparently perform any required crypto.
This should make it easier to perform RMW cycles if needed.
The potentially common functions and structs, however, by their names
all proclaim themselves to be associated with the read side of things.
The bulk of these changes alter this in the following ways:
- Rename struct netfs_read_{,sub}request to netfs_io_{,sub}request.
- Rename some enums, members and flags to make them more appropriate.
- Adjust some comments to match.
- Drop "read"/"rreq" from the names of common functions. For
instance, netfs_get_read_request() becomes netfs_get_request().
- The ->init_rreq() and ->issue_op() methods become ->init_request()
and ->issue_read(). I've kept the latter as a read-specific
function and in another branch added an ->issue_write() method
(a sketch of the renamed hooks follows this message).
The driver source is then reorganised into a number of files:
    fs/netfs/buffered_read.c   Create read reqs to the pagecache
    fs/netfs/io.c              Dispatchers for read and write reqs
    fs/netfs/main.c            Some general miscellaneous bits
    fs/netfs/objects.c         Alloc, get and put functions
    fs/netfs/stats.c           Optional procfs statistics
and future development can be fitted into this scheme, e.g.:
    fs/netfs/buffered_write.c    Modify the pagecache
    fs/netfs/buffered_flush.c    Writeback from the pagecache
    fs/netfs/direct_read.c       DIO read support
    fs/netfs/direct_write.c      DIO write support
    fs/netfs/unbuffered_write.c  Write modifications directly back
Beyond the above changes, there are also some changes that affect how
things work:
- Make fscache_end_operation() generally available.
- In the netfs tracing header, generate enums from the symbol ->
string mapping tables rather than manually coding them.
- Add a struct that filesystems using netfslib embed in their inode
wrapper structs to hold extra state that netfslib is interested in,
such as the fscache cookie. This allows netfslib functions to be
set in filesystem operation tables and jumped to directly, without
needing a per-filesystem wrapper (see the sketches after this
message).
- Add a member to the struct added above to track the remote inode
length as that may differ if local modifications are buffered. We
may need to supply an appropriate EOF pointer when storing data (in
AFS for example).
- Pass extra information to netfs_alloc_request() so that the
->init_request() hook can access it and retain information to
indicate the origin of the operation.
- Make the ->init_request() hook return an error, thereby allowing a
filesystem that isn't allowed to cache an inode (ceph or cifs, for
example) to skip readahead.
- Switch to using refcount_t for subrequests and add tracepoints to
log refcount changes for the request and subrequest structs.
- Add a function to consolidate dispatching a read request. Similar
code is used in three places and another couple are likely to be
added in the future"
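
To make the renamed hooks concrete, here is a minimal sketch of the ops table a filesystem now supplies. "myfs" and its functions are hypothetical, invented for illustration; the trivial ->issue_read() simply reports a hole (modelled on the ceph inline-data path), and the real ceph conversion in the diff further down has the same shape with more hooks filled in.

    #include <linux/fs.h>
    #include <linux/netfs.h>

    /*
     * Runs once per I/O request; it may now refuse the operation, e.g. a
     * filesystem that must not cache this inode can return -EACCES so that
     * readahead is skipped.
     */
    static int myfs_init_request(struct netfs_io_request *rreq, struct file *file)
    {
            return 0;
    }

    /*
     * Fetch subreq->len bytes from subreq->start.  This toy version treats
     * the region as a hole: it asks netfslib to clear the unread tail and
     * reports completion.  A real filesystem would issue an RPC here and
     * call netfs_subreq_terminated() from its completion handler.
     */
    static void myfs_issue_read(struct netfs_io_subrequest *subreq)
    {
            __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
            netfs_subreq_terminated(subreq, 0, false);
    }

    const struct netfs_request_ops myfs_netfs_ops = {
            .init_request   = myfs_init_request,    /* was ->init_rreq() */
            .issue_read     = myfs_issue_read,      /* was ->issue_op() */
    };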
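
The netfs inode context and the direct use of netfslib functions in operation tables, described in the list above, fit together roughly as below. Again, "myfs", myfs_inode_cache_alloc() and myfs_cookie() are hypothetical stand-ins; the ceph changes to super.h, inode.c and addr.c in the diff are the authoritative version.

    #include <linux/fs.h>
    #include <linux/netfs.h>
    #include <linux/fscache.h>

    extern const struct netfs_request_ops myfs_netfs_ops;   /* from the previous sketch */

    struct myfs_inode_info {
            struct {
                    /* These must be contiguous */
                    struct inode            vfs_inode;
                    struct netfs_i_context  netfs_ctx;   /* netfslib state, incl. remote_i_size */
            };
            /* ... filesystem-private fields ... */
    };

    static struct inode *myfs_alloc_inode(struct super_block *sb)
    {
            struct myfs_inode_info *mi = myfs_inode_cache_alloc(sb);   /* hypothetical allocator */

            if (!mi)
                    return NULL;

            /* Hand netfslib its ops table; per-inode netfs state lives in netfs_ctx. */
            netfs_i_context_init(&mi->vfs_inode, &myfs_netfs_ops);
            return &mi->vfs_inode;
    }

    /* When caching is enabled, the fscache cookie is reached through the same context. */
    static struct fscache_cookie *myfs_cookie(struct myfs_inode_info *mi)
    {
            return netfs_i_cookie(&mi->vfs_inode);
    }

    /*
     * With ->init_request() doing the per-filesystem work, netfslib entry
     * points can be plugged straight into the address_space operations.
     */
    static const struct address_space_operations myfs_aops = {
            .readpage       = netfs_readpage,
            .readahead      = netfs_readahead,
    };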
Link: https://lore.kernel.org/all/2639515.1648483225@warthog.procyon.org.uk/
* tag 'netfs-prep-20220318' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
afs: Maintain netfs_i_context::remote_i_size
netfs: Keep track of the actual remote file size
netfs: Split some core bits out into their own file
netfs: Split fs/netfs/read_helper.c
netfs: Rename read_helper.c to io.c
netfs: Prepare to split read_helper.c
netfs: Add a function to consolidate beginning a read
netfs: Add a netfs inode context
ceph: Make ceph_init_request() check caps on readahead
netfs: Change ->init_request() to return an error code
netfs: Refactor arguments for netfs_alloc_read_request
netfs: Adjust the netfs_failure tracepoint to indicate non-subreq lines
netfs: Trace refcounting on the netfs_io_subrequest struct
netfs: Trace refcounting on the netfs_io_request struct
netfs: Adjust the netfs_rreq tracepoint slightly
netfs: Split netfs_io_* object handling out
netfs: Finish off rename of netfs_read_request to netfs_io_request
netfs: Rename netfs_read_*request to netfs_io_*request
netfs: Generate enums from trace symbol mapping lists
fscache: export fscache_end_operation()
Diffstat (limited to 'fs/ceph')
-rw-r--r--  fs/ceph/addr.c   | 116
-rw-r--r--  fs/ceph/cache.c  |  28
-rw-r--r--  fs/ceph/cache.h  |  15
-rw-r--r--  fs/ceph/inode.c  |   6
-rw-r--r--  fs/ceph/super.h  |  17
5 files changed, 81 insertions, 101 deletions
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index c7a0ab0d298b..aa25bffd4823 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -182,7 +182,7 @@ static int ceph_releasepage(struct page *page, gfp_t gfp)
 	return 1;
 }
 
-static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
+static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
 {
 	struct inode *inode = rreq->inode;
 	struct ceph_inode_info *ci = ceph_inode(inode);
@@ -199,7 +199,7 @@ static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
 	rreq->len = roundup(rreq->len, lo->stripe_unit);
 }
 
-static bool ceph_netfs_clamp_length(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
 {
 	struct inode *inode = subreq->rreq->inode;
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -218,7 +218,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 {
 	struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
-	struct netfs_read_subrequest *subreq = req->r_priv;
+	struct netfs_io_subrequest *subreq = req->r_priv;
 	int num_pages;
 	int err = req->r_result;
 
@@ -244,9 +244,9 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 	iput(req->r_inode);
 }
 
-static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
+static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 {
-	struct netfs_read_request *rreq = subreq->rreq;
+	struct netfs_io_request *rreq = subreq->rreq;
 	struct inode *inode = rreq->inode;
 	struct ceph_mds_reply_info_parsed *rinfo;
 	struct ceph_mds_reply_info_in *iinfo;
@@ -258,7 +258,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_read_subrequest *subreq)
 	size_t len;
 
 	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
-	__clear_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+	__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
 
 	if (subreq->start >= inode->i_size)
 		goto out;
@@ -297,9 +297,9 @@ out:
 	return true;
 }
 
-static void ceph_netfs_issue_op(struct netfs_read_subrequest *subreq)
+static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 {
-	struct netfs_read_request *rreq = subreq->rreq;
+	struct netfs_io_request *rreq = subreq->rreq;
 	struct inode *inode = rreq->inode;
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -353,6 +353,45 @@ out:
 	dout("%s: result %d\n", __func__, err);
 }
 
+static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+	struct inode *inode = rreq->inode;
+	int got = 0, want = CEPH_CAP_FILE_CACHE;
+	int ret = 0;
+
+	if (rreq->origin != NETFS_READAHEAD)
+		return 0;
+
+	if (file) {
+		struct ceph_rw_context *rw_ctx;
+		struct ceph_file_info *fi = file->private_data;
+
+		rw_ctx = ceph_find_rw_context(fi);
+		if (rw_ctx)
+			return 0;
+	}
+
+	/*
+	 * readahead callers do not necessarily hold Fcb caps
+	 * (e.g. fadvise, madvise).
+	 */
+	ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
+	if (ret < 0) {
+		dout("start_read %p, error getting cap\n", inode);
+		return ret;
+	}
+
+	if (!(got & want)) {
+		dout("start_read %p, no cache cap\n", inode);
+		return -EACCES;
+	}
+	if (ret == 0)
+		return -EACCES;
+
+	rreq->netfs_priv = (void *)(uintptr_t)got;
+	return 0;
+}
+
 static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
 {
 	struct inode *inode = mapping->host;
@@ -363,64 +402,16 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
 	ceph_put_cap_refs(ci, got);
 }
 
-static const struct netfs_read_request_ops ceph_netfs_read_ops = {
-	.is_cache_enabled	= ceph_is_cache_enabled,
+const struct netfs_request_ops ceph_netfs_ops = {
+	.init_request		= ceph_init_request,
 	.begin_cache_operation	= ceph_begin_cache_operation,
-	.issue_op		= ceph_netfs_issue_op,
+	.issue_read		= ceph_netfs_issue_read,
 	.expand_readahead	= ceph_netfs_expand_readahead,
 	.clamp_length		= ceph_netfs_clamp_length,
 	.check_write_begin	= ceph_netfs_check_write_begin,
 	.cleanup		= ceph_readahead_cleanup,
 };
 
-/* read a single page, without unlocking it. */
-static int ceph_readpage(struct file *file, struct page *subpage)
-{
-	struct folio *folio = page_folio(subpage);
-	struct inode *inode = file_inode(file);
-	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_vino vino = ceph_vino(inode);
-	size_t len = folio_size(folio);
-	u64 off = folio_file_pos(folio);
-
-	dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n inline %d",
-	     vino.ino, vino.snap, file, off, len, folio, folio_index(folio),
-	     ci->i_inline_version != CEPH_INLINE_NONE);
-
-	return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
-}
-
-static void ceph_readahead(struct readahead_control *ractl)
-{
-	struct inode *inode = file_inode(ractl->file);
-	struct ceph_file_info *fi = ractl->file->private_data;
-	struct ceph_rw_context *rw_ctx;
-	int got = 0;
-	int ret = 0;
-
-	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
-		return;
-
-	rw_ctx = ceph_find_rw_context(fi);
-	if (!rw_ctx) {
-		/*
-		 * readahead callers do not necessarily hold Fcb caps
-		 * (e.g. fadvise, madvise).
-		 */
-		int want = CEPH_CAP_FILE_CACHE;
-
-		ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
-		if (ret < 0)
-			dout("start_read %p, error getting cap\n", inode);
-		else if (!(got & want))
-			dout("start_read %p, no cache cap\n", inode);
-
-		if (ret <= 0)
-			return;
-	}
-
-	netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
-}
-
 #ifdef CONFIG_CEPH_FSCACHE
 static void ceph_set_page_fscache(struct page *page)
 {
@@ -1327,8 +1318,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
 	struct folio *folio = NULL;
 	int r;
 
-	r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
-			      &ceph_netfs_read_ops, NULL);
+	r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL);
 	if (r == 0)
 		folio_wait_fscache(folio);
 	if (r < 0) {
@@ -1382,8 +1372,8 @@ out:
 }
 
 const struct address_space_operations ceph_aops = {
-	.readpage = ceph_readpage,
-	.readahead = ceph_readahead,
+	.readpage = netfs_readpage,
+	.readahead = netfs_readahead,
 	.writepage = ceph_writepage,
 	.writepages = ceph_writepages_start,
 	.write_begin = ceph_write_begin,
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 7d22850623ef..ddea99922073 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -29,26 +29,25 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
 	if (!(inode->i_state & I_NEW))
 		return;
 
-	WARN_ON_ONCE(ci->fscache);
+	WARN_ON_ONCE(ci->netfs_ctx.cache);
 
-	ci->fscache = fscache_acquire_cookie(fsc->fscache, 0,
-					     &ci->i_vino, sizeof(ci->i_vino),
-					     &ci->i_version, sizeof(ci->i_version),
-					     i_size_read(inode));
+	ci->netfs_ctx.cache =
+		fscache_acquire_cookie(fsc->fscache, 0,
+				       &ci->i_vino, sizeof(ci->i_vino),
+				       &ci->i_version, sizeof(ci->i_version),
+				       i_size_read(inode));
 }
 
-void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
 {
-	struct fscache_cookie *cookie = ci->fscache;
-
-	fscache_relinquish_cookie(cookie, false);
+	fscache_relinquish_cookie(ceph_fscache_cookie(ci), false);
 }
 
 void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	fscache_use_cookie(ci->fscache, will_modify);
+	fscache_use_cookie(ceph_fscache_cookie(ci), will_modify);
 }
 
 void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
@@ -58,9 +57,10 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
 	if (update) {
 		loff_t i_size = i_size_read(inode);
 
-		fscache_unuse_cookie(ci->fscache, &ci->i_version, &i_size);
+		fscache_unuse_cookie(ceph_fscache_cookie(ci),
+				     &ci->i_version, &i_size);
 	} else {
-		fscache_unuse_cookie(ci->fscache, NULL, NULL);
+		fscache_unuse_cookie(ceph_fscache_cookie(ci), NULL, NULL);
 	}
 }
 
@@ -69,14 +69,14 @@ void ceph_fscache_update(struct inode *inode)
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	loff_t i_size = i_size_read(inode);
 
-	fscache_update_cookie(ci->fscache, &ci->i_version, &i_size);
+	fscache_update_cookie(ceph_fscache_cookie(ci), &ci->i_version, &i_size);
 }
 
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
 {
 	struct ceph_inode_info *ci = ceph_inode(inode);
 
-	fscache_invalidate(ceph_inode(inode)->fscache,
+	fscache_invalidate(ceph_fscache_cookie(ci),
 			   &ci->i_version, i_size_read(inode),
 			   dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
 }
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index b90f3016994d..7255b790a4c1 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -26,14 +26,9 @@ void ceph_fscache_unuse_cookie(struct inode *inode, bool update);
 void ceph_fscache_update(struct inode *inode);
 void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-	ci->fscache = NULL;
-}
-
 static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
 {
-	return ci->fscache;
+	return netfs_i_cookie(&ci->vfs_inode);
 }
 
 static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
@@ -62,7 +57,7 @@ static inline int ceph_fscache_dirty_folio(struct address_space *mapping,
 	return fscache_dirty_folio(mapping, folio, ceph_fscache_cookie(ci));
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
 	struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
 
@@ -91,10 +86,6 @@ static inline void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
 {
 }
 
-static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
-{
-}
-
 static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
 }
@@ -144,7 +135,7 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
 	return false;
 }
 
-static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
+static inline int ceph_begin_cache_operation(struct netfs_io_request *rreq)
 {
 	return -ENOBUFS;
 }
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index d80911dc91c2..63113e2a4890 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -459,6 +459,9 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
 	dout("alloc_inode %p\n", &ci->vfs_inode);
 
+	/* Set parameters for the netfs library */
+	netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops);
+
 	spin_lock_init(&ci->i_ceph_lock);
 
 	ci->i_version = 0;
@@ -544,9 +547,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 	INIT_WORK(&ci->i_work, ceph_inode_work);
 	ci->i_work_mask = 0;
 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
-
-	ceph_fscache_inode_init(ci);
-
 	return &ci->vfs_inode;
 }
 
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index a1ecc410a495..20ceab74e871 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -17,13 +17,11 @@
 #include <linux/posix_acl.h>
 #include <linux/refcount.h>
 #include <linux/security.h>
+#include <linux/netfs.h>
+#include <linux/fscache.h>
 
 #include <linux/ceph/libceph.h>
 
-#ifdef CONFIG_CEPH_FSCACHE
-#include <linux/fscache.h>
-#endif
-
 /* large granularity for statfs utilization stats to facilitate
  * large volume sizes on 32-bit machines. */
 #define CEPH_BLOCK_SHIFT   22  /* 4 MB */
@@ -318,6 +316,11 @@ struct ceph_inode_xattrs_info {
  * Ceph inode.
  */
 struct ceph_inode_info {
+	struct {
+		/* These must be contiguous */
+		struct inode vfs_inode;
+		struct netfs_i_context netfs_ctx; /* Netfslib context */
+	};
 	struct ceph_vino i_vino;   /* ceph ino + snap */
 
 	spinlock_t i_ceph_lock;
@@ -428,11 +431,6 @@ struct ceph_inode_info {
 
 	struct work_struct i_work;
 	unsigned long  i_work_mask;
-
-#ifdef CONFIG_CEPH_FSCACHE
-	struct fscache_cookie *fscache;
-#endif
-	struct inode vfs_inode; /* at end */
 };
 
 static inline struct ceph_inode_info *
@@ -1216,6 +1214,7 @@ extern void __ceph_touch_fmode(struct ceph_inode_info *ci,
 
 /* addr.c */
 extern const struct address_space_operations ceph_aops;
+extern const struct netfs_request_ops ceph_netfs_ops;
 extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
 extern int ceph_uninline_data(struct file *file);
 extern int ceph_pool_perm_check(struct inode *inode, int need);