author | Trond Myklebust <trond.myklebust@hammerspace.com> | 2020-03-30 18:40:47 +0200
committer | Trond Myklebust <trond.myklebust@hammerspace.com> | 2020-04-01 19:37:56 +0200
commit | e00ed89d7bd59c4ae49d6aeeee567187b1357a4b (patch)
tree | 01d1b60917b8970e8f42be04dc6ae0788ca55b67 /fs/nfs/pagelist.c
parent | NFS: Reverse the submission order of requests in __nfs_pageio_add_request() (diff)
download | linux-e00ed89d7bd59c4ae49d6aeeee567187b1357a4b.tar.xz linux-e00ed89d7bd59c4ae49d6aeeee567187b1357a4b.zip
NFS: Refactor nfs_lock_and_join_requests()
Refactor nfs_lock_and_join_requests() in order to separate out the
subrequest merging into its own function nfs_lock_and_join_group()
that can be used by O_DIRECT.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Diffstat (limited to 'fs/nfs/pagelist.c')
-rw-r--r-- | fs/nfs/pagelist.c | 26
1 file changed, 24 insertions, 2 deletions
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b9805d1dac75..f61f96603df7 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -131,6 +131,25 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 
 /*
+ * nfs_page_lock_head_request - page lock the head of the page group
+ * @req: any member of the page group
+ */
+struct nfs_page *
+nfs_page_group_lock_head(struct nfs_page *req)
+{
+	struct nfs_page *head = req->wb_head;
+
+	while (!nfs_lock_request(head)) {
+		int ret = nfs_wait_on_request(head);
+		if (ret < 0)
+			return ERR_PTR(ret);
+	}
+	if (head != req)
+		kref_get(&head->wb_kref);
+	return head;
+}
+
+/*
  * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
  * @head: head request of page group, must be holding head lock
  * @req: request that couldn't lock and needs to wait on the req bit lock
@@ -186,14 +205,16 @@ nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
  * @head: head request of page group
  *
  * This is a helper function for nfs_lock_and_join_requests which
- * must be called with the head request and page group both locked.
- * On error, it returns with the page group unlocked.
+ * must be called with the head request locked.
  */
 int nfs_page_group_lock_subrequests(struct nfs_page *head)
 {
 	struct nfs_page *subreq;
 	int ret;
 
+	ret = nfs_page_group_lock(head);
+	if (ret < 0)
+		return ret;
 	/* lock each request in the page group */
 	for (subreq = head->wb_this_page; subreq != head;
 	     subreq = subreq->wb_this_page) {
@@ -201,6 +222,7 @@ int nfs_page_group_lock_subrequests(struct nfs_page *head)
 		if (ret < 0)
 			return ret;
 	}
+	nfs_page_group_unlock(head);
 	return 0;
 }
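
For context, the sketch below shows one way the two helpers touched by this diff, nfs_page_group_lock_head() and nfs_page_group_lock_subrequests(), are meant to compose in a caller such as nfs_lock_and_join_requests(). It is illustrative only and not part of the commit: the wrapper name example_lock_and_join() is hypothetical, and the error path is simplified (the real caller's reference-count handling differs).

/*
 * Illustrative sketch only -- not from this commit. It shows how the
 * helpers in the diff above fit together once
 * nfs_page_group_lock_subrequests() takes the group lock itself.
 */
static struct nfs_page *
example_lock_and_join(struct nfs_page *req)
{
	struct nfs_page *head;
	int ret;

	/* Lock the head request of the page group; may sleep. */
	head = nfs_page_group_lock_head(req);
	if (IS_ERR(head))
		return head;

	/*
	 * Lock every subrequest in the group. After this change the
	 * helper takes and drops the page group lock internally, so
	 * the caller only needs to hold the head request lock.
	 */
	ret = nfs_page_group_lock_subrequests(head);
	if (ret < 0) {
		/* Simplified cleanup; the real caller also manages refcounts. */
		nfs_unlock_and_release_request(head);
		return ERR_PTR(ret);
	}

	/* All requests in the group are now locked; head is returned locked. */
	return head;
}

The point of the refactor is visible here: the caller no longer has to take the page group lock itself or unlock it on error, which is what makes the subrequest-locking helper reusable from the O_DIRECT path mentioned in the commit message.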