summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    David Howells <dhowells@redhat.com> 2024-07-09 10:47:17 +0200
committer Christian Brauner <brauner@kernel.org> 2024-09-05 11:00:42 +0200
commit    24c90a79f6068d309adda05d871855414bb75283 (patch)
tree      e5328ece90959ef717585392d0f61053aebadd0d
parent    netfs: Remove NETFS_COPY_TO_CACHE (diff)
download  linux-24c90a79f6068d309adda05d871855414bb75283.tar.xz
          linux-24c90a79f6068d309adda05d871855414bb75283.zip
netfs: Set the request work function upon allocation
Set the work function in the netfs_io_request work_struct when we allocate
the request rather than doing this later. This reduces the number of places
we need to set it in future code.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-11-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
-rw-r--r--  fs/netfs/internal.h     1
-rw-r--r--  fs/netfs/io.c           4
-rw-r--r--  fs/netfs/objects.c      9
-rw-r--r--  fs/netfs/write_issue.c  1
4 files changed, 10 insertions, 5 deletions
diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
index 9e6e0e59d7e4..f2920b4ee726 100644
--- a/fs/netfs/internal.h
+++ b/fs/netfs/internal.h
@@ -29,6 +29,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
/*
* io.c
*/
+void netfs_rreq_work(struct work_struct *work);
int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
/*
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index ad9580c2dd6c..08f0b985d436 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -426,7 +426,7 @@ again:
netfs_rreq_completed(rreq, was_async);
}
-static void netfs_rreq_work(struct work_struct *work)
+void netfs_rreq_work(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
@@ -739,8 +739,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
// TODO: Use bounce buffer if requested
rreq->io_iter = rreq->iter;
- INIT_WORK(&rreq->work, netfs_rreq_work);
-
/* Chop the read into slices according to what the cache and the netfs
* want and submit each one.
*/
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index 0294df70c3ff..d6e9785ce7a3 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -48,9 +48,16 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
INIT_LIST_HEAD(&rreq->subrequests);
- INIT_WORK(&rreq->work, NULL);
refcount_set(&rreq->ref, 1);
+ if (origin == NETFS_READAHEAD ||
+ origin == NETFS_READPAGE ||
+ origin == NETFS_READ_FOR_WRITE ||
+ origin == NETFS_DIO_READ)
+ INIT_WORK(&rreq->work, netfs_rreq_work);
+ else
+ INIT_WORK(&rreq->work, netfs_write_collection_worker);
+
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
if (file && file->f_flags & O_NONBLOCK)
__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 34e541afd79b..41db709ca1d3 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -109,7 +109,6 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
wreq->contiguity = wreq->start;
wreq->cleaned_to = wreq->start;
- INIT_WORK(&wreq->work, netfs_write_collection_worker);
wreq->io_streams[0].stream_nr = 0;
wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER;