author		Jens Axboe <axboe@kernel.dk>	2024-03-28 19:38:44 +0100
committer	Jens Axboe <axboe@kernel.dk>	2024-06-24 16:39:39 +0200
commit		c3ac76f9ca7a621428851149bc56bfca0aacaef4 (patch)
tree		1d10e888424ed968ba9a63703fa75c1c25e18467 /io_uring
parent		io_uring/msg_ring: tighten requirement for remote posting (diff)
io_uring: add remote task_work execution helper
All our task_work handling targets the state in the io_kiocb itself, which
is what it is being used for. However, MSG_RING rolls its own task_work
handling, ignoring how that is usually done.
In preparation for switching MSG_RING over to the normal task_work
handling, add io_req_task_work_add_remote(), which allows the caller to
pass in the target io_ring_ctx rather than always using req->ctx.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
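As a rough sketch of the intended usage (the actual MSG_RING conversion follows in a later patch; the function name and the fallback choice below are illustrative assumptions, not part of this commit), a remote poster would queue the request against the target ring's context instead of req->ctx:

	/*
	 * Hypothetical caller sketch: queue completion task_work against the
	 * *target* ring's deferred list, falling back to the normal per-task
	 * path when the target ring is not set up with DEFER_TASKRUN.
	 */
	static void msg_ring_queue_remote(struct io_kiocb *req,
					  struct io_ring_ctx *target_ctx)
	{
		req->io_task_work.func = io_req_task_complete;

		if (target_ctx->flags & IORING_SETUP_DEFER_TASKRUN)
			io_req_task_work_add_remote(req, target_ctx,
						    IOU_F_TWQ_LAZY_WAKE);
		else
			io_req_task_work_add(req);
	}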
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	| 24
-rw-r--r--	io_uring/io_uring.h	|  2
2 files changed, 18 insertions, 8 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 438c44ca3abd..85b2ce54328c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1098,9 +1098,10 @@ void tctx_task_work(struct callback_head *cb)
 	WARN_ON_ONCE(ret);
 }
 
-static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
+static inline void io_req_local_work_add(struct io_kiocb *req,
+					 struct io_ring_ctx *ctx,
+					 unsigned flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
 	unsigned nr_wait, nr_tw, nr_tw_prev;
 	struct llist_node *head;
 
@@ -1114,6 +1115,8 @@ static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
 	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
 		flags &= ~IOU_F_TWQ_LAZY_WAKE;
 
+	guard(rcu)();
+
 	head = READ_ONCE(ctx->work_llist.first);
 	do {
 		nr_tw_prev = 0;
@@ -1195,13 +1198,18 @@ static void io_req_normal_work_add(struct io_kiocb *req)
 
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
 {
-	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
-		rcu_read_lock();
-		io_req_local_work_add(req, flags);
-		rcu_read_unlock();
-	} else {
+	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+		io_req_local_work_add(req, req->ctx, flags);
+	else
 		io_req_normal_work_add(req);
-	}
+}
+
+void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
+				 unsigned flags)
+{
+	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
+		return;
+	io_req_local_work_add(req, ctx, flags);
 }
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index cd43924eed04..7a8641214509 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -73,6 +73,8 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
 			unsigned issue_flags);
 
 void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
+void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
+				 unsigned flags);
 bool io_alloc_async_data(struct io_kiocb *req);
 void io_req_task_queue(struct io_kiocb *req);
 void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
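For reference, the guard(rcu)() added in io_req_local_work_add() is the scope-based RCU guard from the kernel's cleanup.h infrastructure: it takes rcu_read_lock() where it appears and drops the lock automatically when the enclosing scope exits, which is why the explicit rcu_read_lock()/rcu_read_unlock() pair can be removed from __io_req_task_work_add(). A minimal sketch of the pattern (illustrative functions, not from the tree):

	#include <linux/rcupdate.h>	/* rcu_read_lock()/rcu_read_unlock(), guard(rcu) */

	/* Illustrative only: both forms hold the RCU read lock equivalently. */
	static void read_side_explicit(void)
	{
		rcu_read_lock();
		/* ... RCU read-side accesses ... */
		rcu_read_unlock();
	}

	static void read_side_guarded(void)
	{
		guard(rcu)();	/* released automatically on every exit from this scope */
		/* ... RCU read-side accesses ... */
	}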