author | Pavel Begunkov <asml.silence@gmail.com> | 2023-01-20 17:38:05 +0100
committer | Jens Axboe <axboe@kernel.dk> | 2023-01-20 17:49:29 +0100
commit | 56d8e3180c065c9b78ed77afcd0cf99677a4e22f (patch)
tree | 4ef1be544452afbc796a78ea5178016e695c2cb5 /io_uring/msg_ring.c
parent | io_uring/msg_ring: fix missing lock on overflow for IOPOLL (diff)
io_uring/msg_ring: fix flagging remote execution
There are a couple of problems with queueing a tw in io_msg_ring_data()
for remote execution. First, once we queue it the target ring can
go away, so setting IORING_SQ_TASKRUN there is not safe. Second,
userspace might not expect IORING_SQ_TASKRUN.
Extract a helper and uniformly use TWA_SIGNAL without the
TWA_SIGNAL_NO_IPI trick for now, just as was done in the original patch.
Cc: stable@vger.kernel.org
Fixes: 6d043ee1164ca ("io_uring: do msg_ring in target task via tw")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/msg_ring.c')
-rw-r--r-- | io_uring/msg_ring.c | 40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index a333781565d3..bb868447dcdf 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -58,6 +58,25 @@ void io_msg_ring_cleanup(struct io_kiocb *req)
 	msg->src_file = NULL;
 }
 
+static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
+{
+	if (!target_ctx->task_complete)
+		return false;
+	return current != target_ctx->submitter_task;
+}
+
+static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+{
+	struct io_ring_ctx *ctx = req->file->private_data;
+	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+
+	init_task_work(&msg->tw, func);
+	if (task_work_add(ctx->submitter_task, &msg->tw, TWA_SIGNAL))
+		return -EOWNERDEAD;
+
+	return IOU_ISSUE_SKIP_COMPLETE;
+}
+
 static void io_msg_tw_complete(struct callback_head *head)
 {
 	struct io_msg *msg = container_of(head, struct io_msg, tw);
@@ -96,15 +115,8 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 	if (msg->src_fd || msg->dst_fd || msg->flags)
 		return -EINVAL;
 
-	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
-		init_task_work(&msg->tw, io_msg_tw_complete);
-		if (task_work_add(target_ctx->submitter_task, &msg->tw,
-				  TWA_SIGNAL_NO_IPI))
-			return -EOWNERDEAD;
-
-		atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
-		return IOU_ISSUE_SKIP_COMPLETE;
-	}
+	if (io_msg_need_remote(target_ctx))
+		return io_msg_exec_remote(req, io_msg_tw_complete);
 
 	ret = -EOVERFLOW;
 	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
@@ -202,14 +214,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
 
-	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
-		init_task_work(&msg->tw, io_msg_tw_fd_complete);
-		if (task_work_add(target_ctx->submitter_task, &msg->tw,
-				  TWA_SIGNAL))
-			return -EOWNERDEAD;
-
-		return IOU_ISSUE_SKIP_COMPLETE;
-	}
+	if (io_msg_need_remote(target_ctx))
+		return io_msg_exec_remote(req, io_msg_tw_fd_complete);
 
 	return io_msg_install_complete(req, issue_flags);
 }
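For context (not part of the commit itself), below is a minimal liburing sketch of how userspace typically drives IORING_OP_MSG_RING, the operation whose remote-execution path this patch fixes. The function name, user_data value and single-threaded wait are illustrative assumptions, not taken from the patch; in the scenario the fix addresses, the target ring belongs to another task using deferred task_work completion. The point of the sketch is the userspace-visible effect described in the commit message: the target task is woken via TWA_SIGNAL task_work, so a waiter on the target ring does not need to watch for IORING_SQ_TASKRUN.

/*
 * Illustrative sketch only: posting a message CQE from one ring to another
 * with IORING_OP_MSG_RING via liburing.  Values such as 0x1234 are made up;
 * both rings are handled in one function purely to keep the example short.
 */
#include <liburing.h>
#include <stdio.h>

static int post_msg(struct io_uring *src_ring, struct io_uring *target_ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(src_ring);
	struct io_uring_cqe *cqe;

	if (!sqe)
		return -1;

	/* Ask the kernel to post a CQE (res=0, user_data=0x1234) on target_ring. */
	io_uring_prep_msg_ring(sqe, target_ring->ring_fd, 0, 0x1234, 0);
	if (io_uring_submit(src_ring) != 1)
		return -1;

	/* The target ring's owner just waits for the CQE; no sq_flags polling. */
	if (io_uring_wait_cqe(target_ring, &cqe))
		return -1;
	printf("got message, user_data=0x%llx\n",
	       (unsigned long long) cqe->user_data);
	io_uring_cqe_seen(target_ring, cqe);
	return 0;
}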