author     Jens Axboe <axboe@kernel.dk>  2023-01-19 17:04:40 +0100
committer  Jens Axboe <axboe@kernel.dk>  2023-01-19 18:43:59 +0100
commit     e12d7a46f65ae4b7d58a5e0c1cbfa825cf8d830d (patch)
tree       f00d8e48b6566b62f73360273b69960f6f159705 /io_uring/msg_ring.c
parent     io_uring/msg_ring: move double lock/unlock helpers higher up (diff)
io_uring/msg_ring: fix missing lock on overflow for IOPOLL
If the target ring is configured with IOPOLL, then we always need to hold
the target ring uring_lock before posting CQEs. We could just grab it
unconditionally, but since we don't expect many target rings to be of this
type, make grabbing the uring_lock conditional on the ring type.

Link: https://lore.kernel.org/io-uring/Y8krlYa52%2F0YGqkg@ip-172-31-85-199.ec2.internal/
Reported-by: Xingyuan Mo <hdthky0@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
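For context, the affected path is reachable from userspace by sending an
IORING_MSG_DATA message into a ring created with IORING_SETUP_IOPOLL. Below
is a minimal illustrative sketch of that setup, not part of the patch; it
assumes liburing >= 2.2 (for io_uring_prep_msg_ring()) and a kernel with
IORING_OP_MSG_RING support (5.18+):

/*
 * Illustrative sketch, not from the commit: post a CQE via MSG_RING into
 * a target ring set up with IORING_SETUP_IOPOLL, the case this fix locks.
 */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring src, dst;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	/* Source is a plain ring; the target uses IOPOLL. */
	if (io_uring_queue_init(8, &src, 0) < 0)
		return 1;
	if (io_uring_queue_init(8, &dst, IORING_SETUP_IOPOLL) < 0)
		return 1;

	/* Send user_data 0x1234 with len (target res) 0 to the target CQ. */
	sqe = io_uring_get_sqe(&src);
	io_uring_prep_msg_ring(sqe, dst.ring_fd, 0, 0x1234, 0);
	if (io_uring_submit(&src) != 1)
		return 1;

	/* The message surfaces as a CQE on the *target* ring. */
	if (io_uring_wait_cqe(&dst, &cqe) == 0) {
		printf("target cqe: user_data=0x%llx res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&dst, cqe);
	}

	io_uring_queue_exit(&src);
	io_uring_queue_exit(&dst);
	return 0;
}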
Diffstat (limited to 'io_uring/msg_ring.c')
-rw-r--r--  io_uring/msg_ring.c  39
1 file changed, 30 insertions(+), 9 deletions(-)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 321f5eafef99..a333781565d3 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -65,20 +65,33 @@ static void io_msg_tw_complete(struct callback_head *head)
 	struct io_ring_ctx *target_ctx = req->file->private_data;
 	int ret = 0;
 
-	if (current->flags & PF_EXITING)
+	if (current->flags & PF_EXITING) {
 		ret = -EOWNERDEAD;
-	else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-		ret = -EOVERFLOW;
+	} else {
+		/*
+		 * If the target ring is using IOPOLL mode, then we need to be
+		 * holding the uring_lock for posting completions. Other ring
+		 * types rely on the regular completion locking, which is
+		 * handled while posting.
+		 */
+		if (target_ctx->flags & IORING_SETUP_IOPOLL)
+			mutex_lock(&target_ctx->uring_lock);
+		if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+			ret = -EOVERFLOW;
+		if (target_ctx->flags & IORING_SETUP_IOPOLL)
+			mutex_unlock(&target_ctx->uring_lock);
+	}
 
 	if (ret < 0)
 		req_set_fail(req);
 	io_req_queue_tw_complete(req, ret);
 }
 
-static int io_msg_ring_data(struct io_kiocb *req)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
 	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+	int ret;
 
 	if (msg->src_fd || msg->dst_fd || msg->flags)
 		return -EINVAL;
@@ -93,10 +106,18 @@ static int io_msg_ring_data(struct io_kiocb *req)
 		return IOU_ISSUE_SKIP_COMPLETE;
 	}
 
-	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-		return 0;
-
-	return -EOVERFLOW;
+	ret = -EOVERFLOW;
+	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
+		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
+			return -EAGAIN;
+		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+			ret = 0;
+		io_double_unlock_ctx(target_ctx);
+	} else {
+		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+			ret = 0;
+	}
+	return ret;
 }
 
 static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
@@ -223,7 +244,7 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 
 	switch (msg->cmd) {
 	case IORING_MSG_DATA:
-		ret = io_msg_ring_data(req);
+		ret = io_msg_ring_data(req, issue_flags);
 		break;
 	case IORING_MSG_SEND_FD:
 		ret = io_msg_send_fd(req, issue_flags);
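
The io_double_lock_ctx()/io_double_unlock_ctx() helpers used in the new
issue-path branch come from the parent commit ("io_uring/msg_ring: move
double lock/unlock helpers higher up"). Unlike io_msg_tw_complete(), which
runs from task_work without the source ring's lock held and can take
mutex_lock() unconditionally, the issue path may already hold the source
uring_lock, so only a trylock on the target is safe. A rough sketch of the
helpers' shape at this point in the tree (approximate; see
io_uring/msg_ring.c for the authoritative version):

/* Approximate sketch of the helpers referenced above, not part of this diff. */
static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already
	 * have the source ctx lock, punt to io-wq (-EAGAIN).
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}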