author		Jens Axboe <axboe@kernel.dk>	2022-12-21 15:05:09 +0100
committer	Jens Axboe <axboe@kernel.dk>	2022-12-21 16:43:53 +0100
commit		52ea806ad983490b3132a9e526e11a10dc2fd10c (patch)
tree		c1a8029e933faa2d1091d18786b466a63e061b28 /io_uring
parent		MAINTAINERS: io_uring: Add include/trace/events/io_uring.h (diff)
io_uring: finish waiting before flushing overflow entries
If we have overflow entries being generated after we've done the initial
flush in io_cqring_wait(), then we could be flushing them in the main wait
loop as well. If that's done after having added ourselves to the cq_wait
waitqueue, then the task state can be != TASK_RUNNING when we enter the
overflow flush.

Check for the need to overflow flush, and finish our wait cycle first if
we have to do so.

Reported-and-tested-by: syzbot+cf6ea1d6bb30a4ce10b2@syzkaller.appspotmail.com
Link: https://lore.kernel.org/io-uring/000000000000cb143a05f04eee15@google.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
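The fix follows the standard waitqueue discipline: anything that can block
must run in TASK_RUNNING, so the task drops off the waitqueue before doing
the blocking work and re-arms the wait afterwards. A minimal sketch of that
loop shape, where flush_needed(), do_flush() and condition_met() are
hypothetical stand-ins for the io_uring-specific checks:

#include <linux/sched.h>
#include <linux/wait.h>

static void wait_for_event(struct wait_queue_head *wq)
{
	DEFINE_WAIT(wait);	/* initializes ->entry, so finish_wait() is safe */

	for (;;) {
		if (flush_needed()) {		/* hypothetical predicate */
			finish_wait(wq, &wait);	/* back to TASK_RUNNING */
			do_flush();		/* may sleep / take mutexes */
		}
		prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
		if (condition_met())		/* hypothetical exit check */
			break;
		schedule();
	}
	finish_wait(wq, &wait);
}

Checking condition_met() only after prepare_to_wait_exclusive() preserves
the usual lost-wakeup guarantee; the patch below applies the same ordering,
with the overflow flush hoisted in front of the re-arm.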
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index ff2bbac1a10f..ac5d39eeb3d1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -677,16 +677,20 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 	io_cq_unlock_post(ctx);
 }
 
+static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
+{
+	/* iopoll syncs against uring_lock, not completion_lock */
+	if (ctx->flags & IORING_SETUP_IOPOLL)
+		mutex_lock(&ctx->uring_lock);
+	__io_cqring_overflow_flush(ctx);
+	if (ctx->flags & IORING_SETUP_IOPOLL)
+		mutex_unlock(&ctx->uring_lock);
+}
+
 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
-	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-		/* iopoll syncs against uring_lock, not completion_lock */
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_lock(&ctx->uring_lock);
-		__io_cqring_overflow_flush(ctx);
-		if (ctx->flags & IORING_SETUP_IOPOLL)
-			mutex_unlock(&ctx->uring_lock);
-	}
+	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+		io_cqring_do_overflow_flush(ctx);
 }
 
 void __io_put_task(struct task_struct *task, int nr)
@@ -2549,7 +2553,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
-		io_cqring_overflow_flush(ctx);
+		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
+			finish_wait(&ctx->cq_wait, &iowq.wq);
+			io_cqring_do_overflow_flush(ctx);
+		}
 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
 					  TASK_INTERRUPTIBLE);
 		ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
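For reference, the state bug removed by the second hunk reduces to blocking
between prepare_to_wait_exclusive() and schedule(). A sketch of that
anti-pattern (illustrative only, not code from the patch):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

static void broken_wait(struct wait_queue_head *wq, struct mutex *lock)
{
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
	/*
	 * Wrong: mutex_lock() may sleep, which requires TASK_RUNNING.
	 * With CONFIG_DEBUG_ATOMIC_SLEEP this trips the "do not call
	 * blocking ops when !TASK_RUNNING" warning, and any sleep here
	 * resets the task state, so schedule() below may not block.
	 */
	mutex_lock(lock);
	mutex_unlock(lock);
	schedule();
	finish_wait(wq, &wait);
}

This is the window the wait loop could hit before the patch: when it looped
back and flushed overflow entries (taking uring_lock in the IOPOLL case)
while still queued on cq_wait.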