author      Pavel Begunkov <asml.silence@gmail.com>    2021-08-18 11:50:52 +0200
committer   Jens Axboe <axboe@kernel.dk>               2021-08-23 21:10:43 +0200
commit      ec3c3d0f3a271b5c7422449262970e7eb98f2126 (patch)
tree        884db1e03b47cd6f42bb694e4fd4d76e123760b7 /fs
parent      io_uring: improve same wq polling (diff)
io_uring: fix io_timeout_remove locking
io_timeout_cancel() posts CQEs and so needs ->completion_lock to be held;
grab it in io_timeout_remove().
Fixes: 48ecb6369f1f2 ("io_uring: run timeouts from task_work")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d6f03d653a4d7bf693ef6f39b6a426b6d97fd96f.1629280204.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--   fs/io_uring.c   14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 71d54841ecbe..08546332f1b9 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5572,6 +5572,7 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
 }
 
 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
+	__must_hold(&ctx->completion_lock)
 	__must_hold(&ctx->timeout_lock)
 {
 	struct io_kiocb *req = io_timeout_extract(ctx, user_data);
@@ -5646,13 +5647,18 @@ static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_ring_ctx *ctx = req->ctx;
 	int ret;
 
-	spin_lock_irq(&ctx->timeout_lock);
-	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE))
+	if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) {
+		spin_lock(&ctx->completion_lock);
+		spin_lock_irq(&ctx->timeout_lock);
 		ret = io_timeout_cancel(ctx, tr->addr);
-	else
+		spin_unlock_irq(&ctx->timeout_lock);
+		spin_unlock(&ctx->completion_lock);
+	} else {
+		spin_lock_irq(&ctx->timeout_lock);
 		ret = io_timeout_update(ctx, tr->addr, &tr->ts,
 					io_translate_timeout_mode(tr->flags));
-	spin_unlock_irq(&ctx->timeout_lock);
+		spin_unlock_irq(&ctx->timeout_lock);
+	}
 
 	if (ret < 0)
 		req_set_fail(req);
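
To illustrate the resulting lock nesting, the sketch below is a minimal standalone C program, not io_uring code: struct ring_ctx, cancel_timeout() and post_cqe() are made-up placeholders, and userspace pthread mutexes stand in for the kernel spinlocks. It mirrors the fixed cancel branch of io_timeout_remove(): ->completion_lock is taken first, ->timeout_lock is nested inside it, the completion is posted while both are held, and the locks are dropped in reverse order.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct ring_ctx {
	pthread_mutex_t completion_lock;	/* stands in for ctx->completion_lock */
	pthread_mutex_t timeout_lock;		/* stands in for ctx->timeout_lock */
};

/* Posting a completion requires completion_lock to be held by the caller. */
static void post_cqe(struct ring_ctx *ctx, unsigned long long user_data, int res)
{
	(void)ctx;
	printf("CQE: user_data=%llu res=%d\n", user_data, res);
}

/* Mirrors io_timeout_cancel(): the caller must hold both locks, because a
 * CQE is posted for the cancelled timeout. */
static int cancel_timeout(struct ring_ctx *ctx, unsigned long long user_data)
{
	/* ...look up and remove the timeout under timeout_lock... */
	post_cqe(ctx, user_data, -ECANCELED);	/* needs completion_lock */
	return 0;
}

/* Mirrors the fixed cancel branch of io_timeout_remove(): completion_lock
 * first, timeout_lock nested inside, released in reverse order. */
static int remove_timeout(struct ring_ctx *ctx, unsigned long long user_data)
{
	int ret;

	pthread_mutex_lock(&ctx->completion_lock);
	pthread_mutex_lock(&ctx->timeout_lock);
	ret = cancel_timeout(ctx, user_data);
	pthread_mutex_unlock(&ctx->timeout_lock);
	pthread_mutex_unlock(&ctx->completion_lock);
	return ret;
}

int main(void)
{
	struct ring_ctx ctx = {
		.completion_lock = PTHREAD_MUTEX_INITIALIZER,
		.timeout_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	return remove_timeout(&ctx, 42) < 0 ? 1 : 0;
}

Only the cancel branch posts a CQE, which is why the patch wraps just that branch in ->completion_lock and leaves the update branch under ->timeout_lock alone.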