author	Pavel Begunkov <asml.silence@gmail.com>	2022-12-02 18:47:24 +0100
committer	Jens Axboe <axboe@kernel.dk>	2022-12-14 16:53:35 +0100
commit	e5f30f6fb29a0b8fa7ca784e44571a610b949b04 (patch)
tree	e5214e50f664bc3f0f8324aebe5c20866ff01726 /io_uring
parent	io_uring: revise completion_lock locking (diff)
io_uring: ease timeout flush locking requirements
We don't need completion_lock for timeout flushing, don't take it.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/1e3dc657975ac445b80e7bdc40050db783a5935a.1670002973.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
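For context, an abbreviated sketch of why dropping the lock is safe. The body below is not part of this diff and is recalled from the upstream io_uring/timeout.c of this era, so treat it as an assumption: io_flush_timeouts() walks the timeout list under ctx->timeout_lock, which it takes itself, so the caller never needed ctx->completion_lock for this path.

	/*
	 * Abbreviated sketch, not the full upstream function: the timeout
	 * list is only touched under ctx->timeout_lock, taken right here,
	 * independently of whether the caller holds completion_lock.
	 */
	__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
	{
		struct io_timeout *timeout, *tmp;

		spin_lock_irq(&ctx->timeout_lock);
		list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
			/* expire entries whose CQ sequence has passed (elided) */
		}
		spin_unlock_irq(&ctx->timeout_lock);
	}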
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	9
-rw-r--r--	io_uring/timeout.c	2
2 files changed, 4 insertions, 7 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 698c54f951ea..fc64072c53eb 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -572,12 +572,11 @@ static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
 
 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 {
-	if (ctx->off_timeout_used || ctx->drain_active) {
+	if (ctx->off_timeout_used)
+		io_flush_timeouts(ctx);
+	if (ctx->drain_active) {
 		spin_lock(&ctx->completion_lock);
-		if (ctx->off_timeout_used)
-			io_flush_timeouts(ctx);
-		if (ctx->drain_active)
-			io_queue_deferred(ctx);
+		io_queue_deferred(ctx);
 		spin_unlock(&ctx->completion_lock);
 	}
 	if (ctx->has_evfd)
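Applied, the hunk leaves __io_commit_cqring_flush() reading roughly as below. The reconstruction follows directly from the context lines above; the final io_eventfd_flush_signal() call and closing brace lie past the hunk and are assumed from the surrounding code.

	void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
	{
		/* Timeouts are now flushed without completion_lock... */
		if (ctx->off_timeout_used)
			io_flush_timeouts(ctx);
		/* ...while drain handling still queues deferred requests under it. */
		if (ctx->drain_active) {
			spin_lock(&ctx->completion_lock);
			io_queue_deferred(ctx);
			spin_unlock(&ctx->completion_lock);
		}
		if (ctx->has_evfd)
			io_eventfd_flush_signal(ctx);	/* assumed tail, not shown in the hunk */
	}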
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index eae005b2d1d2..826a51bca3e4 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -50,7 +50,6 @@ static inline void io_put_req(struct io_kiocb *req)
 }
 
 static bool io_kill_timeout(struct io_kiocb *req, int status)
-	__must_hold(&req->ctx->completion_lock)
 	__must_hold(&req->ctx->timeout_lock)
 {
 	struct io_timeout_data *io = req->async_data;
@@ -70,7 +69,6 @@ static bool io_kill_timeout(struct io_kiocb *req, int status)
 }
 
 __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
-	__must_hold(&ctx->completion_lock)
 {
 	u32 seq;
 	struct io_timeout *timeout, *tmp;