path: root/io_uring/timeout.c
author		Pavel Begunkov <asml.silence@gmail.com>	2022-06-20 02:25:56 +0200
committer	Jens Axboe <axboe@kernel.dk>	2022-07-25 02:39:14 +0200
commit		253993210bd8aa3b39a392807c03c8ef1cd7dc3d (patch)
tree		09276f23754ea44fd9e7e46e9921c86ea8fb2482 /io_uring/timeout.c
parent		io_uring: hide eventfd assumptions in eventfd paths (diff)
download	linux-253993210bd8aa3b39a392807c03c8ef1cd7dc3d.tar.xz
		linux-253993210bd8aa3b39a392807c03c8ef1cd7dc3d.zip
io_uring: introduce locking helpers for CQE posting
	spin_lock(&ctx->completion_lock);
	/* post CQEs */
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_ev_posted(ctx);

We have many places repeating this sequence, and the three-function unlock section is not ideal from a maintenance perspective; it also makes it harder to add new locking/sync tricks.

Introduce two helpers: io_cq_lock(), which is simple and only grabs ->completion_lock, and io_cq_unlock_post(), which encapsulates the three-call section.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/fe0c682bf7f7b55d9be55b0d034be9c1949277dc.1655684496.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
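The helpers themselves are added outside the file shown in this diff; a minimal sketch of the pair, assuming the existing io_commit_cqring() and io_cqring_ev_posted() helpers and following the sequence described above:

	/* grab the CQ posting lock; paired with io_cq_unlock_post() */
	static inline void io_cq_lock(struct io_ring_ctx *ctx)
		__acquires(ctx->completion_lock)
	{
		spin_lock(&ctx->completion_lock);
	}

	/* commit posted CQEs, drop the lock, then notify waiters */
	static inline void io_cq_unlock_post(struct io_ring_ctx *ctx)
		__releases(ctx->completion_lock)
	{
		io_commit_cqring(ctx);
		spin_unlock(&ctx->completion_lock);
		io_cqring_ev_posted(ctx);
	}

Callers such as io_kill_timeouts() below then bracket CQE posting with io_cq_lock(ctx) / io_cq_unlock_post(ctx) instead of the open-coded sequence.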
Diffstat (limited to 'io_uring/timeout.c')
-rw-r--r--	io_uring/timeout.c	6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
index 424b2fc858b8..7e2c341f9762 100644
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -617,7 +617,7 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 	struct io_timeout *timeout, *tmp;
 	int canceled = 0;
 
-	spin_lock(&ctx->completion_lock);
+	io_cq_lock(ctx);
 	spin_lock_irq(&ctx->timeout_lock);
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
@@ -627,8 +627,6 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			canceled++;
 	}
 	spin_unlock_irq(&ctx->timeout_lock);
-	io_commit_cqring(ctx);
-	spin_unlock(&ctx->completion_lock);
-	io_cqring_ev_posted(ctx);
+	io_cq_unlock_post(ctx);
 	return canceled != 0;
 }