path: root/fs/io_uring.c
author	Pavel Begunkov <asml.silence@gmail.com>	2021-06-15 00:37:25 +0200
committer	Jens Axboe <axboe@kernel.dk>	2021-06-15 23:38:40 +0200
commit	f18ee4cf0a277a0e3d043755046d5817d4ddd618 (patch)
tree	ffb179c2fcc64c8f1be555387094b79fb6b1c6fb /fs/io_uring.c
parent	io_uring: don't cache number of dropped SQEs (diff)
io_uring: optimise completion timeout flushing
io_commit_cqring() might be very hot and we definitely don't want to touch ->timeout_list there, because 1) it's shared with the submission side, so touching it might lead to cache bouncing, and 2) it may need to load an extra cache line, especially for IRQ completions.

We're interested in it on the completion side only when there are offset-mode timeouts, which are not so popular. Replace the list_empty(->timeout_list) hot-path check with a new one-way flag, which is set when we prepare the first offset-mode timeout.

Note: the flag sits in the same cache line as the briefly used fields right after ->rings.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e4892ec68b71a69f92ffbea4a1499be3ec0d463b.1623709150.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
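As an aside, here is a minimal user-space sketch in C of the pattern the commit describes: the submission side sets a one-way flag the first time an offset-mode timeout is prepared, and the hot completion path checks only that flag instead of touching the shared timeout list. The demo_* names and the singly linked list are made up for this illustration; the actual io_uring change is in the diff below.

#include <stdbool.h>
#include <stddef.h>

struct demo_timeout {
	struct demo_timeout	*next;
	unsigned int		off;	/* non-zero => offset-mode timeout */
};

struct demo_ctx {
	bool			off_timeout_used;	/* one-way flag, never cleared */
	struct demo_timeout	*timeout_list;		/* shared with the submission side */
};

/* Submission side: remember, once and for all, that an offset-mode
 * timeout exists, so the completion side has something cheap to test. */
static void demo_timeout_prep(struct demo_ctx *ctx, struct demo_timeout *t,
			      unsigned int off)
{
	t->off = off;
	if (off && !ctx->off_timeout_used)
		ctx->off_timeout_used = true;
	t->next = ctx->timeout_list;
	ctx->timeout_list = t;
}

/* Completion side: in the common case (no offset-mode timeout ever
 * prepared) this returns after reading a single private flag and never
 * dereferences the shared list. */
static void demo_flush_timeouts(struct demo_ctx *ctx)
{
	struct demo_timeout *t;

	if (!ctx->off_timeout_used)
		return;

	for (t = ctx->timeout_list; t != NULL; t = t->next) {
		/* expire/complete offset-mode timeouts here */
	}
}

int main(void)
{
	struct demo_ctx ctx = { .off_timeout_used = false, .timeout_list = NULL };
	struct demo_timeout t;

	demo_flush_timeouts(&ctx);	/* hot path: flag check only */
	demo_timeout_prep(&ctx, &t, 8);	/* first offset-mode timeout sets the flag */
	demo_flush_timeouts(&ctx);	/* now actually walks the list */
	return 0;
}

Because the flag is one-way and never cleared, the completion side goes back to walking the list once any offset-mode timeout has ever been queued, which the commit message argues is acceptable since such timeouts are not popular.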
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ecac362913cc..b27734bc5ca3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -351,6 +351,7 @@ struct io_ring_ctx {
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
+		unsigned int		off_timeout_used: 1;
 	} ____cacheline_aligned_in_smp;
 
 	/* submission data */
@@ -1318,12 +1319,12 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
 	u32 seq;
 
-	if (list_empty(&ctx->timeout_list))
+	if (likely(!ctx->off_timeout_used))
 		return;
 
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
-	do {
+	while (!list_empty(&ctx->timeout_list)) {
 		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 						struct io_kiocb, timeout.list);
@@ -1345,8 +1346,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
 
 		list_del_init(&req->timeout.list);
 		io_kill_timeout(req, 0);
-	} while (!list_empty(&ctx->timeout_list));
-
+	}
 	ctx->cq_last_tm_flush = seq;
 }
@@ -5651,6 +5651,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		return -EINVAL;
 
 	req->timeout.off = off;
+	if (unlikely(off && !req->ctx->off_timeout_used))
+		req->ctx->off_timeout_used = true;
 
 	if (!req->async_data && io_alloc_async_data(req))
 		return -ENOMEM;