author		Marcelo Diop-Gonzalez <marcelo827@gmail.com>	2021-01-15 17:54:40 +0100
committer	Jens Axboe <axboe@kernel.dk>			2021-01-15 18:02:28 +0100
commit		f010505b78a4fa8d5b6480752566e7313fb5ca6e (patch)
tree		e441b11c2ebe22618a96bb6db65ff88d66fb5753
parent		io_uring: do sqo disable on install_fd error (diff)
io_uring: flush timeouts that should already have expired
Right now io_flush_timeouts() checks if the current number of events is equal to ->timeout.target_seq, but this will miss some timeouts if there have been more than 1 event added since the last time they were flushed (possible in io_submit_flush_completions(), for example). Fix it by recording the last sequence at which timeouts were flushed so that the number of events seen can be compared to the number of events needed without overflow.

Signed-off-by: Marcelo Diop-Gonzalez <marcelo827@gmail.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
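[Editor's note: the wrap-safe comparison the fix relies on can be sketched in isolation. Below is a minimal standalone illustration, not kernel code; timeout_should_fire() and its parameter names are hypothetical. It checks whether a target sequence falls inside the window of events seen since the last flush, using the same unsigned subtractions as the patch:]

#include <stdbool.h>
#include <stdint.h>

/*
 * Return true if `target` lies in the window (last_flush, cur_seq]
 * of completion-event sequence numbers. All arithmetic is modulo
 * 2^32, so the test stays correct across wraparound as long as
 * fewer than 2^31-1 events have occurred since last_flush -- the
 * same assumption the patch documents.
 */
static bool timeout_should_fire(uint32_t target, uint32_t cur_seq,
				uint32_t last_flush)
{
	uint32_t events_needed = target - last_flush;
	uint32_t events_got = cur_seq - last_flush;

	return events_got >= events_needed;
}

[With cur_seq = last_flush + 2 and target = last_flush + 1, events_got (2) >= events_needed (1), so the timeout fires even though cur_seq never equaled target -- exactly the case the old equality check missed.]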
Diffstat
-rw-r--r--	fs/io_uring.c	34 ++++++++++++++++++++++++++++++----
1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 372be9caf340..06cc79d39586 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -354,6 +354,7 @@ struct io_ring_ctx {
 		unsigned		cq_entries;
 		unsigned		cq_mask;
 		atomic_t		cq_timeouts;
+		unsigned		cq_last_tm_flush;
 		unsigned long		cq_check_overflow;
 		struct wait_queue_head	cq_wait;
 		struct fasync_struct	*cq_fasync;
@@ -1639,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	while (!list_empty(&ctx->timeout_list)) {
+	u32 seq;
+
+	if (list_empty(&ctx->timeout_list))
+		return;
+
+	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+	do {
+		u32 events_needed, events_got;
 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
 						struct io_kiocb, timeout.list);
 
 		if (io_is_timeout_noseq(req))
 			break;
-		if (req->timeout.target_seq != ctx->cached_cq_tail
-					- atomic_read(&ctx->cq_timeouts))
+
+		/*
+		 * Since seq can easily wrap around over time, subtract
+		 * the last seq at which timeouts were flushed before comparing.
+		 * Assuming not more than 2^31-1 events have happened since,
+		 * these subtractions won't have wrapped, so we can check if
+		 * target is in [last_seq, current_seq] by comparing the two.
+		 */
+		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+		events_got = seq - ctx->cq_last_tm_flush;
+		if (events_got < events_needed)
 			break;
 
 		list_del_init(&req->timeout.list);
 		io_kill_timeout(req);
-	}
+	} while (!list_empty(&ctx->timeout_list));
+
+	ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -5837,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
 	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 	req->timeout.target_seq = tail + off;
 
+	/* Update the last seq here in case io_flush_timeouts() hasn't.
+	 * This is safe because ->completion_lock is held, and submissions
+	 * and completions are never mixed in the same ->completion_lock section.
+	 */
+	ctx->cq_last_tm_flush = tail;
+
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
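
[Editor's note: a concrete trace makes the failure mode visible. Below is a hypothetical userspace test, illustrative only and not part of the patch, that places the flush window right at the 2^32 boundary; the old equality test never matches, while the distance comparison still catches the expired timeout:]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t last_flush = 4294967294u;	/* 2^32 - 2, just before wraparound */
	uint32_t target = last_flush + 1;	/* 4294967295 */
	uint32_t cur_seq = last_flush + 2;	/* wraps around to 0 */

	/* Old check: flush only on exact equality, so this timeout is missed. */
	assert(target != cur_seq);

	/* New check: distances from the last flush point are wrap-safe. */
	uint32_t events_needed = target - last_flush;	/* 1 */
	uint32_t events_got = cur_seq - last_flush;	/* 2 */
	assert(events_got >= events_needed);		/* the timeout fires */

	printf("old check misses the timeout; new check flushes it\n");
	return 0;
}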