author		Pavel Begunkov <asml.silence@gmail.com>	2021-04-01 16:43:58 +0200
committer	Jens Axboe <axboe@kernel.dk>	2021-04-12 03:30:34 +0200
commit		c4ea060e85eabe40f3572969daff4fc2f242b7b8 (patch)
tree		04fc546c9ae25cb6c69da0c8bd70caaf19313522 /fs/io_uring.c
parent		io_uring: lock annotate timeouts and poll (diff)
io_uring: simplify overflow handling
Overflowed CQEs don't lock requests anymore, so we don't care so much about cancelling them; kill cq_overflow_flushed and simplify the code.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5799867aeba9e713c32f49aef78e5e1aef9fbc43.1617287883.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
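For context, the path this patch simplifies is the CQE overflow fallback: when a completion cannot be posted because the CQ ring is full, it is stashed on an overflow list and flushed later, and after this change the only reason to skip that fallback is that the submitting task is already going idle (i.e. being cancelled). The following is a minimal, self-contained sketch of that pattern; the types and names (cq_ring, overflow_cqe, cq_post_or_overflow, ...) are simplified stand-ins for illustration, not the actual io_uring code.

#include <stdbool.h>
#include <stdlib.h>

struct cqe { unsigned long long user_data; long res; unsigned flags; };

struct overflow_cqe {                  /* stand-in for struct io_overflow_cqe */
	struct overflow_cqe *next;
	struct cqe cqe;
};

struct cq_ring {
	struct cqe *entries;           /* ring of mask + 1 slots */
	unsigned head, tail, mask;
	struct overflow_cqe *overflow; /* stand-in for ctx->cq_overflow_list */
	bool task_in_idle;             /* stand-in for req->task->io_uring->in_idle */
};

/* Post a CQE; if the ring is full, fall back to the overflow list. */
static bool cq_post_or_overflow(struct cq_ring *cq, struct cqe c)
{
	if (cq->tail - cq->head <= cq->mask) {
		cq->entries[cq->tail++ & cq->mask] = c;  /* fast path: ring has room */
		return true;
	}
	/*
	 * Post-patch there is no cq_overflow_flushed gate any more: the
	 * fallback is skipped only when the owning task is going idle.
	 */
	if (!cq->task_in_idle) {
		struct overflow_cqe *ocqe = malloc(sizeof(*ocqe));

		if (ocqe) {
			ocqe->cqe = c;
			ocqe->next = cq->overflow;
			cq->overflow = ocqe;
			return true;
		}
	}
	return false;                    /* completion is dropped */
}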
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 7c5c3d46c6b7..99f5252ff2dc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -335,7 +335,6 @@ struct io_ring_ctx {
 	struct {
 		unsigned int		flags;
 		unsigned int		compat: 1;
-		unsigned int		cq_overflow_flushed: 1;
 		unsigned int		drain_next: 1;
 		unsigned int		eventfd_async: 1;
 		unsigned int		restricted: 1;
@@ -1525,8 +1524,7 @@ static bool __io_cqring_fill_event(struct io_kiocb *req, long res,
 		WRITE_ONCE(cqe->flags, cflags);
 		return true;
 	}
-	if (!ctx->cq_overflow_flushed &&
-	    !atomic_read(&req->task->io_uring->in_idle)) {
+	if (!atomic_read(&req->task->io_uring->in_idle)) {
 		struct io_overflow_cqe *ocqe;

 		ocqe = kmalloc(sizeof(*ocqe), GFP_ATOMIC | __GFP_ACCOUNT);
@@ -8491,6 +8489,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	mutex_lock(&ctx->uring_lock);
 	io_sqe_files_unregister(ctx);
+	if (ctx->rings)
+		__io_cqring_overflow_flush(ctx, true);
 	mutex_unlock(&ctx->uring_lock);
 	io_eventfd_unregister(ctx);
 	io_destroy_buffers(ctx);
@@ -8692,8 +8692,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 	mutex_lock(&ctx->uring_lock);
 	percpu_ref_kill(&ctx->refs);
-	/* if force is set, the ring is going away. always drop after that */
-	ctx->cq_overflow_flushed = 1;
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);
 	xa_for_each(&ctx->personalities, index, creds)
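Both call sites above force-flush the overflow list while the ring is being torn down (__io_cqring_overflow_flush(ctx, true)). Reusing the stand-in types from the sketch after the commit message, a forced flush boils down to something like the following; again this is an illustration of the pattern under those assumptions, not the kernel implementation.

/*
 * Drain the overflow list: copy entries into the ring while there is room;
 * with force set (the ring is going away) drop whatever no longer fits.
 */
static void cq_flush_overflow(struct cq_ring *cq, bool force)
{
	while (cq->overflow) {
		struct overflow_cqe *ocqe = cq->overflow;
		bool has_room = cq->tail - cq->head <= cq->mask;

		if (!has_room && !force)
			break;                   /* leave the rest for a later flush */
		if (has_room)
			cq->entries[cq->tail++ & cq->mask] = ocqe->cqe;
		/* else: forced teardown, the entry is simply dropped */
		cq->overflow = ocqe->next;
		free(ocqe);
	}
}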