summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2021-01-16 19:52:11 +0100
committerJens Axboe <axboe@kernel.dk>2021-01-16 20:13:59 +0100
commitc93cc9e16d88e0f5ea95d2d65d58a8a4dab258bc (patch)
tree475e92df72996c3077abb8c0ab41e243dc2b85bc
parentio_uring: ensure finish_wait() is always called in __io_uring_task_cancel() (diff)
downloadlinux-c93cc9e16d88e0f5ea95d2d65d58a8a4dab258bc.tar.xz
linux-c93cc9e16d88e0f5ea95d2d65d58a8a4dab258bc.zip
io_uring: iopoll requests should also wake task ->in_idle state
If we're freeing/finishing iopoll requests, ensure we check whether the task is idling with respect to cancelation. Otherwise we could end up waiting forever in __io_uring_task_cancel() if the task has active iopoll requests that need cancelation. Cc: stable@vger.kernel.org # 5.9+ Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--fs/io_uring.c4
1 file changed, 4 insertions, 0 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 985a9e3f976d..5cda878b69cf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2270,6 +2270,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
struct io_uring_task *tctx = rb->task->io_uring;
percpu_counter_sub(&tctx->inflight, rb->task_refs);
+ if (atomic_read(&tctx->in_idle))
+ wake_up(&tctx->wait);
put_task_struct_many(rb->task, rb->task_refs);
rb->task = NULL;
}
@@ -2288,6 +2290,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
struct io_uring_task *tctx = rb->task->io_uring;
percpu_counter_sub(&tctx->inflight, rb->task_refs);
+ if (atomic_read(&tctx->in_idle))
+ wake_up(&tctx->wait);
put_task_struct_many(rb->task, rb->task_refs);
}
rb->task = req->task;