author	Pavel Begunkov <asml.silence@gmail.com>	2021-02-18 23:32:52 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-02-21 03:01:35 +0100
commit	a3df769899c0bdc224c94d1d8cc9cbb3f3a72553
tree	dd5dbb26d53838626dea7bc62bd919a3a9b17520 /fs
parent	io_uring: don't take uring_lock during iowq cancel
io_uring: fail io-wq submission from a task_work
In case of failure io_wq_submit_work() needs to post a CQE and so potentially take uring_lock. The safest way to deal with that is to do it from a task_work, where we can safely take the lock. Also, as io_iopoll_check() holds the lock tight and releases it reluctantly, this will play nicer in the future with notifying an iopolling task about new pending failed requests.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
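To illustrate the shape of the fix outside the kernel, here is a minimal user-space sketch of the pattern, assuming pthread mutexes as stand-ins for uring_lock and for the task_work machinery. All names (req_task_queue_fail, wq_submit_work, and so on) are hypothetical analogues, not the io_uring implementation:

	/*
	 * Sketch only: a worker that hits an error does not post the
	 * completion itself (that would need uring_lock); it stashes the
	 * error in req->result and queues a callback that the owning task
	 * later runs from a clean context, where taking the lock is safe.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct request {
		int result;			/* stashed error code */
		void (*task_work)(struct request *);
		struct request *next;
	};

	/* stands in for ctx->uring_lock */
	static pthread_mutex_t uring_lock = PTHREAD_MUTEX_INITIALIZER;

	/* a trivial task_work list: pushed by the worker, drained by the task */
	static struct request *tw_head;
	static pthread_mutex_t tw_lock = PTHREAD_MUTEX_INITIALIZER;

	static void req_task_cancel(struct request *req)
	{
		/* clean context: safe to take the lock and post the CQE */
		pthread_mutex_lock(&uring_lock);
		printf("completed request, result %d\n", req->result);
		pthread_mutex_unlock(&uring_lock);
	}

	/* analogue of io_req_task_queue_fail(): record the error, defer */
	static void req_task_queue_fail(struct request *req, int ret)
	{
		req->result = ret;
		req->task_work = req_task_cancel;

		pthread_mutex_lock(&tw_lock);
		req->next = tw_head;
		tw_head = req;
		pthread_mutex_unlock(&tw_lock);
	}

	/* analogue of io_wq_submit_work(): never complete a failure inline */
	static void wq_submit_work(struct request *req, int ret)
	{
		if (ret)
			req_task_queue_fail(req, ret);
	}

	int main(void)
	{
		struct request req = { 0 };

		wq_submit_work(&req, -125);	/* -ECANCELED on Linux */

		/* the owning task drains its task_work and completes the request */
		while (tw_head) {
			struct request *r = tw_head;

			tw_head = r->next;
			r->task_work(r);
		}
		return 0;
	}

The key property, mirrored from the patch, is that the error value travels with the request (req->result), so the completion callback does not need to hardcode -ECANCELED.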
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	48
1 file changed, 18 insertions(+), 30 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8dab07f42b34..582306b1dfd1 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2338,7 +2338,7 @@ static void io_req_task_cancel(struct callback_head *cb)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	mutex_lock(&ctx->uring_lock);
-	__io_req_task_cancel(req, -ECANCELED);
+	__io_req_task_cancel(req, req->result);
 	mutex_unlock(&ctx->uring_lock);
 	percpu_ref_put(&ctx->refs);
 }
@@ -2371,11 +2371,22 @@ static void io_req_task_queue(struct io_kiocb *req)
 	req->task_work.func = io_req_task_submit;
 	ret = io_req_task_work_add(req);
 	if (unlikely(ret)) {
+		req->result = -ECANCELED;
 		percpu_ref_get(&req->ctx->refs);
 		io_req_task_work_add_fallback(req, io_req_task_cancel);
 	}
 }
 
+static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
+{
+	percpu_ref_get(&req->ctx->refs);
+	req->result = ret;
+	req->task_work.func = io_req_task_cancel;
+
+	if (unlikely(io_req_task_work_add(req)))
+		io_req_task_work_add_fallback(req, io_req_task_cancel);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
 	struct io_kiocb *nxt = io_req_find_next(req);
@@ -6428,13 +6439,8 @@ static void io_wq_submit_work(struct io_wq_work *work)
 	if (timeout)
 		io_queue_linked_timeout(timeout);
 
-	if (work->flags & IO_WQ_WORK_CANCEL) {
-		/* io-wq is going to take down one */
-		refcount_inc(&req->refs);
-		percpu_ref_get(&req->ctx->refs);
-		io_req_task_work_add_fallback(req, io_req_task_cancel);
-		return;
-	}
+	if (work->flags & IO_WQ_WORK_CANCEL)
+		ret = -ECANCELED;
 
 	if (!ret) {
 		do {
@@ -6450,29 +6456,11 @@ static void io_wq_submit_work(struct io_wq_work *work)
 		} while (1);
 	}
 
+	/* avoid locking problems by failing it from a clean context */
 	if (ret) {
-		struct io_ring_ctx *lock_ctx = NULL;
-
-		if (req->ctx->flags & IORING_SETUP_IOPOLL)
-			lock_ctx = req->ctx;
-
-		/*
-		 * io_iopoll_complete() does not hold completion_lock to
-		 * complete polled io, so here for polled io, we can not call
-		 * io_req_complete() directly, otherwise there maybe concurrent
-		 * access to cqring, defer_list, etc, which is not safe. Given
-		 * that io_iopoll_complete() is always called under uring_lock,
-		 * so here for polled io, we also get uring_lock to complete
-		 * it.
-		 */
-		if (lock_ctx)
-			mutex_lock(&lock_ctx->uring_lock);
-
-		req_set_fail_links(req);
-		io_req_complete(req, ret);
-
-		if (lock_ctx)
-			mutex_unlock(&lock_ctx->uring_lock);
+		/* io-wq is going to take one down */
+		refcount_inc(&req->refs);
+		io_req_task_queue_fail(req, ret);
 	}
 }
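Taken together, the hunks leave io_wq_submit_work() with a much simpler failure path. A condensed view of the result, assembled from the added lines above (elision marked with ...):

	if (work->flags & IO_WQ_WORK_CANCEL)
		ret = -ECANCELED;
	...
	/* avoid locking problems by failing it from a clean context */
	if (ret) {
		/* io-wq is going to take one down */
		refcount_inc(&req->refs);
		io_req_task_queue_fail(req, ret);
	}

Note that io_req_task_queue_fail() stashes the error in req->result, which is why the first hunk changes io_req_task_cancel() to complete the request with req->result instead of a hardcoded -ECANCELED.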