diff options
author:    Jens Axboe <axboe@kernel.dk>  2020-08-28 00:46:24 +0200
committer: Jens Axboe <axboe@kernel.dk>  2020-08-28 00:48:34 +0200
commit:    fdee946d0925f971f167d2606984426763355e4f (patch)
tree:      fa089a3a3f0cfbdb57086596edfe5df66cda9cf4 /fs
parent:    io_uring: fix IOPOLL -EAGAIN retries (diff)
download:  linux-fdee946d0925f971f167d2606984426763355e4f.tar.xz
           linux-fdee946d0925f971f167d2606984426763355e4f.zip
io_uring: don't bounce block based -EAGAIN retry off task_work
These events happen inline from submission, so there's no need to
bounce them through the original task. Just set them up for retry
and issue retry directly instead of going over task_work.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c | 26 ++++++---------------
1 file changed, 6 insertions(+), 20 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 8c77ad4a65f0..852c2eaf1a9a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2295,22 +2295,6 @@ end_req:
 	io_req_complete(req, ret);
 	return false;
 }
-
-static void io_rw_resubmit(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-	struct io_ring_ctx *ctx = req->ctx;
-	int err;
-
-	err = io_sq_thread_acquire_mm(ctx, req);
-
-	if (io_resubmit_prep(req, err)) {
-		refcount_inc(&req->refs);
-		io_queue_async_work(req);
-	}
-
-	percpu_ref_put(&ctx->refs);
-}
 #endif
 
 static bool io_rw_reissue(struct io_kiocb *req, long res)
@@ -2321,12 +2305,14 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 	if ((res != -EAGAIN && res != -EOPNOTSUPP) ||
 	    io_wq_current_is_worker())
 		return false;
 
-	init_task_work(&req->task_work, io_rw_resubmit);
-	percpu_ref_get(&req->ctx->refs);
+	ret = io_sq_thread_acquire_mm(req->ctx, req);
 
-	ret = io_req_task_work_add(req, &req->task_work, true);
-	if (!ret)
+	if (io_resubmit_prep(req, ret)) {
+		refcount_inc(&req->refs);
+		io_queue_async_work(req);
 		return true;
+	}
+
 #endif
 	return false;
 }