author | Pavel Begunkov <asml.silence@gmail.com> | 2020-05-26 19:34:05 +0200
committer | Jens Axboe <axboe@kernel.dk> | 2020-05-26 21:31:09 +0200
commit | 0451894522108d6c72934aff6ef89023743a9ed4 (patch)
tree | 39ff33d7b3507609ddb89cda8f6faba412f8d8a8 /fs/io_uring.c
parent | io_uring: don't re-read sqe->off in timeout_prep() (diff)
download | linux-0451894522108d6c72934aff6ef89023743a9ed4.tar.xz, linux-0451894522108d6c72934aff6ef89023743a9ed4.zip
io_uring: separate DRAIN flushing into a cold path
The io_commit_cqring() assembly doesn't look good with the extra code that
handles drained requests. IOSQE_IO_DRAIN is slow and its use in a hot path
is discouraged, so minimise its impact by moving the drain handling into a
helper and leaving only a fast check in io_commit_cqring().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r-- | fs/io_uring.c | 30
1 file changed, 15 insertions, 15 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f888b20d0a68..0d98a529a93e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -982,19 +982,6 @@ static inline bool req_need_defer(struct io_kiocb *req)
 	return false;
 }
 
-static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
-{
-	struct io_kiocb *req;
-
-	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
-	if (req && !req_need_defer(req)) {
-		list_del_init(&req->list);
-		return req;
-	}
-
-	return NULL;
-}
-
 static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 {
 	struct io_kiocb *req;
@@ -1127,6 +1114,19 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx)
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
+static void __io_queue_deferred(struct io_ring_ctx *ctx)
+{
+	do {
+		struct io_kiocb *req = list_first_entry(&ctx->defer_list,
+							struct io_kiocb, list);
+
+		if (req_need_defer(req))
+			break;
+		list_del_init(&req->list);
+		io_queue_async_work(req);
+	} while (!list_empty(&ctx->defer_list));
+}
+
 static void io_commit_cqring(struct io_ring_ctx *ctx)
 {
 	struct io_kiocb *req;
@@ -1136,8 +1136,8 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
 
 	__io_commit_cqring(ctx);
 
-	while ((req = io_get_deferred_req(ctx)) != NULL)
-		io_queue_async_work(req);
+	if (unlikely(!list_empty(&ctx->defer_list)))
+		__io_queue_deferred(ctx);
 }
 
 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
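
For illustration, here is a minimal, self-contained sketch of the same hot/cold split in plain C. All names (struct ring, work_item, commit_cqring, queue_deferred, run_async) are hypothetical stand-ins and only mirror the shape of the change; the real code is the fs/io_uring.c diff above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures (illustration only). */
struct work_item {
	struct work_item *next;
	bool still_deferred;		/* plays the role of req_need_defer() */
};

struct ring {
	struct work_item *defer_list;	/* NULL when nothing is deferred */
};

/* Stand-in for io_queue_async_work(): just report what would be queued. */
static void run_async(struct work_item *item)
{
	printf("queueing deferred item %p\n", (void *)item);
}

/* Cold path: drain the list until an entry still has to stay deferred. */
static void queue_deferred(struct ring *r)
{
	do {
		struct work_item *item = r->defer_list;

		if (item->still_deferred)
			break;
		r->defer_list = item->next;
		run_async(item);
	} while (r->defer_list != NULL);
}

/* Hot path: a single cheap test; the draining loop stays out of line. */
static void commit_cqring(struct ring *r)
{
	/* ... fast completion bookkeeping would happen here ... */

	if (__builtin_expect(r->defer_list != NULL, 0))	/* like unlikely() */
		queue_deferred(r);
}

int main(void)
{
	struct work_item b = { .next = NULL, .still_deferred = true };
	struct work_item a = { .next = &b, .still_deferred = false };
	struct ring r = { .defer_list = &a };

	commit_cqring(&r);	/* queues 'a', leaves 'b' deferred */
	return 0;
}

The point of the split is that the common case in commit_cqring() compiles down to one test-and-branch, while the rarely executed loop lives in a separate helper, mirroring the unlikely(!list_empty(&ctx->defer_list)) check added to io_commit_cqring().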