author    Pavel Begunkov <asml.silence@gmail.com>  2023-08-25 00:53:27 +0200
committer Jens Axboe <axboe@kernel.dk>  2023-08-25 01:16:19 +0200
commit    59fbc409e71649f558fb4578cdbfac67acb824dc (patch)
tree      12ceae2eb77eb9ed66d8c5b328844be42ea7422c /io_uring/io_uring.c
parent    io_uring: refactor __io_get_cqe() (diff)
download  linux-59fbc409e71649f558fb4578cdbfac67acb824dc.tar.xz
          linux-59fbc409e71649f558fb4578cdbfac67acb824dc.zip
io_uring: optimise extra io_get_cqe null check
If the cached cqe check passes in io_get_cqe*(), it already means that the cqe we return is valid and non-zero; however, the compiler is unable to optimise null checks like the one in io_fill_cqe_req().

Do a bit of trickery: return a success/fail boolean from io_get_cqe*() and store the cqe in the cqe parameter. That makes it do the right thing, erasing the check together with the introduced indirection.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/322ea4d3377d3d4efd8ae90ab8ed28a99f518210.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
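For illustration, here is a minimal standalone sketch of the pattern the commit message describes, using hypothetical names (struct ring, get_cqe(), fill_cqe()) rather than the real io_uring helpers. With the pointer-returning form the caller must NULL-check the result even though a successful cache check already guarantees a valid pointer; with the bool-plus-out-parameter form the only check left is the boolean, so no redundant NULL test survives at the call site.

#include <stdbool.h>
#include <stddef.h>

struct cqe { unsigned long long user_data; };

struct ring {
	struct cqe *cached;
	struct cqe *sentinel;
};

/* Old shape: hand back a pointer, forcing a NULL check in every caller. */
static struct cqe *get_cqe_ptr(struct ring *r)
{
	if (r->cached >= r->sentinel)
		return NULL;
	return r->cached++;
}

/* New shape: report success/failure, store the cqe via an out-parameter. */
static bool get_cqe(struct ring *r, struct cqe **ret)
{
	if (r->cached >= r->sentinel)
		return false;
	*ret = r->cached++;
	return true;
}

/* Caller in the new shape: branches on the bool, never compares cqe to NULL. */
static bool fill_cqe(struct ring *r, unsigned long long user_data)
{
	struct cqe *cqe;

	if (!get_cqe(r, &cqe))
		return false;
	cqe->user_data = user_data;
	return true;
}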
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--  io_uring/io_uring.c  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index de05831eeca7..cfc2dc8c4b2f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -683,10 +683,10 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 
 	io_cq_lock(ctx);
 	while (!list_empty(&ctx->cq_overflow_list)) {
-		struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
+		struct io_uring_cqe *cqe;
 		struct io_overflow_cqe *ocqe;
 
-		if (!cqe)
+		if (!io_get_cqe_overflow(ctx, &cqe, true))
 			break;
 		ocqe = list_first_entry(&ctx->cq_overflow_list,
 					struct io_overflow_cqe, list);
@@ -862,8 +862,7 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	 * submission (by quite a lot). Increment the overflow count in
 	 * the ring.
 	 */
-	cqe = io_get_cqe(ctx);
-	if (likely(cqe)) {
+	if (likely(io_get_cqe(ctx, &cqe))) {
 		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
 		WRITE_ONCE(cqe->user_data, user_data);
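The diffstat above is limited to io_uring/io_uring.c, so the matching helper conversion is not shown here. Judging purely from the call sites in these hunks, the helpers now return a bool and hand the CQE back through a pointer argument; their presumed declarations (an assumption reconstructed from this diff, not copied from io_uring/io_uring.h) would look roughly like:

/*
 * Signatures implied by the call sites above; the function names come from
 * this diff, but the exact qualifiers and their location in
 * io_uring/io_uring.h are assumptions.
 */
bool io_get_cqe_overflow(struct io_ring_ctx *ctx, struct io_uring_cqe **ret,
			 bool overflow);
bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret);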