author    | Pavel Begunkov <asml.silence@gmail.com> | 2022-06-17 10:48:01 +0200
committer | Jens Axboe <axboe@kernel.dk> | 2022-07-25 02:39:14 +0200
commit    | faf88dde060f74117b3a86a62cb32a20f27fd636
tree      | efcc06f04689767ebe25ab240638bb1a929a910e /io_uring/io_uring.c
parent    | io_uring: don't expose io_fill_cqe_aux()
io_uring: don't inline __io_get_cqe()
__io_get_cqe() is not as hot as io_get_cqe(), so there is no need to
inline it; un-inlining it sheds ~500B from the binary.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/c1ac829198a881b7af8710926f99a3559b9f24c0.1655455613.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
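For context, the hot-path counterpart io_get_cqe() stays inline and only falls back to the now out-of-line __io_get_cqe() when the cached range runs out. A minimal sketch of that wrapper, reconstructed from the cqe_cached/cqe_sentinel bookkeeping visible in the diff below (the real body in the io_uring sources may differ in detail):

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	/* fast path: __io_get_cqe() left a run of free CQEs behind */
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		/* consume one cached entry, mirroring the slow path's accounting */
		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		return cqe;
	}
	/* slow path: recompute a contiguous free range, out of line */
	return __io_get_cqe(ctx);
}

Keeping only this branch inline is what makes un-inlining the slow path cheap: the common case stays a couple of loads and increments, while the rarer range recomputation pays one function call.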
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r-- | io_uring/io_uring.c | 35
1 file changed, 35 insertions, 0 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 8c1b0e0ce5bb..df6a9abdd966 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -166,6 +166,11 @@ static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
 		__io_submit_flush_completions(ctx);
 }
 
+static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
+{
+	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
+}
+
 static bool io_match_linked(struct io_kiocb *head)
 {
 	struct io_kiocb *req;
@@ -676,6 +681,36 @@ bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 	return true;
 }
 
+/*
+ * writes to the cq entry need to come after reading head; the
+ * control dependency is enough as we're using WRITE_ONCE to
+ * fill the cq entry
+ */
+struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
+{
+	struct io_rings *rings = ctx->rings;
+	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
+	unsigned int shift = 0;
+	unsigned int free, queued, len;
+
+	if (ctx->flags & IORING_SETUP_CQE32)
+		shift = 1;
+
+	/* userspace may cheat modifying the tail, be safe and do min */
+	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
+	free = ctx->cq_entries - queued;
+	/* we need a contiguous range, limit based on the current array offset */
+	len = min(free, ctx->cq_entries - off);
+	if (!len)
+		return NULL;
+
+	ctx->cached_cq_tail++;
+	ctx->cqe_cached = &rings->cqes[off];
+	ctx->cqe_sentinel = ctx->cqe_cached + len;
+	ctx->cqe_cached++;
+	return &rings->cqes[off << shift];
+}
+
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
 			    s32 res, u32 cflags)
 {
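Callers fill the returned entry with WRITE_ONCE() stores, which is why the control dependency described in the comment above is a sufficient ordering guarantee. A hypothetical, simplified caller (not part of this commit; the real io_fill_cqe_aux() also handles overflow and big-CQE padding):

static bool fill_one_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res)
{
	struct io_uring_cqe *cqe = __io_get_cqe(ctx);

	if (!cqe)
		return false;	/* no contiguous free slot in the CQ ring */

	/*
	 * Plain WRITE_ONCE() stores; they are ordered after the head read
	 * in __io_cqring_events() by the control dependency on the NULL
	 * check above.
	 */
	WRITE_ONCE(cqe->user_data, user_data);
	WRITE_ONCE(cqe->res, res);
	WRITE_ONCE(cqe->flags, 0);
	return true;
}

Note the indexing in the slow path: cq_entries is a power of two, so `tail & (cq_entries - 1)` yields the ring offset, and with IORING_SETUP_CQE32 each logical completion spans two struct io_uring_cqe slots, hence the `off << shift` when forming the returned pointer.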