author:    Pavel Begunkov <asml.silence@gmail.com>  2022-12-07 16:50:01 +0100
committer: Jens Axboe <axboe@kernel.dk>  2022-12-07 16:51:08 +0100
commit:    f66f73421f0a929734bb41dde575e6d7859e548f
tree:      51e49f7b56ce259e8dc755471e3359549774b7c7 /io_uring/io_uring.h
parent:    io_uring: do msg_ring in target task via tw
io_uring: skip spinlocking for ->task_complete
->task_complete was added to serialise CQE posting by doing it from the task context only (or from the fallback wq when the task is dead), and now we can use that to avoid taking ->completion_lock while filling CQ entries. The patch skips spinlocking only in two spots, __io_submit_flush_completions() and flushing in io_aux_cqe; that is safer and covers all the cases we care about. Extra care is taken to force taking the lock while queueing overflow entries.

It fundamentally relies on SINGLE_ISSUER to have only one task posting events. It also needs to take into account overflowed CQEs, the flushing of which happens in the cq wait path, so this implementation also requires DEFER_TASKRUN to limit waiters. For the same reason it is disabled for SQPOLL, and for IOPOLL as IOPOLL won't benefit from it in any case. The DEFER_TASKRUN, SQPOLL and IOPOLL requirements may be relaxed in the future.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/2a8c91fd82cfcdcc1d2e5bac7051fe2c183bda73.1670384893.git.asml.silence@gmail.com
[axboe: modify to apply]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
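For readers following the locking change, a minimal sketch of the conditional-locking idea described above. The helper names here are illustrative and not taken from the patch (this diff only covers io_uring.h; the lock elision itself lands in __io_submit_flush_completions() and the io_aux_cqe flushing path); only ctx->task_complete and ctx->completion_lock are names from the patch.

/*
 * Sketch only: skip ->completion_lock when ->task_complete guarantees
 * that CQEs are posted exclusively from the submitter task (enforced by
 * IORING_SETUP_SINGLE_ISSUER + IORING_SETUP_DEFER_TASKRUN, with SQPOLL
 * and IOPOLL excluded). Overflow queueing still takes the lock, as the
 * commit message notes.
 */
static inline void io_cq_lock_sketch(struct io_ring_ctx *ctx)
{
	if (!ctx->task_complete)
		spin_lock(&ctx->completion_lock);
}

static inline void io_cq_unlock_sketch(struct io_ring_ctx *ctx)
{
	if (!ctx->task_complete)
		spin_unlock(&ctx->completion_lock);
}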
Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--  io_uring/io_uring.h | 10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 2277c05f52a6..1b2f0b2cc888 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -133,7 +133,7 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 	 */
 	cqe = io_get_cqe(ctx);
 	if (unlikely(!cqe))
-		return io_req_cqe_overflow(req);
+		return false;
 
 	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
 				req->cqe.res, req->cqe.flags,
@@ -156,6 +156,14 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
 	return true;
 }
 
+static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
+				   struct io_kiocb *req)
+{
+	if (likely(__io_fill_cqe_req(ctx, req)))
+		return true;
+	return io_req_cqe_overflow(req);
+}
+
 static inline void req_set_fail(struct io_kiocb *req)
 {
 	req->flags |= REQ_F_FAIL;
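The header change above splits CQE posting in two: __io_fill_cqe_req() now only reports failure when the CQ ring is full, while the new io_fill_cqe_req() wrapper keeps the old fall-back to io_req_cqe_overflow(). Below is a hedged sketch of how a caller on the lockless ->task_complete path can use that split; the surrounding function is illustrative and not part of this diff, which touches only io_uring.h.

/*
 * Sketch only: the fast path fills the CQE without ->completion_lock;
 * the lock is taken just for the rare overflow case, matching the
 * "extra care ... while queueing overflow entries" note in the commit
 * message.
 */
static void io_post_cqe_sketch(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	if (ctx->task_complete) {
		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
			spin_lock(&ctx->completion_lock);
			io_req_cqe_overflow(req);
			spin_unlock(&ctx->completion_lock);
		}
	} else {
		/* legacy path: caller-side locking, overflow handled inside */
		spin_lock(&ctx->completion_lock);
		io_fill_cqe_req(ctx, req);
		spin_unlock(&ctx->completion_lock);
	}
}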