author		Jens Axboe <axboe@kernel.dk>	2022-09-03 18:09:22 +0200
committer	Jens Axboe <axboe@kernel.dk>	2022-09-21 18:30:43 +0200
commit		8ac5d85a89b48269e5aefb92b640d38367670a1b (patch)
tree		c0c504bb905bc4ab2f2474f8231188763d5f09ad /io_uring/io_uring.h
parent		io_uring: cleanly separate request types for iopoll (diff)
io_uring: add local task_work run helper that is entered locked

We have a few spots that drop the mutex just to run local task_work, which
immediately tries to grab it again. Add a helper that just passes in whether
we're locked already.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
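The shape of the change is a common locking pattern: a __-prefixed core
function that takes a "locked" flag, plus a thin wrapper for callers that do
not hold the lock. The sketch below is a minimal, runnable userspace analogue
using a pthread mutex; the names __run_local_work/run_local_work are made up
for illustration, and the actual kernel implementation differs in detail.

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Core worker: the caller says whether it already holds `lock`,
	 * so a locked caller never drops it just for us to retake it. */
	static int __run_local_work(bool locked)
	{
		if (!locked)
			pthread_mutex_lock(&lock);
		printf("running local work, lock held\n");
		if (!locked)
			pthread_mutex_unlock(&lock);
		return 0;
	}

	/* Convenience wrapper for callers that do not hold the lock. */
	static int run_local_work(void)
	{
		return __run_local_work(false);
	}

	int main(void)
	{
		/* A caller already holding the lock uses the __ variant. */
		pthread_mutex_lock(&lock);
		__run_local_work(true);
		pthread_mutex_unlock(&lock);

		/* An unlocked caller goes through the wrapper. */
		run_local_work();
		return 0;
	}

The point of the split is that lock ownership is decided once, at the call
site, instead of being dropped and immediately re-acquired inside the helper.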
Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--	io_uring/io_uring.h	1
1 file changed, 1 insertion, 0 deletions
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index f417d75d7bc1..0f90d1dfa42b 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -27,6 +27,7 @@ enum {
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
+int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
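
With the new prototype exported, a path that already holds ctx->uring_lock
can call the locked variant directly. The call site below is hypothetical
(example_flush is not part of this diff) and only illustrates the pattern the
commit message describes:

	/* Hypothetical caller, not from this patch: it already holds
	 * ctx->uring_lock and no longer has to unlock around the helper. */
	static int example_flush(struct io_ring_ctx *ctx)
	{
		/* before: mutex_unlock(&ctx->uring_lock);
		 *         io_run_local_work(ctx);
		 *         mutex_lock(&ctx->uring_lock); */
		return __io_run_local_work(ctx, true);
	}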