summaryrefslogtreecommitdiffstats
path: root/io_uring/io_uring.c
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2023-01-05 12:22:23 +0100
committerJens Axboe <axboe@kernel.dk>2023-01-29 23:17:39 +0100
commit140102ae9a9f2f83f0592b98b3c5c6119d9a9b32 (patch)
tree7c83692d22146407b437fcfad179523bcb0000f2 /io_uring/io_uring.c
parentio_uring: kill io_run_task_work_ctx (diff)
downloadlinux-140102ae9a9f2f83f0592b98b3c5c6119d9a9b32.tar.xz
linux-140102ae9a9f2f83f0592b98b3c5c6119d9a9b32.zip
io_uring: move defer tw task checks
Most places that want to run local tw explicitly and in advance check if they are allowed to do so. Don't rely on a similar check in __io_run_local_work(), leave it as a just-in-case warning and make sure callers checks capabilities themselves. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/990fe0e8e70fd4d57e43625e5ce8fba584821d1a.1672916894.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--	io_uring/io_uring.c | 15 +++++++---------
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 5326e2d94055..97b749203ba8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1301,14 +1301,13 @@ int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked)
struct llist_node *node;
struct llist_node fake;
struct llist_node *current_final = NULL;
- int ret;
+ int ret = 0;
unsigned int loops = 1;
- if (unlikely(ctx->submitter_task != current))
+ if (WARN_ON_ONCE(ctx->submitter_task != current))
return -EEXIST;
node = io_llist_xchg(&ctx->work_llist, &fake);
- ret = 0;
again:
while (node != current_final) {
struct llist_node *next = node->next;
@@ -2514,11 +2513,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (!io_allowed_run_tw(ctx))
return -EEXIST;
- if (!llist_empty(&ctx->work_llist)) {
- ret = io_run_local_work(ctx);
- if (ret < 0)
- return ret;
- }
+ if (!llist_empty(&ctx->work_llist))
+ io_run_local_work(ctx);
io_run_task_work();
io_cqring_overflow_flush(ctx);
/* if user messes with these they will just get an early return */
@@ -3055,7 +3051,8 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
}
}
- if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+ if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
+ io_allowed_defer_tw_run(ctx))
ret |= io_run_local_work(ctx) > 0;
ret |= io_cancel_defer_files(ctx, task, cancel_all);
mutex_lock(&ctx->uring_lock);