author		Pavel Begunkov <asml.silence@gmail.com>	2021-03-08 14:20:57 +0100
committer	Jens Axboe <axboe@kernel.dk>	2021-03-10 15:28:42 +0100
commit		0298ef969a110ca03654f0cea9b50e3f3b331acc (patch)
tree		e93af2b4339642d4ac4d1090c523d0aae58a3889 /fs
parent		io_uring: fix unrelated ctx reqs cancellation (diff)
io_uring: clean R_DISABLED startup mess
There are enough problems with IORING_SETUP_R_DISABLED, including the
burden of checking and kicking off the SQO task all over the codebase --
for exit/cancel/etc. Rework it: always start the thread, but don't
submit unless the flag has been cleared. That's much easier.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
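For context, a minimal userspace sketch (assuming liburing; not part of this
patch) of the flow the rework affects: the ring is created with
IORING_SETUP_R_DISABLED and enabled later via IORING_REGISTER_ENABLE_RINGS.
After this patch the SQPOLL thread already exists at that point; enabling
merely clears the flag so the thread starts submitting.

	#include <stdio.h>
	#include <liburing.h>

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int ret;

		/* Ring starts disabled; submissions are rejected until it
		 * is enabled. SQPOLL may need privileges on older kernels. */
		ret = io_uring_queue_init(8, &ring,
					  IORING_SETUP_SQPOLL |
					  IORING_SETUP_R_DISABLED);
		if (ret < 0) {
			fprintf(stderr, "queue_init: %d\n", ret);
			return 1;
		}

		/* Wraps io_uring_register(IORING_REGISTER_ENABLE_RINGS). */
		ret = io_uring_enable_rings(&ring);
		if (ret < 0) {
			fprintf(stderr, "enable_rings: %d\n", ret);
			return 1;
		}

		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		io_uring_submit(&ring);

		ret = io_uring_wait_cqe(&ring, &cqe);
		if (!ret)
			io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		return 0;
	}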
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	25
1 file changed, 6 insertions(+), 19 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d4f018f5838d..3f6db813d670 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6606,7 +6606,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
 		if (!list_empty(&ctx->iopoll_list))
 			io_do_iopoll(ctx, &nr_events, 0);
 
-		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
+		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
+		    !(ctx->flags & IORING_SETUP_R_DISABLED))
 			ret = io_submit_sqes(ctx, to_submit);
 		mutex_unlock(&ctx->uring_lock);
 	}
@@ -7861,6 +7862,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 		wake_up_new_task(tsk);
 		if (ret)
 			goto err;
+		complete(&sqd->startup);
 	} else if (p->flags & IORING_SETUP_SQ_AFF) {
 		/* Can't have SQ_AFF without SQPOLL */
 		ret = -EINVAL;
@@ -7873,15 +7875,6 @@ err:
 	return ret;
 }
 
-static void io_sq_offload_start(struct io_ring_ctx *ctx)
-{
-	struct io_sq_data *sqd = ctx->sq_data;
-
-	ctx->flags &= ~IORING_SETUP_R_DISABLED;
-	if (ctx->flags & IORING_SETUP_SQPOLL)
-		complete(&sqd->startup);
-}
-
 static inline void __io_unaccount_mem(struct user_struct *user,
 				      unsigned long nr_pages)
 {
@@ -8742,11 +8735,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 	struct task_struct *task = current;
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-		/* never started, nothing to cancel */
-		if (ctx->flags & IORING_SETUP_R_DISABLED) {
-			io_sq_offload_start(ctx);
-			return;
-		}
 		io_sq_thread_park(ctx->sq_data);
 		task = ctx->sq_data->thread;
 		if (task)
@@ -9449,9 +9437,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
 	if (ret)
 		goto err;
 
-	if (!(p->flags & IORING_SETUP_R_DISABLED))
-		io_sq_offload_start(ctx);
-
 	memset(&p->sq_off, 0, sizeof(p->sq_off));
 	p->sq_off.head = offsetof(struct io_rings, sq.head);
 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -9668,7 +9653,9 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
 	if (ctx->restrictions.registered)
 		ctx->restricted = 1;
 
-	io_sq_offload_start(ctx);
+	ctx->flags &= ~IORING_SETUP_R_DISABLED;
+	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
+		wake_up(&ctx->sq_data->wait);
 	return 0;
 }
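The shape of the rework, reduced to a toy: the worker thread is spawned
unconditionally, idles while a "disabled" flag is set, and the enable path
just clears the flag and wakes it, so no other path ever has to ask whether
the thread was started. A standalone pthreads sketch of that pattern
(an illustrative analogy only, not kernel code; all names are made up):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t wait_cond = PTHREAD_COND_INITIALIZER;
	static bool ring_disabled = true; /* stands in for R_DISABLED */

	/* Stands in for the SQPOLL thread: created right away, but it
	 * only "submits" once the disabled flag has been cleared. */
	static void *sq_thread(void *arg)
	{
		pthread_mutex_lock(&lock);
		while (ring_disabled)
			pthread_cond_wait(&wait_cond, &lock);
		pthread_mutex_unlock(&lock);

		printf("flag cleared, submitting\n");
		return NULL;
	}

	/* Stands in for io_register_enable_rings(): clear the flag and
	 * wake the worker. No "never started" special case needed. */
	static void enable_rings(void)
	{
		pthread_mutex_lock(&lock);
		ring_disabled = false;
		pthread_cond_broadcast(&wait_cond);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, sq_thread, NULL); /* always */
		enable_rings();
		pthread_join(&t, NULL);
		return 0;
	}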