author    | Pavel Begunkov <asml.silence@gmail.com> | 2023-04-13 16:28:10 +0200
committer | Jens Axboe <axboe@kernel.dk> | 2023-04-15 22:44:57 +0200
commit    | 0b222eeb6514ba6c3457b667fa4f3645032e1fc9 (patch)
tree      | ae98c04f8ce68425ddbeba1777c69150e18bbd78 /io_uring
parent    | io_uring/rsrc: fix DEFER_TASKRUN rsrc quiesce (diff)
download  | linux-0b222eeb6514ba6c3457b667fa4f3645032e1fc9.tar.xz
          | linux-0b222eeb6514ba6c3457b667fa4f3645032e1fc9.zip
io_uring/rsrc: remove rsrc_data refs
Instead of waiting for rsrc_data->refs to drop to zero, check whether
there are rsrc nodes queued for completion; that is easier than
maintaining references.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8e33fd143d83e11af3e386aea28eb6d6c6a1be10.1681395792.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
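
The idea generalizes beyond io_uring: rather than keeping a refcount that mirrors how many nodes are still queued, the waiter checks whether the pending list has drained, and completions only issue a wakeup when the list becomes empty while someone is actually quiescing. Below is a minimal userspace C sketch of that pattern; it is not kernel code, and all names in it (pending_list, quiesce_waiters, quiesce_wq) are invented for illustration.

/*
 * Minimal userspace sketch of the pattern used by this patch (not kernel
 * code): the quiescing side waits for the list of pending nodes to drain
 * instead of maintaining a separate refcount, and completions only wake
 * the waiter when the list becomes empty while someone is quiescing.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static struct node *pending_list;	/* stand-in for ctx->rsrc_ref_list */
static int quiesce_waiters;		/* stand-in for ctx->rsrc_quiesce  */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t quiesce_wq = PTHREAD_COND_INITIALIZER;

/* queue a node for deferred completion */
static void queue_node(void)
{
	struct node *n = calloc(1, sizeof(*n));

	pthread_mutex_lock(&lock);
	n->next = pending_list;
	pending_list = n;
	pthread_mutex_unlock(&lock);
}

/* complete one queued node; wake waiters only if we emptied the list */
static void complete_node(void)
{
	pthread_mutex_lock(&lock);
	if (pending_list) {
		struct node *n = pending_list;

		pending_list = n->next;
		free(n);
	}
	if (!pending_list && quiesce_waiters)
		pthread_cond_broadcast(&quiesce_wq);
	pthread_mutex_unlock(&lock);
}

/* wait until every queued node has completed */
static void quiesce(void)
{
	pthread_mutex_lock(&lock);
	quiesce_waiters++;
	while (pending_list)
		pthread_cond_wait(&quiesce_wq, &lock);
	quiesce_waiters--;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	queue_node();
	queue_node();
	complete_node();
	complete_node();
	quiesce();		/* returns immediately: nothing left pending */
	printf("quiesced\n");
	return 0;
}

In the patch itself, ctx->rsrc_quiesce plays the role of quiesce_waiters: io_rsrc_ref_quiesce() increments it around its wait loop, and io_rsrc_node_ref_zero() calls wake_up_all() only when rsrc_ref_list is empty and that counter is non-zero.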
Diffstat (limited to 'io_uring')
-rw-r--r-- | io_uring/io_uring.c |  4
-rw-r--r-- | io_uring/rsrc.c     | 32
-rw-r--r-- | io_uring/rsrc.h     |  2
3 files changed, 10 insertions, 28 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 3c1c8c788b7b..3d43df8f1e4e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2831,8 +2831,8 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
-	io_wait_rsrc_data(ctx->buf_data);
-	io_wait_rsrc_data(ctx->file_data);
+	if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
+		return;

 	mutex_lock(&ctx->uring_lock);
 	if (ctx->buf_data)
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index e634ef384724..5415a18844e0 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -31,11 +31,6 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
 #define IORING_MAX_FIXED_FILES	(1U << 20)
 #define IORING_MAX_REG_BUFFERS	(1U << 14)

-static inline bool io_put_rsrc_data_ref(struct io_rsrc_data *rsrc_data)
-{
-	return !--rsrc_data->refs;
-}
-
 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
 {
 	unsigned long page_limit, cur_pages, new_pages;
@@ -158,7 +153,6 @@ static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 {
 	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
-	struct io_ring_ctx *ctx = rsrc_data->ctx;
 	struct io_rsrc_put *prsrc, *tmp;

 	if (ref_node->inline_items)
@@ -171,14 +165,6 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
 	}

 	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
-	if (io_put_rsrc_data_ref(rsrc_data))
-		wake_up_all(&ctx->rsrc_quiesce_wq);
-}
-
-void io_wait_rsrc_data(struct io_rsrc_data *data)
-{
-	if (data)
-		WARN_ON_ONCE(!io_put_rsrc_data_ref(data));
 }

 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
@@ -201,6 +187,8 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
 		list_del(&node->node);
 		__io_rsrc_put_work(node);
 	}
+	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
+		wake_up_all(&ctx->rsrc_quiesce_wq);
 }

 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
@@ -235,7 +223,6 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 	if (WARN_ON_ONCE(!backup))
 		return;

-	data_to_kill->refs++;
 	node->rsrc_data = data_to_kill;
 	list_add_tail(&node->node, &ctx->rsrc_ref_list);
 	/* put master ref */
@@ -269,8 +256,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
 		return ret;
 	io_rsrc_node_switch(ctx, data);

-	/* kill initial ref */
-	if (io_put_rsrc_data_ref(data))
+	if (list_empty(&ctx->rsrc_ref_list))
 		return 0;

 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
@@ -278,6 +264,7 @@
 		smp_mb();
 	}

+	ctx->rsrc_quiesce++;
 	data->quiesce = true;
 	do {
 		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
@@ -286,12 +273,8 @@
 		ret = io_run_task_work_sig(ctx);
 		if (ret < 0) {
 			mutex_lock(&ctx->uring_lock);
-			if (!data->refs) {
+			if (list_empty(&ctx->rsrc_ref_list))
 				ret = 0;
-			} else {
-				/* restore the master reference */
-				data->refs++;
-			}
 			break;
 		}

@@ -299,10 +282,12 @@
 		__set_current_state(TASK_RUNNING);
 		mutex_lock(&ctx->uring_lock);
 		ret = 0;
-	} while (data->refs);
+	} while (!list_empty(&ctx->rsrc_ref_list));
 	finish_wait(&ctx->rsrc_quiesce_wq, &we);
 	data->quiesce = false;
+	ctx->rsrc_quiesce--;
+
 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
 		atomic_set(&ctx->cq_wait_nr, 0);
 		smp_mb();
 	}
@@ -371,7 +356,6 @@ __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
 	data->nr = nr;
 	data->ctx = ctx;
 	data->do_put = do_put;
-	data->refs = 1;
 	if (utags) {
 		ret = -EFAULT;
 		for (i = 0; i < nr; i++) {
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index d93ba4e9742a..5dd2fcb28069 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -35,7 +35,6 @@ struct io_rsrc_data {
 	u64 **tags;
 	unsigned int nr;
 	rsrc_put_fn *do_put;
-	int refs;
 	bool quiesce;
 };

@@ -69,7 +68,6 @@ struct io_mapped_ubuf {
 void io_rsrc_put_tw(struct callback_head *cb);
 void io_rsrc_node_ref_zero(struct io_rsrc_node *node);
 void io_rsrc_put_work(struct work_struct *work);
-void io_wait_rsrc_data(struct io_rsrc_data *data);
 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *ref_node);
 int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx);