author | Pavel Begunkov <asml.silence@gmail.com> | 2023-04-04 14:39:54 +0200
committer | Jens Axboe <axboe@kernel.dk> | 2023-04-04 17:30:39 +0200
commit | 9eae8655f9cd2eeed99fb7a0d2bb22816c17e497 (patch)
tree | 9839eb9739e972da6b62d363843f1fa95e48ea4b /io_uring/rsrc.c
parent | io_uring/rsrc: don't offload node free (diff)
io_uring/rsrc: cache struct io_rsrc_node
Add an allocation cache for struct io_rsrc_node. Nodes are always allocated
and put back under ->uring_lock, so the cache itself doesn't need any extra
synchronisation.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/252a9d9ef9654e6467af30fdc02f57c0118fb76e.1680576071.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
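To make the recycling idea concrete, here is a minimal userspace C sketch of the same pattern: a small free list of fixed-size objects, where allocation falls back to the heap only when the list is empty and freeing falls back to free() only when the list is full. This is not the kernel's io_alloc_cache API; struct node_cache, node_alloc(), node_destroy(), and CACHE_MAX are hypothetical names used for illustration, and the sketch assumes a single caller (standing in for ->uring_lock serialisation), so it takes no lock.

```c
/*
 * Userspace sketch of the recycling pattern described above.
 * NOT the kernel's io_alloc_cache API; all names here are made up.
 * Assumes a single thread, mirroring the ->uring_lock serialisation
 * mentioned in the commit message.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_MAX 64

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cache_entry {
        struct cache_entry *next;
};

struct node_cache {
        struct cache_entry *head;
        unsigned int nr_cached;
};

/* Example object embedding a cache entry, like struct io_rsrc_node. */
struct rsrc_node {
        struct cache_entry cache;
        int refs;
};

static struct rsrc_node *node_alloc(struct node_cache *c)
{
        struct rsrc_node *node;

        if (c->head) {                          /* reuse a cached node */
                struct cache_entry *entry = c->head;

                c->head = entry->next;
                c->nr_cached--;
                node = container_of(entry, struct rsrc_node, cache);
        } else {                                /* cache empty: allocate */
                node = calloc(1, sizeof(*node));
                if (!node)
                        return NULL;
        }
        node->refs = 1;
        return node;
}

static void node_destroy(struct node_cache *c, struct rsrc_node *node)
{
        if (c->nr_cached < CACHE_MAX) {         /* recycle into the cache */
                node->cache.next = c->head;
                c->head = &node->cache;
                c->nr_cached++;
                return;
        }
        free(node);                             /* cache full: really free */
}

int main(void)
{
        struct node_cache cache = { NULL, 0 };
        struct rsrc_node *node = node_alloc(&cache);

        node_destroy(&cache, node);     /* parked in the cache ... */
        node = node_alloc(&cache);      /* ... and handed straight back */
        printf("recycled node, refs=%d\n", node->refs);
        free(node);                     /* final teardown bypasses the cache */
        return 0;
}
```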
Diffstat (limited to 'io_uring/rsrc.c')
-rw-r--r-- | io_uring/rsrc.c | 23
1 file changed, 15 insertions(+), 8 deletions(-)
```diff
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 0f4e245dee1b..345631091d80 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -164,7 +164,7 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
                 kfree(prsrc);
         }
 
-        io_rsrc_node_destroy(ref_node);
+        io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
         if (atomic_dec_and_test(&rsrc_data->refs))
                 complete(&rsrc_data->done);
 }
@@ -175,9 +175,10 @@ void io_wait_rsrc_data(struct io_rsrc_data *data)
                 wait_for_completion(&data->done);
 }
 
-void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
+void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
 {
-        kfree(ref_node);
+        if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
+                kfree(node);
 }
 
 void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
@@ -198,13 +199,19 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
         }
 }
 
-static struct io_rsrc_node *io_rsrc_node_alloc(void)
+static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
 {
         struct io_rsrc_node *ref_node;
+        struct io_cache_entry *entry;
 
-        ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
-        if (!ref_node)
-                return NULL;
+        entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
+        if (entry) {
+                ref_node = container_of(entry, struct io_rsrc_node, cache);
+        } else {
+                ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
+                if (!ref_node)
+                        return NULL;
+        }
 
         ref_node->refs = 1;
         INIT_LIST_HEAD(&ref_node->node);
@@ -243,7 +250,7 @@ int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
 {
         if (ctx->rsrc_backup_node)
                 return 0;
-        ctx->rsrc_backup_node = io_rsrc_node_alloc();
+        ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
         return ctx->rsrc_backup_node ? 0 : -ENOMEM;
 }
```
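In the resulting code, io_rsrc_node_destroy() first offers the node to ctx->rsrc_node_cache via io_alloc_cache_put() and only calls kfree() when the cache declines it, while io_rsrc_node_alloc() does the reverse: it tries io_alloc_cache_get() and recovers the node with container_of() before falling back to kzalloc(). Both paths now take the io_ring_ctx, which is why io_rsrc_node_switch_start() passes ctx, and since nodes are only allocated and put under ->uring_lock (per the commit message), the cache itself needs no additional locking.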