author     Pavel Begunkov <asml.silence@gmail.com>   2023-04-11 13:06:05 +0200
committer  Jens Axboe <axboe@kernel.dk>              2023-04-12 20:09:41 +0200
commit     528407b1e0ea51260fff2cc8b669c632a65d7a09
tree       fc71826287bad3c13e9e8408582f5113d331535f /io_uring
parent     io_uring/rsrc: add lockdep checks
io_uring/rsrc: consolidate node caching
We keep one pre-allocated rsrc node in ->rsrc_backup_node; merge it into
->rsrc_node_cache so spare nodes are kept in a single cache.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6d5410e51ccd29be7a716be045b51d6b371baef6.1681210788.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
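The ->rsrc_node_cache that absorbs the backup node is an io_alloc_cache: a singly linked list of spare objects, threaded through a cache entry embedded in each object (the diff below passes &node->cache to io_alloc_cache_put() and adds io_alloc_cache_empty()). The following standalone userspace sketch shows that caching pattern under simplified assumptions; the type and helper names are illustrative stand-ins, not the kernel definitions.

/*
 * Userspace sketch of the allocation-cache pattern, assuming a simplified
 * model of io_uring/alloc_cache.h; names are illustrative, not kernel code.
 */
#include <stdbool.h>
#include <stddef.h>

struct cache_entry {
	struct cache_entry *next;
};

struct alloc_cache {
	struct cache_entry *head;	/* singly linked list of spare entries */
};

/* Stash a spare object; the entry is embedded in the caller's struct. */
static void cache_put(struct alloc_cache *cache, struct cache_entry *entry)
{
	entry->next = cache->head;
	cache->head = entry;
}

/* Counterpart of the new io_alloc_cache_empty(): no head means no spares. */
static bool cache_empty(const struct alloc_cache *cache)
{
	return cache->head == NULL;
}

/* Pop one spare entry, or NULL when the cache is empty. */
static struct cache_entry *cache_get(struct alloc_cache *cache)
{
	struct cache_entry *entry = cache->head;

	if (entry)
		cache->head = entry->next;
	return entry;
}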
Diffstat (limited to 'io_uring')
-rw-r--r--   io_uring/alloc_cache.h |  5 +++++
-rw-r--r--   io_uring/io_uring.c    |  2 --
-rw-r--r--   io_uring/rsrc.c        | 20 +++++++++++---------
3 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 851a527afb5e..241245cb54a6 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -23,6 +23,11 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	return false;
 }
 
+static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
+{
+	return !cache->list.next;
+}
+
 static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
 {
 	if (cache->list.next) {
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index b171c26d331d..075bae8a2bb1 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2852,8 +2852,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	/* there are no registered resources left, nobody uses it */
 	if (ctx->rsrc_node)
 		io_rsrc_node_destroy(ctx, ctx->rsrc_node);
-	if (ctx->rsrc_backup_node)
-		io_rsrc_node_destroy(ctx, ctx->rsrc_backup_node);
 
 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
 
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 24e4e2109549..73f9e10d9bf0 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -230,7 +230,7 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 			struct io_rsrc_data *data_to_kill)
 	__must_hold(&ctx->uring_lock)
 {
-	WARN_ON_ONCE(!ctx->rsrc_backup_node);
+	WARN_ON_ONCE(io_alloc_cache_empty(&ctx->rsrc_node_cache));
 	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);
 
 	if (data_to_kill) {
@@ -245,18 +245,20 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 		ctx->rsrc_node = NULL;
 	}
 
-	if (!ctx->rsrc_node) {
-		ctx->rsrc_node = ctx->rsrc_backup_node;
-		ctx->rsrc_backup_node = NULL;
-	}
+	if (!ctx->rsrc_node)
+		ctx->rsrc_node = io_rsrc_node_alloc(ctx);
 }
 
 int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
 {
-	if (ctx->rsrc_backup_node)
-		return 0;
-	ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
-	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
+	if (io_alloc_cache_empty(&ctx->rsrc_node_cache)) {
+		struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+		if (!node)
+			return -ENOMEM;
+		io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
+	}
+	return 0;
 }
 
 __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
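The rsrc.c hunks preserve the old guarantee in a new form: io_rsrc_node_switch_start() is still the step that may fail, so it pre-fills ->rsrc_node_cache with a kzalloc'd node when the cache is empty, while io_rsrc_node_switch(), which must not fail, only warns if the cache is empty before taking a fresh node via io_rsrc_node_alloc(). Reusing the cache helpers from the sketch above, the hypothetical userspace code below illustrates that prepare-then-consume split; node_switch_start(), node_switch() and struct node are invented names, not the kernel functions.

/*
 * Hypothetical node type embedding the cache entry as its first member,
 * mirroring how the diff passes &node->cache to io_alloc_cache_put().
 */
#include <stdlib.h>

struct node {
	struct cache_entry cache;	/* first member, so the cast below is valid */
	int refs;
};

/* May fail: pre-fill the cache while an error can still be reported. */
static int node_switch_start(struct alloc_cache *cache)
{
	if (cache_empty(cache)) {
		struct node *node = calloc(1, sizeof(*node));

		if (!node)
			return -1;	/* stands in for -ENOMEM */
		cache_put(cache, &node->cache);
	}
	return 0;
}

/* Must not fail: a successful node_switch_start() guarantees a spare node. */
static struct node *node_switch(struct alloc_cache *cache)
{
	return (struct node *)cache_get(cache);
}

/* Typical call sequence: prepare first, then switch in the no-fail path. */
static int example(struct alloc_cache *cache)
{
	if (node_switch_start(cache))
		return -1;			/* failure is still allowed here */
	return node_switch(cache)->refs;	/* cannot observe an empty cache */
}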