author     Jens Axboe <axboe@kernel.dk>  2022-07-07 22:20:54 +0200
committer  Jens Axboe <axboe@kernel.dk>  2022-07-25 02:39:17 +0200
commit     9731bc9855dc169f27433fef3c4d0ff3496c512d (patch)
tree       b28facb6c0ee4796e2787a5c736231a73520993f
parent     io_uring: add abstraction around apoll cache (diff)
io_uring: impose max limit on apoll cache
Caches like this tend to grow to the peak size, and then never get any smaller. Impose a max limit on the size, to prevent it from growing too big.

A somewhat arbitrarily chosen 512 is the max size we'll allow the cache to reach. If a batch of frees comes in and would bring it over that limit, we simply start kfree'ing the surplus.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
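For illustration only, here is a compile-only C sketch of the policy described above: cache objects up to a fixed limit, and once insertion declines, the caller frees the surplus object itself. This is not the kernel code; the names (bounded_cache, cache_put, put_or_free), the plain singly linked list, and the use of malloc()/free() in place of the kernel's hlist and kfree() are assumptions made for the example. Only the 512 cap mirrors the patch.

/* Compile-only sketch of a capped object cache; not the kernel code. */
#include <stdbool.h>
#include <stdlib.h>

#define CACHE_MAX	512	/* mirrors IO_ALLOC_CACHE_MAX from the patch */

struct cache_entry {
	struct cache_entry *next;
};

struct bounded_cache {
	struct cache_entry *head;
	unsigned int nr_cached;
};

/* Returns true if the entry was cached, false if the caller must free it. */
static bool cache_put(struct bounded_cache *cache, struct cache_entry *entry)
{
	if (cache->nr_cached < CACHE_MAX) {
		entry->next = cache->head;
		cache->head = entry;
		cache->nr_cached++;
		return true;
	}
	return false;
}

/* Caller-side pattern once put can decline: free the surplus object. */
static void put_or_free(struct bounded_cache *cache, struct cache_entry *entry)
{
	if (!cache_put(cache, entry))
		free(entry);
}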
-rw-r--r--  include/linux/io_uring_types.h |  1
-rw-r--r--  io_uring/alloc_cache.h         | 16
-rw-r--r--  io_uring/io_uring.c            |  3
3 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index b548da03b563..bf8f95332eda 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -160,6 +160,7 @@ struct io_ev_fd {
 
 struct io_alloc_cache {
 	struct hlist_head list;
+	unsigned int nr_cached;
 };
 
 struct io_ring_ctx {
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 98f2374c37c7..729793ae9712 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -1,14 +1,24 @@
 #ifndef IOU_ALLOC_CACHE_H
 #define IOU_ALLOC_CACHE_H
 
+/*
+ * Don't allow the cache to grow beyond this size.
+ */
+#define IO_ALLOC_CACHE_MAX	512
+
 struct io_cache_entry {
 	struct hlist_node node;
 };
 
-static inline void io_alloc_cache_put(struct io_alloc_cache *cache,
+static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 				      struct io_cache_entry *entry)
 {
-	hlist_add_head(&entry->node, &cache->list);
+	if (cache->nr_cached < IO_ALLOC_CACHE_MAX) {
+		cache->nr_cached++;
+		hlist_add_head(&entry->node, &cache->list);
+		return true;
+	}
+	return false;
 }
 
 static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
@@ -26,6 +36,7 @@ static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *c
 static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
 {
 	INIT_HLIST_HEAD(&cache->list);
+	cache->nr_cached = 0;
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
@@ -37,5 +48,6 @@ static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
 		hlist_del(node);
 		free(container_of(node, struct io_cache_entry, node));
 	}
+	cache->nr_cached = 0;
 }
 #endif
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index a360a3d390c6..c9c23e459766 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1181,7 +1181,8 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
 
 			if (apoll->double_poll)
 				kfree(apoll->double_poll);
-			io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache);
+			if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
+				kfree(apoll);
 			req->flags &= ~REQ_F_POLLED;
 		}
 		if (req->flags & IO_REQ_LINK_FLAGS)
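As a usage note on the io_uring.c hunk above: io_alloc_cache_put() may now decline, so the caller becomes responsible for freeing the object. A rough, self-contained user-space analogue of the full lifecycle (init, get-or-allocate, put-or-free, final drain) is sketched below; the obj/obj_cache names and the demo batch size are invented for the example, and malloc()/free() stand in for the kernel allocator.

#include <stdio.h>
#include <stdlib.h>

#define CACHE_MAX	512	/* mirrors IO_ALLOC_CACHE_MAX from the patch */
#define NR_OBJS		1000	/* demo batch size, deliberately above the cap */

struct obj {
	struct obj *next;	/* free-list linkage while the object is cached */
};

struct obj_cache {
	struct obj *head;
	unsigned int nr_cached;
};

static void cache_init(struct obj_cache *c)
{
	c->head = NULL;
	c->nr_cached = 0;
}

/* Reuse a cached object when possible, otherwise fall back to malloc(). */
static struct obj *obj_get(struct obj_cache *c)
{
	struct obj *o = c->head;

	if (o) {
		c->head = o->next;
		c->nr_cached--;
		return o;
	}
	return malloc(sizeof(*o));
}

/* The patched contract: cache up to CACHE_MAX objects, free the surplus. */
static void obj_put(struct obj_cache *c, struct obj *o)
{
	if (!o)
		return;
	if (c->nr_cached < CACHE_MAX) {
		o->next = c->head;
		c->head = o;
		c->nr_cached++;
	} else {
		free(o);
	}
}

/* Drain everything at teardown, like io_alloc_cache_free(). */
static void cache_drain(struct obj_cache *c)
{
	while (c->head) {
		struct obj *o = c->head;

		c->head = o->next;
		free(o);
	}
	c->nr_cached = 0;
}

int main(void)
{
	static struct obj *objs[NR_OBJS];
	struct obj_cache cache;
	int i;

	cache_init(&cache);

	/* Allocate a batch larger than the cap, then return it all at once. */
	for (i = 0; i < NR_OBJS; i++)
		objs[i] = obj_get(&cache);
	for (i = 0; i < NR_OBJS; i++)
		obj_put(&cache, objs[i]);

	/* Only CACHE_MAX objects stay cached; the rest were freed on put. */
	printf("cached %u of %d objects (cap %d)\n",
	       cache.nr_cached, NR_OBJS, CACHE_MAX);

	cache_drain(&cache);
	return 0;
}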