author	Jens Axboe <axboe@kernel.dk>	2022-07-07 22:16:20 +0200
committer	Jens Axboe <axboe@kernel.dk>	2022-07-25 02:39:17 +0200
commit	9b797a37c4bd83b03cedcfbd15852b836f5e562c (patch)
tree	f01382c4665ad9276dd632b69a8ca9429f26f979 /io_uring
parent	io_uring: move apoll cache to poll.c (diff)
io_uring: add abstraction around apoll cache

In preparation for adding limits, and one more user, abstract out the core
bits of the allocation+free cache.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/alloc_cache.h	41
-rw-r--r--	io_uring/io_uring.c	8
-rw-r--r--	io_uring/poll.c	18
-rw-r--r--	io_uring/poll.h	9
4 files changed, 57 insertions, 19 deletions
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
new file mode 100644
index 000000000000..98f2374c37c7
--- /dev/null
+++ b/io_uring/alloc_cache.h
@@ -0,0 +1,41 @@
+#ifndef IOU_ALLOC_CACHE_H
+#define IOU_ALLOC_CACHE_H
+
+struct io_cache_entry {
+ struct hlist_node node;
+};
+
+static inline void io_alloc_cache_put(struct io_alloc_cache *cache,
+ struct io_cache_entry *entry)
+{
+ hlist_add_head(&entry->node, &cache->list);
+}
+
+static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+{
+ if (!hlist_empty(&cache->list)) {
+ struct hlist_node *node = cache->list.first;
+
+ hlist_del(node);
+ return container_of(node, struct io_cache_entry, node);
+ }
+
+ return NULL;
+}
+
+static inline void io_alloc_cache_init(struct io_alloc_cache *cache)
+{
+ INIT_HLIST_HEAD(&cache->list);
+}
+
+static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
+ void (*free)(struct io_cache_entry *))
+{
+ while (!hlist_empty(&cache->list)) {
+ struct hlist_node *node = cache->list.first;
+
+ hlist_del(node);
+ free(container_of(node, struct io_cache_entry, node));
+ }
+}
+#endif
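
The new header is self-contained apart from struct io_alloc_cache itself, which is declared outside this patch; from the helpers it is evidently a thin wrapper around a struct hlist_head named list. A minimal sketch of what a user of the API looks like follows; the object name, payload field and callbacks (my_obj, my_obj_cache_free and friends) are illustrative, not part of the patch:

#include <linux/slab.h>
#include "alloc_cache.h"

/* hypothetical object that wants to be recycled through the cache */
struct my_obj {
	struct io_cache_entry cache;	/* embedded node, recovered via container_of() */
	int payload;
};

/* teardown callback handed to io_alloc_cache_free() */
static void my_obj_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct my_obj, cache));
}

static struct my_obj *my_obj_alloc(struct io_alloc_cache *cache)
{
	struct io_cache_entry *entry;

	/* recycle a previously cached object if one is available */
	entry = io_alloc_cache_get(cache);
	if (entry)
		return container_of(entry, struct my_obj, cache);

	/* cache empty: fall back to a fresh allocation */
	return kmalloc(sizeof(struct my_obj), GFP_KERNEL);
}

static void my_obj_release(struct io_alloc_cache *cache, struct my_obj *obj)
{
	/* stash the object for reuse instead of freeing it */
	io_alloc_cache_put(cache, &obj->cache);
}

/* lifetime: io_alloc_cache_init() once at setup,
 * io_alloc_cache_free(cache, my_obj_cache_free) once at teardown */
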
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 4d1ce58b015e..a360a3d390c6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -92,6 +92,7 @@
#include "timeout.h"
#include "poll.h"
+#include "alloc_cache.h"
#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
@@ -295,7 +296,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
INIT_LIST_HEAD(&ctx->sqd_list);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
INIT_LIST_HEAD(&ctx->io_buffers_cache);
- INIT_LIST_HEAD(&ctx->apoll_cache);
+ io_alloc_cache_init(&ctx->apoll_cache);
init_completion(&ctx->ref_comp);
xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
mutex_init(&ctx->uring_lock);
@@ -1180,8 +1181,7 @@ void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
if (apoll->double_poll)
kfree(apoll->double_poll);
- list_add(&apoll->poll.wait.entry,
- &ctx->apoll_cache);
+ io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache);
req->flags &= ~REQ_F_POLLED;
}
if (req->flags & IO_REQ_LINK_FLAGS)
@@ -2467,7 +2467,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
if (ctx->rings)
__io_cqring_overflow_flush(ctx, true);
io_eventfd_unregister(ctx);
- io_flush_apoll_cache(ctx);
+ io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
mutex_unlock(&ctx->uring_lock);
io_destroy_buffers(ctx);
if (ctx->sq_creds)
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 052fcb647208..dadd293749b0 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -590,16 +590,15 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
+ struct io_cache_entry *entry;
struct async_poll *apoll;
if (req->flags & REQ_F_POLLED) {
apoll = req->apoll;
kfree(apoll->double_poll);
} else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
- !list_empty(&ctx->apoll_cache)) {
- apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
- poll.wait.entry);
- list_del_init(&apoll->poll.wait.entry);
+ (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
+ apoll = container_of(entry, struct async_poll, cache);
} else {
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
if (unlikely(!apoll))
@@ -960,14 +959,7 @@ out:
return IOU_OK;
}
-void io_flush_apoll_cache(struct io_ring_ctx *ctx)
+void io_apoll_cache_free(struct io_cache_entry *entry)
{
- struct async_poll *apoll;
-
- while (!list_empty(&ctx->apoll_cache)) {
- apoll = list_first_entry(&ctx->apoll_cache, struct async_poll,
- poll.wait.entry);
- list_del(&apoll->poll.wait.entry);
- kfree(apoll);
- }
+ kfree(container_of(entry, struct async_poll, cache));
}
diff --git a/io_uring/poll.h b/io_uring/poll.h
index 95f192c7babb..5f3bae50fc81 100644
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+#include "alloc_cache.h"
+
enum {
IO_APOLL_OK,
IO_APOLL_ABORTED,
@@ -14,7 +16,10 @@ struct io_poll {
};
struct async_poll {
- struct io_poll poll;
+ union {
+ struct io_poll poll;
+ struct io_cache_entry cache;
+ };
struct io_poll *double_poll;
};
@@ -31,4 +36,4 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
bool cancel_all);
-void io_flush_apoll_cache(struct io_ring_ctx *ctx);
+void io_apoll_cache_free(struct io_cache_entry *entry);
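
The poll.h hunk is the heart of the conversion: an async_poll sitting in the cache is by definition idle, so its io_poll storage can be reused to hold the cache's hlist_node, and the union keeps sizeof(struct async_poll) unchanged. If one wanted to make that assumption explicit, a compile-time check along these lines would do it (illustrative only, not part of the patch):

/* the cache entry must not be the larger union member,
 * or caching would grow struct async_poll */
static_assert(sizeof(struct io_cache_entry) <= sizeof(struct io_poll));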