author     Pavel Begunkov <asml.silence@gmail.com>    2020-07-13 22:37:14 +0200
committer  Jens Axboe <axboe@kernel.dk>               2020-07-24 20:55:45 +0200
commit     27dc8338e5fb0e0ed5b272e792f4ffad7f3bc03e (patch)
tree       fdfc22228d5a2b8127cb02944443894082366c1b /fs/io_uring.c
parent     io_uring: remove init for unused list (diff)
io_uring: use non-intrusive list for defer
The only remaining user of req->list is DRAIN, so instead of keeping a
separate per-request list for it, do it with old-fashioned non-intrusive
lists allocated on demand. That's a really slow path, so that's OK.

This removes req->list and so sheds 16 bytes from io_kiocb.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
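For reference, a minimal sketch of the two list layouts involved (struct obj, struct obj_entry and queue_obj below are made-up stand-ins, not the real io_uring types). An intrusive list embeds a struct list_head in every object, costing 16 bytes on 64-bit whether or not the object is ever queued; a non-intrusive list keeps the linkage in a small wrapper allocated only when an object actually goes on a list:

/* Sketch only: simplified stand-ins for io_kiocb / io_defer_entry. */
#include <linux/list.h>
#include <linux/slab.h>

struct obj {
	int data;
	/* intrusive variant would add: struct list_head list;  (+16 bytes per object) */
};

struct obj_entry {
	struct list_head	list;	/* linkage lives here, not in struct obj */
	struct obj		*obj;
};

/* Slow path: allocate the wrapper on demand and queue the object. */
static int queue_obj(struct list_head *head, struct obj *obj)
{
	struct obj_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);

	if (!entry)
		return -ENOMEM;
	entry->obj = obj;
	list_add_tail(&entry->list, head);
	return 0;
}

The drain side then walks the list with list_first_entry(head, struct obj_entry, list) and kfree()s each wrapper after use, exactly as __io_queue_deferred() does in the diff below.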
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c | 25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1e4ac48b1557..6e6e71310785 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -641,7 +641,6 @@ struct io_kiocb {
 	u16			buf_index;
 
 	struct io_ring_ctx	*ctx;
-	struct list_head	list;
 	unsigned int		flags;
 	refcount_t		refs;
 	struct task_struct	*task;
@@ -676,6 +675,11 @@ struct io_kiocb {
 	struct callback_head	task_work;
 };
 
+struct io_defer_entry {
+	struct list_head	list;
+	struct io_kiocb		*req;
+};
+
 #define IO_IOPOLL_BATCH		8
 
 struct io_comp_state {
@@ -1234,14 +1238,15 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx)
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
 	do {
-		struct io_kiocb *req = list_first_entry(&ctx->defer_list,
-							struct io_kiocb, list);
+		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
+						struct io_defer_entry, list);
 
-		if (req_need_defer(req))
+		if (req_need_defer(de->req))
 			break;
-		list_del_init(&req->list);
+		list_del_init(&de->list);
 		/* punt-init is done before queueing for defer */
-		__io_queue_async_work(req);
+		__io_queue_async_work(de->req);
+		kfree(de);
 	} while (!list_empty(&ctx->defer_list));
 }
 
@@ -5394,6 +5399,7 @@ static int io_req_defer_prep(struct io_kiocb *req,
 static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_defer_entry *de;
 	int ret;
 
 	/* Still need defer if there is pending req in defer list. */
@@ -5408,15 +5414,20 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			return ret;
 	}
 	io_prep_async_link(req);
+	de = kmalloc(sizeof(*de), GFP_KERNEL);
+	if (!de)
+		return -ENOMEM;
 
 	spin_lock_irq(&ctx->completion_lock);
 	if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
 		spin_unlock_irq(&ctx->completion_lock);
+		kfree(de);
 		return 0;
 	}
 
 	trace_io_uring_defer(ctx, req, req->user_data);
-	list_add_tail(&req->list, &ctx->defer_list);
+	de->req = req;
+	list_add_tail(&de->list, &ctx->defer_list);
 	spin_unlock_irq(&ctx->completion_lock);
 	return -EIOCBQUEUED;
 }
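One detail worth noting in the io_req_defer() hunk above: the io_defer_entry is allocated with GFP_KERNEL (which may sleep) before completion_lock is taken, and the branch that decides not to defer after all must kfree() the unused entry once the lock is dropped. Below is a condensed sketch of that allocate-before-lock pattern; struct my_ctx, my_entry and needs_defer() are made-up names for illustration, not the real io_uring code.

/* Sketch only: hypothetical types/helpers illustrating the pattern. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t		lock;
	struct list_head	defer_list;
};

struct my_entry {
	struct list_head	list;
	void			*item;
};

static bool needs_defer(struct my_ctx *ctx, void *item);	/* assumed helper */

static int defer_item(struct my_ctx *ctx, void *item)
{
	struct my_entry *de;

	/* GFP_KERNEL may sleep, so allocate before taking the spinlock. */
	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de)
		return -ENOMEM;

	spin_lock_irq(&ctx->lock);
	if (!needs_defer(ctx, item)) {
		/* Nothing to defer after all: drop the unused entry. */
		spin_unlock_irq(&ctx->lock);
		kfree(de);
		return 0;
	}
	de->item = item;
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->lock);
	return -EINPROGRESS;	/* queued; drained later, entry freed then */
}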