author		Jens Axboe <axboe@kernel.dk>	2020-10-16 01:38:03 +0200
committer	Jens Axboe <axboe@kernel.dk>	2020-10-17 17:25:46 +0200
commit		500a373d731ac506612db12631ec21295c1ff360 (patch)
tree		e2f3e0f98abb185d1d52765b6ccd4c16558d6727 /fs/io_uring.c
parent		io_uring: store io_identity in io_uring_task (diff)
io_uring: assign new io_identity for task if members have changed
This avoids doing a copy for each new async IO if some parts of the io_identity have changed. We avoid reference counting for the normal fast path of nothing ever changing.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
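(As a rough illustration of the scheme this change moves to, here is a minimal userspace sketch. The names req_init_async, req_put_identity, struct task_io_state and struct request are illustrative stand-ins, not the kernel's io_req_init_async()/io_put_identity() themselves, and a plain C11 atomic plays the role of refcount_t. The idea: each task embeds one identity and carries a pointer that normally refers to it, so the common case takes no reference at all; only a replaced, copied identity is ever refcounted and freed. The copy path and teardown are sketched after the diff below.)

#include <stdatomic.h>
#include <stdlib.h>

/* Stand-ins for struct io_identity, struct io_uring_task and struct io_kiocb. */
struct identity {
	unsigned int creds;		/* placeholder for the real members */
	atomic_int count;		/* only touched for copied identities */
};

struct task_io_state {
	struct identity __identity;	/* embedded "static" identity, never freed on its own */
	struct identity *identity;	/* usually == &__identity */
};

struct request {
	struct identity *identity;
};

/* Fast path, mirroring io_req_init_async(): point the request at the task's
 * identity and grab a reference only if it is not the embedded one. */
static void req_init_async(struct request *req, struct task_io_state *tctx)
{
	req->identity = tctx->identity;
	if (tctx->identity != &tctx->__identity)
		atomic_fetch_add(&req->identity->count, 1);
}

/* Mirroring io_put_identity(): the embedded identity is never freed, so the
 * common case stays refcount-free. */
static void req_put_identity(struct task_io_state *tctx, struct request *req)
{
	if (req->identity == &tctx->__identity)
		return;
	if (atomic_fetch_sub(&req->identity->count, 1) == 1)
		free(req->identity);
}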
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ae91632b8bf9..7020c6a72231 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1066,12 +1066,18 @@ static void io_init_identity(struct io_identity *id)
  */
 static inline void io_req_init_async(struct io_kiocb *req)
 {
+	struct io_uring_task *tctx = current->io_uring;
+
 	if (req->flags & REQ_F_WORK_INITIALIZED)
 		return;
 
 	memset(&req->work, 0, sizeof(req->work));
 	req->flags |= REQ_F_WORK_INITIALIZED;
-	req->work.identity = &current->io_uring->identity;
+
+	/* Grab a ref if this isn't our static identity */
+	req->work.identity = tctx->identity;
+	if (tctx->identity != &tctx->__identity)
+		refcount_inc(&req->work.identity->count);
 }
 
 static inline bool io_async_submit(struct io_ring_ctx *ctx)
@@ -1179,7 +1185,7 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 
 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
 {
-	if (req->work.identity == &tctx->identity)
+	if (req->work.identity == &tctx->__identity)
 		return;
 	if (refcount_dec_and_test(&req->work.identity->count))
 		kfree(req->work.identity);
@@ -1254,11 +1260,12 @@ static bool io_identity_cow(struct io_kiocb *req)
 	refcount_inc(&id->count);
 
 	/* drop old identity, assign new one. one ref for req, one for tctx */
-	if (req->work.identity != &tctx->identity &&
+	if (req->work.identity != tctx->identity &&
 	    refcount_sub_and_test(2, &req->work.identity->count))
 		kfree(req->work.identity);
 
 	req->work.identity = id;
+	tctx->identity = id;
 	return true;
 }
 
@@ -7691,7 +7698,8 @@ static int io_uring_alloc_task_context(struct task_struct *task)
 	tctx->in_idle = 0;
 	atomic_long_set(&tctx->req_issue, 0);
 	atomic_long_set(&tctx->req_complete, 0);
-	io_init_identity(&tctx->identity);
+	io_init_identity(&tctx->__identity);
+	tctx->identity = &tctx->__identity;
 	task->io_uring = tctx;
 	return 0;
 }
@@ -7701,6 +7709,9 @@ void __io_uring_free(struct task_struct *tsk)
 	struct io_uring_task *tctx = tsk->io_uring;
 
 	WARN_ON_ONCE(!xa_empty(&tctx->xa));
+	WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
+	if (tctx->identity != &tctx->__identity)
+		kfree(tctx->identity);
 	kfree(tctx);
 	tsk->io_uring = NULL;
 }
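(Continuing the userspace sketch from above the diff, again with illustrative names and heavy simplification: the real io_identity_cow() re-inits the copied identity via kmemdup()/io_init_identity(), re-grabs creds and sets IO_WQ_WORK flags, all omitted here. The slow path copies the identity once and points both the request and the task at the copy, so teardown has at most one heap allocation to free.)

#include <stdbool.h>	/* in addition to the headers in the sketch above */

/* Drop the task's reference on a replaced identity; a no-op for the embedded one. */
static void tctx_put_identity(struct task_io_state *tctx)
{
	struct identity *old = tctx->identity;

	if (old == &tctx->__identity)
		return;
	if (atomic_fetch_sub(&old->count, 1) == 1)
		free(old);
}

/* Slow path: a member changed, so both the request and the task switch to a
 * heap copy, analogous to the tctx->identity = id assignment this commit adds
 * to io_identity_cow(). */
static bool identity_cow(struct request *req, struct task_io_state *tctx,
			 unsigned int new_creds)
{
	struct identity *id = malloc(sizeof(*id));

	if (!id)
		return false;
	id->creds = new_creds;			/* the changed member */
	atomic_init(&id->count, 2);		/* one ref for req, one for tctx */

	req_put_identity(tctx, req);		/* drop the request's old reference */
	tctx_put_identity(tctx);		/* drop the task's old reference */
	req->identity = id;
	tctx->identity = id;
	return true;
}

/* Teardown, analogous to __io_uring_free(): only a replaced identity was heap
 * allocated, so only that case is freed (the kernel additionally WARNs if the
 * remaining refcount is not exactly 1 at this point). */
static void task_io_free(struct task_io_state *tctx)
{
	if (tctx->identity != &tctx->__identity)
		free(tctx->identity);
}

int main(void)
{
	struct task_io_state tctx = { .identity = &tctx.__identity };
	struct request req;

	req_init_async(&req, &tctx);	/* fast path: no refcount touched */
	identity_cow(&req, &tctx, 42);	/* a member changed: switch to a copy */
	req_put_identity(&tctx, &req);	/* the request drops its reference */
	task_io_free(&tctx);		/* the task frees the replaced copy */
	return 0;
}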