path: root/io_uring/tctx.c
author		Dylan Yudaken <dylany@fb.com>	2022-06-22 15:40:23 +0200
committer	Jens Axboe <axboe@kernel.dk>	2022-07-25 02:39:15 +0200
commit		f88262e60bb9cb5740891672ce9f405e7f9393e5 (patch)
tree		bca01661645c35ef7958fd5b0a5f9438d9b9b477 /io_uring/tctx.c
parent		io_uring: remove __io_req_task_work_add (diff)
download	linux-f88262e60bb9cb5740891672ce9f405e7f9393e5.tar.xz
		linux-f88262e60bb9cb5740891672ce9f405e7f9393e5.zip
io_uring: lockless task list
With networking use cases we see contention on the spinlock used to protect the task_list when multiple threads try to add completions at once. Instead we can use a lockless list, and assume that the first caller to add to the list is responsible for kicking off task work.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220622134028.2013417-4-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
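The "first caller kicks" rule falls out of the llist API in <linux/llist.h>: llist_add() is atomic and returns true only if the list was empty before the add, so exactly one of any number of concurrent producers sees true and schedules the task work. A minimal sketch of the add path under that assumption; the add_work() helper and its call shape are illustrative, not the commit's exact code:

#include <linux/llist.h>
#include <linux/task_work.h>

/* Hypothetical producer-side helper. llist_add() returns true iff
 * the list was empty beforehand, so only the first adder queues the
 * task work and no spinlock is needed to serialize producers.
 */
static void add_work(struct io_uring_task *tctx, struct llist_node *node)
{
	if (llist_add(node, &tctx->task_list))
		task_work_add(tctx->task, &tctx->task_work, TWA_SIGNAL);
}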
Diffstat (limited to 'io_uring/tctx.c')
-rw-r--r--	io_uring/tctx.c	3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/io_uring/tctx.c b/io_uring/tctx.c
index 7a68ba9beec3..7f97d97fef0a 100644
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -86,8 +86,7 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
 	atomic_set(&tctx->in_idle, 0);
 	atomic_set(&tctx->inflight_tracked, 0);
 	task->io_uring = tctx;
-	spin_lock_init(&tctx->task_lock);
-	INIT_WQ_LIST(&tctx->task_list);
+	init_llist_head(&tctx->task_list);
 	init_task_work(&tctx->task_work, tctx_task_work);
 	return 0;
 }
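On the consumer side, the task-work handler (tctx_task_work()) can now detach the entire list in one atomic step with llist_del_all() and walk it without holding any lock. A rough sketch under the same caveats; the entry type, member names, and handle_req() callback are placeholders, not the commit's code:

/* Hypothetical drain side: detach every queued node atomically, so
 * a concurrent llist_add() sees an empty list and kicks task work
 * again for the next batch.
 */
static void drain_task_list(struct io_uring_task *tctx)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
	struct io_kiocb *req, *next;

	llist_for_each_entry_safe(req, next, node, io_task_work.node)
		handle_req(req); /* placeholder for the real tw callback */
}

Note that llist_del_all() hands back entries in reverse insertion order (llist is LIFO), so a real consumer has to account for that if completions must be processed in FIFO order.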