author     Jens Axboe <axboe@kernel.dk>    2023-03-08 15:18:51 +0100
committer  Jens Axboe <axboe@kernel.dk>    2023-03-08 16:48:13 +0100
commit     01e68ce08a30db3d842ce7a55f7f6e0474a55f9a (patch)
tree       b2962dc88c9dcadbc042fa88f6442ffb40d34ae8 /io_uring/io-wq.c
parent     Linux 6.3-rc1 (diff)
io_uring/io-wq: stop setting PF_NO_SETAFFINITY on io-wq workers
Every now and then, reports come in from users puzzled about why changing the affinity of io-wq workers fails with EINVAL. This happens because the workers set PF_NO_SETAFFINITY as part of their creation, as io-wq organizes workers into groups based on what CPU they are running on.

However, this is purely an optimization and not a functional requirement. We can allow setting affinity, and just lazily update the worker-to-wqe mappings. If a given io-wq thread times out, it normally exits if there's no more work to do. The exception is if it's the last worker available. For the timeout case, check the affinity of the worker against the group mask and exit even if it's the last worker. New workers should then be created with the right mask and in the right location.

Reported-by: Daniel Dao <dqminh@cloudflare.com>
Link: https://lore.kernel.org/io-uring/CA+wXwBQwgxB3_UphSny-yAP5b26meeOu1W4TwYVcD_+5gOhvPw@mail.gmail.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
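As an illustration of what this change enables (not part of the commit): a minimal userspace sketch of pinning an io-wq worker to a CPU. pin_worker() is a hypothetical helper, and locating the worker's TID (the "iou-wrk-*" threads under /proc/<pid>/task/) is assumed to have been done already. On kernels before this patch the sched_setaffinity() call below fails with EINVAL because the worker carries PF_NO_SETAFFINITY; with the patch it succeeds.

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/* Hypothetical helper: pin one io-wq worker thread to a single CPU. */
static int pin_worker(pid_t worker_tid, int cpu)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);

	/*
	 * Pre-patch: rejected with EINVAL, since io-wq workers were
	 * created with PF_NO_SETAFFINITY. Post-patch: succeeds.
	 */
	if (sched_setaffinity(worker_tid, sizeof(set), &set) < 0) {
		fprintf(stderr, "sched_setaffinity(%d): %s\n",
			(int)worker_tid, strerror(errno));
		return -1;
	}
	return 0;
}

Note that the new mask takes effect lazily: the worker compares its current CPU against wqe->cpu_mask when its idle sleep times out and exits if it no longer matches, so that a replacement worker can be created in the right location.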
Diffstat (limited to 'io_uring/io-wq.c')
-rw-r--r--  io_uring/io-wq.c | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 411bb2d1acd4..f81c0a7136a5 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -616,7 +616,7 @@ static int io_wqe_worker(void *data)
 	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
-	bool last_timeout = false;
+	bool exit_mask = false, last_timeout = false;
 	char buf[TASK_COMM_LEN];
 
 	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
@@ -632,8 +632,11 @@ static int io_wqe_worker(void *data)
 			io_worker_handle_work(worker);
 
 		raw_spin_lock(&wqe->lock);
-		/* timed out, exit unless we're the last worker */
-		if (last_timeout && acct->nr_workers > 1) {
+		/*
+		 * Last sleep timed out. Exit if we're not the last worker,
+		 * or if someone modified our affinity.
+		 */
+		if (last_timeout && (exit_mask || acct->nr_workers > 1)) {
 			acct->nr_workers--;
 			raw_spin_unlock(&wqe->lock);
 			__set_current_state(TASK_RUNNING);
@@ -652,7 +655,11 @@ static int io_wqe_worker(void *data)
 				continue;
 			break;
 		}
-		last_timeout = !ret;
+		if (!ret) {
+			last_timeout = true;
+			exit_mask = !cpumask_test_cpu(raw_smp_processor_id(),
+					wqe->cpu_mask);
+		}
 	}
 
 	if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
@@ -704,7 +711,6 @@ static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
 	tsk->worker_private = worker;
 	worker->task = tsk;
 	set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
-	tsk->flags |= PF_NO_SETAFFINITY;
 
 	raw_spin_lock(&wqe->lock);
 	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);