author		Linus Torvalds <torvalds@linux-foundation.org>	2023-07-28 19:19:44 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-07-28 19:19:44 +0200
commit		9c65505826395e1193495ad73087bcdaa4347813 (patch)
tree		c3fc5c28a2942c676fe7b9347495d43b6e0cace8 /io_uring
parent		Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
parent		io_uring: gate iowait schedule on having pending requests (diff)
Merge tag 'io_uring-6.5-2023-07-28' of git://git.kernel.dk/linux
Pull io_uring fix from Jens Axboe:
"Just a single tweak to a patch from last week, to avoid having idle
cqring waits be attributed as iowait"
* tag 'io_uring-6.5-2023-07-28' of git://git.kernel.dk/linux:
io_uring: gate iowait schedule on having pending requests
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 89a611541bc4..f4591b912ea8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2493,11 +2493,20 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 	return 0;
 }
 
+static bool current_pending_io(void)
+{
+	struct io_uring_task *tctx = current->io_uring;
+
+	if (!tctx)
+		return false;
+	return percpu_counter_read_positive(&tctx->inflight);
+}
+
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 					  struct io_wait_queue *iowq)
 {
-	int token, ret;
+	int io_wait, ret;
 
 	if (unlikely(READ_ONCE(ctx->check_cq)))
 		return 1;
@@ -2511,17 +2520,19 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 		return 0;
 
 	/*
-	 * Use io_schedule_prepare/finish, so cpufreq can take into account
-	 * that the task is waiting for IO - turns out to be important for low
-	 * QD IO.
+	 * Mark us as being in io_wait if we have pending requests, so cpufreq
+	 * can take into account that the task is waiting for IO - turns out
+	 * to be important for low QD IO.
 	 */
-	token = io_schedule_prepare();
+	io_wait = current->in_iowait;
+	if (current_pending_io())
+		current->in_iowait = 1;
 	ret = 0;
 	if (iowq->timeout == KTIME_MAX)
 		schedule();
 	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
 		ret = -ETIME;
-	io_schedule_finish(token);
+	current->in_iowait = io_wait;
 	return ret;
 }
 
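
Note: the change above marks the task as being in iowait while it sleeps on the completion queue only if it actually has requests in flight, and restores the previous in_iowait value afterwards, so purely idle cqring waiters no longer inflate iowait accounting. The following is a minimal userspace C sketch of that save/gate/restore pattern, not kernel code; the names task_state, pending_io and wait_for_completions are illustrative stand-ins for current->in_iowait, current_pending_io() and io_cqring_wait_schedule().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the bits of task_struct the patch touches. */
struct task_state {
	bool in_iowait;   /* mirrors current->in_iowait */
	long inflight;    /* mirrors the per-task inflight request counter */
};

/* Mirrors the role of current_pending_io(): is any request still queued? */
static bool pending_io(const struct task_state *t)
{
	return t->inflight > 0;
}

static void wait_for_completions(struct task_state *t)
{
	/* Save the current flag, as the patch does with current->in_iowait. */
	bool saved = t->in_iowait;

	/* Gate: only account the upcoming sleep as iowait if work is pending. */
	if (pending_io(t))
		t->in_iowait = true;

	printf("sleeping, in_iowait=%d\n", t->in_iowait);
	/* ... sleep until completions arrive (schedule() in the kernel) ... */

	/* Restore the previous value unconditionally. */
	t->in_iowait = saved;
}

int main(void)
{
	struct task_state idle = { .in_iowait = false, .inflight = 0 };
	struct task_state busy = { .in_iowait = false, .inflight = 4 };

	wait_for_completions(&idle);  /* idle waiter: not counted as iowait */
	wait_for_completions(&busy);  /* has requests in flight: counted */
	return 0;
}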