author      Jens Axboe <axboe@kernel.dk>    2024-01-04 16:02:59 +0100
committer   Jens Axboe <axboe@kernel.dk>    2024-08-25 16:27:01 +0200
commit      45a41e74b8f472254c64b42713bad0686350b0c6 (patch)
tree        415bf81e254fef33d15ad3d056472a86a39b58e2 /io_uring
parent      io_uring: encapsulate extraneous wait flags into a separate struct (diff)
download    linux-45a41e74b8f472254c64b42713bad0686350b0c6.tar.xz
            linux-45a41e74b8f472254c64b42713bad0686350b0c6.zip
io_uring: move schedule wait logic into helper
In preparation for expanding how we handle waits, move the actual
schedule and schedule_timeout() handling into a helper.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
-rw-r--r--   io_uring/io_uring.c   37
1 file changed, 21 insertions, 16 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 37053d32c668..9e2b8d4c05db 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2350,22 +2350,10 @@ static bool current_pending_io(void)
 	return percpu_counter_read_positive(&tctx->inflight);
 }
 
-/* when returns >0, the caller should retry */
-static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq)
+static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+				     struct io_wait_queue *iowq)
 {
-	int ret;
-
-	if (unlikely(READ_ONCE(ctx->check_cq)))
-		return 1;
-	if (unlikely(!llist_empty(&ctx->work_llist)))
-		return 1;
-	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
-		return 1;
-	if (unlikely(task_sigpending(current)))
-		return -EINTR;
-	if (unlikely(io_should_wake(iowq)))
-		return 0;
+	int ret = 0;
 
 	/*
 	 * Mark us as being in io_wait if we have pending requests, so cpufreq
@@ -2374,7 +2362,6 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	 */
 	if (current_pending_io())
 		current->in_iowait = 1;
-	ret = 0;
 	if (iowq->timeout == KTIME_MAX)
 		schedule();
 	else if (!schedule_hrtimeout_range_clock(&iowq->timeout, 0,
@@ -2384,6 +2371,24 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 	return ret;
 }
 
+/* If this returns > 0, the caller should retry */
+static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
+					  struct io_wait_queue *iowq)
+{
+	if (unlikely(READ_ONCE(ctx->check_cq)))
+		return 1;
+	if (unlikely(!llist_empty(&ctx->work_llist)))
+		return 1;
+	if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL)))
+		return 1;
+	if (unlikely(task_sigpending(current)))
+		return -EINTR;
+	if (unlikely(io_should_wake(iowq)))
+		return 0;
+
+	return __io_cqring_wait_schedule(ctx, iowq);
+}
+
 struct ext_arg {
 	size_t argsz;
 	struct __kernel_timespec __user *ts;
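The net effect of the patch is a common refactoring shape: the cheap "do we even need to sleep" checks stay in an inline wrapper, while the code that actually blocks moves into a standalone helper that later changes can extend on its own. Below is a minimal, self-contained userspace sketch of that shape only; the names (struct waiter, wait_for_events, __wait_for_events_block) are hypothetical and merely mirror the structure of io_cqring_wait_schedule() / __io_cqring_wait_schedule(), they are not kernel APIs.

/* Illustrative userspace analogue of the fast-path/slow-path split in this
 * patch. Hypothetical names, not the kernel code. Build: cc -o demo demo.c */
#include <stdio.h>
#include <unistd.h>

struct waiter {
	int pending_events;	/* events already available to consume */
	int signal_pending;	/* pretend per-task signal state */
	unsigned sleep_ms;	/* how long the slow path "blocks" */
};

/* Slow path: the part that actually blocks. Kept separate so changes to
 * how waiting is done only touch this function. */
static int __wait_for_events_block(struct waiter *w)
{
	usleep(w->sleep_ms * 1000);	/* stand-in for schedule()/hrtimeout */
	return 0;
}

/* Fast path: cheap checks that may avoid blocking entirely.
 * Returns 0 when there is nothing to wait for (or after a clean wake),
 * and a negative value when the caller should bail out. */
static inline int wait_for_events(struct waiter *w)
{
	if (w->pending_events)
		return 0;		/* already satisfied, no need to sleep */
	if (w->signal_pending)
		return -1;		/* abort, analogous to -EINTR */

	return __wait_for_events_block(w);
}

int main(void)
{
	struct waiter w = { .pending_events = 0, .signal_pending = 0, .sleep_ms = 10 };

	if (wait_for_events(&w) == 0)
		printf("woke (or had events) without error\n");
	return 0;
}

In the kernel version above, the inline wrapper additionally returns 1 for "retry" conditions (a pending CQ check, non-empty work_llist, TIF_NOTIFY_SIGNAL), which is what the "If this returns > 0, the caller should retry" comment in the patch refers to.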