author	Stefan Roesch <shr@devkernel.io>	2023-06-08 18:38:35 +0200
committer	Jens Axboe <axboe@kernel.dk>	2024-02-09 19:54:12 +0100
commit	405b4dc14b10c5bdb3e9a6c3b9596c1597f7974d (patch)
tree	f2a323384ab9611eeb8886e1809858e458c07fea /io_uring
parent	Merge branch 'for-io_uring-add-napi-busy-polling-support' of git://git.kernel... (diff)
io-uring: move io_wait_queue definition to header file
This moves the definition of the io_wait_queue structure to the header
file so it can also be used from other files.

Signed-off-by: Stefan Roesch <shr@devkernel.io>
Link: https://lore.kernel.org/r/20230608163839.2891748-4-shr@devkernel.io
Signed-off-by: Jens Axboe <axboe@kernel.dk>
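To illustrate the point of the move, here is a minimal sketch (not part of the patch) of how another source file under io_uring/ could use the now-shared definitions. The file and helper name are hypothetical; the only real identifiers are io_wait_queue, io_should_wake() and the io_uring.h header itself.

/* hypothetical-consumer.c -- example only, not from this commit */
#include "io_uring.h"

/*
 * Hypothetical helper: with struct io_wait_queue and io_should_wake()
 * declared in io_uring.h, any io_uring/ source file can test whether a
 * waiter already has enough completions without duplicating the struct.
 */
static inline bool example_waiter_done(struct io_wait_queue *iowq)
{
	return io_should_wake(iowq);
}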
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	21
-rw-r--r--	io_uring/io_uring.h	22
2 files changed, 22 insertions(+), 21 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 72b9c1c1a244..5311f19faa4c 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2477,33 +2477,12 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	return ret;
 }
 
-struct io_wait_queue {
-	struct wait_queue_entry wq;
-	struct io_ring_ctx *ctx;
-	unsigned cq_tail;
-	unsigned nr_timeouts;
-	ktime_t timeout;
-};
-
 static inline bool io_has_work(struct io_ring_ctx *ctx)
 {
 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
 	       !llist_empty(&ctx->work_llist);
 }
 
-static inline bool io_should_wake(struct io_wait_queue *iowq)
-{
-	struct io_ring_ctx *ctx = iowq->ctx;
-	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
-
-	/*
-	 * Wake up if we have enough events, or if a timeout occurred since we
-	 * started waiting. For timeouts, we always want to return to userspace,
-	 * regardless of event count.
-	 */
-	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
-}
-
 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
 			    int wake_flags, void *key)
 {
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 38af82788786..859f6e0580e3 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -35,6 +35,28 @@ enum {
 	IOU_STOP_MULTISHOT = -ECANCELED,
 };
 
+struct io_wait_queue {
+	struct wait_queue_entry wq;
+	struct io_ring_ctx *ctx;
+	unsigned cq_tail;
+	unsigned nr_timeouts;
+	ktime_t timeout;
+
+};
+
+static inline bool io_should_wake(struct io_wait_queue *iowq)
+{
+	struct io_ring_ctx *ctx = iowq->ctx;
+	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
+
+	/*
+	 * Wake up if we have enough events, or if a timeout occurred since we
+	 * started waiting. For timeouts, we always want to return to userspace,
+	 * regardless of event count.
+	 */
+	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
+}
+
 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
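A side note on the io_should_wake() arithmetic moved above: it subtracts the waiter's target tail from the current CQ tail as unsigned 32-bit values and tests the result as a signed int, which keeps the comparison correct even when the tail counter wraps. The standalone userspace snippet below is only an illustration of that same trick with made-up counter values; it is not kernel code and does not use the kernel's READ_ONCE()/atomic helpers.

#include <assert.h>
#include <stdio.h>

/*
 * Same comparison idea as io_should_wake(): an unsigned subtraction
 * interpreted as a signed int is non-negative exactly when the current
 * tail has advanced at least as far as the tail being waited for, even
 * across 32-bit wraparound.
 */
static int reached_target(unsigned cur_tail, unsigned wait_tail)
{
	int dist = (int)(cur_tail - wait_tail);

	return dist >= 0;
}

int main(void)
{
	assert(reached_target(100, 100));       /* exactly enough events */
	assert(reached_target(105, 100));       /* more than enough */
	assert(!reached_target(99, 100));       /* still short of the target */
	assert(reached_target(5, 0xfffffffeu)); /* target set just before wrap */
	printf("wraparound-safe tail comparison holds\n");
	return 0;
}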