author		Jens Axboe <axboe@kernel.dk>	2020-04-13 19:09:12 +0200
committer	Jens Axboe <axboe@kernel.dk>	2020-04-13 19:09:12 +0200
commit		74ce6ce43d4fc6ce15efb21378d9ef26125c298b (patch)
tree		216ea232a9e67e8063b69c68b898ee45086f3abe /fs
parent		io_uring: correct O_NONBLOCK check for splice punt (diff)
io_uring: check for need to re-wait in polled async handling
We added this for just the regular poll requests in commit a6ba632d2c24
("io_uring: retry poll if we got woken with non-matching mask"); we should
do the same for the poll handler used for pollable async requests. Move
the re-wait check and arm into a helper, and call it from
io_async_task_func() as well.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
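For context, the bug class here is the classic spurious wakeup: a waiter can
run without its wake condition actually holding, so it must re-check the
condition and go back to sleep. A minimal userspace analogue using POSIX
condition variables (illustration only, not code from this patch):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool ready;

/*
 * Waiter: being woken does not guarantee the condition holds, so re-check
 * and re-wait in a loop. This is the same shape as io_poll_rewait()
 * re-arming the request on the wait queue when vfs_poll() reports no
 * matching events.
 */
static void wait_for_ready(void)
{
	pthread_mutex_lock(&lock);
	while (!ready)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}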
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	43
1 file changed, 29 insertions(+), 14 deletions(-)
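One subtlety to keep in mind while reading the diff below: io_poll_rewait()
is annotated __acquires(&req->ctx->completion_lock), so it returns with the
completion lock held on both paths, and a true return means the request was
re-armed on the wait queue. A condensed sketch of the resulting caller
pattern (shape excerpted from the diff, not a complete function):

	/* io_poll_rewait() returns with ctx->completion_lock held either way. */
	if (io_poll_rewait(req, poll)) {
		/* No matching events yet; request is back on the wait queue. */
		spin_unlock_irq(&ctx->completion_lock);
		return;
	}
	/* Lock still held here: safe to unhash and complete the request. */
	hash_del(&req->hash_node);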
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0d1b5d5f1251..7b41f6231955 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4156,6 +4156,26 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	return 1;
 }
 
+static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
+	__acquires(&req->ctx->completion_lock)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (!req->result && !READ_ONCE(poll->canceled)) {
+		struct poll_table_struct pt = { ._key = poll->events };
+
+		req->result = vfs_poll(req->file, &pt) & poll->events;
+	}
+
+	spin_lock_irq(&ctx->completion_lock);
+	if (!req->result && !READ_ONCE(poll->canceled)) {
+		add_wait_queue(poll->head, &poll->wait);
+		return true;
+	}
+
+	return false;
+}
+
 static void io_async_task_func(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -4164,14 +4184,16 @@ static void io_async_task_func(struct callback_head *cb)
 
 	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
 
-	WARN_ON_ONCE(!list_empty(&req->apoll->poll.wait.entry));
-
-	if (hash_hashed(&req->hash_node)) {
-		spin_lock_irq(&ctx->completion_lock);
-		hash_del(&req->hash_node);
+	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
+		return;
 	}
 
+	if (hash_hashed(&req->hash_node))
+		hash_del(&req->hash_node);
+
+	spin_unlock_irq(&ctx->completion_lock);
+
 	/* restore ->work in case we need to retry again */
 	memcpy(&req->work, &apoll->work, sizeof(req->work));
 
@@ -4436,18 +4458,11 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
 	struct io_ring_ctx *ctx = req->ctx;
 	struct io_poll_iocb *poll = &req->poll;
 
-	if (!req->result && !READ_ONCE(poll->canceled)) {
-		struct poll_table_struct pt = { ._key = poll->events };
-
-		req->result = vfs_poll(req->file, &pt) & poll->events;
-	}
-
-	spin_lock_irq(&ctx->completion_lock);
-	if (!req->result && !READ_ONCE(poll->canceled)) {
-		add_wait_queue(poll->head, &poll->wait);
+	if (io_poll_rewait(req, poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
 		return;
 	}
+
 	hash_del(&req->hash_node);
 	io_poll_complete(req, req->result, 0);
 	req->flags |= REQ_F_COMP_LOCKED;
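To exercise the kernel poll path touched above from userspace, a minimal
sketch using liburing (not part of the patch; assumes liburing is installed,
build with something like: gcc poll_demo.c -o poll_demo -luring). It arms a
one-shot poll request on a pipe and then makes the pipe readable:

#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];

	if (pipe(fds) < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* Queue a one-shot poll for readability on the pipe's read end. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	io_uring_submit(&ring);

	/* Making the pipe readable triggers the poll wakeup in the kernel. */
	write(fds[1], "x", 1);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("poll completed, mask 0x%x\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}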