author		Pavel Begunkov <asml.silence@gmail.com>	2022-11-30 16:21:52 +0100
committer	Jens Axboe <axboe@kernel.dk>	2022-11-30 18:26:57 +0100
commit		9805fa2d94993e16efd0e1adbd2b54d8d1fe2f9f (patch)
tree		880afb7c19c4898443ce7cad847df4a63d797133
parent		io_uring: kill io_poll_issue's PF_EXITING check (diff)
io_uring: carve io_poll_check_events fast path
The fast path in io_poll_check_events() is when we have only one (i.e.
master) reference. Move all verification, cancellation checks, edge case
handling and so on under a common if.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/8c21c5d5e027e32dc553705e88796dec79ff6f93.1669821213.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	io_uring/poll.c	41
1 file changed, 22 insertions, 19 deletions
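To illustrate the restructuring outside of the kernel context, here is a minimal, self-contained C sketch of the pattern the patch applies: when the common case is holding the sole owner reference (v == 1), all rarer handling (missing owner reference, cancellation, stale result, retry) is grouped under a single unlikely() branch so the fast path falls straight through. The names poll_refs_demo, check_events_demo, REF_MASK, CANCEL_FLAG and RETRY_FLAG are illustrative stand-ins, not the kernel's definitions.

/* Illustrative sketch only; not the io_uring implementation. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_MASK	0x0000ffff	/* stand-in for IO_POLL_REF_MASK */
#define CANCEL_FLAG	0x00010000	/* stand-in for IO_POLL_CANCEL_FLAG */
#define RETRY_FLAG	0x00020000	/* stand-in for IO_POLL_RETRY_FLAG */

#define unlikely(x)	__builtin_expect(!!(x), 0)

static atomic_int poll_refs_demo = 1;	/* 1 == sole (master) reference */

/* Returns false when the request should stop being processed. */
static bool check_events_demo(int *res)
{
	int v = atomic_load(&poll_refs_demo);

	if (unlikely(v != 1)) {
		/* Rare cases only: keep them off the hot path. */
		if (!(v & REF_MASK))
			return false;	/* no owner reference left */
		if (v & CANCEL_FLAG)
			return false;	/* request was cancelled */
		if ((v & REF_MASK) != 1)
			*res = 0;	/* result is stale, force a re-poll */
		if (v & RETRY_FLAG) {
			*res = 0;
			/* clear the flag before re-polling so new events aren't missed */
			atomic_fetch_and(&poll_refs_demo, ~RETRY_FLAG);
		}
	}
	/* Fast path: sole reference, use *res as-is. */
	return true;
}

int main(void)
{
	int res = 0;
	printf("continue: %d\n", check_events_demo(&res));
	return 0;
}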
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 599ba28c89b2..8987e13d302e 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -247,27 +247,30 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 	do {
 		v = atomic_read(&req->poll_refs);
-		/* tw handler should be the owner, and so have some references */
-		if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
-			return IOU_POLL_DONE;
-		if (v & IO_POLL_CANCEL_FLAG)
-			return -ECANCELED;
-		/*
-		 * cqe.res contains only events of the first wake up
-		 * and all others are be lost. Redo vfs_poll() to get
-		 * up to date state.
-		 */
-		if ((v & IO_POLL_REF_MASK) != 1)
-			req->cqe.res = 0;
-		if (v & IO_POLL_RETRY_FLAG) {
-			req->cqe.res = 0;
+		if (unlikely(v != 1)) {
+			/* tw should be the owner and so have some refs */
+			if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
+				return IOU_POLL_DONE;
+			if (v & IO_POLL_CANCEL_FLAG)
+				return -ECANCELED;
 			/*
-			 * We won't find new events that came in between
-			 * vfs_poll and the ref put unless we clear the flag
-			 * in advance.
+			 * cqe.res contains only events of the first wake up
+			 * and all others are to be lost. Redo vfs_poll() to get
+			 * up to date state.
 			 */
-			atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
-			v &= ~IO_POLL_RETRY_FLAG;
+			if ((v & IO_POLL_REF_MASK) != 1)
+				req->cqe.res = 0;
+
+			if (v & IO_POLL_RETRY_FLAG) {
+				req->cqe.res = 0;
+				/*
+				 * We won't find new events that came in between
+				 * vfs_poll and the ref put unless we clear the
+				 * flag in advance.
+				 */
+				atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
+				v &= ~IO_POLL_RETRY_FLAG;
+			}
 		}
 		/* the mask was stashed in __io_poll_execute */