author     Linus Torvalds <torvalds@linux-foundation.org>  2023-12-15 21:20:14 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-12-15 21:20:14 +0100
commit     3bd7d748816927202268cb335921f7f68b3ca723 (patch)
tree       7c076c6f612d649043f1f4d80765689d4bd7fcba
parent     Merge tag 'mm-hotfixes-stable-2023-12-15-07-11' of git://git.kernel.org/pub/s... (diff)
parent     io_uring/cmd: fix breakage in SOCKET_URING_OP_SIOC* implementation (diff)
Merge tag 'io_uring-6.7-2023-12-15' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:
 "Just two minor fixes:

   - Fix for the io_uring socket option commands using the wrong value
     on some archs (Al)

   - Tweak to the poll lazy wake enable (me)"

* tag 'io_uring-6.7-2023-12-15' of git://git.kernel.dk/linux:
  io_uring/cmd: fix breakage in SOCKET_URING_OP_SIOC* implementation
  io_uring/poll: don't enable lazy wake for POLLEXCLUSIVE
-rw-r--r--  include/linux/io_uring_types.h |  3 +++
-rw-r--r--  io_uring/poll.c                | 20 +++++++++++++++++---
-rw-r--r--  io_uring/uring_cmd.c           |  2 +-
3 files changed, 21 insertions(+), 4 deletions(-)
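
For orientation, the first fix concerns the io_uring socket-command path. Below is a minimal userspace sketch of what that path does, assuming a recent liburing and uapi headers; the socket_bytes_ready() helper name is illustrative and error handling is elided. SOCKET_URING_OP_SIOCINQ reports how many bytes are queued for reading on a socket, like ioctl(SIOCINQ):

	#include <liburing.h>
	#include <string.h>

	static int socket_bytes_ready(struct io_uring *ring, int sockfd)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
		struct io_uring_cqe *cqe;
		int ret;

		memset(sqe, 0, sizeof(*sqe));
		sqe->opcode = IORING_OP_URING_CMD;
		sqe->fd = sockfd;
		sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;	/* like ioctl(fd, SIOCINQ) */

		io_uring_submit(ring);
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		ret = cqe->res;		/* >= 0: bytes queued for reading */
		io_uring_cqe_seen(ring, cqe);
		return ret;
	}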
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 805bb635cdf5..239a4f68801b 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -434,6 +434,7 @@ enum {
 	/* keep async read/write and isreg together and in order */
 	REQ_F_SUPPORT_NOWAIT_BIT,
 	REQ_F_ISREG_BIT,
+	REQ_F_POLL_NO_LAZY_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -501,6 +502,8 @@ enum {
 	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
 	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
 	REQ_F_HASH_LOCKED	= BIT(REQ_F_HASH_LOCKED_BIT),
+	/* don't use lazy poll wake for this request */
+	REQ_F_POLL_NO_LAZY	= BIT(REQ_F_POLL_NO_LAZY_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);
diff --git a/io_uring/poll.c b/io_uring/poll.c
index d38d05edb4fa..d59b74a99d4e 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -366,11 +366,16 @@ void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 
 static void __io_poll_execute(struct io_kiocb *req, int mask)
 {
+	unsigned flags = 0;
+
 	io_req_set_res(req, mask, 0);
 	req->io_task_work.func = io_poll_task_func;
 
 	trace_io_uring_task_add(req, mask);
-	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
+
+	if (!(req->flags & REQ_F_POLL_NO_LAZY))
+		flags = IOU_F_TWQ_LAZY_WAKE;
+	__io_req_task_work_add(req, flags);
 }
 
 static inline void io_poll_execute(struct io_kiocb *req, int res)
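
For context, IOU_F_TWQ_LAZY_WAKE defers waking the submitting task until enough task-work items are queued to cover the number of completions it is waiting on. A conceptual sketch of that batching decision follows (simplified and illustrative only; tw_queue and should_wake are not the kernel's actual structures):

	/*
	 * Conceptual model, not kernel code: lazily queued work only wakes
	 * the waiter once its completion target can be satisfied.
	 */
	struct tw_queue {
		int pending;		/* queued task-work items */
		int wait_target;	/* completions the waiter asked for */
	};

	static bool should_wake(struct tw_queue *q, bool lazy)
	{
		q->pending++;
		if (!lazy)
			return true;			/* wake immediately */
		return q->pending >= q->wait_target;	/* batch until target met */
	}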
@@ -526,10 +531,19 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
 	poll->head = head;
 	poll->wait.private = (void *) wqe_private;
 
-	if (poll->events & EPOLLEXCLUSIVE)
+	if (poll->events & EPOLLEXCLUSIVE) {
+		/*
+		 * Exclusive waits may only wake a limited amount of entries
+		 * rather than all of them, this may interfere with lazy
+		 * wake if someone does wait(events > 1). Ensure we don't do
+		 * lazy wake for those, as we need to process each one as they
+		 * come in.
+		 */
+		req->flags |= REQ_F_POLL_NO_LAZY;
 		add_wait_queue_exclusive(head, &poll->wait);
-	else
+	} else {
 		add_wait_queue(head, &poll->wait);
+	}
 }
 
 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
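
An exclusive wait wakes only a limited number of waiters per event, so deferring that single wakeup could leave the event unserviced; the new flag therefore forces an immediate wake. A hypothetical trigger for this path (sketch assuming liburing; ring and listen_fd are placeholders) is several workers arming exclusive polls on one shared socket:

	#include <liburing.h>
	#include <sys/epoll.h>

	/*
	 * Each worker arms an exclusive poll on the shared socket; the
	 * kernel then wakes only a subset of waiters per incoming event.
	 */
	static void arm_exclusive_poll(struct io_uring *ring, int listen_fd)
	{
		struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

		io_uring_prep_poll_add(sqe, listen_fd, EPOLLIN | EPOLLEXCLUSIVE);
		io_uring_submit(ring);
	}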
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index acbc2924ecd2..7d3ef62e620a 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -7,7 +7,7 @@
 #include <linux/nospec.h>
 
 #include <uapi/linux/io_uring.h>
-#include <uapi/asm-generic/ioctls.h>
+#include <asm/ioctls.h>
 
 #include "io_uring.h"
 #include "rsrc.h"