path: root/fs/io_uring.c
author    Stefano Garzarella <sgarzare@redhat.com>    2020-02-21 16:42:16 +0100
committer Jens Axboe <axboe@kernel.dk>    2020-02-21 17:16:10 +0100
commit    7143b5ac5750f404ff3a594b34fdf3fc2f99f828 (patch)
tree      d6bf0edaaf6f0f994d2043d91a91644b2e6ca461 /fs/io_uring.c
parent    io_uring: fix use-after-free by io_cleanup_req() (diff)
io_uring: prevent sq_thread from spinning when it should stop
This patch drops 'cur_mm' before calling cond_resched(), to prevent
the sq_thread from spinning even when the user process is finished.

Before this patch, if the user process ended without closing the
io_uring fd, the sq_thread continued to spin until the
'sq_thread_idle' timeout expired. In the worst case, where the
'sq_thread_idle' parameter is bigger than INT_MAX, the sq_thread
would spin forever.

Fixes: 6c271ce2f1d5 ("io_uring: add submission polling")
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
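For orientation, a minimal sketch of the idle path as reordered by this
patch: the mm reference is released before the thread either spins with
cond_resched() or queues itself on the waitqueue. The helper name
sq_idle_path() and the jiffies-based 'timeout' parameter are assumptions
for illustration; cur_mm, unuse_mm(), mmput(), cond_resched() and
ctx->sqo_wait are the objects touched by the diff below.

/*
 * Illustrative sketch only -- a simplified version of the reordered idle
 * path, not the kernel's actual io_sq_thread().
 */
static void sq_idle_path(struct io_ring_ctx *ctx, struct mm_struct **cur_mm,
			 unsigned long timeout)
{
	DEFINE_WAIT(wait);

	/*
	 * Drop the mm first: unuse_mm()/mmput() may sleep, and holding it
	 * across cond_resched() or the waitqueue would keep an exiting
	 * user process pinned for the whole sq_thread_idle period.
	 */
	if (*cur_mm) {
		unuse_mm(*cur_mm);
		mmput(*cur_mm);
		*cur_mm = NULL;
	}

	/* Still within the sq_thread_idle window: keep spinning, but yield. */
	if (!time_after(jiffies, timeout)) {
		cond_resched();
		return;
	}

	/* Idle window expired: sleep on the waitqueue until woken. */
	prepare_to_wait(&ctx->sqo_wait, &wait, TASK_INTERRUPTIBLE);
	/* ... schedule() and finish_wait() elided ... */
}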
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6e249aa97ba3..b43467b3a8dc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5143,6 +5143,18 @@ static int io_sq_thread(void *data)
 		 */
 		if (!to_submit || ret == -EBUSY) {
 			/*
+			 * Drop cur_mm before scheduling, we can't hold it for
+			 * long periods (or over schedule()). Do this before
+			 * adding ourselves to the waitqueue, as the unuse/drop
+			 * may sleep.
+			 */
+			if (cur_mm) {
+				unuse_mm(cur_mm);
+				mmput(cur_mm);
+				cur_mm = NULL;
+			}
+
+			/*
 			 * We're polling. If we're within the defined idle
 			 * period, then let us spin without work before going
 			 * to sleep. The exception is if we got EBUSY doing
@@ -5156,18 +5168,6 @@ static int io_sq_thread(void *data)
 				continue;
 			}
 
-			/*
-			 * Drop cur_mm before scheduling, we can't hold it for
-			 * long periods (or over schedule()). Do this before
-			 * adding ourselves to the waitqueue, as the unuse/drop
-			 * may sleep.
-			 */
-			if (cur_mm) {
-				unuse_mm(cur_mm);
-				mmput(cur_mm);
-				cur_mm = NULL;
-			}
-
 			prepare_to_wait(&ctx->sqo_wait, &wait,
 						TASK_INTERRUPTIBLE);