author		Jens Axboe <axboe@kernel.dk>	2022-03-20 01:04:41 +0100
committer	Jens Axboe <axboe@kernel.dk>	2022-03-20 14:23:57 +0100
commit		abdad709ed8fe4fd3b865ed1010de37a49601ff4 (patch)
tree		93e232115062ce069401429d6d31b7b451ca420a /fs
parent		io_uring: terminate manual loop iterator loop correctly for non-vecs (diff)
download	linux-abdad709ed8fe4fd3b865ed1010de37a49601ff4.tar.xz
		linux-abdad709ed8fe4fd3b865ed1010de37a49601ff4.zip
io_uring: recycle provided before arming poll
We currently have a race where we recycle the selected buffer if poll
returns IO_APOLL_OK. But that's too late, as the poll could already be
triggering or have triggered. If that race happens, then we're putting
a buffer that's already being used.

Fix this by recycling before we arm poll. This does mean that we'll
sometimes almost instantly re-select the buffer, but it's rare enough
in testing that it should not pose a performance issue.

Fixes: b1c62645758e ("io_uring: recycle provided buffers if request goes async")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
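To make the race concrete, here is a minimal userspace sketch. It is
not kernel code: struct req, its two flags, and the helpers below are
invented stand-ins for the real io_kiocb buffer/poll state; only the
call ordering mirrors the commit.

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the io_kiocb state relevant here. */
	struct req {
		bool buf_selected;	/* a provided buffer is attached */
		bool poll_armed;	/* poll may trigger and use the buffer */
	};

	static void kbuf_recycle(struct req *r)
	{
		/* Returning the buffer is only safe while poll cannot run. */
		if (r->poll_armed)
			printf("race: recycling a buffer poll may already use\n");
		r->buf_selected = false;
	}

	static void arm_poll(struct req *r)
	{
		r->poll_armed = true;	/* completion can now race with us */
	}

	int main(void)
	{
		struct req r = { .buf_selected = true };

		/* Old order (buggy): arm_poll(&r); then kbuf_recycle(&r); */
		/* New order, as in this commit: recycle first, then arm. */
		kbuf_recycle(&r);
		arm_poll(&r);
		return 0;
	}

The trade-off is the one the message above notes: with the new order, a
request whose poll arms but never triggers may immediately re-select the
buffer it just returned, which testing showed to be rare.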
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5fa736344b67..98949348ee02 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -6240,6 +6240,8 @@ static int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
 
+	io_kbuf_recycle(req);
+
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
 	if (ret || ipt.error)
 		return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
@@ -7491,7 +7493,6 @@ static void io_queue_sqe_arm_apoll(struct io_kiocb *req)
 		io_queue_async_work(req, NULL);
 		break;
 	case IO_APOLL_OK:
-		io_kbuf_recycle(req);
 		break;
 	}
 