summary refs log tree commit diff stats
path: root/io_uring
diff options
context:
space:
mode:
author: Pavel Begunkov <asml.silence@gmail.com> 2022-12-02 18:47:25 +0100
committer: Jens Axboe <axboe@kernel.dk> 2022-12-15 16:20:10 +0100
commita8cf95f93610eb8282f8b6d0117ba78b74588d6b (patch)
tree7ff8b291a2c3df4c45a9afe14db1d293c3d8a399 /io_uring
parentio_uring: ease timeout flush locking requirements (diff)
downloadlinux-a8cf95f93610eb8282f8b6d0117ba78b74588d6b.tar.xz
linux-a8cf95f93610eb8282f8b6d0117ba78b74588d6b.zip
io_uring: fix overflow handling regression
Because the single task locking series got reordered ahead of the timeout and completion lock changes, two hunks inadvertently ended up using __io_fill_cqe_req() rather than io_fill_cqe_req(). This meant that we dropped overflow handling in those two spots. Reinstate the correct CQE filling helper. Fixes: f66f73421f0a ("io_uring: skip spinlocking for ->task_complete") Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring')
-rw-r--r--io_uring/io_uring.c2
-rw-r--r--io_uring/rw.c2
2 files changed, 2 insertions, 2 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fc64072c53eb..4601e48a173d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -927,7 +927,7 @@ static void __io_req_complete_post(struct io_kiocb *req)
io_cq_lock(ctx);
if (!(req->flags & REQ_F_CQE_SKIP))
- __io_fill_cqe_req(ctx, req);
+ io_fill_cqe_req(ctx, req);
/*
* If we're the last reference to this request, add to our locked
diff --git a/io_uring/rw.c b/io_uring/rw.c
index b9cac5706e8d..8227af2e1c0f 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1062,7 +1062,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
continue;
req->cqe.flags = io_put_kbuf(req, 0);
- __io_fill_cqe_req(req->ctx, req);
+ io_fill_cqe_req(req->ctx, req);
}
if (unlikely(!nr_events))