path: root/io_uring/rw.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2023-08-30 05:11:33 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2023-08-30 05:11:33 +0200
commit    c1b7fcf3f6d94c2c3528bf77054bf174a5ef63d7 (patch)
tree      cc7655b6b42a386436dcdb4d50f89c7ad9bf1db1 /io_uring/rw.c
parent    Merge tag 'sysctl-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/m... (diff)
parent    io_uring: move iopoll ctx fields around (diff)
Merge tag 'for-6.6/io_uring-2023-08-28' of git://git.kernel.dk/linux
Pull io_uring updates from Jens Axboe:
 "Fairly quiet round in terms of features, mostly just improvements all
  over the map for existing code. In detail:

   - Initial support for socket operations through io_uring. Latter
     half of this will likely land with the 6.7 kernel, then allowing
     things like get/setsockopt (Breno)

   - Cleanup of the cancel code, and then adding support for canceling
     requests with the opcode as the key (me)

   - Improvements for the io-wq locking (me)

   - Fix affinity setting for SQPOLL based io-wq (me)

   - Remove the io_uring userspace code. These were added initially as
     copies from liburing, but all of them have since bitrotted and are
     way out of date at this point. Rather than attempt to keep them in
     sync, just get rid of them. People will have liburing available
     anyway for these examples. (Pavel)

   - Series improving the CQ/SQ ring caching (Pavel)

   - Misc fixes and cleanups (Pavel, Yue, me)"

* tag 'for-6.6/io_uring-2023-08-28' of git://git.kernel.dk/linux: (47 commits)
  io_uring: move iopoll ctx fields around
  io_uring: move multishot cqe cache in ctx
  io_uring: separate task_work/waiting cache line
  io_uring: banish non-hot data to end of io_ring_ctx
  io_uring: move non aligned field to the end
  io_uring: add option to remove SQ indirection
  io_uring: compact SQ/CQ heads/tails
  io_uring: force inline io_fill_cqe_req
  io_uring: merge iopoll and normal completion paths
  io_uring: reorder cqring_flush and wakeups
  io_uring: optimise extra io_get_cqe null check
  io_uring: refactor __io_get_cqe()
  io_uring: simplify big_cqe handling
  io_uring: cqe init hardening
  io_uring: improve cqe !tracing hot path
  io_uring/rsrc: Annotate struct io_mapped_ubuf with __counted_by
  io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used
  io_uring: simplify io_run_task_work_sig return
  io_uring/rsrc: keep one global dummy_ubuf
  io_uring: never overflow io_aux_cqe
  ...
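Of the items above, canceling by opcode is userspace-visible. The sketch
below is illustrative only, assuming the 6.6 uapi where the
IORING_ASYNC_CANCEL_OP flag asks the kernel to match requests by the opcode
placed in sqe->len; the helper name cancel_all_reads() is made up for the
example and is not part of liburing.

    /* Hedged sketch: cancel every in-flight IORING_OP_READ on this ring.
     * Assumes a 6.6+ kernel exposing IORING_ASYNC_CANCEL_OP, and that no
     * other completions are pending; error handling trimmed to essentials. */
    #include <errno.h>
    #include <liburing.h>

    static int cancel_all_reads(struct io_uring *ring)
    {
    	struct io_uring_sqe *sqe;
    	struct io_uring_cqe *cqe;
    	int ret;

    	sqe = io_uring_get_sqe(ring);
    	if (!sqe)
    		return -EBUSY;
    	/* user_data is ignored when matching by opcode */
    	io_uring_prep_cancel(sqe, NULL,
    			     IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_OP);
    	sqe->len = IORING_OP_READ;	/* the opcode used as the cancel key */

    	ret = io_uring_submit(ring);
    	if (ret < 0)
    		return ret;
    	ret = io_uring_wait_cqe(ring, &cqe);
    	if (ret < 0)
    		return ret;
    	ret = cqe->res;	/* >= 0: requests canceled; -ENOENT: none matched */
    	io_uring_cqe_seen(ring, cqe);
    	return ret;
    }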
Diffstat (limited to 'io_uring/rw.c')
-rw-r--r--  io_uring/rw.c | 24 +++++-------------------
1 file changed, 5 insertions(+), 19 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index b3435033fadf..c8c822fa7980 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -989,13 +989,6 @@ copy_iov:
 	return ret;
 }
 
-static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
-{
-	io_commit_cqring_flush(ctx);
-	if (ctx->flags & IORING_SETUP_SQPOLL)
-		io_cqring_wake(ctx);
-}
-
 void io_rw_fail(struct io_kiocb *req)
 {
 	int res;
@@ -1066,24 +1059,17 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
 		nr_events++;
-		if (unlikely(req->flags & REQ_F_CQE_SKIP))
-			continue;
-
 		req->cqe.flags = io_put_kbuf(req, 0);
-		if (unlikely(!__io_fill_cqe_req(ctx, req))) {
-			spin_lock(&ctx->completion_lock);
-			io_req_cqe_overflow(req);
-			spin_unlock(&ctx->completion_lock);
-		}
 	}
-
 	if (unlikely(!nr_events))
 		return 0;
-	io_commit_cqring(ctx);
-	io_cqring_ev_posted_iopoll(ctx);
 	pos = start ? start->next : ctx->iopoll_list.first;
 	wq_list_cut(&ctx->iopoll_list, prev, start);
-	io_free_batch_list(ctx, pos);
+
+	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
+		return 0;
+	ctx->submit_state.compl_reqs.first = pos;
+	__io_submit_flush_completions(ctx);
 	return nr_events;
 }
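
Read alongside the hunk above: instead of the removed iopoll-only tail (fill
CQE with an overflow fallback, commit the ring, wake waiters), completed
requests are now spliced onto ctx->submit_state.compl_reqs and pushed through
the shared batched-completion path. The following is a simplified sketch of
what that shared path does; the identifiers are taken from the surrounding
io_uring code, but the body is a rough approximation, not the kernel source.

    /* Simplified sketch of the shared completion path that io_do_iopoll()
     * now feeds via ctx->submit_state.compl_reqs; roughly what
     * __io_submit_flush_completions() does, with locking elided. */
    static void flush_completions_sketch(struct io_ring_ctx *ctx)
    {
    	struct io_wq_work_node *node;

    	__wq_list_for_each(node, &ctx->submit_state.compl_reqs) {
    		struct io_kiocb *req = container_of(node, struct io_kiocb,
    						    comp_list);

    		/* CQE_SKIP and CQ-overflow handling live here now, which
    		 * is why the hunk above could drop both from the iopoll
    		 * loop. */
    		if (!(req->flags & REQ_F_CQE_SKIP))
    			io_fill_cqe_req(ctx, req);
    	}
    	io_commit_cqring(ctx);	/* publish the new CQ tail */
    	io_cqring_wake(ctx);	/* wake CQ waiters; subsumes the SQPOLL case
    				 * the removed io_cqring_ev_posted_iopoll()
    				 * special-cased */
    	io_free_batch_list(ctx, ctx->submit_state.compl_reqs.first);
    	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
    }

One consequence of the WARN_ON_ONCE in the new code: io_do_iopoll() borrows
the (empty) compl_reqs list to hand its completions to the shared path, so
iopoll and normal submission completions are never batched together.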