| author | Jens Axboe <axboe@kernel.dk> | 2024-01-29 04:11:55 +0100 |
| --- | --- | --- |
| committer | Jens Axboe <axboe@kernel.dk> | 2024-02-08 21:27:06 +0100 |
| commit | 521223d7c229f83915619f888c99e952f24dc39f | |
| tree | b19885749949ac462562ebc474b8964edc85331f /io_uring/poll.c | |
| parent | io_uring: expand main struct io_kiocb flags to 64-bits | |
io_uring/cancel: don't default to setting req->work.cancel_seq
Just leave it unset by default, avoiding dipping into the last
cacheline (which is otherwise untouched) for the fast path of using
poll to drive networked traffic. Add a flag that tells us if the
sequence is valid or not, and then we can defer actually assigning
the flag and sequence until someone runs cancelations.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/poll.c')

| -rw-r--r-- | io_uring/poll.c | 6 |

1 file changed, 1 insertion(+), 5 deletions(-)
```diff
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7513afc7b702..c2b0a2d0762b 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -588,10 +588,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 				 struct io_poll_table *ipt, __poll_t mask,
 				 unsigned issue_flags)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-
 	INIT_HLIST_NODE(&req->hash_node);
-	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
 	io_init_poll_iocb(poll, mask);
 	poll->file = req->file;
 	req->apoll_events = poll->events;
@@ -818,9 +815,8 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 		if (poll_only && req->opcode != IORING_OP_POLL_ADD)
 			continue;
 		if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
-			if (cd->seq == req->work.cancel_seq)
+			if (io_cancel_match_sequence(req, cd->seq))
 				continue;
-			req->work.cancel_seq = cd->seq;
 		}
 		*out_bucket = hb;
 		return req;
```
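The io_cancel_match_sequence() helper itself lives outside io_uring/poll.c, so it does not appear in this diff. Going by the commit message, a minimal sketch of what it could look like, assuming a REQ_F_CANCEL_SEQ request flag that records whether the sequence has been assigned yet:

```c
/*
 * Sketch only, not code from this diff: assumes a REQ_F_CANCEL_SEQ bit
 * in req->flags that marks req->work.cancel_seq as valid.
 */
static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
{
	/* Sequence already assigned and matching: already seen this pass */
	if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
		return true;

	/* First cancelation to touch this request: assign lazily */
	req->flags |= REQ_F_CANCEL_SEQ;
	req->work.cancel_seq = sequence;
	return false;
}
```

This keeps the old skip-once-per-sequence behavior for IORING_ASYNC_CANCEL_ALL while leaving the cold req->work.cancel_seq field untouched until a cancelation actually runs, which is the point of the patch.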