author		Pavel Begunkov <asml.silence@gmail.com>	2022-09-21 13:17:53 +0200
committer	Jens Axboe <axboe@kernel.dk>	2022-09-21 21:15:02 +0200
commit		c4c0009e0b56ef9920020bcade1e45be52653bae (patch)
tree		13bf9d158b354a7659a77ac97a1ae61b50d52080 /io_uring/net.c
parent		io_uring/net: rename io_sendzc() (diff)
download	linux-c4c0009e0b56ef9920020bcade1e45be52653bae.tar.xz
		linux-c4c0009e0b56ef9920020bcade1e45be52653bae.zip
io_uring/net: combine fail handlers
Merge io_send_zc_fail() into io_sendrecv_fail(); this saves a few lines of
code and some headache for the following patch.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e0eba1d577413aef5602cd45f588b9230207082d.1663668091.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
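For orientation (not part of this page): each opcode's io_op_def entry carries an optional ->fail hook, and the io_uring core invokes it when a request is failed. Merging the two handlers lets both the plain send/recv opcodes and IORING_OP_SEND_ZC point that hook at the single io_sendrecv_fail(). The sketch below is a simplified illustration of that dispatch; the function name is made up, and the real code in io_uring/io_uring.c and io_uring/opdef.c differs in detail.

/*
 * Illustrative sketch only: how a failed request reaches the per-opcode
 * ->fail hook (e.g. io_sendrecv_fail() for send/recv and SEND_ZC).
 */
static void io_req_fail_sketch(struct io_kiocb *req, s32 res)
{
	const struct io_op_def *def = &io_op_defs[req->opcode];

	req_set_fail(req);
	io_req_set_res(req, res, req->cqe.flags);
	if (def->fail)
		def->fail(req);		/* opcode-specific failure fixup */
	io_req_complete_post(req);
}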
Diffstat (limited to 'io_uring/net.c')
-rw-r--r--	io_uring/net.c | 31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 145beb455f61..209bc69b3707 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -192,6 +192,7 @@ int io_send_prep_async(struct io_kiocb *req)
 	io = io_msg_alloc_async_prep(req);
 	if (!io)
 		return -ENOMEM;
+	io->free_iov = NULL;
 	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
 	return ret;
 }
@@ -208,6 +209,7 @@ static int io_setup_async_addr(struct io_kiocb *req,
 	io = io_msg_alloc_async(req, issue_flags);
 	if (!io)
 		return -ENOMEM;
+	io->free_iov = NULL;
 	memcpy(&io->addr, addr_storage, sizeof(io->addr));
 	return -EAGAIN;
 }
@@ -1119,26 +1121,25 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 void io_sendrecv_fail(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+	struct io_async_msghdr *io;
 	int res = req->cqe.res;
 
 	if (req->flags & REQ_F_PARTIAL_IO)
 		res = sr->done_io;
-	io_req_set_res(req, res, req->cqe.flags);
-}
-
-void io_send_zc_fail(struct io_kiocb *req)
-{
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	int res = req->cqe.res;
-
-	if (req->flags & REQ_F_PARTIAL_IO) {
-		if (req->flags & REQ_F_NEED_CLEANUP) {
-			io_notif_flush(sr->notif);
-			sr->notif = NULL;
-			req->flags &= ~REQ_F_NEED_CLEANUP;
-		}
-		res = sr->done_io;
+	if ((req->flags & REQ_F_NEED_CLEANUP) &&
+	    req->opcode == IORING_OP_SEND_ZC) {
+		/* preserve notification for partial I/O */
+		if (res < 0)
+			sr->notif->flags |= REQ_F_CQE_SKIP;
+		io_notif_flush(sr->notif);
+		sr->notif = NULL;
 	}
+	if (req_has_async_data(req)) {
+		io = req->async_data;
+		kfree(io->free_iov);
+		io->free_iov = NULL;
+	}
+	req->flags &= ~REQ_F_NEED_CLEANUP;
 	io_req_set_res(req, res, req->cqe.flags);
 }
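To see the effect from userspace, here is a minimal sketch (mine, not from the patch) of submitting one zero-copy send with liburing and reaping the CQEs it can produce: first the send result, then, unless the kernel skipped it, the buffer-reuse notification that io_sendrecv_fail() flushes on the failure path. It assumes liburing with io_uring_prep_send_zc(), an initialized ring, and an already-connected socket; the helper name send_zc_once() is hypothetical.

/* Sketch: one IORING_OP_SEND_ZC submission and its CQEs (illustrative only). */
#include <liburing.h>
#include <stdio.h>

static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	unsigned more;
	int ret;

	if (!sqe)
		return -1;
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_submit(ring);

	/* First CQE: the send result; on a failed request that made partial
	 * progress, the fail handler reports sr->done_io here. */
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	printf("send_zc: res=%d flags=0x%x\n", cqe->res, cqe->flags);
	more = cqe->flags & IORING_CQE_F_MORE;
	io_uring_cqe_seen(ring, cqe);

	/* Second CQE (only if IORING_CQE_F_MORE was set): the notification
	 * that the buffer may be reused.  On a complete failure (res < 0)
	 * the fail handler marks the notification REQ_F_CQE_SKIP, so it is
	 * never posted. */
	if (more) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret)
			return ret;
		printf("notif: flags=0x%x\n", cqe->flags);
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
}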