author		Jens Axboe <axboe@kernel.dk>	2024-02-27 00:43:01 +0100
committer	Jens Axboe <axboe@kernel.dk>	2024-02-27 19:16:00 +0100
commit		792060de8b3e9ca11fab4afc0c3c5927186152a2 (patch)
tree		0b7cfc28dea4b940da185fce48c15942c0b0720f /io_uring
parent		io_uring/net: move receive multishot out of the generic msghdr path (diff)
download	linux-792060de8b3e9ca11fab4afc0c3c5927186152a2.tar.xz
		linux-792060de8b3e9ca11fab4afc0c3c5927186152a2.zip
io_uring/net: improve the usercopy for sendmsg/recvmsg
We're spending a considerable amount of the sendmsg/recvmsg time just
copying in the message header. And for provided buffers, the known
single entry iovec.

Be a bit smarter about it and enable/disable user access around our
copying. In a test case that does both sendmsg and recvmsg, the
runtime before this change (averaged over multiple runs, very stable
times however):

Kernel		Time		Diff
====================================
-git		4720 usec
-git+commit	4311 usec	-8.7%

and looking at a profile diff, we see the following:

    0.25%     +9.33%  [kernel.kallsyms]  [k] _copy_from_user
    4.47%     -3.32%  [kernel.kallsyms]  [k] __io_msg_copy_hdr.constprop.0

where we drop more than 9% of _copy_from_user() time, and consequently
add time to __io_msg_copy_hdr() where the copies are now attributed to,
but with a net win of 6%.

In comparison, the same test case with send/recv runs in 3745 usec,
which is (expectedly) still quite a bit faster. But at least
sendmsg/recvmsg is now only ~13% slower, where it was ~21% slower before.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
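
[Editor's note: for readers unfamiliar with the API used in the patch, below is
a minimal sketch of the batched-usercopy pattern it applies. One
user_access_begin()/user_access_end() section replaces several
copy_from_user() calls, so the access_ok() range check and the user-access
hardware toggle (STAC/CLAC on x86 with SMAP, PAN on arm64) are paid once
rather than per field. The function name and the two-field output struct here
are hypothetical, for illustration only; the real change is in
io_msg_copy_hdr() in the diff below.]

#include <linux/uaccess.h>
#include <linux/socket.h>

/* Hypothetical example, not the actual io_uring code. */
struct hdr_prefix {
	void __user *name;	/* userspace sockaddr pointer */
	int namelen;
};

static int copy_hdr_prefix(struct user_msghdr __user *umsg,
			   struct hdr_prefix *out)
{
	int ret = -EFAULT;

	/* One range check + user-access enable for the whole struct */
	if (!user_access_begin(umsg, sizeof(*umsg)))
		return -EFAULT;

	/* Raw loads; a faulting access jumps straight to the label */
	unsafe_get_user(out->name, &umsg->msg_name, ua_end);
	unsafe_get_user(out->namelen, &umsg->msg_namelen, ua_end);
	ret = 0;
ua_end:
	/* Always restore protection state before returning */
	user_access_end();
	return ret;
}

[The goto-label form is the point of the API: unsafe_get_user() expands to a
raw load with an exception-table entry, so a fault branches to ua_end where
user_access_end() disables user access again before the error is returned.]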
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/net.c	29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 7a07c5563d66..83fba2882720 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -255,27 +255,42 @@ static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int ret;
- if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
+ if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
return -EFAULT;
+ ret = -EFAULT;
+ unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
+ unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
+ unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
+ unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
+ unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
+ unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
+ msg->msg_flags = 0;
+
if (req->flags & REQ_F_BUFFER_SELECT) {
if (msg->msg_iovlen == 0) {
sr->len = iomsg->fast_iov[0].iov_len = 0;
iomsg->fast_iov[0].iov_base = NULL;
iomsg->free_iov = NULL;
} else if (msg->msg_iovlen > 1) {
- return -EINVAL;
+ ret = -EINVAL;
+ goto ua_end;
} else {
- if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
- sizeof(*msg->msg_iov)))
- return -EFAULT;
+ /* we only need the length for provided buffers */
+ if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
+ goto ua_end;
+ unsafe_get_user(iomsg->fast_iov[0].iov_len,
+ &msg->msg_iov[0].iov_len, ua_end);
sr->len = iomsg->fast_iov[0].iov_len;
iomsg->free_iov = NULL;
}
-
- return 0;
+ ret = 0;
+ua_end:
+ user_access_end();
+ return ret;
}
+ user_access_end();
iomsg->free_iov = iomsg->fast_iov;
ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
&iomsg->free_iov, &iomsg->msg.msg_iter, false);