author		Jens Axboe <axboe@kernel.dk>	2023-01-05 20:07:32 +0100
committer	Jens Axboe <axboe@kernel.dk>	2023-01-09 04:59:17 +0100
commit		1e23db450cff5f0410480137041181d1514bda2a (patch)
tree		0063af8cb61c30a8ddb7a0c2abae701c54867206 /io_uring
parent		io_uring: switch network send/recv to ITER_UBUF (diff)
io_uring: use iter_ubuf for single range imports
This is more efficient than iter_iov.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
[merge to 6.2, minor fixes]
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
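For context, a minimal sketch (not part of this commit; the helper name is hypothetical) of the single-range import path this patch switches over, with ddir/buf/len mirroring the arguments used in __io_import_iovec():

#include <linux/uio.h>

/*
 * Sketch only: importing one user range into an iov_iter.  Before this
 * commit the range was wrapped in a one-entry iovec via
 * import_single_range(), yielding an ITER_IOVEC iterator; import_ubuf()
 * records the pointer and length directly as ITER_UBUF, so iterating
 * does not have to walk an iovec array.
 */
static int import_one_range(int ddir, void __user *buf, size_t len,
			    struct iov_iter *iter)
{
	return import_ubuf(ddir, buf, len, iter);
}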
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/rw.c	| 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 8227af2e1c0f..436a2e064df6 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -391,7 +391,7 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
 			rw->len = sqe_len;
 		}
 
-		ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
+		ret = import_ubuf(ddir, buf, sqe_len, iter);
 		if (ret)
 			return ERR_PTR(ret);
 		return NULL;
@@ -450,7 +450,10 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
 		struct iovec iovec;
 		ssize_t nr;
 
-		if (!iov_iter_is_bvec(iter)) {
+		if (iter_is_ubuf(iter)) {
+			iovec.iov_base = iter->ubuf + iter->iov_offset;
+			iovec.iov_len = iov_iter_count(iter);
+		} else if (!iov_iter_is_bvec(iter)) {
 			iovec = iov_iter_iovec(iter);
 		} else {
 			iovec.iov_base = u64_to_user_ptr(rw->addr);
@@ -495,7 +498,7 @@ static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
 	io->free_iovec = iovec;
 	io->bytes_done = 0;
 	/* can only be fixed buffers, no need to do anything */
-	if (iov_iter_is_bvec(iter))
+	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
 		return;
 	if (!iovec) {
 		unsigned iov_off = 0;
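A note on the loop_rw_iter() hunk above: iov_iter_iovec() is only valid for ITER_IOVEC iterators, which is why a separate branch is needed for ITER_UBUF. A hedged sketch (helper name hypothetical) of what that branch computes:

#include <linux/uio.h>

/* Sketch only: current user address and remaining length of a ubuf iter. */
static struct iovec ubuf_cur_iovec(const struct iov_iter *iter)
{
	struct iovec iovec = {
		/* one contiguous user buffer, offset by what was consumed */
		.iov_base = iter->ubuf + iter->iov_offset,
		.iov_len  = iov_iter_count(iter),
	};

	return iovec;
}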