summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2019-02-28 17:00:18 +0100
committerJens Axboe <axboe@kernel.dk>2019-02-28 21:49:22 +0100
commit5b88a17cfdeba75e0092bab2c79aaf7d9e7db482 (patch)
tree9b4eb6248df2e42fa08c1138fa0c0ca49d48f35b
parentblock: introduce mp_bvec_for_each_page() for iterating over page (diff)
downloadlinux-5b88a17cfdeba75e0092bab2c79aaf7d9e7db482.tar.xz
linux-5b88a17cfdeba75e0092bab2c79aaf7d9e7db482.zip
block: optimize bvec iteration in bvec_iter_advance
There is no need to only iterate in chunks of PAGE_SIZE or less in bvec_iter_advance, given that the callers pass in the chunk length that they are operating on - either that already is less than PAGE_SIZE because they do classic page-based iteration, or it is larger because the caller operates on multi-page bvecs. This should help shave off a few cycles of the I/O hot path. Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de> Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--include/linux/bvec.h7
1 file changed, 4 insertions, 3 deletions
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index 87e82e503a52..f6275c4da13a 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -112,14 +112,15 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
}
while (bytes) {
- unsigned iter_len = bvec_iter_len(bv, *iter);
- unsigned len = min(bytes, iter_len);
+ const struct bio_vec *cur = bv + iter->bi_idx;
+ unsigned len = min3(bytes, iter->bi_size,
+ cur->bv_len - iter->bi_bvec_done);
bytes -= len;
iter->bi_size -= len;
iter->bi_bvec_done += len;
- if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+ if (iter->bi_bvec_done == cur->bv_len) {
iter->bi_bvec_done = 0;
iter->bi_idx++;
}