summaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2023-07-24 18:54:26 +0200
committerJens Axboe <axboe@kernel.dk>2023-07-25 03:55:16 +0200
commitcd1d83e24e689f25de7e34bea697971750138d5f (patch)
tree788fef148baffc1e83b709ac030addd27ce63495 /block
parentblock: refactor to use helper (diff)
downloadlinux-cd1d83e24e689f25de7e34bea697971750138d5f.tar.xz
linux-cd1d83e24e689f25de7e34bea697971750138d5f.zip
block: tidy up the bio full checks in bio_add_hw_page
bio_add_hw_page already checks whether the number of bytes being added even fits into the max_hw_sectors limit of the queue. Remove the call to bio_full and just do a check for the smaller of the number of segments in the bio and the queue max segments limit, and do this cheap check before the more expensive gap-to-previous check. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Jinyoung Choi <j-young.choi@samsung.com> Link: https://lore.kernel.org/r/20230724165433.117645-2-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r--block/bio.c10
1 file changed, 4 insertions, 6 deletions
diff --git a/block/bio.c b/block/bio.c
index 8672179213b9..72488ecea47a 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1014,6 +1014,10 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
return len;
+ if (bio->bi_vcnt >=
+ min(bio->bi_max_vecs, queue_max_segments(q)))
+ return 0;
+
/*
* If the queue doesn't support SG gaps and adding this segment
* would create a gap, disallow it.
@@ -1023,12 +1027,6 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
return 0;
}
- if (bio_full(bio, len))
- return 0;
-
- if (bio->bi_vcnt >= queue_max_segments(q))
- return 0;
-
bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
bio->bi_vcnt++;
bio->bi_iter.bi_size += len;