author		Kanchan Joshi <joshi.k@samsung.com>	2022-09-30 08:27:46 +0200
committer	Jens Axboe <axboe@kernel.dk>	2022-09-30 15:51:13 +0200
commit		ab89e8e7ca526ca04baaad2aa28172d336425d67 (patch)
tree		57fb231e3744a1de3d39956eed0ff269af3aff0a /block
parent		block: rename bio_map_put to blk_mq_map_bio_put (diff)
block: factor out blk_rq_map_bio_alloc helper
Move the bio allocation logic from bio_map_user_iov to a new helper, blk_rq_map_bio_alloc. It is named so because its functionality is the opposite of what blk_mq_map_bio_put does. This is a prep patch.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
Link: https://lore.kernel.org/r/20220930062749.152261-10-anuj20.g@samsung.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
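For context, here is an illustrative sketch (not part of the patch) of the alloc/put pairing this helper enables inside block/blk-map.c: blk_rq_map_bio_alloc() allocates the bio (from fs_bio_set for polled requests, via bio_kmalloc() otherwise), and blk_mq_map_bio_put() releases it along the matching path. The caller and its do_map_pages() step are hypothetical, shown only to make the symmetry concrete.

/* Hypothetical caller, modeled on bio_map_user_iov() after this patch. */
static int example_map(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (!bio)
		return -ENOMEM;

	ret = do_map_pages(bio, iter);	/* hypothetical mapping step */
	if (ret) {
		/* Undo blk_rq_map_bio_alloc(): blk_mq_map_bio_put() does
		 * bio_put() for REQ_ALLOC_CACHE bios, and bio_uninit() +
		 * kfree() for bio_kmalloc()'d ones.
		 */
		blk_mq_map_bio_put(bio);
		return ret;
	}
	return 0;
}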
Diffstat (limited to 'block')
-rw-r--r--	block/blk-map.c	33
1 file changed, 22 insertions, 11 deletions
diff --git a/block/blk-map.c b/block/blk-map.c
index 84b13a4158b7..d6ea377394a9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -241,17 +241,10 @@ static void blk_mq_map_bio_put(struct bio *bio)
 	}
 }
 
-static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
-		gfp_t gfp_mask)
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+		unsigned int nr_vecs, gfp_t gfp_mask)
 {
-	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
-	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
 	struct bio *bio;
-	int ret;
-	int j;
-
-	if (!iov_iter_count(iter))
-		return -EINVAL;
 
 	if (rq->cmd_flags & REQ_POLLED) {
 		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
@@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
 					&fs_bio_set);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 	} else {
 		bio = bio_kmalloc(nr_vecs, gfp_mask);
 		if (!bio)
-			return -ENOMEM;
+			return NULL;
 		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
 	}
+	return bio;
+}
+
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+		gfp_t gfp_mask)
+{
+	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
+	struct bio *bio;
+	int ret;
+	int j;
+
+	if (!iov_iter_count(iter))
+		return -EINVAL;
+
+	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
+	if (bio == NULL)
+		return -ENOMEM;
+
 	while (iov_iter_count(iter)) {
 		struct page **pages, *stack_pages[UIO_FASTIOV];