summaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe <axboe@fb.com>2014-06-10 20:53:56 +0200
committerJens Axboe <axboe@fb.com>2014-06-10 20:53:56 +0200
commit58a4915ad2f8a87f4456aac260396df7e300e6f2 (patch)
tree6481a87de33d3caa7b647c3347324eea1c474216 /block
parentblk-mq: add timer in blk_mq_start_request (diff)
downloadlinux-58a4915ad2f8a87f4456aac260396df7e300e6f2.tar.xz
linux-58a4915ad2f8a87f4456aac260396df7e300e6f2.zip
block: ensure that bio_add_page() always accepts a page for an empty bio
Commit 762380ad9322 added support for chunk sizes and disallowed merging across them, but in doing so it broke the rule that adding a single page to an empty bio must always succeed. Relax the restriction a bit to allow for that, similarly to what we have always done. This fixes a crash with mkfs.xfs and 512b sector sizes on NVMe. Reported-by: Keith Busch <keith.busch@intel.com> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--block/bio.c7
-rw-r--r--block/blk-settings.c5
2 files changed, 10 insertions, 2 deletions
diff --git a/block/bio.c b/block/bio.c
index 97e832cc9b9c..2d64488e51c6 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -849,8 +849,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
unsigned int offset)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ unsigned int max_sectors;
- return __bio_add_page(q, bio, page, len, offset, blk_max_size_offset(q, bio->bi_iter.bi_sector));
+ max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+ if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
+ max_sectors = len >> 9;
+
+ return __bio_add_page(q, bio, page, len, offset, max_sectors);
}
EXPORT_SYMBOL(bio_add_page);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index a2b9cb195e70..f1a1795a5683 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -285,7 +285,10 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
* Description:
* If a driver doesn't want IOs to cross a given chunk size, it can set
* this limit and prevent merging across chunks. Note that the chunk size
- * must currently be a power-of-2 in sectors.
+ * must currently be a power-of-2 in sectors. Also note that the block
+ * layer must accept a page worth of data at any offset. So if the
+ * crossing of chunks is a hard limitation in the driver, it must still be
+ * prepared to split single page bios.
**/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{