path: root/block/blk-lib.c
author    Darrick J. Wong <darrick.wong@oracle.com>	2016-10-11 22:51:08 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>	2016-10-12 00:06:30 +0200
commit    28b2be203e5a09de566d6f7e21183f861e36f07e (patch)
tree      fae93d2f35925bafb738ef2172d93e4b68cf60d9 /block/blk-lib.c
parent    block: invalidate the page cache when issuing BLKZEROOUT (diff)
block: require write_same and discard requests align to logical block size
Make sure that the offset and length arguments that we're using to construct
WRITE SAME and DISCARD requests are actually aligned to the logical block
size.  Failure to do this causes errors in other parts of the block layer or
the SCSI layer, because disks don't support partial logical block writes.

Link: http://lkml.kernel.org/r/147518379026.22791.4437508871355153928.stgit@birch.djwong.org
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Mike Snitzer <snitzer@redhat.com> # tweaked header
Cc: Brian Foster <bfoster@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
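As an illustration of the check this patch adds to each helper (a standalone
sketch, not the kernel code itself; the function name check_alignment and its
parameters are hypothetical), the alignment test converts the logical block
size into a mask of 512-byte sectors and rejects any request whose starting
sector or length has low-order bits set:

	#include <errno.h>
	#include <stdint.h>

	/*
	 * Sketch of the alignment test, assuming logical_block_size is a
	 * power of two >= 512 (as the block layer guarantees) and that
	 * sector and nr_sects are counted in 512-byte units.
	 */
	static int check_alignment(unsigned int logical_block_size,
				   uint64_t sector, uint64_t nr_sects)
	{
		/* Block size expressed in 512-byte sectors, minus one:
		 * the low-order bits that must be zero when aligned. */
		uint64_t bs_mask = (logical_block_size >> 9) - 1;

		/* Either the start or the length covers a partial logical
		 * block, which the disk cannot service. */
		if ((sector | nr_sects) & bs_mask)
			return -EINVAL;

		return 0;
	}

For a 4096-byte logical block, bs_mask is (4096 >> 9) - 1 = 7, so both sector
and nr_sects must be multiples of 8; for 512-byte blocks the mask is 0 and
every request passes.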
Diffstat (limited to 'block/blk-lib.c')
-rw-r--r--	block/blk-lib.c	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 083e56f72308..46fe9248410d 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -31,6 +31,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	unsigned int granularity;
 	enum req_op op;
 	int alignment;
+	sector_t bs_mask;
 
 	if (!q)
 		return -ENXIO;
@@ -50,6 +51,10 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		op = REQ_OP_DISCARD;
 	}
 
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
+
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
@@ -150,10 +155,15 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 	unsigned int max_write_same_sectors;
 	struct bio *bio = NULL;
 	int ret = 0;
+	sector_t bs_mask;
 
 	if (!q)
 		return -ENXIO;
 
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
+
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
 	max_write_same_sectors = UINT_MAX >> 9;
 
@@ -202,6 +212,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	int ret;
 	struct bio *bio = NULL;
 	unsigned int sz;
+	sector_t bs_mask;
+
+	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
+	if ((sector | nr_sects) & bs_mask)
+		return -EINVAL;
 
 	while (nr_sects != 0) {
 		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),