author     Ritika Srivastava <ritika.srivastava@oracle.com>  2020-09-01 22:17:31 +0200
committer  Jens Axboe <axboe@kernel.dk>  2020-09-02 03:38:33 +0200
commit     8327cce5ff9376fac9ff713a8d5c99c16ba3fa33 (patch)
tree       f6c7b9cdf15953683061c61fc1e0e936aa5927a2 /block
parent     block: Return blk_status_t instead of errno codes (diff)
download   linux-8327cce5ff9376fac9ff713a8d5c99c16ba3fa33.tar.xz
           linux-8327cce5ff9376fac9ff713a8d5c99c16ba3fa33.zip
block: better deal with the delayed not supported case in blk_cloned_rq_check_limits
If a WRITE_ZERO/WRITE_SAME operation is not supported by the storage, blk_cloned_rq_check_limits() will return an IO error, which will cause device-mapper to fail the paths.

Instead, if the queue limit is set to 0, return BLK_STS_NOTSUPP. BLK_STS_NOTSUPP will be ignored by device-mapper and will not fail the paths.

Suggested-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Ritika Srivastava <ritika.srivastava@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
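For context on why device-mapper can simply ignore the new status: multipath-style error handling decides whether to fail a path based on the specific blk_status_t value rather than treating every non-OK completion as a path problem (the kernel's blk_path_error() helper, for example, does not count BLK_STS_NOTSUPP as a path error). The snippet below is a minimal userspace sketch of that policy; the enum values and the path_should_be_failed() helper are local stand-ins for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's blk_status_t values. */
enum blk_status {
        BLK_STS_OK = 0,
        BLK_STS_NOTSUPP,
        BLK_STS_IOERR,
};

/*
 * Toy version of the policy applied by multipath-style error handling:
 * "not supported" describes the request, not the path, so it must not
 * trigger path failover.
 */
static bool path_should_be_failed(enum blk_status status)
{
        switch (status) {
        case BLK_STS_OK:
        case BLK_STS_NOTSUPP:
                return false;   /* nothing wrong with the path itself */
        default:
                return true;    /* e.g. BLK_STS_IOERR: retry on another path */
        }
}

int main(void)
{
        printf("NOTSUPP fails path: %d\n", path_should_be_failed(BLK_STS_NOTSUPP));
        printf("IOERR   fails path: %d\n", path_should_be_failed(BLK_STS_IOERR));
        return 0;
}

Built with a plain C compiler, this prints 0 for NOTSUPP and 1 for IOERR, mirroring the behaviour the commit message describes.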
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  27
1 file changed, 22 insertions, 5 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 3fbb5b2d5385..9f2a99abeeb9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1148,10 +1148,24 @@ EXPORT_SYMBOL(submit_bio);
 static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
                                       struct request *rq)
 {
-        if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
+        unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+
+        if (blk_rq_sectors(rq) > max_sectors) {
+                /*
+                 * SCSI device does not have a good way to return if
+                 * Write Same/Zero is actually supported. If a device rejects
+                 * a non-read/write command (discard, write same,etc.) the
+                 * low-level device driver will set the relevant queue limit to
+                 * 0 to prevent blk-lib from issuing more of the offending
+                 * operations. Commands queued prior to the queue limit being
+                 * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
+                 * errors being propagated to upper layers.
+                 */
+                if (max_sectors == 0)
+                        return BLK_STS_NOTSUPP;
+
                 printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
-                        __func__, blk_rq_sectors(rq),
-                        blk_queue_get_max_sectors(q, req_op(rq)));
+                        __func__, blk_rq_sectors(rq), max_sectors);
                 return BLK_STS_IOERR;
         }
@@ -1178,8 +1192,11 @@ static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-        if (blk_cloned_rq_check_limits(q, rq))
-                return BLK_STS_IOERR;
+        blk_status_t ret;
+
+        ret = blk_cloned_rq_check_limits(q, rq);
+        if (ret != BLK_STS_OK)
+                return ret;
 
         if (rq->rq_disk &&
             should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
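To make the effect of the new max_sectors == 0 branch concrete, here is a small self-contained model of the patched check. It is a simplification for illustration only (no request_queue, sector counts as plain integers), not the kernel function itself.

#include <stdio.h>

enum blk_status { BLK_STS_OK = 0, BLK_STS_NOTSUPP, BLK_STS_IOERR };

/*
 * Simplified model of blk_cloned_rq_check_limits() after the patch:
 * a zeroed limit means the driver has since declared the operation
 * unsupported, so the request is completed with BLK_STS_NOTSUPP
 * instead of being reported as an I/O error.
 */
static enum blk_status check_limits(unsigned int rq_sectors,
                                    unsigned int max_sectors)
{
        if (rq_sectors > max_sectors) {
                if (max_sectors == 0)
                        return BLK_STS_NOTSUPP;  /* delayed "not supported" */

                fprintf(stderr, "over max size limit. (%u > %u)\n",
                        rq_sectors, max_sectors);
                return BLK_STS_IOERR;
        }
        return BLK_STS_OK;
}

int main(void)
{
        /* Limit already reset to 0 by the low-level driver: not an I/O error. */
        printf("status = %d\n", check_limits(8, 0));       /* BLK_STS_NOTSUPP */
        /* Genuinely oversized request against a nonzero limit. */
        printf("status = %d\n", check_limits(1024, 512));   /* BLK_STS_IOERR */
        return 0;
}

With the limit already reset to 0, a cloned request now completes with BLK_STS_NOTSUPP; only an oversized request against a nonzero limit is still reported as BLK_STS_IOERR.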