Diffstat (limited to 'drivers/scsi/sd.c')
-rw-r--r-- | drivers/scsi/sd.c | 220
1 file changed, 135 insertions(+), 85 deletions(-)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6825eda1114a..2c2041ca4b70 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -134,6 +134,19 @@ static const char *sd_cache_types[] = {
 	"write back, no read (daft)"
 };
 
+static void sd_set_flush_flag(struct scsi_disk *sdkp)
+{
+	unsigned flush = 0;
+
+	if (sdkp->WCE) {
+		flush |= REQ_FLUSH;
+		if (sdkp->DPOFUA)
+			flush |= REQ_FUA;
+	}
+
+	blk_queue_flush(sdkp->disk->queue, flush);
+}
+
 static ssize_t
 cache_type_store(struct device *dev, struct device_attribute *attr,
 		 const char *buf, size_t count)
@@ -177,6 +190,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
 	if (sdkp->cache_override) {
 		sdkp->WCE = wce;
 		sdkp->RCD = rcd;
+		sd_set_flush_flag(sdkp);
 		return count;
 	}
 
@@ -677,8 +691,10 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
  * Will issue either UNMAP or WRITE SAME(16) depending on preference
  * indicated by target device.
  **/
-static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
 {
+	struct request *rq = cmd->request;
+	struct scsi_device *sdp = cmd->device;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 	sector_t sector = blk_rq_pos(rq);
 	unsigned int nr_sectors = blk_rq_sectors(rq);
@@ -690,9 +706,6 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
 
 	sector >>= ilog2(sdp->sector_size) - 9;
 	nr_sectors >>= ilog2(sdp->sector_size) - 9;
-	rq->timeout = SD_TIMEOUT;
-
-	memset(rq->cmd, 0, rq->cmd_len);
 
 	page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
 	if (!page)
@@ -702,9 +715,9 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
 	case SD_LBP_UNMAP:
 		buf = page_address(page);
 
-		rq->cmd_len = 10;
-		rq->cmd[0] = UNMAP;
-		rq->cmd[8] = 24;
+		cmd->cmd_len = 10;
+		cmd->cmnd[0] = UNMAP;
+		cmd->cmnd[8] = 24;
 
 		put_unaligned_be16(6 + 16, &buf[0]);
 		put_unaligned_be16(16, &buf[2]);
@@ -715,23 +728,23 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
 		break;
 
 	case SD_LBP_WS16:
-		rq->cmd_len = 16;
-		rq->cmd[0] = WRITE_SAME_16;
-		rq->cmd[1] = 0x8; /* UNMAP */
-		put_unaligned_be64(sector, &rq->cmd[2]);
-		put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+		cmd->cmd_len = 16;
+		cmd->cmnd[0] = WRITE_SAME_16;
+		cmd->cmnd[1] = 0x8; /* UNMAP */
+		put_unaligned_be64(sector, &cmd->cmnd[2]);
+		put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
 
 		len = sdkp->device->sector_size;
 		break;
 
 	case SD_LBP_WS10:
 	case SD_LBP_ZERO:
-		rq->cmd_len = 10;
-		rq->cmd[0] = WRITE_SAME;
+		cmd->cmd_len = 10;
+		cmd->cmnd[0] = WRITE_SAME;
 		if (sdkp->provisioning_mode == SD_LBP_WS10)
-			rq->cmd[1] = 0x8; /* UNMAP */
-		put_unaligned_be32(sector, &rq->cmd[2]);
-		put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+			cmd->cmnd[1] = 0x8; /* UNMAP */
+		put_unaligned_be32(sector, &cmd->cmnd[2]);
+		put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
 
 		len = sdkp->device->sector_size;
 		break;
@@ -742,8 +755,21 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
 	}
 
 	rq->completion_data = page;
+	rq->timeout = SD_TIMEOUT;
+
+	cmd->transfersize = len;
+	cmd->allowed = SD_MAX_RETRIES;
+
+	/*
+	 * Initially __data_len is set to the amount of data that needs to be
+	 * transferred to the target. This amount depends on whether WRITE SAME
+	 * or UNMAP is being used. After the scatterlist has been mapped by
+	 * scsi_init_io() we set __data_len to the size of the area to be
+	 * discarded on disk. This allows us to report completion on the full
+	 * amount of blocks described by the request.
+	 */
 	blk_add_request_payload(rq, page, len);
-	ret = scsi_setup_blk_pc_cmnd(sdp, rq);
+	ret = scsi_init_io(cmd, GFP_ATOMIC);
 	rq->__data_len = nr_bytes;
 
 out:
@@ -785,14 +811,15 @@ out:
 
 /**
  * sd_setup_write_same_cmnd - write the same data to multiple blocks
- * @sdp: scsi device to operate one
- * @rq: Request to prepare
+ * @cmd: command to prepare
 *
 * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
 * preference indicated by target device.
 **/
-static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 {
+	struct request *rq = cmd->request;
+	struct scsi_device *sdp = cmd->device;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 	struct bio *bio = rq->bio;
 	sector_t sector = blk_rq_pos(rq);
@@ -808,53 +835,56 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
 	sector >>= ilog2(sdp->sector_size) - 9;
 	nr_sectors >>= ilog2(sdp->sector_size) - 9;
 
-	rq->__data_len = sdp->sector_size;
 	rq->timeout = SD_WRITE_SAME_TIMEOUT;
-	memset(rq->cmd, 0, rq->cmd_len);
 
 	if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
-		rq->cmd_len = 16;
-		rq->cmd[0] = WRITE_SAME_16;
-		put_unaligned_be64(sector, &rq->cmd[2]);
-		put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+		cmd->cmd_len = 16;
+		cmd->cmnd[0] = WRITE_SAME_16;
+		put_unaligned_be64(sector, &cmd->cmnd[2]);
+		put_unaligned_be32(nr_sectors, &cmd->cmnd[10]);
 	} else {
-		rq->cmd_len = 10;
-		rq->cmd[0] = WRITE_SAME;
-		put_unaligned_be32(sector, &rq->cmd[2]);
-		put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+		cmd->cmd_len = 10;
+		cmd->cmnd[0] = WRITE_SAME;
+		put_unaligned_be32(sector, &cmd->cmnd[2]);
+		put_unaligned_be16(nr_sectors, &cmd->cmnd[7]);
 	}
 
-	ret = scsi_setup_blk_pc_cmnd(sdp, rq);
-	rq->__data_len = nr_bytes;
+	cmd->transfersize = sdp->sector_size;
+	cmd->allowed = SD_MAX_RETRIES;
+	/*
+	 * For WRITE_SAME the data transferred in the DATA IN buffer is
+	 * different from the amount of data actually written to the target.
+	 *
+	 * We set up __data_len to the amount of data transferred from the
+	 * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
+	 * to transfer a single sector of data first, but then reset it to
+	 * the amount of data to be written right after so that the I/O path
+	 * knows how much to actually write.
+	 */
+	rq->__data_len = sdp->sector_size;
+	ret = scsi_init_io(cmd, GFP_ATOMIC);
+	rq->__data_len = nr_bytes;
 
 	return ret;
 }
 
-static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
 {
-	rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;
-	rq->retries = SD_MAX_RETRIES;
-	rq->cmd[0] = SYNCHRONIZE_CACHE;
-	rq->cmd_len = 10;
-
-	return scsi_setup_blk_pc_cmnd(sdp, rq);
-}
+	struct request *rq = cmd->request;
 
-static void sd_uninit_command(struct scsi_cmnd *SCpnt)
-{
-	struct request *rq = SCpnt->request;
+	/* flush requests don't perform I/O, zero the S/G table */
+	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
 
-	if (rq->cmd_flags & REQ_DISCARD)
-		__free_page(rq->completion_data);
+	cmd->cmnd[0] = SYNCHRONIZE_CACHE;
+	cmd->cmd_len = 10;
+	cmd->transfersize = 0;
+	cmd->allowed = SD_MAX_RETRIES;
 
-	if (SCpnt->cmnd != rq->cmd) {
-		mempool_free(SCpnt->cmnd, sd_cdb_pool);
-		SCpnt->cmnd = NULL;
-		SCpnt->cmd_len = 0;
-	}
+	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
+	return BLKPREP_OK;
 }
 
-static int sd_init_command(struct scsi_cmnd *SCpnt)
+static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
 {
 	struct request *rq = SCpnt->request;
 	struct scsi_device *sdp = SCpnt->device;
@@ -866,21 +896,7 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
 	int ret, host_dif;
 	unsigned char protect;
 
-	/*
-	 * Discard request come in as REQ_TYPE_FS but we turn them into
-	 * block PC requests to make life easier.
-	 */
-	if (rq->cmd_flags & REQ_DISCARD) {
-		ret = sd_setup_discard_cmnd(sdp, rq);
-		goto out;
-	} else if (rq->cmd_flags & REQ_WRITE_SAME) {
-		ret = sd_setup_write_same_cmnd(sdp, rq);
-		goto out;
-	} else if (rq->cmd_flags & REQ_FLUSH) {
-		ret = scsi_setup_flush_cmnd(sdp, rq);
-		goto out;
-	}
-	ret = scsi_setup_fs_cmnd(sdp, rq);
+	ret = scsi_init_io(SCpnt, GFP_ATOMIC);
 	if (ret != BLKPREP_OK)
 		goto out;
 	SCpnt = rq->special;
@@ -976,18 +992,13 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
 		}
 	}
 	if (rq_data_dir(rq) == WRITE) {
-		if (!sdp->writeable) {
-			goto out;
-		}
 		SCpnt->cmnd[0] = WRITE_6;
-		SCpnt->sc_data_direction = DMA_TO_DEVICE;
 
 		if (blk_integrity_rq(rq))
 			sd_dif_prepare(rq, block, sdp->sector_size);
 
 	} else if (rq_data_dir(rq) == READ) {
 		SCpnt->cmnd[0] = READ_6;
-		SCpnt->sc_data_direction = DMA_FROM_DEVICE;
 	} else {
 		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);
 		goto out;
@@ -1042,7 +1053,7 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
 		SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
 		SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
 		SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
-	} else if (sdp->use_16_for_rw) {
+	} else if (sdp->use_16_for_rw || (this_count > 0xffff)) {
 		SCpnt->cmnd[0] += READ_16 - READ_6;
 		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
 		SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
@@ -1061,9 +1072,6 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
 	} else if ((this_count > 0xff) || (block > 0x1fffff) ||
 		   scsi_device_protection(SCpnt->device) ||
 		   SCpnt->device->use_10_for_rw) {
-		if (this_count > 0xffff)
-			this_count = 0xffff;
-
 		SCpnt->cmnd[0] += READ_10 - READ_6;
 		SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
 		SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff;
@@ -1116,6 +1124,34 @@ static int sd_init_command(struct scsi_cmnd *SCpnt)
 	return ret;
 }
 
+static int sd_init_command(struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd->request;
+
+	if (rq->cmd_flags & REQ_DISCARD)
+		return sd_setup_discard_cmnd(cmd);
+	else if (rq->cmd_flags & REQ_WRITE_SAME)
+		return sd_setup_write_same_cmnd(cmd);
+	else if (rq->cmd_flags & REQ_FLUSH)
+		return sd_setup_flush_cmnd(cmd);
+	else
+		return sd_setup_read_write_cmnd(cmd);
+}
+
+static void sd_uninit_command(struct scsi_cmnd *SCpnt)
+{
+	struct request *rq = SCpnt->request;
+
+	if (rq->cmd_flags & REQ_DISCARD)
+		__free_page(rq->completion_data);
+
+	if (SCpnt->cmnd != rq->cmd) {
+		mempool_free(SCpnt->cmnd, sd_cdb_pool);
+		SCpnt->cmnd = NULL;
+		SCpnt->cmd_len = 0;
+	}
+}
+
 /**
  *	sd_open - open a scsi disk device
  *	@inode: only i_rdev member may be used
@@ -2225,7 +2261,11 @@ got_data:
 		}
 	}
 
-	sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);
+	if (sdkp->capacity > 0xffffffff) {
+		sdp->use_16_for_rw = 1;
+		sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS;
+	} else
+		sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
 
 	/* Rescale capacity to 512-byte units */
 	if (sector_size == 4096)
@@ -2540,6 +2580,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
 	unsigned int sector_sz = sdkp->device->sector_size;
 	const int vpd_len = 64;
+	u32 max_xfer_length;
 	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
 	if (!buffer ||
@@ -2547,6 +2588,10 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
 	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
 		goto out;
 
+	max_xfer_length = get_unaligned_be32(&buffer[8]);
+	if (max_xfer_length)
+		sdkp->max_xfer_blocks = max_xfer_length;
+
 	blk_queue_io_min(sdkp->disk->queue,
 			 get_unaligned_be16(&buffer[6]) * sector_sz);
 	blk_queue_io_opt(sdkp->disk->queue,
@@ -2681,6 +2726,11 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
 
 static int sd_try_extended_inquiry(struct scsi_device *sdp)
 {
+	/* Attempt VPD inquiry if the device blacklist explicitly calls
+	 * for it.
+	 */
+	if (sdp->try_vpd_pages)
+		return 1;
 	/*
 	 * Although VPD inquiries can go to SCSI-2 type devices,
 	 * some USB ones crash on receiving them, and the pages
@@ -2701,7 +2751,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	struct scsi_disk *sdkp = scsi_disk(disk);
 	struct scsi_device *sdp = sdkp->device;
 	unsigned char *buffer;
-	unsigned flush = 0;
+	unsigned int max_xfer;
 
 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
 				      "sd_revalidate_disk\n"));
@@ -2747,14 +2797,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	 * We now have all cache related info, determine how we deal
 	 * with flush requests.
 	 */
-	if (sdkp->WCE) {
-		flush |= REQ_FLUSH;
-		if (sdkp->DPOFUA)
-			flush |= REQ_FUA;
-	}
-
-	blk_queue_flush(sdkp->disk->queue, flush);
+	sd_set_flush_flag(sdkp);
 
+	max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+				sdkp->max_xfer_blocks);
+	max_xfer <<= ilog2(sdp->sector_size) - 9;
+	blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
 	set_capacity(disk, sdkp->capacity);
 	sd_config_write_same(sdkp);
 	kfree(buffer);
@@ -3208,12 +3256,14 @@ static int __init init_sd(void)
 					 0, 0, NULL);
 	if (!sd_cdb_cache) {
 		printk(KERN_ERR "sd: can't init extended cdb cache\n");
+		err = -ENOMEM;
 		goto err_out_class;
 	}
 
 	sd_cdb_pool = mempool_create_slab_pool(SD_MEMPOOL_SIZE, sd_cdb_cache);
 	if (!sd_cdb_pool) {
 		printk(KERN_ERR "sd: can't init extended cdb pool\n");
+		err = -ENOMEM;
 		goto err_out_cache;
 	}
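
For orientation, the shape of the new command-setup path can be summarized as below. This is a condensed illustration distilled from the hunks above (kernel-style C); it is not part of the patch, is not a standalone compilable unit, and omits error handling and the READ/WRITE CDB construction.

/* Condensed sketch of the prep flow after this change (illustrative only;
 * drawn from the hunks above). */
static int sd_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;

	/* Each request type now gets its own scsi_cmnd-based setup helper
	 * instead of being turned into a block-PC request first. */
	if (rq->cmd_flags & REQ_DISCARD)
		return sd_setup_discard_cmnd(cmd);	/* UNMAP or WRITE SAME */
	else if (rq->cmd_flags & REQ_WRITE_SAME)
		return sd_setup_write_same_cmnd(cmd);
	else if (rq->cmd_flags & REQ_FLUSH)
		return sd_setup_flush_cmnd(cmd);	/* SYNCHRONIZE CACHE(10) */
	else
		return sd_setup_read_write_cmnd(cmd);	/* READ/WRITE 6/10/16/32 */
}

/* The discard and WRITE SAME helpers transfer less data than the range they
 * affect, so they set __data_len to the payload size while mapping the S/G
 * list and restore it afterwards so completion covers the whole request:
 *
 *	rq->__data_len = sdp->sector_size;	payload actually sent
 *	ret = scsi_init_io(cmd, GFP_ATOMIC);	map the S/G list
 *	rq->__data_len = nr_bytes;		full range, for completion
 */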