author    Christoph Hellwig <hch@lst.de>  2020-04-14 09:42:24 +0200
committer Jens Axboe <axboe@kernel.dk>  2020-04-22 18:47:35 +0200
commit    cc97923a5bccc776851c242b61015faf288d5c22 (patch)
tree      d6510d4f326674e96bdf70c4f07472f492537db8 /drivers/ata
parent    scsi: merge scsi_init_sgtable into scsi_init_io (diff)
block: move dma drain handling to scsi
Don't burden the common block code with specifics of the libata DMA
draining mechanism. Instead, move most of the code to the SCSI midlayer.
That also means the nr_phys_segments adjustments in the blk-mq fast path
can go away entirely, given that SCSI never looks at nr_phys_segments
after mapping the request to a scatterlist.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
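The SCSI-midlayer half of this change is outside the drivers/ata-limited
diff shown below. As a rough sketch of the mechanism the commit message
describes — assuming the sdev->dma_drain_buf/dma_drain_len fields and the
host-template dma_need_drain hook this series introduces — the mapping
path reserves one extra scatterlist entry up front and appends the drain
buffer after the last mapped segment. The function name and details here
are illustrative, not the verbatim scsi_lib.c patch:

	#include <linux/blkdev.h>
	#include <linux/scatterlist.h>
	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_host.h>

	/* Illustrative sketch, not the verbatim scsi_lib.c change. */
	static blk_status_t scsi_setup_sgtable_sketch(struct scsi_cmnd *cmd)
	{
		struct scsi_device *sdev = cmd->device;
		struct request *rq = cmd->request;
		unsigned short nr_segs = blk_rq_nr_phys_segments(rq);
		struct scatterlist *last_sg = NULL;
		bool need_drain;
		int count;

		/*
		 * Drain only variable-length passthrough reads, mirroring the
		 * checks the old block-layer helper did; the per-command policy
		 * now comes from the host driver via ->dma_need_drain.
		 */
		need_drain = sdev->dma_drain_len && blk_rq_is_passthrough(rq) &&
			     blk_rq_bytes(rq) && !op_is_write(req_op(rq)) &&
			     sdev->host->hostt->dma_need_drain(rq);
		if (need_drain)
			nr_segs++;	/* room for the extra drain segment */

		if (sg_alloc_table_chained(&cmd->sdb.table, nr_segs,
					   cmd->sdb.table.sgl, SG_CHUNK_SIZE))
			return BLK_STS_RESOURCE;

		/* Map the request; __blk_rq_map_sg hands back the last entry. */
		count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);

		if (need_drain) {
			/* Append the drain buffer after the last mapped segment. */
			sg_unmark_end(last_sg);
			sg_set_buf(last_sg + 1, sdev->dma_drain_buf,
				   sdev->dma_drain_len);
			sg_mark_end(last_sg + 1);
			count++;
		}
		cmd->sdb.table.nents = count;
		return BLK_STS_OK;
	}

Because the drain consumes a scatterlist slot of its own, the driver must
leave room for it — which is exactly what the blk_queue_max_segments()
adjustment in the hunk below does.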
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/libata-scsi.c | 28
1 file changed, 10 insertions(+), 18 deletions(-)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 36e588d88b95..feb13b8f93d7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1017,16 +1017,11 @@ void ata_scsi_sdev_config(struct scsi_device *sdev)
* RETURNS:
* 1 if the request needs draining; otherwise, 0.
*/
-static int atapi_drain_needed(struct request *rq)
+bool ata_scsi_dma_need_drain(struct request *rq)
{
- if (likely(!blk_rq_is_passthrough(rq)))
- return 0;
-
- if (!blk_rq_bytes(rq) || op_is_write(req_op(rq)))
- return 0;
-
return atapi_cmd_type(scsi_req(rq)->cmd[0]) == ATAPI_MISC;
}
+EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
{
@@ -1039,21 +1034,21 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
blk_queue_max_hw_sectors(q, dev->max_sectors);
if (dev->class == ATA_DEV_ATAPI) {
- void *buf;
-
sdev->sector_size = ATA_SECT_SIZE;
/* set DMA padding */
blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1);
- /* configure draining */
- buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
- if (!buf) {
+ /* make room for appending the drain */
+ blk_queue_max_segments(q, queue_max_segments(q) - 1);
+
+ sdev->dma_drain_len = ATAPI_MAX_DRAIN;
+ sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len,
+ q->bounce_gfp | GFP_KERNEL);
+ if (!sdev->dma_drain_buf) {
ata_dev_err(dev, "drain buffer allocation failed\n");
return -ENOMEM;
}
-
- blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
} else {
sdev->sector_size = ata_id_logical_sector_size(dev->id);
sdev->manage_start_stop = 1;
@@ -1135,7 +1130,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
- struct request_queue *q = sdev->request_queue;
unsigned long flags;
struct ata_device *dev;
@@ -1152,9 +1146,7 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
}
spin_unlock_irqrestore(ap->lock, flags);
- kfree(q->dma_drain_buffer);
- q->dma_drain_buffer = NULL;
- q->dma_drain_size = 0;
+ kfree(sdev->dma_drain_buf);
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
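The newly exported ata_scsi_dma_need_drain is meant to be referenced from
the SCSI host template, via the dma_need_drain hook this series adds to
struct scsi_host_template (in the full commit that wiring presumably lands
in libata's shared host-template initializer, outside the drivers/ata
diffstat shown above). A hypothetical driver-side hookup, for illustration
only — example_ata_sht and the template fragment are not from this patch:

	#include <linux/module.h>
	#include <linux/libata.h>
	#include <scsi/scsi_host.h>

	/*
	 * Hypothetical host template fragment: the driver, not the block
	 * layer, now tells the SCSI midlayer which requests need the drain
	 * buffer appended during scatterlist mapping.
	 */
	static struct scsi_host_template example_ata_sht = {
		.module			= THIS_MODULE,
		.name			= "example_ata",
		.queuecommand		= ata_scsi_queuecmd,
		.slave_configure	= ata_scsi_slave_config,
		.slave_destroy		= ata_scsi_slave_destroy,
		.dma_need_drain		= ata_scsi_dma_need_drain,
	};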