author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 20:25:08 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-06 20:25:08 +0200
commit     044f1daaaaf7c86bc4fcf433848b7baae236946b (patch)
tree       55a5b94c75cc6e51992ee3b5d7c49878c7ae7760 /drivers/block
parent     refcount: change EXPORT_SYMBOL markings (diff)
parent     mq-deadline: add debugfs attributes (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes and updates from Jens Axboe:
 "Some fixes and followup features/changes that should go in, in this
  merge window. This contains:

   - Two fixes for lightnvm from Javier, fixing problems in the new
     code merged earlier in this merge window.

   - A fix from Jan for the backing device changes, fixing an issue in
     NFS that causes a failure to mount on certain setups.

   - A change from Christoph, cleaning up the blk-mq init and exit
     request paths.

   - Remove elevator_change(), which is now unused. From Bart.

   - A fix for queue operation invocation on a dead queue, from Bart.

   - A series fixing up mtip32xx for blk-mq scheduling, removing a
     bandaid we previously had in place for this. From me.

   - A regression fix for this series, fixing a case where we wait on
     workqueue flushing from an invalid (non-blocking) context. From
     me.

   - A fix/optimization from Ming, ensuring that we don't both quiesce
     and freeze a queue at the same time.

   - A fix from Peter on lock ordering for CPU hotplug. Not a real
     problem right now, but will be once the CPU hotplug rework goes
     in.

   - A series from Omar, cleaning up our blk-mq debugfs support, and
     adding support for exporting info from schedulers in debugfs as
     well. This is really useful in debugging stalls or livelocks.
     From Omar"

* 'for-linus' of git://git.kernel.dk/linux-block: (28 commits)
  mq-deadline: add debugfs attributes
  kyber: add debugfs attributes
  blk-mq-debugfs: allow schedulers to register debugfs attributes
  blk-mq: untangle debugfs and sysfs
  blk-mq: move debugfs declarations to a separate header file
  blk-mq: Do not invoke queue operations on a dead queue
  blk-mq-debugfs: get rid of a bunch of boilerplate
  blk-mq-debugfs: rename hw queue directories from <n> to hctx<n>
  blk-mq-debugfs: don't open code strstrip()
  blk-mq-debugfs: error on long write to queue "state" file
  blk-mq-debugfs: clean up flag definitions
  blk-mq-debugfs: separate flags with |
  nfs: Fix bdi handling for cloned superblocks
  block/mq: Cure cpu hotplug lock inversion
  lightnvm: fix bad back free on error path
  lightnvm: create cmd before allocating request
  blk-mq: don't use sync workqueue flushing from drivers
  mtip32xx: convert internal commands to regular block infrastructure
  mtip32xx: cleanup internal tag assumptions
  block: don't call blk_mq_quiesce_queue() after queue is frozen
  ...
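The init/exit request path cleanup from Christoph changes the blk-mq ->init_request() callback to take the tag set instead of an opaque driver pointer, and drops the per-request index argument; the loop, nbd, rbd and virtio_blk hunks below all follow the same pattern. A minimal sketch of a converted callback, using hypothetical foo_* names that are not part of this diff:

#include <linux/blk-mq.h>

/* Hypothetical per-device and per-request (PDU) structures, for illustration only. */
struct foo_dev;
struct foo_cmd {
	struct foo_dev *dev;
};

/*
 * New-style ->init_request(): driver data now comes from set->driver_data
 * (it used to arrive as the leading "void *data" argument), and the old
 * request_idx parameter is gone.
 */
static int foo_init_request(struct blk_mq_tag_set *set, struct request *rq,
			    unsigned int hctx_idx, unsigned int numa_node)
{
	struct foo_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->dev = set->driver_data;
	return 0;
}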
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/loop.c                 5
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  490
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.h   10
-rw-r--r--  drivers/block/nbd.c                  7
-rw-r--r--  drivers/block/rbd.c                  5
-rw-r--r--  drivers/block/virtio_blk.c           7
6 files changed, 171 insertions, 353 deletions
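The mtip32xx change is the bulk of the diff: instead of hand-rolled completions (comp_func/comp_data) and polling, internal commands are now allocated as reserved requests and sent through the normal block layer, so ->queue_rq() issues them and the interrupt path completes them. A condensed, hypothetical sketch of that flow (error handling and the FIS/scatter-gather setup omitted; foo_* names are illustrative, not the driver's):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/err.h>

struct foo_int_cmd {		/* driver-private descriptor for the command */
	dma_addr_t buffer;
	int buf_len;
	u32 opts;
};

struct foo_cmd {		/* blk-mq PDU, see blk_mq_rq_to_pdu() */
	int status;		/* filled in by the driver's completion path */
};

static int foo_exec_internal_command(struct request_queue *q,
				     struct foo_int_cmd *icmd,
				     unsigned int timeout_ms)
{
	struct request *rq;
	struct foo_cmd *cmd;
	int ret;

	/* Reserved tag, driver-internal (non-fs) request. */
	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cmd = blk_mq_rq_to_pdu(rq);
	rq->special = icmd;			/* picked up in ->queue_rq() */
	rq->timeout = msecs_to_jiffies(timeout_ms);

	/* Insert at the head of the queue and wait for completion. */
	blk_execute_rq(q, NULL, rq, true);

	ret = cmd->status;
	blk_mq_free_request(rq);
	return ret;
}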
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 994403efee19..28d932906f24 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1697,9 +1697,8 @@ static void loop_queue_work(struct kthread_work *work)
loop_handle_cmd(cmd);
}
-static int loop_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 02804cc79d82..3a779a4f5653 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -195,7 +195,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
if (mtip_check_surprise_removal(dd->pdev))
return NULL;
- rq = blk_mq_alloc_request(dd->queue, 0, BLK_MQ_REQ_RESERVED);
+ rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
if (IS_ERR(rq))
return NULL;
@@ -205,66 +205,12 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
return blk_mq_rq_to_pdu(rq);
}
-static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd)
-{
- blk_put_request(blk_mq_rq_from_pdu(cmd));
-}
-
-/*
- * Once we add support for one hctx per mtip group, this will change a bit
- */
-static struct request *mtip_rq_from_tag(struct driver_data *dd,
- unsigned int tag)
-{
- struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
-
- return blk_mq_tag_to_rq(hctx->tags, tag);
-}
-
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
unsigned int tag)
{
- struct request *rq = mtip_rq_from_tag(dd, tag);
-
- return blk_mq_rq_to_pdu(rq);
-}
-
-/*
- * IO completion function.
- *
- * This completion function is called by the driver ISR when a
- * command that was issued by the kernel completes. It first calls the
- * asynchronous completion function which normally calls back into the block
- * layer passing the asynchronous callback data, then unmaps the
- * scatter list associated with the completed command, and finally
- * clears the allocated bit associated with the completed command.
- *
- * @port Pointer to the port data structure.
- * @tag Tag of the command.
- * @data Pointer to driver_data.
- * @status Completion status.
- *
- * return value
- * None
- */
-static void mtip_async_complete(struct mtip_port *port,
- int tag, struct mtip_cmd *cmd, int status)
-{
- struct driver_data *dd = port->dd;
- struct request *rq;
-
- if (unlikely(!dd) || unlikely(!port))
- return;
-
- if (unlikely(status == PORT_IRQ_TF_ERR)) {
- dev_warn(&port->dd->pdev->dev,
- "Command tag %d failed due to TFE\n", tag);
- }
-
- rq = mtip_rq_from_tag(dd, tag);
+ struct blk_mq_hw_ctx *hctx = dd->queue->queue_hw_ctx[0];
- cmd->status = status;
- blk_mq_complete_request(rq);
+ return blk_mq_rq_to_pdu(blk_mq_tag_to_rq(hctx->tags, tag));
}
/*
@@ -581,43 +527,19 @@ static void print_tags(struct driver_data *dd,
"%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}
-/*
- * Internal command completion callback function.
- *
- * This function is normally called by the driver ISR when an internal
- * command completed. This function signals the command completion by
- * calling complete().
- *
- * @port Pointer to the port data structure.
- * @tag Tag of the command that has completed.
- * @data Pointer to a completion structure.
- * @status Completion status.
- *
- * return value
- * None
- */
-static void mtip_completion(struct mtip_port *port,
- int tag, struct mtip_cmd *command, int status)
-{
- struct completion *waiting = command->comp_data;
- if (unlikely(status == PORT_IRQ_TF_ERR))
- dev_warn(&port->dd->pdev->dev,
- "Internal command %d completed with TFE\n", tag);
-
- command->comp_func = NULL;
- command->comp_data = NULL;
- complete(waiting);
-}
-
-static void mtip_null_completion(struct mtip_port *port,
- int tag, struct mtip_cmd *command, int status)
-{
-}
-
static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
dma_addr_t buffer_dma, unsigned int sectors);
static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
struct smart_attr *attrib);
+
+static void mtip_complete_command(struct mtip_cmd *cmd, int status)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ cmd->status = status;
+ blk_mq_complete_request(req);
+}
+
/*
* Handle an error.
*
@@ -646,11 +568,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
-
- if (cmd->comp_data && cmd->comp_func) {
- cmd->comp_func(port, MTIP_TAG_INTERNAL,
- cmd, PORT_IRQ_TF_ERR);
- }
+ mtip_complete_command(cmd, -EIO);
return;
}
@@ -677,19 +595,9 @@ static void mtip_handle_tfe(struct driver_data *dd)
continue;
cmd = mtip_cmd_from_tag(dd, tag);
- if (likely(cmd->comp_func)) {
- set_bit(tag, tagaccum);
- cmd_cnt++;
- cmd->comp_func(port, tag, cmd, 0);
- } else {
- dev_err(&port->dd->pdev->dev,
- "Missing completion func for tag %d",
- tag);
- if (mtip_check_surprise_removal(dd->pdev)) {
- /* don't proceed further */
- return;
- }
- }
+ mtip_complete_command(cmd, 0);
+ set_bit(tag, tagaccum);
+ cmd_cnt++;
}
}
@@ -759,10 +667,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
tag,
fail_reason != NULL ?
fail_reason : "unknown");
- if (cmd->comp_func) {
- cmd->comp_func(port, tag,
- cmd, -ENODATA);
- }
+ mtip_complete_command(cmd, -ENODATA);
continue;
}
}
@@ -785,12 +690,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
- if (cmd->comp_func)
- cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR);
- else
- dev_warn(&port->dd->pdev->dev,
- "Bad completion for tag %d\n",
- tag);
+ mtip_complete_command(cmd, -EIO);
}
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
@@ -823,18 +723,7 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group,
continue;
command = mtip_cmd_from_tag(dd, tag);
- if (likely(command->comp_func))
- command->comp_func(port, tag, command, 0);
- else {
- dev_dbg(&dd->pdev->dev,
- "Null completion for tag %d",
- tag);
-
- if (mtip_check_surprise_removal(
- dd->pdev)) {
- return;
- }
- }
+ mtip_complete_command(command, 0);
}
completed >>= 1;
}
@@ -852,16 +741,13 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
struct mtip_port *port = dd->port;
struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
- if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
- (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL))) {
- if (cmd->comp_func) {
- cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0);
- return;
- }
- }
+ if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && cmd) {
+ int group = MTIP_TAG_INDEX(MTIP_TAG_INTERNAL);
+ int status = readl(port->cmd_issue[group]);
- return;
+ if (!(status & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))))
+ mtip_complete_command(cmd, 0);
+ }
}
/*
@@ -869,7 +755,6 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
*/
static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
{
-
if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
dev_warn(&dd->pdev->dev,
"Clearing PxSERR.DIAG.x\n");
@@ -996,8 +881,7 @@ static irqreturn_t mtip_irq_handler(int irq, void *instance)
static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
{
- writel(1 << MTIP_TAG_BIT(tag),
- port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+ writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}
static bool mtip_pause_ncq(struct mtip_port *port,
@@ -1035,53 +919,53 @@ static bool mtip_pause_ncq(struct mtip_port *port,
return false;
}
+static bool mtip_commands_active(struct mtip_port *port)
+{
+ unsigned int active;
+ unsigned int n;
+
+ /*
+ * Ignore s_active bit 0 of array element 0.
+ * This bit will always be set
+ */
+ active = readl(port->s_active[0]) & 0xFFFFFFFE;
+ for (n = 1; n < port->dd->slot_groups; n++)
+ active |= readl(port->s_active[n]);
+
+ return active != 0;
+}
+
/*
* Wait for port to quiesce
*
* @port Pointer to port data structure
* @timeout Max duration to wait (ms)
- * @atomic gfp_t flag to indicate blockable context or not
*
* return value
* 0 Success
* -EBUSY Commands still active
*/
-static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout,
- gfp_t atomic)
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
{
unsigned long to;
- unsigned int n;
- unsigned int active = 1;
+ bool active = true;
blk_mq_stop_hw_queues(port->dd->queue);
to = jiffies + msecs_to_jiffies(timeout);
do {
if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags) &&
- atomic == GFP_KERNEL) {
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
- if (atomic == GFP_KERNEL)
- msleep(100);
- else {
- cpu_relax();
- udelay(100);
- }
+ msleep(100);
if (mtip_check_surprise_removal(port->dd->pdev))
goto err_fault;
- /*
- * Ignore s_active bit 0 of array element 0.
- * This bit will always be set
- */
- active = readl(port->s_active[0]) & 0xFFFFFFFE;
- for (n = 1; n < port->dd->slot_groups; n++)
- active |= readl(port->s_active[n]);
-
+ active = mtip_commands_active(port);
if (!active)
break;
} while (time_before(jiffies, to));
@@ -1093,6 +977,13 @@ err_fault:
return -EFAULT;
}
+struct mtip_int_cmd {
+ int fis_len;
+ dma_addr_t buffer;
+ int buf_len;
+ u32 opts;
+};
+
/*
* Execute an internal command and wait for the completion.
*
@@ -1117,13 +1008,17 @@ static int mtip_exec_internal_command(struct mtip_port *port,
dma_addr_t buffer,
int buf_len,
u32 opts,
- gfp_t atomic,
unsigned long timeout)
{
- struct mtip_cmd_sg *command_sg;
- DECLARE_COMPLETION_ONSTACK(wait);
struct mtip_cmd *int_cmd;
struct driver_data *dd = port->dd;
+ struct request *rq;
+ struct mtip_int_cmd icmd = {
+ .fis_len = fis_len,
+ .buffer = buffer,
+ .buf_len = buf_len,
+ .opts = opts
+ };
int rv = 0;
unsigned long start;
@@ -1138,6 +1033,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
return -EFAULT;
}
+ rq = blk_mq_rq_from_pdu(int_cmd);
+ rq->special = &icmd;
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1146,135 +1043,60 @@ static int mtip_exec_internal_command(struct mtip_port *port,
clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
- if (atomic == GFP_KERNEL) {
- if (fis->command != ATA_CMD_STANDBYNOW1) {
- /* wait for io to complete if non atomic */
- if (mtip_quiesce_io(port,
- MTIP_QUIESCE_IO_TIMEOUT_MS, atomic) < 0) {
- dev_warn(&dd->pdev->dev,
- "Failed to quiesce IO\n");
- mtip_put_int_command(dd, int_cmd);
- clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
- return -EBUSY;
- }
+ if (fis->command != ATA_CMD_STANDBYNOW1) {
+ /* wait for io to complete if non atomic */
+ if (mtip_quiesce_io(port, MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) {
+ dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n");
+ blk_mq_free_request(rq);
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ return -EBUSY;
}
-
- /* Set the completion function and data for the command. */
- int_cmd->comp_data = &wait;
- int_cmd->comp_func = mtip_completion;
-
- } else {
- /* Clear completion - we're going to poll */
- int_cmd->comp_data = NULL;
- int_cmd->comp_func = mtip_null_completion;
}
/* Copy the command to the command table */
memcpy(int_cmd->command, fis, fis_len*4);
- /* Populate the SG list */
- int_cmd->command_header->opts =
- __force_bit2int cpu_to_le32(opts | fis_len);
- if (buf_len) {
- command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
-
- command_sg->info =
- __force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
- command_sg->dba =
- __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
- command_sg->dba_upper =
- __force_bit2int cpu_to_le32((buffer >> 16) >> 16);
-
- int_cmd->command_header->opts |=
- __force_bit2int cpu_to_le32((1 << 16));
- }
-
- /* Populate the command header */
- int_cmd->command_header->byte_count = 0;
-
start = jiffies;
+ rq->timeout = timeout;
- /* Issue the command to the hardware */
- mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
-
- if (atomic == GFP_KERNEL) {
- /* Wait for the command to complete or timeout. */
- if ((rv = wait_for_completion_interruptible_timeout(
- &wait,
- msecs_to_jiffies(timeout))) <= 0) {
-
- if (rv == -ERESTARTSYS) { /* interrupted */
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %u ms\n",
- fis->command,
- jiffies_to_msecs(jiffies - start));
- rv = -EINTR;
- goto exec_ic_exit;
- } else if (rv == 0) /* timeout */
- dev_err(&dd->pdev->dev,
- "Internal command did not complete [%02X] within timeout of %lu ms\n",
- fis->command, timeout);
- else
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
- fis->command, rv, timeout);
-
- if (mtip_check_surprise_removal(dd->pdev) ||
- test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned due to SR\n",
- fis->command);
- rv = -ENXIO;
- goto exec_ic_exit;
- }
- mtip_device_reset(dd); /* recover from timeout issue */
- rv = -EAGAIN;
+ /* insert request and run queue */
+ blk_execute_rq(rq->q, NULL, rq, true);
+
+ rv = int_cmd->status;
+ if (rv < 0) {
+ if (rv == -ERESTARTSYS) { /* interrupted */
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] was interrupted after %u ms\n",
+ fis->command,
+ jiffies_to_msecs(jiffies - start));
+ rv = -EINTR;
goto exec_ic_exit;
- }
- } else {
- u32 hba_stat, port_stat;
-
- /* Spin for <timeout> checking if command still outstanding */
- timeout = jiffies + msecs_to_jiffies(timeout);
- while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL))
- && time_before(jiffies, timeout)) {
- if (mtip_check_surprise_removal(dd->pdev)) {
- rv = -ENXIO;
- goto exec_ic_exit;
- }
- if ((fis->command != ATA_CMD_STANDBYNOW1) &&
- test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
- rv = -ENXIO;
- goto exec_ic_exit;
- }
- port_stat = readl(port->mmio + PORT_IRQ_STAT);
- if (!port_stat)
- continue;
+ } else if (rv == 0) /* timeout */
+ dev_err(&dd->pdev->dev,
+ "Internal command did not complete [%02X] within timeout of %lu ms\n",
+ fis->command, timeout);
+ else
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
+ fis->command, rv, timeout);
- if (port_stat & PORT_IRQ_ERR) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] failed\n",
- fis->command);
- mtip_device_reset(dd);
- rv = -EIO;
- goto exec_ic_exit;
- } else {
- writel(port_stat, port->mmio + PORT_IRQ_STAT);
- hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
- if (hba_stat)
- writel(hba_stat,
- dd->mmio + HOST_IRQ_STAT);
- }
- break;
+ if (mtip_check_surprise_removal(dd->pdev) ||
+ test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag)) {
+ dev_err(&dd->pdev->dev,
+ "Internal command [%02X] wait returned due to SR\n",
+ fis->command);
+ rv = -ENXIO;
+ goto exec_ic_exit;
}
+ mtip_device_reset(dd); /* recover from timeout issue */
+ rv = -EAGAIN;
+ goto exec_ic_exit;
}
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL)) {
+ if (readl(port->cmd_issue[MTIP_TAG_INDEX(MTIP_TAG_INTERNAL)])
+ & (1 << MTIP_TAG_BIT(MTIP_TAG_INTERNAL))) {
rv = -ENXIO;
if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
mtip_device_reset(dd);
@@ -1283,7 +1105,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
exec_ic_exit:
/* Clear the allocated and active bits for the internal command. */
- mtip_put_int_command(dd, int_cmd);
+ blk_mq_free_request(rq);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
if (rv >= 0 && mtip_pause_ncq(port, fis)) {
/* NCQ paused */
@@ -1391,7 +1213,6 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
port->identify_dma,
sizeof(u16) * ATA_ID_WORDS,
0,
- GFP_KERNEL,
MTIP_INT_CMD_TIMEOUT_MS)
< 0) {
rv = -1;
@@ -1477,7 +1298,6 @@ static int mtip_standby_immediate(struct mtip_port *port)
0,
0,
0,
- GFP_ATOMIC,
timeout);
dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
jiffies_to_msecs(jiffies - start));
@@ -1523,7 +1343,6 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
buffer_dma,
sectors * ATA_SECT_SIZE,
0,
- GFP_ATOMIC,
MTIP_INT_CMD_TIMEOUT_MS);
}
@@ -1558,7 +1377,6 @@ static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
buffer_dma,
ATA_SECT_SIZE,
0,
- GFP_ATOMIC,
15000);
}
@@ -1686,7 +1504,6 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
dma_addr,
ATA_SECT_SIZE,
0,
- GFP_KERNEL,
MTIP_TRIM_TIMEOUT_MS) < 0)
rv = -EIO;
@@ -1850,7 +1667,6 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
0,
0,
0,
- GFP_KERNEL,
to) < 0) {
return -1;
}
@@ -1946,7 +1762,6 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
(xfer_sz ? dma_addr : 0),
(xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
0,
- GFP_KERNEL,
to)
< 0) {
rv = -EFAULT;
@@ -2189,7 +2004,6 @@ static int exec_drive_taskfile(struct driver_data *dd,
dma_buffer,
transfer_size,
0,
- GFP_KERNEL,
timeout) < 0) {
err = -EIO;
goto abort;
@@ -2446,12 +2260,6 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
(nents << 16) | 5 | AHCI_CMD_PREFETCH);
command->command_header->byte_count = 0;
- /*
- * Set the completion function and data for the command
- * within this layer.
- */
- command->comp_data = dd;
- command->comp_func = mtip_async_complete;
command->direction = dma_dir;
/*
@@ -3825,6 +3633,42 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
return false;
}
+static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ struct driver_data *dd = hctx->queue->queuedata;
+ struct mtip_int_cmd *icmd = rq->special;
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_cmd_sg *command_sg;
+
+ if (mtip_commands_active(dd->port))
+ return BLK_MQ_RQ_QUEUE_BUSY;
+
+ /* Populate the SG list */
+ cmd->command_header->opts =
+ __force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len);
+ if (icmd->buf_len) {
+ command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
+
+ command_sg->info =
+ __force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
+ command_sg->dba =
+ __force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
+ command_sg->dba_upper =
+ __force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16);
+
+ cmd->command_header->opts |=
+ __force_bit2int cpu_to_le32((1 << 16));
+ }
+
+ /* Populate the command header */
+ cmd->command_header->byte_count = 0;
+
+ blk_mq_start_request(rq);
+ mtip_issue_non_ncq_command(dd->port, rq->tag);
+ return BLK_MQ_RQ_QUEUE_OK;
+}
+
static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -3833,6 +3677,9 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
mtip_init_cmd_header(rq);
+ if (blk_rq_is_passthrough(rq))
+ return mtip_issue_reserved_cmd(hctx, rq);
+
if (unlikely(mtip_check_unal_depth(hctx, rq)))
return BLK_MQ_RQ_QUEUE_BUSY;
@@ -3845,10 +3692,10 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_MQ_RQ_QUEUE_ERROR;
}
-static void mtip_free_cmd(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx)
+static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
{
- struct driver_data *dd = data;
+ struct driver_data *dd = set->driver_data;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
if (!cmd->command)
@@ -3858,20 +3705,12 @@ static void mtip_free_cmd(void *data, struct request *rq,
cmd->command, cmd->command_dma);
}
-static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx,
- unsigned int request_idx, unsigned int numa_node)
+static int mtip_init_cmd(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct driver_data *dd = data;
+ struct driver_data *dd = set->driver_data;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
- /*
- * For flush requests, request_idx starts at the end of the
- * tag space. Since we don't support FLUSH/FUA, simply return
- * 0 as there's nothing to be done.
- */
- if (request_idx >= MTIP_MAX_COMMAND_SLOTS)
- return 0;
-
cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ,
&cmd->command_dma, GFP_KERNEL);
if (!cmd->command)
@@ -3888,8 +3727,12 @@ static enum blk_eh_timer_return mtip_cmd_timeout(struct request *req,
{
struct driver_data *dd = req->q->queuedata;
- if (reserved)
- goto exit_handler;
+ if (reserved) {
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
+
+ cmd->status = -ETIME;
+ return BLK_EH_HANDLED;
+ }
if (test_bit(req->tag, dd->port->cmds_to_issue))
goto exit_handler;
@@ -3982,7 +3825,7 @@ static int mtip_block_initialize(struct driver_data *dd)
dd->tags.reserved_tags = 1;
dd->tags.cmd_size = sizeof(struct mtip_cmd);
dd->tags.numa_node = dd->numa_node;
- dd->tags.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_NO_SCHED;
+ dd->tags.flags = BLK_MQ_F_SHOULD_MERGE;
dd->tags.driver_data = dd;
dd->tags.timeout = MTIP_NCQ_CMD_TIMEOUT_MS;
@@ -4116,20 +3959,10 @@ protocol_init_error:
static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
- struct driver_data *dd = (struct driver_data *)data;
- struct mtip_cmd *cmd;
-
- if (likely(!reserv)) {
- cmd = blk_mq_rq_to_pdu(rq);
- cmd->status = -ENODEV;
- blk_mq_complete_request(rq);
- } else if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &dd->port->flags)) {
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL);
- if (cmd->comp_func)
- cmd->comp_func(dd->port, MTIP_TAG_INTERNAL,
- cmd, -ENODEV);
- }
+ cmd->status = -ENODEV;
+ blk_mq_complete_request(rq);
}
/*
@@ -4168,8 +4001,7 @@ static int mtip_block_remove(struct driver_data *dd)
* Explicitly wait here for IOs to quiesce,
* as mtip_standby_drive usually won't wait for IOs.
*/
- if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS,
- GFP_KERNEL))
+ if (!mtip_quiesce_io(dd->port, MTIP_QUIESCE_IO_TIMEOUT_MS))
mtip_standby_drive(dd);
}
else
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 57b41528a824..37b8e3e0bb78 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -333,16 +333,6 @@ struct mtip_cmd {
dma_addr_t command_dma; /* corresponding physical address */
- void *comp_data; /* data passed to completion function comp_func() */
- /*
- * Completion function called by the ISR upon completion of
- * a command.
- */
- void (*comp_func)(struct mtip_port *port,
- int tag,
- struct mtip_cmd *cmd,
- int status);
-
int scatter_ents; /* Number of scatter list entries used */
int unaligned; /* command is unaligned on 4k boundary */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9b482baa869e..e9e2a9e95a66 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1396,12 +1396,11 @@ static void nbd_dbg_close(void)
#endif
-static int nbd_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
- cmd->nbd = data;
+ cmd->nbd = set->driver_data;
return 0;
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 089ac4179919..3670e8dd03fe 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4307,9 +4307,8 @@ out:
return ret;
}
-static int rbd_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct work_struct *work = blk_mq_rq_to_pdu(rq);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index f94614257462..94173de1efaa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -573,11 +573,10 @@ static const struct device_attribute dev_attr_cache_type_rw =
__ATTR(cache_type, S_IRUGO|S_IWUSR,
virtblk_cache_type_show, virtblk_cache_type_store);
-static int virtblk_init_request(void *data, struct request *rq,
- unsigned int hctx_idx, unsigned int request_idx,
- unsigned int numa_node)
+static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- struct virtio_blk *vblk = data;
+ struct virtio_blk *vblk = set->driver_data;
struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
#ifdef CONFIG_VIRTIO_BLK_SCSI