Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/nvme-core.c  49
1 file changed, 19 insertions(+), 30 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index a57685f74e5e..0f388206b15b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1274,29 +1274,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = cmd->nvmeq;
- /*
- * The aborted req will be completed on receiving the abort req.
- * We enable the timer again. If hit twice, it'll cause a device reset,
- * as the device then is in a faulty state.
- */
- int ret = BLK_EH_RESET_TIMER;
-
dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
nvmeq->qid);
-
spin_lock_irq(&nvmeq->q_lock);
- if (!nvmeq->dev->initialized) {
- /*
- * Force cancelled command frees the request, which requires we
- * return BLK_EH_NOT_HANDLED.
- */
- nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
- ret = BLK_EH_NOT_HANDLED;
- } else
- nvme_abort_req(req);
+ nvme_abort_req(req);
spin_unlock_irq(&nvmeq->q_lock);
- return ret;
+ /*
+ * The aborted req will be completed on receiving the abort req.
+ * We enable the timer again. If hit twice, it'll cause a device reset,
+ * as the device then is in a faulty state.
+ */
+ return BLK_EH_RESET_TIMER;
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
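For reference, the timeout handler as it reads with this hunk applied. This is a sketch assembled from the context and added lines above, not a verbatim copy of the post-patch file:

static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
{
	struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
	struct nvme_queue *nvmeq = cmd->nvmeq;

	dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
							nvmeq->qid);

	/* Issue an abort for the timed-out command. */
	spin_lock_irq(&nvmeq->q_lock);
	nvme_abort_req(req);
	spin_unlock_irq(&nvmeq->q_lock);

	/*
	 * The aborted req will be completed on receiving the abort req.
	 * We enable the timer again. If hit twice, it'll cause a device reset,
	 * as the device then is in a faulty state.
	 */
	return BLK_EH_RESET_TIMER;
}

The BLK_EH_NOT_HANDLED branch is gone: the handler no longer force-cancels requests via nvme_cancel_queue_ios(). That path stays in nvme_clear_queue(), which the shutdown rework in the later hunks runs only after the device is unmapped.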
@@ -1349,7 +1338,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
spin_lock_irq(&nvmeq->q_lock);
- nvme_process_cq(nvmeq);
if (hctx && hctx->tags)
blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
@@ -1372,7 +1360,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
}
if (!qid && dev->admin_q)
blk_mq_freeze_queue_start(dev->admin_q);
- nvme_clear_queue(nvmeq);
+
+ spin_lock_irq(&nvmeq->q_lock);
+ nvme_process_cq(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
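Taken together, the two hunks above move completion-queue processing out of nvme_clear_queue() and into nvme_disable_queue(). A sketch of both routines with the hunks applied, reconstructed from the context lines; the portion of nvme_disable_queue() that the diff does not show is elided:

static void nvme_clear_queue(struct nvme_queue *nvmeq)
{
	struct blk_mq_hw_ctx *hctx = nvmeq->hctx;

	/* Only cancel whatever is still outstanding; no CQ processing here. */
	spin_lock_irq(&nvmeq->q_lock);
	if (hctx && hctx->tags)
		blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
}

static void nvme_disable_queue(struct nvme_dev *dev, int qid)
{
	/* ... earlier part of the function (including the nvmeq lookup), not shown in this diff ... */

	if (!qid && dev->admin_q)
		blk_mq_freeze_queue_start(dev->admin_q);

	/* Reap completions the controller already posted before tearing the queue down. */
	spin_lock_irq(&nvmeq->q_lock);
	nvme_process_cq(nvmeq);
	spin_unlock_irq(&nvmeq->q_lock);
}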
@@ -2121,8 +2112,7 @@ static int nvme_kthread(void *data)
spin_lock(&dev_list_lock);
list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
- if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
- dev->initialized) {
+ if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
if (work_busy(&dev->reset_work))
continue;
list_del_init(&dev->node);
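With dev->initialized gone (the later hunks drop the assignments in nvme_dev_shutdown(), nvme_dev_resume() and nvme_async_probe()), the polling kthread now keys the reset purely off the controller fatal status bit. A sketch of the loop body reconstructed from this hunk, with the parts the diff does not show elided:

	list_for_each_entry_safe(dev, next, &dev_list, node) {
		int i;

		/* NVME_CSTS_CFS: controller reports fatal status. */
		if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
			if (work_busy(&dev->reset_work))
				continue;	/* a reset is already in flight */
			list_del_init(&dev->node);
			/* ... presumably the reset hand-off; not shown in this diff ... */
		}
		/* ... rest of the loop body, not shown in this diff ... */
	}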
@@ -2525,8 +2515,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
static void nvme_del_queue_end(struct nvme_queue *nvmeq)
{
struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
- nvme_clear_queue(nvmeq);
nvme_put_dq(dq);
}
@@ -2669,7 +2657,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
int i;
u32 csts = -1;
- dev->initialized = 0;
nvme_dev_list_remove(dev);
if (dev->bar) {
@@ -2680,7 +2667,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i >= 0; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
nvme_suspend_queue(nvmeq);
- nvme_clear_queue(nvmeq);
}
} else {
nvme_disable_io_queues(dev);
@@ -2688,6 +2674,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_disable_queue(dev, 0);
}
nvme_dev_unmap(dev);
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_clear_queue(dev->queues[i]);
}
static void nvme_dev_remove(struct nvme_dev *dev)
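The nvme_dev_shutdown() hunks reorder cleanup: dev->initialized is no longer cleared, nvme_clear_queue() leaves the per-queue suspend loop (and, per the earlier hunk, nvme_del_queue_end()), and every queue is cleared in a single pass only after nvme_dev_unmap(). The tail of the function with the hunks applied, sketched from the fragments the diff shows, with the surrounding structure elided:

	for (i = dev->queue_count - 1; i >= 0; i--) {
		struct nvme_queue *nvmeq = dev->queues[i];
		nvme_suspend_queue(nvmeq);	/* no nvme_clear_queue() here any more */
	}
	/* ... the else branch disabling the IO and admin queues ... */
	nvme_dev_unmap(dev);

	/* Cancel whatever is still outstanding only once the device is unmapped. */
	for (i = dev->queue_count - 1; i >= 0; i--)
		nvme_clear_queue(dev->queues[i]);
}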
@@ -2955,7 +2944,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
nvme_unfreeze_queues(dev);
nvme_set_irq_hints(dev);
}
- dev->initialized = 1;
return 0;
}
@@ -3063,11 +3051,12 @@ static void nvme_async_probe(struct work_struct *work)
goto reset;
nvme_set_irq_hints(dev);
- dev->initialized = 1;
return;
reset:
- dev->reset_workfn = nvme_reset_failed_dev;
- queue_work(nvme_workq, &dev->reset_work);
+ if (!work_busy(&dev->reset_work)) {
+ dev->reset_workfn = nvme_reset_failed_dev;
+ queue_work(nvme_workq, &dev->reset_work);
+ }
}
static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
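Finally, the tail of nvme_async_probe() with the last hunk applied: the success path no longer sets dev->initialized, and the failure path only queues reset work if none is already pending. Sketched from the hunk, with earlier probe steps elided:

	/* ... earlier probe steps, not shown in this diff ... */
		goto reset;

	nvme_set_irq_hints(dev);
	return;

 reset:
	if (!work_busy(&dev->reset_work)) {
		dev->reset_workfn = nvme_reset_failed_dev;
		queue_work(nvme_workq, &dev->reset_work);
	}
}

This mirrors the existing work_busy() guard in the kthread path above, so the same reset work is not queued a second time while a reset is still running.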