author     Jason Gunthorpe <jgg@mellanox.com>  2019-02-04 22:53:42 +0100
committer  Jason Gunthorpe <jgg@mellanox.com>  2019-02-04 22:53:42 +0100
commit     6a8a2aa62da2fbe51f5449993fd366398048f465 (patch)
tree       566c6fcc782eefbca054af8553371d09b9d0734c /drivers/nvme/host
parent     IB/core: Remove ib_sg_dma_address() and ib_sg_dma_len() (diff)
parent     Linux 5.0-rc5 (diff)
Merge tag 'v5.0-rc5' into rdma.git for-next
Linux 5.0-rc5
Needed to merge the include/uapi changes so we have an up-to-date
single tree for these files. Patches already posted are also expected to
need this for dependencies.
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--   drivers/nvme/host/multipath.c |  3
-rw-r--r--   drivers/nvme/host/pci.c       | 21
-rw-r--r--   drivers/nvme/host/rdma.c      | 64
-rw-r--r--   drivers/nvme/host/tcp.c       | 19
4 files changed, 68 insertions, 39 deletions
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index df4b3a6db51b..b9fff3b8ed1b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -545,8 +545,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
 	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
 		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
-	if (!(ctrl->anacap & (1 << 6)))
-		ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
 
 	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
 		dev_err(ctrl->device,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index deb1a66bf117..9bc585415d9b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2041,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 	return ret;
 }
 
+/* irq_queues covers admin queue */
 static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
 {
 	unsigned int this_w_queues = write_queues;
 
+	WARN_ON(!irq_queues);
+
 	/*
-	 * Setup read/write queue split
+	 * Setup read/write queue split, assign admin queue one independent
+	 * irq vector if irq_queues is > 1.
 	 */
-	if (irq_queues == 1) {
+	if (irq_queues <= 2) {
 		dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
 		dev->io_queues[HCTX_TYPE_READ] = 0;
 		return;
@@ -2056,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
 
 	/*
 	 * If 'write_queues' is set, ensure it leaves room for at least
-	 * one read queue
+	 * one read queue and one admin queue
 	 */
 	if (this_w_queues >= irq_queues)
-		this_w_queues = irq_queues - 1;
+		this_w_queues = irq_queues - 2;
 
 	/*
 	 * If 'write_queues' is set to zero, reads and writes will share
 	 * a queue set.
 	 */
 	if (!this_w_queues) {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues;
+		dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
 		dev->io_queues[HCTX_TYPE_READ] = 0;
 	} else {
 		dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
-		dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues;
+		dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
 	}
 }
 
@@ -2095,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		this_p_queues = nr_io_queues - 1;
 		irq_queues = 1;
 	} else {
-		irq_queues = nr_io_queues - this_p_queues;
+		irq_queues = nr_io_queues - this_p_queues + 1;
 	}
 	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
@@ -2115,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 		 * If we got a failure and we're down to asking for just
 		 * 1 + 1 queues, just ask for a single vector. We'll share
 		 * that between the single IO queue and the admin queue.
+		 * Otherwise, we assign one independent vector to admin queue.
 		 */
-		if (result >= 0 && irq_queues > 1)
+		if (irq_queues > 1)
 			irq_queues = irq_sets[0] + irq_sets[1] + 1;
 
 		result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0a2fd2949ad7..52abc3a6de12 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -119,6 +119,7 @@ struct nvme_rdma_ctrl {
 
 	struct nvme_ctrl	ctrl;
 	bool			use_inline_data;
+	u32			io_queues[HCTX_MAX_TYPES];
 };
 
 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
@@ -165,8 +166,8 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
 static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
 {
 	return nvme_rdma_queue_idx(queue) >
-		queue->ctrl->ctrl.opts->nr_io_queues +
-		queue->ctrl->ctrl.opts->nr_write_queues;
+		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+		queue->ctrl->io_queues[HCTX_TYPE_READ];
 }
 
 static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
@@ -661,8 +662,21 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
 	nr_io_queues = min_t(unsigned int, nr_io_queues,
 				ibdev->num_comp_vectors);
 
-	nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
-	nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());
+	if (opts->nr_write_queues) {
+		ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+			min(opts->nr_write_queues, nr_io_queues);
+		nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
+	} else {
+		ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
+	}
+
+	ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
+
+	if (opts->nr_poll_queues) {
+		ctrl->io_queues[HCTX_TYPE_POLL] =
+			min(opts->nr_poll_queues, num_online_cpus());
+		nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
+	}
 
 	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
 	if (ret)
@@ -1689,18 +1703,28 @@ static enum blk_eh_timer_return
 nvme_rdma_timeout(struct request *rq, bool reserved)
 {
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+	struct nvme_rdma_queue *queue = req->queue;
+	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-	dev_warn(req->queue->ctrl->ctrl.device,
-		 "I/O %d QID %d timeout, reset controller\n",
-		 rq->tag, nvme_rdma_queue_idx(req->queue));
+	dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+		 rq->tag, nvme_rdma_queue_idx(queue));
 
-	/* queue error recovery */
-	nvme_rdma_error_recovery(req->queue->ctrl);
+	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+		/*
+		 * Teardown immediately if controller times out while starting
+		 * or we are already started error recovery. all outstanding
+		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_rdma_teardown_io_queues(ctrl, false);
+		nvme_rdma_teardown_admin_queue(ctrl, false);
+		return BLK_EH_DONE;
+	}
 
-	/* fail with DNR on cmd timeout */
-	nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+	nvme_rdma_error_recovery(ctrl);
 
-	return BLK_EH_DONE;
+	return BLK_EH_RESET_TIMER;
 }
 
 static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
@@ -1779,17 +1803,15 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 
 	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
-	set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+	set->map[HCTX_TYPE_DEFAULT].nr_queues =
+		ctrl->io_queues[HCTX_TYPE_DEFAULT];
+	set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
 	if (ctrl->ctrl.opts->nr_write_queues) {
 		/* separate read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->ctrl.opts->nr_write_queues;
 		set->map[HCTX_TYPE_READ].queue_offset =
-			ctrl->ctrl.opts->nr_write_queues;
+			ctrl->io_queues[HCTX_TYPE_DEFAULT];
 	} else {
 		/* mixed read/write queues */
-		set->map[HCTX_TYPE_DEFAULT].nr_queues =
-			ctrl->ctrl.opts->nr_io_queues;
 		set->map[HCTX_TYPE_READ].queue_offset = 0;
 	}
 	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1799,12 +1821,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 
 	if (ctrl->ctrl.opts->nr_poll_queues) {
 		set->map[HCTX_TYPE_POLL].nr_queues =
-				ctrl->ctrl.opts->nr_poll_queues;
+				ctrl->io_queues[HCTX_TYPE_POLL];
 		set->map[HCTX_TYPE_POLL].queue_offset =
-				ctrl->ctrl.opts->nr_io_queues;
+				ctrl->io_queues[HCTX_TYPE_DEFAULT];
 		if (ctrl->ctrl.opts->nr_write_queues)
 			set->map[HCTX_TYPE_POLL].queue_offset +=
-				ctrl->ctrl.opts->nr_write_queues;
+				ctrl->io_queues[HCTX_TYPE_READ];
 		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
 	}
 	return 0;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 265a0543b381..5f0a00425242 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1948,20 +1948,23 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
 	struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
 	struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 
-	dev_dbg(ctrl->ctrl.device,
+	dev_warn(ctrl->ctrl.device,
 		"queue %d: timeout request %#x type %d\n",
-		nvme_tcp_queue_id(req->queue), rq->tag,
-		pdu->hdr.type);
+		nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
 
 	if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
-		union nvme_result res = {};
-
-		nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
-		nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
+		/*
+		 * Teardown immediately if controller times out while starting
+		 * or we are already started error recovery. all outstanding
+		 * requests are completed on shutdown, so we return BLK_EH_DONE.
+		 */
+		flush_work(&ctrl->err_work);
+		nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
+		nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
 		return BLK_EH_DONE;
 	}
 
-	/* queue error recovery */
+	dev_warn(ctrl->ctrl.device, "starting error recovery\n");
 	nvme_tcp_error_recovery(&ctrl->ctrl);
 
 	return BLK_EH_RESET_TIMER;
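
For readers following the pci.c hunks above, the following is a minimal standalone sketch (not kernel code; calc_split() and the split[] array are invented names for illustration) of the vector accounting they implement: whenever more than one vector is available, one IRQ vector is reserved for the admin queue, and the remainder is divided between the write (DEFAULT) and read queue sets.

    /* Illustrative only: mirrors the post-patch split in nvme_calc_io_queues(). */
    #include <stdio.h>

    enum { TYPE_DEFAULT, TYPE_READ };

    static void calc_split(unsigned int irq_queues, unsigned int write_queues,
                           unsigned int split[2])
    {
            unsigned int w = write_queues;

            if (irq_queues <= 2) {          /* one shared vector, or one I/O + admin */
                    split[TYPE_DEFAULT] = 1;
                    split[TYPE_READ] = 0;
                    return;
            }
            if (w >= irq_queues)            /* leave room for a read queue and the admin queue */
                    w = irq_queues - 2;
            if (!w) {                       /* reads and writes share one queue set */
                    split[TYPE_DEFAULT] = irq_queues - 1;
                    split[TYPE_READ] = 0;
            } else {
                    split[TYPE_DEFAULT] = w;
                    split[TYPE_READ] = irq_queues - w - 1;
            }
    }

    int main(void)
    {
            unsigned int split[2];

            calc_split(8, 3, split);        /* 8 vectors, write_queues=3 */
            printf("default=%u read=%u (+1 admin vector)\n",
                   split[TYPE_DEFAULT], split[TYPE_READ]);   /* default=3 read=4 */
            return 0;
    }

With 8 vectors and write_queues=3 the sketch yields 3 write queues, 4 read queues and 1 admin vector, which is the behaviour the pci.c changes are aiming for once irq_queues is made to cover the admin queue.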