author:    James Smart <jsmart2021@gmail.com>               2017-02-12 22:52:30 +0100
committer: Martin K. Petersen <martin.petersen@oracle.com>  2017-02-23 00:41:43 +0100
commit:    895427bd012ce5814fc9888c7c0ee9de44761833
tree:      307a6d5500f676e5df31b8120a3c5986d0636eba /drivers/scsi/lpfc/lpfc_scsi.c
parent:    scsi: lpfc: refactor debugfs queue dump routines
scsi: lpfc: NVME Initiator: Base modifications
This patch adds base modifications for NVME initiator support.
The base modifications consist of:
- Formal split of SLI-3 rings from SLI-4 WQs (sometimes also referred
  to as rings), since the implementations now differ widely between
  the two.
- Addition of configuration modes:
  SCSI initiator only; NVME initiator only; NVME target only; and
  SCSI and NVME initiator.
  The configuration mode drives the overall adapter configuration,
  the offloads enabled, and the resource splits.
  NVME support is only available on SLI-4 devices with newer firmware
  (a sketch of how a mode gates a protocol path follows this list).
- Implements the following based on configuration mode:
  - Exchange resources are split by protocol; if only one mode is
    enabled, no split occurs. The default is 50/50, and a module
    attribute allows tuning (see the split sketch after this list).
  - Pools and config parameters are separated per-protocol.
  - Each protocol has its own set of queues, but the protocols share
    interrupt vectors.
    SCSI:
      SLI-3 devices have few queues, and the original style of queue
      allocation remains.
      SLI-4 devices piggy-back on an "io-channel" concept that
      eventually needs to merge with scsi-mq/blk-mq support (that
      work is underway). For now, the paradigm continues as it
      existed before: an io channel allocates N MSI-X vectors and
      N WQs (N=4 by default) and schedules either round-robin or by
      CPU number modulo N. Several module parameters allow the
      configuration to be tuned.
    NVME (initiator):
      Allocates an MSI-X vector per CPU (or however many
      pci_alloc_irq_vectors grants), allocates a WQ per CPU, and
      maps the WQs to MSI-X vectors on a WQ-number modulo
      vector-count basis (see the mapping sketch after this list).
      Module parameters exist to cap/control the configuration if
      desired.
- Each protocol has its own buffer and dma pools.
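To make the mode gating concrete: the check below is taken verbatim
from this diff, where FCP-only teardown paths now return early when
the SCSI (FCP) mode is not enabled; the surrounding function bodies
are elided here.

    /* Bail out of an FCP-only path when SCSI is not an enabled mode.
     * cfg_enable_fc4_type and LPFC_ENABLE_FCP appear in this patch.
     */
    if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
            return;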
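A minimal sketch of the exchange split, assuming a stand-in xri_split
percentage for the tuning attribute; the real attribute name and
rounding in the driver may differ, and LPFC_ENABLE_NVME below is
assumed by analogy with the LPFC_ENABLE_FCP flag visible in this diff.

    /* Sketch only: divide a pool of total_xri exchanges between the
     * protocols; xri_split stands in for the module attribute, with
     * the default of 50 yielding the 50/50 split described above.
     */
    static void lpfc_xri_split_sketch(uint32_t total_xri, uint32_t xri_split,
                                      uint32_t *scsi_xri, uint32_t *nvme_xri)
    {
            *scsi_xri = (total_xri * xri_split) / 100;
            *nvme_xri = total_xri - *scsi_xri;
    }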
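And a sketch of the queue-to-vector scheduling, assuming nwqs work
queues and nvecs MSI-X vectors (hypothetical names and helper); the
SCSI io-channel side applies the same modulo policy with N=4 by
default.

    /* Sketch only: wrap per-cpu WQs onto the (possibly smaller) set
     * of MSI-X vectors on a WQ # modulo vector count basis.
     */
    static void lpfc_wq_map_sketch(unsigned int nwqs, unsigned int nvecs,
                                   unsigned int *wq_to_vec)
    {
            unsigned int wq;

            for (wq = 0; wq < nwqs; wq++)
                    wq_to_vec[wq] = wq % nvecs;
    }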
I apologize for the size of the patch.
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/lpfc/lpfc_scsi.c')
 drivers/scsi/lpfc/lpfc_scsi.c | 101 ++++++++++++++++++++++++++++++----------
 1 file changed, 76 insertions(+), 25 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index bd7b75dbb97e..c327fc7b1a54 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -413,7 +413,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
 		 * necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
+		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 					GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -424,8 +424,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq. */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-				psb->data, psb->dma_handle);
+			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
 		}
@@ -522,6 +522,8 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
 	struct lpfc_scsi_buf *psb, *next_psb;
 	unsigned long iflag = 0;
 
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+		return;
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
 	list_for_each_entry_safe(psb, next_psb,
@@ -554,8 +556,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 	int i;
 	struct lpfc_nodelist *ndlp;
 	int rrq_empty = 0;
-	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
 
+	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+		return;
 	spin_lock_irqsave(&phba->hbalock, iflag);
 	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
 	list_for_each_entry_safe(psb, next_psb,
@@ -819,7 +823,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 * for the struct fcp_cmnd, struct fcp_rsp and the number
 		 * of bde's necessary to support the sg_tablesize.
 		 */
-		psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
+		psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
 						GFP_KERNEL, &psb->dma_handle);
 		if (!psb->data) {
 			kfree(psb);
@@ -832,7 +836,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		 */
 		if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
 			(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
-			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
 				psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
@@ -841,8 +845,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
-			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-				psb->data, psb->dma_handle);
+			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				psb->data, psb->dma_handle);
 			kfree(psb);
 			break;
 		}
@@ -850,8 +854,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 		/* Allocate iotag for psb->cur_iocbq.
 		 */
 		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
 		if (iotag == 0) {
-			pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-				psb->data, psb->dma_handle);
+			pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+				psb->data, psb->dma_handle);
 			kfree(psb);
 			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
 					"3368 Failed to allocate IOTAG for"
@@ -920,7 +924,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
 			phba->sli4_hba.scsi_xri_cnt++;
 			spin_unlock_irq(&phba->scsi_buf_list_get_lock);
 	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_BG,
+	lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
 			"3021 Allocate %d out of %d requested new SCSI "
 			"buffers\n", bcnt, num_to_alloc);
@@ -3925,6 +3929,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	struct Scsi_Host *shost;
 	uint32_t logit = LOG_FCP;
 
+	phba->fc4ScsiIoCmpls++;
+
 	/* Sanity check on return of outstanding command */
 	cmd = lpfc_cmd->pCmd;
 	if (!cmd)
@@ -4242,19 +4248,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 					vport->cfg_first_burst_size;
 			}
 			fcp_cmnd->fcpCntl3 = WRITE_DATA;
-			phba->fc4OutputRequests++;
+			phba->fc4ScsiOutputRequests++;
 		} else {
 			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
 			iocb_cmd->ulpPU = PARM_READ_CHECK;
 			fcp_cmnd->fcpCntl3 = READ_DATA;
-			phba->fc4InputRequests++;
+			phba->fc4ScsiInputRequests++;
 		}
 	} else {
 		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
 		iocb_cmd->un.fcpi.fcpi_parm = 0;
 		iocb_cmd->ulpPU = 0;
 		fcp_cmnd->fcpCntl3 = 0;
-		phba->fc4ControlRequests++;
+		phba->fc4ScsiControlRequests++;
 	}
 	if (phba->sli_rev == 3 &&
 	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4468,7 +4474,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
 	unsigned long poll_tmo_expires =
 		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
 
-	if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+	if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
 		mod_timer(&phba->fcp_poll_timer,
 			  poll_tmo_expires);
 }
@@ -4498,7 +4504,7 @@ void lpfc_poll_timeout(unsigned long ptr)
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_handle_fast_ring_event(phba,
-			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
 
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
@@ -4562,7 +4568,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	if (lpfc_cmd == NULL) {
 		lpfc_rampdown_queue_depth(phba);
 
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC,
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
 				 "0707 driver's buffer pool is empty, "
 				 "IO busied\n");
 		goto out_host_busy;
@@ -4637,7 +4643,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
 	}
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_handle_fast_ring_event(phba,
-			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
 
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
@@ -4682,7 +4688,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	IOCB_t *cmd, *icmd;
 	int ret = SUCCESS, status = 0;
 	struct lpfc_sli_ring *pring_s4;
-	int ring_number, ret_val;
+	int ret_val;
 	unsigned long flags, iflags;
 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -4770,7 +4776,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	icmd->ulpClass = cmd->ulpClass;
 
 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
-	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+	abtsiocb->hba_wqidx = iocb->hba_wqidx;
 	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
 	if (iocb->iocb_flag & LPFC_IO_FOF)
 		abtsiocb->iocb_flag |= LPFC_IO_FOF;
@@ -4783,8 +4789,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
 	abtsiocb->vport = vport;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
-		pring_s4 = &phba->sli.ring[ring_number];
+		pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
+		if (pring_s4 == NULL) {
+			ret = FAILED;
+			goto out_unlock;
+		}
 		/* Note: both hbalock and ring_lock must be set here */
 		spin_lock_irqsave(&pring_s4->ring_lock, iflags);
 		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
@@ -4806,7 +4815,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
 	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 		lpfc_sli_handle_fast_ring_event(phba,
-			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
 
 wait_for_cmpl:
 	lpfc_cmd->waitq = &waitq;
@@ -5106,7 +5115,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
 	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
 	if (cnt)
 		lpfc_sli_abort_taskmgmt(vport,
-					&phba->sli.ring[phba->sli.fcp_ring],
+					&phba->sli.sli3_ring[LPFC_FCP_RING],
 					tgt_id, lun_id, context);
 	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
 	while (time_after(later, jiffies) && cnt) {
@@ -5535,7 +5544,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
 	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
 		lpfc_sli_handle_fast_ring_event(phba,
-			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
 		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
 			lpfc_poll_rearm_timer(phba);
 	}
@@ -5899,6 +5908,48 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
 	return false;
 }
 
+static int
+lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+{
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static int
+lpfc_no_handler(struct scsi_cmnd *cmnd)
+{
+	return FAILED;
+}
+
+static int
+lpfc_no_slave(struct scsi_device *sdev)
+{
+	return -ENODEV;
+}
+
+struct scsi_host_template lpfc_template_nvme = {
+	.module			= THIS_MODULE,
+	.name			= LPFC_DRIVER_NAME,
+	.proc_name		= LPFC_DRIVER_NAME,
+	.info			= lpfc_info,
+	.queuecommand		= lpfc_no_command,
+	.eh_abort_handler	= lpfc_no_handler,
+	.eh_device_reset_handler = lpfc_no_handler,
+	.eh_target_reset_handler = lpfc_no_handler,
+	.eh_bus_reset_handler	= lpfc_no_handler,
+	.eh_host_reset_handler	= lpfc_no_handler,
+	.slave_alloc		= lpfc_no_slave,
+	.slave_configure	= lpfc_no_slave,
+	.scan_finished		= lpfc_scan_finished,
+	.this_id		= -1,
+	.sg_tablesize		= 1,
+	.cmd_per_lun		= 1,
+	.use_clustering		= ENABLE_CLUSTERING,
+	.shost_attrs		= lpfc_hba_attrs,
+	.max_sectors		= 0xFFFF,
+	.vendor_id		= LPFC_NL_VENDOR_ID,
+	.track_queue_depth	= 0,
+};
+
 struct scsi_host_template lpfc_template_s3 = {
 	.module			= THIS_MODULE,
 	.name			= LPFC_DRIVER_NAME,