Diffstat (limited to 'drivers/scsi/lpfc/lpfc_sli.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_sli.c	| 350 +++++++++++++-----------------------
1 file changed, 127 insertions(+), 223 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 57b4a463b589..2acda188b0dc 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -87,9 +87,6 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                      struct lpfc_eqe *eqe);
 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
-static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
-                                   struct lpfc_sli_ring *pring,
-                                   struct lpfc_iocbq *cmdiocb);
 
 static IOCB_t *
 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -151,7 +148,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
         /* sanity check on queue memory */
         if (unlikely(!q))
                 return -ENOMEM;
-        temp_wqe = q->qe[q->host_index].wqe;
+        temp_wqe = lpfc_sli4_qe(q, q->host_index);
 
         /* If the host has not yet processed the next entry then we are done */
         idx = ((q->host_index + 1) % q->entry_count);
@@ -271,7 +268,7 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
         /* sanity check on queue memory */
         if (unlikely(!q))
                 return -ENOMEM;
-        temp_mqe = q->qe[q->host_index].mqe;
+        temp_mqe = lpfc_sli4_qe(q, q->host_index);
 
         /* If the host has not yet processed the next entry then we are done */
         if (((q->host_index + 1) % q->entry_count) == q->hba_index)
@@ -331,7 +328,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
         /* sanity check on queue memory */
         if (unlikely(!q))
                 return NULL;
-        eqe = q->qe[q->host_index].eqe;
+        eqe = lpfc_sli4_qe(q, q->host_index);
 
         /* If the next EQE is not valid then we are done */
         if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
@@ -355,7 +352,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
  * @q: The Event Queue to disable interrupts
  *
  **/
-inline void
+void
 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
 {
         struct lpfc_register doorbell;
@@ -374,7 +371,7 @@ lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
  * @q: The Event Queue to disable interrupts
  *
  **/
-inline void
+void
 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
 {
         struct lpfc_register doorbell;
@@ -545,7 +542,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
         /* sanity check on queue memory */
         if (unlikely(!q))
                 return NULL;
-        cqe = q->qe[q->host_index].cqe;
+        cqe = lpfc_sli4_qe(q, q->host_index);
 
         /* If the next CQE is not valid then we are done */
         if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
@@ -667,8 +664,8 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 return -ENOMEM;
         hq_put_index = hq->host_index;
         dq_put_index = dq->host_index;
-        temp_hrqe = hq->qe[hq_put_index].rqe;
-        temp_drqe = dq->qe[dq_put_index].rqe;
+        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
+        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
 
         if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                 return -EINVAL;
@@ -907,10 +904,10 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)
                 mod_timer(&phba->rrq_tmr, next_time);
         list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                 list_del(&rrq->list);
-                if (!rrq->send_rrq)
+                if (!rrq->send_rrq) {
                         /* this call will free the rrq */
-                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-                else if (lpfc_send_rrq(phba, rrq)) {
+                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
+                } else if (lpfc_send_rrq(phba, rrq)) {
                         /* if we send the rrq then the completion handler
                          *  will clear the bit in the xribitmap.
                          */
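Note on the hunks above: every direct q->qe[host_index] access is funneled through the new lpfc_sli4_qe() accessor because queue entries now live in per-page tables instead of one flat entry array. The accessor itself is added outside this file (in the SLI4 header), so it never appears in this diff. The stand-alone sketch below is an assumption reconstructed from the call sites and from the q_pgs/entry_cnt_per_pg fields initialized later in lpfc_sli4_queue_alloc(); it is not the driver's literal code.

    /* Sketch only: names mirror the lpfc fields used in this patch, but this
     * is a user-space illustration of the paged lookup, not driver code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct demo_queue {
            uint32_t entry_size;       /* bytes per queue entry */
            uint32_t entry_cnt_per_pg; /* entries that fit in one DMA page */
            void **q_pgs;              /* one virtual address per page */
    };

    /* Pick the page, then the offset inside it. */
    static void *demo_qe(struct demo_queue *q, uint16_t idx)
    {
            return (char *)q->q_pgs[idx / q->entry_cnt_per_pg] +
                   (size_t)q->entry_size * (idx % q->entry_cnt_per_pg);
    }

    int main(void)
    {
            struct demo_queue q = { .entry_size = 64,
                                    .entry_cnt_per_pg = 4096 / 64 };
            void *pages[2] = { malloc(4096), malloc(4096) };

            q.q_pgs = pages;
            /* entry 70 lands on page 70/64 = 1, offset 70%64 = 6 entries in */
            printf("entry 70 at %p (page 1 + %u bytes)\n",
                   demo_qe(&q, 70), 6 * q.entry_size);
            free(pages[0]);
            free(pages[1]);
            return 0;
    }

The payoff is that a queue no longer needs one large contiguous array with a slot per entry; only the small page-pointer table rides along in the structure.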
@@ -2502,8 +2499,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                         } else {
                                 ndlp->nlp_flag &= ~NLP_UNREG_INP;
                         }
+                        pmb->ctx_ndlp = NULL;
                 }
-                pmb->ctx_ndlp = NULL;
         }
 
         /* Check security permission status on INIT_LINK mailbox command */
@@ -3922,33 +3919,6 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 }
 
 /**
- * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues an abort iocb for all the iocb commands
- * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
- * the return of this function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
-        LIST_HEAD(completions);
-        struct lpfc_iocbq *iocb, *next_iocb;
-
-        if (pring->ringno == LPFC_ELS_RING)
-                lpfc_fabric_abort_hba(phba);
-
-        spin_lock_irq(&phba->hbalock);
-        /* Next issue ABTS for everything on the txcmplq */
-        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-                lpfc_sli4_abort_nvme_io(phba, pring, iocb);
-        spin_unlock_irq(&phba->hbalock);
-}
-
-
-/**
  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
@@ -3978,33 +3948,6 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
- * @phba: Pointer to HBA context object.
- *
- * This function aborts all wqes in NVME rings. This function issues an
- * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
- * the txcmplq is not guaranteed to complete before the return of this
- * function. The caller is not required to hold any locks.
- **/
-void
-lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
-{
-        struct lpfc_sli_ring  *pring;
-        uint32_t i;
-
-        if ((phba->sli_rev < LPFC_SLI_REV4) ||
-            !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-                return;
-
-        /* Abort all IO on each NVME ring. */
-        for (i = 0; i < phba->cfg_hdw_queue; i++) {
-                pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
-                lpfc_sli_abort_wqe_ring(phba, pring);
-        }
-}
-
-
-/**
  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
  * @phba: Pointer to HBA context object.
  *
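Both deleted helpers walked pring->txcmplq with list_for_each_entry_safe() while the abort path could unlink the current entry. As a stand-alone refresher (plain C with a simplified singly linked list, not the kernel's <linux/list.h> types), the "_safe" idiom just caches the next node before the current one can go away:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int tag;
            struct node *next;
    };

    int main(void)
    {
            /* build a 3-node list: 0 -> 1 -> 2 */
            struct node *head = NULL;
            for (int i = 2; i >= 0; i--) {
                    struct node *n = malloc(sizeof(*n));
                    n->tag = i;
                    n->next = head;
                    head = n;
            }

            /* "safe" traversal: grab the successor before freeing the
             * current node, mirroring list_for_each_entry_safe() over a
             * txcmplq whose entries are removed mid-walk. */
            struct node *n, *next;
            for (n = head; n; n = next) {
                    next = n->next;              /* cached before free() */
                    printf("aborting iocb tag %d\n", n->tag);
                    free(n);                     /* entry leaves the list */
            }
            return 0;
    }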
@@ -4487,7 +4430,9 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
         }
 
         /* Turn off parity checking and serr during the physical reset */
-        pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+        if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
+                return -EIO;
+
         pci_write_config_word(phba->pcidev, PCI_COMMAND,
                               (cfg_value &
                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
@@ -4564,7 +4509,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
                         "0389 Performing PCI function reset!\n");
 
         /* Turn off parity checking and serr during the physical reset */
-        pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+        if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
+                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                "3205 PCI read Config failed\n");
+                return -EIO;
+        }
+
         pci_write_config_word(phba->pcidev, PCI_COMMAND,
                               (cfg_value &
                                ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
@@ -5395,7 +5345,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 }
 
 /**
- * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine retrieves SLI4 device physical port name this PCI function
@@ -5403,40 +5353,30 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
  *
  * Return codes
  *      0 - successful
- *      otherwise - failed to retrieve physical port name
+ *      otherwise - failed to retrieve controller attributes
  **/
 static int
-lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
 {
         LPFC_MBOXQ_t *mboxq;
         struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
         struct lpfc_controller_attribute *cntl_attr;
-        struct lpfc_mbx_get_port_name *get_port_name;
         void *virtaddr = NULL;
         uint32_t alloclen, reqlen;
         uint32_t shdr_status, shdr_add_status;
         union lpfc_sli4_cfg_shdr *shdr;
-        char cport_name = 0;
         int rc;
 
-        /* We assume nothing at this point */
-        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
-        phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
-
         mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
         if (!mboxq)
                 return -ENOMEM;
-        /* obtain link type and link number via READ_CONFIG */
-        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
-        lpfc_sli4_read_config(phba);
-        if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
-                goto retrieve_ppname;
 
-        /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+        /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
         reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
         alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES,
                         reqlen, LPFC_SLI4_MBX_NEMBED);
+
         if (alloclen < reqlen) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                 "3084 Allocated DMA memory size (%d) is "
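Both brdreset paths now bail out with -EIO when pci_read_config_word() fails instead of masking bits in a possibly-uninitialized cfg_value. The PARITY/SERR quiesce itself is a plain read-modify-write on the PCI command word. A minimal user-space illustration of that masking, using the real bit values from include/uapi/linux/pci_regs.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Same values as PCI_COMMAND_PARITY / PCI_COMMAND_SERR in pci_regs.h */
    #define PCI_COMMAND_PARITY 0x040
    #define PCI_COMMAND_SERR   0x100

    int main(void)
    {
            uint16_t cfg_value = 0x0547;   /* as if read from config space */
            uint16_t quiesced = cfg_value &
                    (uint16_t)~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

            printf("while reset runs: 0x%04x (PERR/SERR cleared)\n", quiesced);
            printf("restored after:   0x%04x (original command word)\n",
                   cfg_value);
            return 0;
    }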
@@ -5462,16 +5402,71 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
                 rc = -ENXIO;
                 goto out_free_mboxq;
         }
+
         cntl_attr = &mbx_cntl_attr->cntl_attr;
         phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
         phba->sli4_hba.lnk_info.lnk_tp =
                 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
         phba->sli4_hba.lnk_info.lnk_no =
                 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+
+        memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
+        strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+                sizeof(phba->BIOSVersion));
+
         lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-                        "3086 lnk_type:%d, lnk_numb:%d\n",
+                        "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
                         phba->sli4_hba.lnk_info.lnk_tp,
-                        phba->sli4_hba.lnk_info.lnk_no);
+                        phba->sli4_hba.lnk_info.lnk_no,
+                        phba->BIOSVersion);
+out_free_mboxq:
+        if (rc != MBX_TIMEOUT) {
+                if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
+                        lpfc_sli4_mbox_cmd_free(phba, mboxq);
+                else
+                        mempool_free(mboxq, phba->mbox_mem_pool);
+        }
+        return rc;
+}
+
+/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves SLI4 device physical port name this PCI function
+ * is attached to.
+ *
+ * Return codes
+ *      0 - successful
+ *      otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+        LPFC_MBOXQ_t *mboxq;
+        struct lpfc_mbx_get_port_name *get_port_name;
+        uint32_t shdr_status, shdr_add_status;
+        union lpfc_sli4_cfg_shdr *shdr;
+        char cport_name = 0;
+        int rc;
+
+        /* We assume nothing at this point */
+        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+        phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+        mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+        if (!mboxq)
+                return -ENOMEM;
+        /* obtain link type and link number via READ_CONFIG */
+        phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+        lpfc_sli4_read_config(phba);
+        if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+                goto retrieve_ppname;
+
+        /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+        rc = lpfc_sli4_get_ctl_attr(phba);
+        if (rc)
+                goto out_free_mboxq;
 
 retrieve_ppname:
         lpfc_sli4_config(phba, mboxq,
                 LPFC_MBOX_SUBSYSTEM_COMMON,
@@ -7047,7 +7042,7 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
  *
  * Returns: 0 = success, non-zero failure.
  **/
-int
+static int
 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
 {
         LIST_HEAD(post_nblist);
@@ -7067,7 +7062,7 @@ lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
         return rc;
 }
 
-void
+static void
 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 {
         uint32_t len;
@@ -7250,6 +7245,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                         "3080 Successful retrieving SLI4 device "
                         "physical port name: %s.\n", phba->Port);
 
+        rc = lpfc_sli4_get_ctl_attr(phba);
+        if (!rc)
+                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                                "8351 Successful retrieving SLI4 device "
+                                "CTL ATTR\n");
+
         /*
          * Evaluate the read rev and vpd data. Populate the driver
          * state with the results.  If this routine fails, the failure
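The BIOSVersion copy in the new lpfc_sli4_get_ctl_attr() uses memset() followed by strlcat(), so the destination stays NUL terminated even when the firmware string has to be truncated. A stand-alone sketch of that bounded-copy behavior; my_strlcat here is only a local stand-in for the kernel/BSD strlcat(), which older glibc does not ship:

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcat in the BSD/kernel style: bounded, always terminates. */
    static size_t my_strlcat(char *dst, const char *src, size_t size)
    {
            size_t dlen = strlen(dst);
            size_t i;

            for (i = 0; src[i] != '\0' && dlen + i < size - 1; i++)
                    dst[dlen + i] = src[i];
            dst[dlen + i] = '\0';
            return dlen + strlen(src);   /* total length it tried to build */
    }

    int main(void)
    {
            char bios_ver_str[] = "12.2.0.0-rel4"; /* longer than dest */
            char BIOSVersion[8];

            /* zero first, so the cat starts from an empty, terminated dst */
            memset(BIOSVersion, 0, sizeof(BIOSVersion));
            my_strlcat(BIOSVersion, bios_ver_str, sizeof(BIOSVersion));
            printf("%s\n", BIOSVersion);   /* "12.2.0." - truncated safely */
            return 0;
    }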
@@ -7652,12 +7653,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
                 phba->cfg_xri_rebalancing = 0;
         }
 
-        /* Arm the CQs and then EQs on device */
-        lpfc_sli4_arm_cqeq_intr(phba);
-
-        /* Indicate device interrupt mode */
-        phba->sli4_hba.intr_enable = 1;
-
         /* Allow asynchronous mailbox command to go through */
         spin_lock_irq(&phba->hbalock);
         phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
@@ -7726,6 +7721,12 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
         phba->trunk_link.link3.state = LPFC_LINK_DOWN;
         spin_unlock_irq(&phba->hbalock);
 
+        /* Arm the CQs and then EQs on device */
+        lpfc_sli4_arm_cqeq_intr(phba);
+
+        /* Indicate device interrupt mode */
+        phba->sli4_hba.intr_enable = 1;
+
         if (!(phba->hba_flag & HBA_FCOE_MODE) &&
             (phba->hba_flag & LINK_DISABLED)) {
                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
@@ -7820,8 +7821,9 @@ lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
         mcq = phba->sli4_hba.mbx_cq;
         idx = mcq->hba_index;
         qe_valid = mcq->qe_valid;
-        while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) {
-                mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
+        while (bf_get_le32(lpfc_cqe_valid,
+               (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
+                mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
                 if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
                     (!bf_get_le32(lpfc_trailer_async, mcqe))) {
                         pending_completions = true;
@@ -8500,7 +8502,7 @@ lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
                 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
                 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
                 if (!db_ready)
-                        msleep(2);
+                        mdelay(2);
 
                 if (time_after(jiffies, timeout))
                         return MBXERR_ERROR;
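The msleep(2) to mdelay(2) change in lpfc_sli4_wait_bmbx_ready() matters because the bootstrap-mailbox wait can be reached from atomic context, where sleeping is illegal; mdelay() busy-waits instead. The loop shape itself is the usual poll-until-ready-or-deadline pattern, sketched here in user space (db_ready_read() is a hypothetical stand-in for the doorbell register read, and nanosleep stands in for the delay, since user space is allowed to sleep):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical stand-in: reports "ready" on the fifth poll. */
    static bool db_ready_read(int *attempts)
    {
            return ++(*attempts) >= 5;
    }

    int main(void)
    {
            struct timespec ts = { .tv_sec = 0, .tv_nsec = 2 * 1000 * 1000 };
            time_t deadline = time(NULL) + 2;  /* analogue of the jiffies
                                                * timeout in the driver */
            int attempts = 0;

            for (;;) {
                    if (db_ready_read(&attempts)) {
                            printf("ready after %d polls\n", attempts);
                            return 0;
                    }
                    if (time(NULL) > deadline) {
                            fprintf(stderr, "timeout\n");
                            return 1;
                    }
                    nanosleep(&ts, NULL);  /* kernel atomic context cannot
                                            * sleep here, hence mdelay(2) */
            }
    }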
@@ -11264,102 +11266,6 @@ abort_iotag_exit:
 }
 
 /**
- * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- * @cmdiocb: Pointer to driver command iocb object.
- *
- * This function issues an abort iocb for the provided command iocb down to
- * the port. Other than the case the outstanding command iocb is an abort
- * request, this function issues abort out unconditionally. This function is
- * called with hbalock held. The function returns 0 when it fails due to
- * memory allocation failure or when the command iocb is an abort request.
- **/
-static int
-lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-                        struct lpfc_iocbq *cmdiocb)
-{
-        struct lpfc_vport *vport = cmdiocb->vport;
-        struct lpfc_iocbq *abtsiocbp;
-        union lpfc_wqe128 *abts_wqe;
-        int retval;
-        int idx = cmdiocb->hba_wqidx;
-
-        /*
-         * There are certain command types we don't want to abort. And we
-         * don't want to abort commands that are already in the process of
-         * being aborted.
-         */
-        if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-            cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
-            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
-                return 0;
-
-        /* issue ABTS for this io based on iotag */
-        abtsiocbp = __lpfc_sli_get_iocbq(phba);
-        if (abtsiocbp == NULL)
-                return 0;
-
-        /* This signals the response to set the correct status
-         * before calling the completion handler
-         */
-        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
-
-        /* Complete prepping the abort wqe and issue to the FW. */
-        abts_wqe = &abtsiocbp->wqe;
-
-        /* Clear any stale WQE contents */
-        memset(abts_wqe, 0, sizeof(union lpfc_wqe));
-        bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
-
-        /* word 7 */
-        bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
-        bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
-               cmdiocb->iocb.ulpClass);
-
-        /* word 8 - tell the FW to abort the IO associated with this
-         * outstanding exchange ID.
-         */
-        abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
-
-        /* word 9 - this is the iotag for the abts_wqe completion. */
-        bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
-               abtsiocbp->iotag);
-
-        /* word 10 */
-        bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
-        bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
-
-        /* word 11 */
-        bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
-        bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
-        bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
-
-        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
-        abtsiocbp->iocb_flag |= LPFC_IO_NVME;
-        abtsiocbp->vport = vport;
-        abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
-        retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
-                                     abtsiocbp);
-        if (retval) {
-                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
-                                 "6147 Failed abts issue_wqe with status x%x "
-                                 "for oxid x%x\n",
-                                 retval, cmdiocb->sli4_xritag);
-                lpfc_sli_release_iocbq(phba, abtsiocbp);
-                return retval;
-        }
-
-        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
-                         "6148 Drv Abort NVME Request Issued for "
-                         "ox_id x%x on reqtag x%x\n",
-                         cmdiocb->sli4_xritag,
-                         abtsiocbp->iotag);
-
-        return retval;
-}
-
-/**
  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
  * @phba: pointer to lpfc HBA data structure.
  *
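The removed routine assembled its abort WQE with the driver's bf_set() accessors, which write a value into a shift/mask-defined field of a queue-entry word. A toy, self-contained version of that idea; the field names, shifts, and values below are illustrative only, not lpfc's real WQE layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Each "field" is a (shift, mask) pair within a 32-bit WQE word. */
    #define WQE_CMND_SHIFT 0
    #define WQE_CMND_MASK  0x000000ff
    #define WQE_CQID_SHIFT 16
    #define WQE_CQID_MASK  0x0000ffff

    #define BF_SET(word, shift, mask, val) \
            ((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
                      (((uint32_t)(val) & (mask)) << (shift)))
    #define BF_GET(word, shift, mask) (((word) >> (shift)) & (mask))

    int main(void)
    {
            uint32_t wqe_word = 0;

            BF_SET(wqe_word, WQE_CMND_SHIFT, WQE_CMND_MASK, 0x81);
            BF_SET(wqe_word, WQE_CQID_SHIFT, WQE_CQID_MASK, 0x3fff);
            printf("word=0x%08x cmnd=0x%02x cqid=0x%04x\n", wqe_word,
                   BF_GET(wqe_word, WQE_CMND_SHIFT, WQE_CMND_MASK),
                   BF_GET(wqe_word, WQE_CQID_SHIFT, WQE_CQID_MASK));
            return 0;
    }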
@@ -13636,7 +13542,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                 "0390 Cannot schedule soft IRQ "
                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-                                cqid, cq->queue_id, smp_processor_id());
+                                cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14019,7 +13925,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                         return false;
                 }
 drop:
-                lpfc_in_buf_free(phba, &dma_buf->dbuf);
+                lpfc_rq_buf_free(phba, &dma_buf->hbuf);
                 break;
         case FC_STATUS_INSUFF_BUF_FRM_DISC:
                 if (phba->nvmet_support) {
@@ -14185,7 +14091,7 @@ work_cq:
                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                 "0363 Cannot schedule soft IRQ "
                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
-                                cqid, cq->queue_id, smp_processor_id());
+                                cqid, cq->queue_id, raw_smp_processor_id());
 }
 
 /**
@@ -14324,7 +14230,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
 
         eqi = phba->sli4_hba.eq_info;
         icnt = this_cpu_inc_return(eqi->icnt);
-        fpeq->last_cpu = smp_processor_id();
+        fpeq->last_cpu = raw_smp_processor_id();
 
         if (icnt > LPFC_EQD_ISR_TRIGGER &&
             phba->cfg_irq_chann == 1 &&
@@ -14410,6 +14316,9 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
         if (!queue)
                 return;
 
+        if (!list_empty(&queue->wq_list))
+                list_del(&queue->wq_list);
+
         while (!list_empty(&queue->page_list)) {
                 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
                                  list);
@@ -14425,9 +14334,6 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
         if (!list_empty(&queue->cpu_list))
                 list_del(&queue->cpu_list);
 
-        if (!list_empty(&queue->wq_list))
-                list_del(&queue->wq_list);
-
         kfree(queue);
         return;
 }
@@ -14438,6 +14344,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
  * @page_size: The size of a queue page
  * @entry_size: The size of each queue entry for this queue.
  * @entry count: The number of entries that this queue will handle.
+ * @cpu: The cpu that will primarily utilize this queue.
  *
  * This function allocates a queue structure and the DMAable memory used for
  * the host resident queue. This function must be called before creating the
@@ -14445,28 +14352,26 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
  **/
 struct lpfc_queue *
 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
-                      uint32_t entry_size, uint32_t entry_count)
+                      uint32_t entry_size, uint32_t entry_count, int cpu)
 {
         struct lpfc_queue *queue;
         struct lpfc_dmabuf *dmabuf;
-        int x, total_qe_count;
-        void *dma_pointer;
         uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+        uint16_t x, pgcnt;
 
         if (!phba->sli4_hba.pc_sli4_params.supported)
                 hw_page_size = page_size;
 
-        queue = kzalloc(sizeof(struct lpfc_queue) +
-                        (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
-        if (!queue)
-                return NULL;
-        queue->page_count = (ALIGN(entry_size * entry_count,
-                        hw_page_size))/hw_page_size;
+        pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
 
         /* If needed, Adjust page count to match the max the adapter supports */
-        if (phba->sli4_hba.pc_sli4_params.wqpcnt &&
-            (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt))
-                queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
+        if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
+                pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
+
+        queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
+                             GFP_KERNEL, cpu_to_node(cpu));
+        if (!queue)
+                return NULL;
 
         INIT_LIST_HEAD(&queue->list);
         INIT_LIST_HEAD(&queue->wq_list);
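The reworked allocation grabs the queue structure and its page-pointer table in a single kzalloc_node() call on the NUMA node of the CPU that will service the queue, then points q_pgs at the memory just past the struct. A user-space sketch of that single-allocation trailing-table layout, with calloc standing in for kzalloc_node (user space has no NUMA-node argument):

    #include <stdio.h>
    #include <stdlib.h>

    /* Mirrors the shape above: one allocation sized
     * sizeof(*queue) + sizeof(void *) * pgcnt, page table at &queue[1]. */
    struct demo_queue {
            unsigned int page_count;
            void **q_pgs;
    };

    int main(void)
    {
            unsigned int pgcnt = 4;
            struct demo_queue *queue;

            queue = calloc(1, sizeof(*queue) + sizeof(void *) * pgcnt);
            if (!queue)
                    return 1;

            queue->page_count = pgcnt;
            queue->q_pgs = (void **)&queue[1]; /* trailing storage, no
                                                * second allocation */
            for (unsigned int x = 0; x < pgcnt; x++)
                    queue->q_pgs[x] = malloc(4096); /* stand-in for the
                                                     * DMA-coherent pages */

            printf("struct at %p, page table at %p\n", (void *)queue,
                   (void *)queue->q_pgs);
            for (unsigned int x = 0; x < pgcnt; x++)
                    free(queue->q_pgs[x]);
            free(queue);
            return 0;
    }

One free of the struct releases the table with it, which is also why lpfc_sli4_queue_free() above only has to walk page_list and kfree(queue).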
@@ -14478,13 +14383,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
         /* Set queue parameters now.  If the system cannot provide memory
          * resources, the free routine needs to know what was allocated.
          */
+        queue->page_count = pgcnt;
+        queue->q_pgs = (void **)&queue[1];
+        queue->entry_cnt_per_pg = hw_page_size / entry_size;
         queue->entry_size = entry_size;
         queue->entry_count = entry_count;
         queue->page_size = hw_page_size;
         queue->phba = phba;
 
-        for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
-                dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+        for (x = 0; x < queue->page_count; x++) {
+                dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
+                                      dev_to_node(&phba->pcidev->dev));
                 if (!dmabuf)
                         goto out_fail;
                 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
@@ -14496,13 +14405,8 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
                 }
                 dmabuf->buffer_tag = x;
                 list_add_tail(&dmabuf->list, &queue->page_list);
-                /* initialize queue's entry array */
-                dma_pointer = dmabuf->virt;
-                for (; total_qe_count < entry_count &&
-                     dma_pointer < (hw_page_size + dmabuf->virt);
-                     total_qe_count++, dma_pointer += entry_size) {
-                        queue->qe[total_qe_count].address = dma_pointer;
-                }
+                /* use lpfc_sli4_qe to index a paritcular entry in this page */
+                queue->q_pgs[x] = dmabuf->virt;
         }
         INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
         INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
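Finally, the page count feeding that allocation comes from rounding the total entry footprint up to whole hardware pages: pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size. A quick self-contained check of that arithmetic; ALIGN_POW2 mirrors the kernel's ALIGN for power-of-two sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* Kernel-style ALIGN, valid when the alignment is a power of two. */
    #define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

    int main(void)
    {
            uint32_t entry_size = 64, entry_count = 500, hw_page_size = 4096;

            uint32_t bytes = entry_size * entry_count;          /* 32000 */
            uint32_t pgcnt = ALIGN_POW2(bytes, hw_page_size) / hw_page_size;

            printf("%u bytes -> %u pages of %u (last page partly used)\n",
                   bytes, pgcnt, hw_page_size);                 /* 8 pages */
            printf("entries per page: %u\n", hw_page_size / entry_size);
            return 0;
    }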