author     NeilBrown <neilb@suse.de>  2010-05-22 00:31:36 +0200
committer  NeilBrown <neilb@suse.de>  2010-05-22 00:31:36 +0200
commit     19fdb9eefb21b72edbc365b838502780c392bad6 (patch)
tree       deae04c48532d6eab64ed4b0396737bb854b5506 /drivers/scsi/lpfc
parent     md: don't insist on valid event count for spare devices. (diff)
parent     sysfs: Implement sysfs tagged directory support. (diff)
Merge commit '3ff195b011d7decf501a4d55aeed312731094796' into for-linus
Conflicts:
drivers/md/md.c
- Resolved conflict in md_update_sb
- Added extra 'NULL' arg to new instance of sysfs_get_dirent.
Signed-off-by: NeilBrown <neilb@suse.de>
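The second conflict note refers to the API change carried by the merged sysfs branch: sysfs_get_dirent() gains a namespace-tag parameter, and pre-existing untagged callers pass NULL for it. A minimal sketch of the resulting call pattern, assuming the post-merge three-argument signature (everything other than the sysfs calls is illustrative, not md.c's actual code):

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    /* Illustrative helper showing the extra NULL namespace-tag argument
     * that the conflict resolution had to add to the new call site. */
    static void notify_attr(struct kobject *kobj, const char *name)
    {
            struct sysfs_dirent *sd;

            sd = sysfs_get_dirent(kobj->sd, NULL, name); /* NULL = untagged */
            if (sd) {
                    sysfs_notify_dirent(sd);
                    sysfs_put(sd);
            }
    }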
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           |  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |  28
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c       | 835
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h       |  15
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |   8
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c        |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h      |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       | 161
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   | 603
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h        | 190
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h       |  60
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      | 459
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h    |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c      |  78
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c       |   1
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 100
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      | 203
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       | 681
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h       |   5
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h      |  43
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c     |  12
23 files changed, 2841 insertions(+), 661 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 84b696463a58..e35a4c71eb9a 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -37,6 +37,9 @@ struct lpfc_sli2_slim; the NameServer before giving up. */ #define LPFC_CMD_PER_LUN 3 /* max outstanding cmds per lun */ #define LPFC_DEFAULT_SG_SEG_CNT 64 /* sg element count per scsi cmnd */ +#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi + cmnd for menlo needs nearly twice as for firmware + downloads using bsg */ #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ #define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ @@ -307,7 +310,9 @@ struct lpfc_vport { #define FC_NLP_MORE 0x40 /* More node to process in node tbl */ #define FC_OFFLINE_MODE 0x80 /* Interface is offline for diag */ #define FC_FABRIC 0x100 /* We are fabric attached */ +#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */ #define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */ +#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/ #define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */ #define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */ #define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */ @@ -509,7 +514,6 @@ struct lpfc_hba { int (*lpfc_hba_down_link) (struct lpfc_hba *); - /* SLI4 specific HBA data structure */ struct lpfc_sli4_hba sli4_hba; @@ -552,6 +556,7 @@ struct lpfc_hba { struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; + uint32_t *mbox_ext; uint32_t *inb_ha_copy; uint32_t *inb_counter; uint32_t inb_last_counter; @@ -620,9 +625,13 @@ struct lpfc_hba { uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; uint32_t cfg_enable_bg; + uint32_t cfg_hostmem_hgp; uint32_t cfg_log_verbose; uint32_t cfg_aer_support; uint32_t cfg_suppress_link_up; +#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ +#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ +#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ lpfc_vpd_t vpd; /* vital product data */ @@ -804,6 +813,9 @@ struct lpfc_hba { struct list_head ct_ev_waiters; struct unsol_rcv_ct_ctx ct_ctx[64]; uint32_t ctx_idx; + + uint8_t menlo_flag; /* menlo generic flags */ +#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */ }; static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c992e8328f9e..2e5f376d9ccc 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -24,6 +24,7 @@ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/aer.h> +#include <linux/gfp.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -868,6 +869,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; + uint32_t max_vpi; /* * prevent udev from issuing mailbox commands until the port is @@ -915,11 +917,17 @@ lpfc_get_hba_info(struct lpfc_hba *phba, if (axri) *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - phba->sli4_hba.max_cfg_param.xri_used; + + /* Account for differences with SLI-3. Get vpi count from + * mailbox data and subtract one for max vpi value. + */ + max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ? 
+ (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0; + if (mvpi) - *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); + *mvpi = max_vpi; if (avpi) - *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - - phba->sli4_hba.max_cfg_param.vpi_used; + *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used; } else { if (mrpi) *mrpi = pmb->un.varRdConfig.max_rpi; @@ -1924,13 +1932,12 @@ MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" " 2 - select SLI-2 even on SLI-3 capable HBAs," " 3 - select SLI-3"); -int lpfc_enable_npiv = 0; +int lpfc_enable_npiv = 1; module_param(lpfc_enable_npiv, int, 0); MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); lpfc_param_show(enable_npiv); -lpfc_param_init(enable_npiv, 0, 0, 1); -static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, - lpfc_enable_npiv_show, NULL); +lpfc_param_init(enable_npiv, 1, 0, 1); +static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); /* # lpfc_suppress_link_up: Bring link up at initialization @@ -1939,7 +1946,9 @@ static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, # 0x2 = never bring up link # Default value is 0. */ -LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization"); +LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, + LPFC_DELAY_INIT_LINK_INDEFINITELY, + "Suppress Link Up at initialization"); /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear @@ -1966,8 +1975,7 @@ lpfc_nodev_tmo_show(struct device *dev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - int val = 0; - val = vport->cfg_devloss_tmo; + return snprintf(buf, PAGE_SIZE, "%d\n", vport->cfg_devloss_tmo); } diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index f3f1bf1a0a71..dcf088262b20 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/mempool.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/delay.h> #include <scsi/scsi.h> @@ -78,20 +79,39 @@ struct lpfc_bsg_iocb { struct lpfc_bsg_mbox { LPFC_MBOXQ_t *pmboxq; MAILBOX_t *mb; + struct lpfc_dmabuf *rxbmp; /* for BIU diags */ + struct lpfc_dmabufext *dmp; /* for BIU diags */ + uint8_t *ext; /* extended mailbox data */ + uint32_t mbOffset; /* from app */ + uint32_t inExtWLen; /* from app */ + uint32_t outExtWLen; /* from app */ /* job waiting for this mbox command to finish */ struct fc_bsg_job *set_job; }; +#define MENLO_DID 0x0000FC0E + +struct lpfc_bsg_menlo { + struct lpfc_iocbq *cmdiocbq; + struct lpfc_iocbq *rspiocbq; + struct lpfc_dmabuf *bmp; + + /* job waiting for this iocb to finish */ + struct fc_bsg_job *set_job; +}; + #define TYPE_EVT 1 #define TYPE_IOCB 2 #define TYPE_MBOX 3 +#define TYPE_MENLO 4 struct bsg_job_data { uint32_t type; union { struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb iocb; struct lpfc_bsg_mbox mbox; + struct lpfc_bsg_menlo menlo; } context_un; }; @@ -419,7 +439,7 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, dd_data = cmdiocbq->context1; /* normal completion and timeout crossed paths, already done */ if (!dd_data) { - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return; } @@ -1182,7 +1202,7 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, dd_data = cmdiocbq->context1; /* normal completion and timeout crossed paths, already done */ if (!dd_data) { - spin_unlock_irqrestore(&phba->hbalock, flags); + 
spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return; } @@ -1694,21 +1714,26 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (dmabuf) { dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys); - INIT_LIST_HEAD(&dmabuf->list); - bpl = (struct ulp_bde64 *) dmabuf->virt; - memset(bpl, 0, sizeof(*bpl)); - ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); - bpl->addrHigh = - le32_to_cpu(putPaddrHigh(dmabuf->phys + sizeof(*bpl))); - bpl->addrLow = - le32_to_cpu(putPaddrLow(dmabuf->phys + sizeof(*bpl))); - bpl->tus.f.bdeFlags = 0; - bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; - bpl->tus.w = le32_to_cpu(bpl->tus.w); + if (dmabuf->virt) { + INIT_LIST_HEAD(&dmabuf->list); + bpl = (struct ulp_bde64 *) dmabuf->virt; + memset(bpl, 0, sizeof(*bpl)); + ctreq = (struct lpfc_sli_ct_request *)(bpl + 1); + bpl->addrHigh = + le32_to_cpu(putPaddrHigh(dmabuf->phys + + sizeof(*bpl))); + bpl->addrLow = + le32_to_cpu(putPaddrLow(dmabuf->phys + + sizeof(*bpl))); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ; + bpl->tus.w = le32_to_cpu(bpl->tus.w); + } } if (cmdiocbq == NULL || rspiocbq == NULL || - dmabuf == NULL || bpl == NULL || ctreq == NULL) { + dmabuf == NULL || bpl == NULL || ctreq == NULL || + dmabuf->virt == NULL) { ret_val = ENOMEM; goto err_get_xri_exit; } @@ -1904,9 +1929,11 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); if (rxbmp != NULL) { rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); - INIT_LIST_HEAD(&rxbmp->list); - rxbpl = (struct ulp_bde64 *) rxbmp->virt; - rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); + if (rxbmp->virt) { + INIT_LIST_HEAD(&rxbmp->list); + rxbpl = (struct ulp_bde64 *) rxbmp->virt; + rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0); + } } if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) { @@ -2160,14 +2187,16 @@ lpfc_bsg_diag_test(struct fc_bsg_job *job) if (txbmp) { txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys); - INIT_LIST_HEAD(&txbmp->list); - txbpl = (struct ulp_bde64 *) txbmp->virt; - if (txbpl) + if (txbmp->virt) { + INIT_LIST_HEAD(&txbmp->list); + txbpl = (struct ulp_bde64 *) txbmp->virt; txbuffer = diag_cmd_data_alloc(phba, txbpl, full_size, 0); + } } - if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) { + if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer || + !txbmp->virt) { rc = -ENOMEM; goto err_loopback_test_exit; } @@ -2363,35 +2392,90 @@ void lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { struct bsg_job_data *dd_data; - MAILBOX_t *pmb; - MAILBOX_t *mb; struct fc_bsg_job *job; uint32_t size; unsigned long flags; + uint8_t *to; + uint8_t *from; spin_lock_irqsave(&phba->ct_ev_lock, flags); dd_data = pmboxq->context1; + /* job already timed out? 
*/ if (!dd_data) { spin_unlock_irqrestore(&phba->ct_ev_lock, flags); return; } - pmb = &dd_data->context_un.mbox.pmboxq->u.mb; - mb = dd_data->context_un.mbox.mb; + /* build the outgoing buffer to do an sg copy + * the format is the response mailbox followed by any extended + * mailbox data + */ + from = (uint8_t *)&pmboxq->u.mb; + to = (uint8_t *)dd_data->context_un.mbox.mb; + memcpy(to, from, sizeof(MAILBOX_t)); + if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) { + /* copy the extended data if any, count is in words */ + if (dd_data->context_un.mbox.outExtWLen) { + from = (uint8_t *)dd_data->context_un.mbox.ext; + to += sizeof(MAILBOX_t); + size = dd_data->context_un.mbox.outExtWLen * + sizeof(uint32_t); + memcpy(to, from, size); + } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) { + from = (uint8_t *)dd_data->context_un.mbox. + dmp->dma.virt; + to += sizeof(MAILBOX_t); + size = dd_data->context_un.mbox.dmp->size; + memcpy(to, from, size); + } else if ((phba->sli_rev == LPFC_SLI_REV4) && + (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) { + from = (uint8_t *)dd_data->context_un.mbox.dmp->dma. + virt; + to += sizeof(MAILBOX_t); + size = pmboxq->u.mb.un.varWords[5]; + memcpy(to, from, size); + } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) { + from = (uint8_t *)dd_data->context_un. + mbox.dmp->dma.virt; + to += sizeof(MAILBOX_t); + size = dd_data->context_un.mbox.dmp->size; + memcpy(to, from, size); + } + } + + from = (uint8_t *)dd_data->context_un.mbox.mb; job = dd_data->context_un.mbox.set_job; - memcpy(mb, pmb, sizeof(*pmb)); - size = job->request_payload.payload_len; + size = job->reply_payload.payload_len; job->reply->reply_payload_rcv_len = sg_copy_from_buffer(job->reply_payload.sg_list, job->reply_payload.sg_cnt, - mb, size); + from, size); job->reply->result = 0; + dd_data->context_un.mbox.set_job = NULL; job->dd_data = NULL; job->job_done(job); + /* need to hold the lock until we call job done to hold off + * the timeout handler returning to the midlayer while + * we are stillprocessing the job + */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + + kfree(dd_data->context_un.mbox.mb); mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool); - kfree(mb); + kfree(dd_data->context_un.mbox.ext); + if (dd_data->context_un.mbox.dmp) { + dma_free_coherent(&phba->pcidev->dev, + dd_data->context_un.mbox.dmp->size, + dd_data->context_un.mbox.dmp->dma.virt, + dd_data->context_un.mbox.dmp->dma.phys); + kfree(dd_data->context_un.mbox.dmp); + } + if (dd_data->context_un.mbox.rxbmp) { + lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt, + dd_data->context_un.mbox.rxbmp->phys); + kfree(dd_data->context_un.mbox.rxbmp); + } kfree(dd_data); return; } @@ -2450,14 +2534,26 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba, case MBX_SET_DEBUG: case MBX_WRITE_WWN: case MBX_SLI4_CONFIG: + case MBX_READ_EVENT_LOG: case MBX_READ_EVENT_LOG_STATUS: case MBX_WRITE_EVENT_LOG: case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: + case MBX_RUN_BIU_DIAG64: break; case MBX_SET_VARIABLE: - case MBX_RUN_BIU_DIAG64: - case MBX_READ_EVENT_LOG: + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1226 mbox: set_variable 0x%x, 0x%x\n", + mb->un.varWords[0], + mb->un.varWords[1]); + if ((mb->un.varWords[0] == SETVAR_MLOMNT) + && (mb->un.varWords[1] == 1)) { + phba->wait_4_mlo_maint_flg = 1; + } else if (mb->un.varWords[0] == SETVAR_MLORST) { + phba->link_flag &= ~LS_LOOPBACK_MODE; + phba->fc_topology = TOPOLOGY_PT_PT; + } + break; case MBX_READ_SPARM64: case MBX_READ_LA: case 
MBX_READ_LA64: @@ -2492,97 +2588,365 @@ static uint32_t lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, struct lpfc_vport *vport) { - LPFC_MBOXQ_t *pmboxq; - MAILBOX_t *pmb; - MAILBOX_t *mb; - struct bsg_job_data *dd_data; + LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */ + MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */ + /* a 4k buffer to hold the mb and extended data from/to the bsg */ + MAILBOX_t *mb = NULL; + struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */ uint32_t size; + struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */ + struct lpfc_dmabufext *dmp = NULL; /* for biu diag */ + struct ulp_bde64 *rxbpl = NULL; + struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *) + job->request->rqst_data.h_vendor.vendor_cmd; + uint8_t *ext = NULL; int rc = 0; + uint8_t *from; + + /* in case no data is transferred */ + job->reply->reply_payload_rcv_len = 0; + + /* check if requested extended data lengths are valid */ + if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) || + (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) { + rc = -ERANGE; + goto job_done; + } /* allocate our bsg tracking structure */ dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); if (!dd_data) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2727 Failed allocation of dd_data\n"); - return -ENOMEM; + rc = -ENOMEM; + goto job_done; } - mb = kzalloc(PAGE_SIZE, GFP_KERNEL); + mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL); if (!mb) { - kfree(dd_data); - return -ENOMEM; + rc = -ENOMEM; + goto job_done; } pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { - kfree(dd_data); - kfree(mb); - return -ENOMEM; + rc = -ENOMEM; + goto job_done; } + memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); size = job->request_payload.payload_len; - job->reply->reply_payload_rcv_len = - sg_copy_to_buffer(job->request_payload.sg_list, - job->request_payload.sg_cnt, - mb, size); + sg_copy_to_buffer(job->request_payload.sg_list, + job->request_payload.sg_cnt, + mb, size); rc = lpfc_bsg_check_cmd_access(phba, mb, vport); - if (rc != 0) { - kfree(dd_data); - kfree(mb); - mempool_free(pmboxq, phba->mbox_mem_pool); - return rc; /* must be negative */ - } + if (rc != 0) + goto job_done; /* must be negative */ - memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); pmb = &pmboxq->u.mb; memcpy(pmb, mb, sizeof(*pmb)); pmb->mbxOwner = OWN_HOST; - pmboxq->context1 = NULL; pmboxq->vport = vport; - if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { - rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); - if (rc != MBX_SUCCESS) { - if (rc != MBX_TIMEOUT) { - kfree(dd_data); - kfree(mb); - mempool_free(pmboxq, phba->mbox_mem_pool); + /* If HBA encountered an error attention, allow only DUMP + * or RESTART mailbox commands until the HBA is restarted. 
+ */ + if (phba->pport->stopped && + pmb->mbxCommand != MBX_DUMP_MEMORY && + pmb->mbxCommand != MBX_RESTART && + pmb->mbxCommand != MBX_WRITE_VPARMS && + pmb->mbxCommand != MBX_WRITE_WWN) + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "2797 mbox: Issued mailbox cmd " + "0x%x while in stopped state.\n", + pmb->mbxCommand); + + /* Don't allow mailbox commands to be sent when blocked + * or when in the middle of discovery + */ + if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) { + rc = -EAGAIN; + goto job_done; + } + + /* extended mailbox commands will need an extended buffer */ + if (mbox_req->inExtWLen || mbox_req->outExtWLen) { + ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL); + if (!ext) { + rc = -ENOMEM; + goto job_done; + } + + /* any data for the device? */ + if (mbox_req->inExtWLen) { + from = (uint8_t *)mb; + from += sizeof(MAILBOX_t); + memcpy((uint8_t *)ext, from, + mbox_req->inExtWLen * sizeof(uint32_t)); + } + + pmboxq->context2 = ext; + pmboxq->in_ext_byte_len = + mbox_req->inExtWLen * + sizeof(uint32_t); + pmboxq->out_ext_byte_len = + mbox_req->outExtWLen * + sizeof(uint32_t); + pmboxq->mbox_offset_word = + mbox_req->mbOffset; + pmboxq->context2 = ext; + pmboxq->in_ext_byte_len = + mbox_req->inExtWLen * sizeof(uint32_t); + pmboxq->out_ext_byte_len = + mbox_req->outExtWLen * sizeof(uint32_t); + pmboxq->mbox_offset_word = mbox_req->mbOffset; + } + + /* biu diag will need a kernel buffer to transfer the data + * allocate our own buffer and setup the mailbox command to + * use ours + */ + if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) { + uint32_t transmit_length = pmb->un.varWords[1]; + uint32_t receive_length = pmb->un.varWords[4]; + /* transmit length cannot be greater than receive length or + * mailbox extension size + */ + if ((transmit_length > receive_length) || + (transmit_length > MAILBOX_EXT_SIZE)) { + rc = -ERANGE; + goto job_done; + } + + rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!rxbmp) { + rc = -ENOMEM; + goto job_done; + } + + rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); + if (!rxbmp->virt) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&rxbmp->list); + rxbpl = (struct ulp_bde64 *) rxbmp->virt; + dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0); + if (!dmp) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&dmp->dma.list); + pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = + putPaddrHigh(dmp->dma.phys); + pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = + putPaddrLow(dmp->dma.phys); + + pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = + putPaddrHigh(dmp->dma.phys + + pmb->un.varBIUdiag.un.s2. + xmit_bde64.tus.f.bdeSize); + pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = + putPaddrLow(dmp->dma.phys + + pmb->un.varBIUdiag.un.s2. 
+ xmit_bde64.tus.f.bdeSize); + + /* copy the transmit data found in the mailbox extension area */ + from = (uint8_t *)mb; + from += sizeof(MAILBOX_t); + memcpy((uint8_t *)dmp->dma.virt, from, transmit_length); + } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) { + struct READ_EVENT_LOG_VAR *rdEventLog = + &pmb->un.varRdEventLog ; + uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize; + uint32_t mode = bf_get(lpfc_event_log, rdEventLog); + + /* receive length cannot be greater than mailbox + * extension size + */ + if (receive_length > MAILBOX_EXT_SIZE) { + rc = -ERANGE; + goto job_done; + } + + /* mode zero uses a bde like biu diags command */ + if (mode == 0) { + + /* rebuild the command for sli4 using our own buffers + * like we do for biu diags + */ + + rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!rxbmp) { + rc = -ENOMEM; + goto job_done; + } + + rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); + rxbpl = (struct ulp_bde64 *) rxbmp->virt; + if (rxbpl) { + INIT_LIST_HEAD(&rxbmp->list); + dmp = diag_cmd_data_alloc(phba, rxbpl, + receive_length, 0); + } + + if (!dmp) { + rc = -ENOMEM; + goto job_done; } - return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; + + INIT_LIST_HEAD(&dmp->dma.list); + pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys); + pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); } + } else if (phba->sli_rev == LPFC_SLI_REV4) { + if (pmb->mbxCommand == MBX_DUMP_MEMORY) { + /* rebuild the command for sli4 using our own buffers + * like we do for biu diags + */ + uint32_t receive_length = pmb->un.varWords[2]; + /* receive length cannot be greater than mailbox + * extension size + */ + if ((receive_length == 0) || + (receive_length > MAILBOX_EXT_SIZE)) { + rc = -ERANGE; + goto job_done; + } - memcpy(mb, pmb, sizeof(*pmb)); - job->reply->reply_payload_rcv_len = - sg_copy_from_buffer(job->reply_payload.sg_list, - job->reply_payload.sg_cnt, - mb, size); - kfree(dd_data); - kfree(mb); - mempool_free(pmboxq, phba->mbox_mem_pool); - /* not waiting mbox already done */ - return 0; + rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!rxbmp) { + rc = -ENOMEM; + goto job_done; + } + + rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); + if (!rxbmp->virt) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&rxbmp->list); + rxbpl = (struct ulp_bde64 *) rxbmp->virt; + dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length, + 0); + if (!dmp) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&dmp->dma.list); + pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys); + pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys); + } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) && + pmb->un.varUpdateCfg.co) { + struct ulp_bde64 *bde = + (struct ulp_bde64 *)&pmb->un.varWords[4]; + + /* bde size cannot be greater than mailbox ext size */ + if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) { + rc = -ERANGE; + goto job_done; + } + + rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!rxbmp) { + rc = -ENOMEM; + goto job_done; + } + + rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys); + if (!rxbmp->virt) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&rxbmp->list); + rxbpl = (struct ulp_bde64 *) rxbmp->virt; + dmp = diag_cmd_data_alloc(phba, rxbpl, + bde->tus.f.bdeSize, 0); + if (!dmp) { + rc = -ENOMEM; + goto job_done; + } + + INIT_LIST_HEAD(&dmp->dma.list); + bde->addrHigh = putPaddrHigh(dmp->dma.phys); + bde->addrLow = putPaddrLow(dmp->dma.phys); + + /* copy the transmit data found in the mailbox + * extension area + */ + from = (uint8_t 
*)mb; + from += sizeof(MAILBOX_t); + memcpy((uint8_t *)dmp->dma.virt, from, + bde->tus.f.bdeSize); + } } + dd_data->context_un.mbox.rxbmp = rxbmp; + dd_data->context_un.mbox.dmp = dmp; + /* setup wake call as IOCB callback */ pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait; + /* setup context field to pass wait_queue pointer to wake function */ pmboxq->context1 = dd_data; dd_data->type = TYPE_MBOX; dd_data->context_un.mbox.pmboxq = pmboxq; dd_data->context_un.mbox.mb = mb; dd_data->context_un.mbox.set_job = job; + dd_data->context_un.mbox.ext = ext; + dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset; + dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen; + dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen; job->dd_data = dd_data; + + if ((vport->fc_flag & FC_OFFLINE_MODE) || + (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); + if (rc != MBX_SUCCESS) { + rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV; + goto job_done; + } + + /* job finished, copy the data */ + memcpy(mb, pmb, sizeof(*pmb)); + job->reply->reply_payload_rcv_len = + sg_copy_from_buffer(job->reply_payload.sg_list, + job->reply_payload.sg_cnt, + mb, size); + /* not waiting mbox already done */ + rc = 0; + goto job_done; + } + rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); - if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) { - kfree(dd_data); - kfree(mb); + if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) + return 1; /* job started */ + +job_done: + /* common exit for error or job completed inline */ + kfree(mb); + if (pmboxq) mempool_free(pmboxq, phba->mbox_mem_pool); - return -EIO; + kfree(ext); + if (dmp) { + dma_free_coherent(&phba->pcidev->dev, + dmp->size, dmp->dma.virt, + dmp->dma.phys); + kfree(dmp); + } + if (rxbmp) { + lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys); + kfree(rxbmp); } + kfree(dd_data); - return 1; + return rc; } /** @@ -2607,7 +2971,12 @@ lpfc_bsg_mbox_cmd(struct fc_bsg_job *job) goto job_error; } - if (job->request_payload.payload_len != PAGE_SIZE) { + if (job->request_payload.payload_len != BSG_MBOX_SIZE) { + rc = -EINVAL; + goto job_error; + } + + if (job->reply_payload.payload_len != BSG_MBOX_SIZE) { rc = -EINVAL; goto job_error; } @@ -2638,6 +3007,297 @@ job_error: } /** + * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler + * @phba: Pointer to HBA context object. + * @cmdiocbq: Pointer to command iocb. + * @rspiocbq: Pointer to response iocb. + * + * This function is the completion handler for iocbs issued using + * lpfc_menlo_cmd function. This function is called by the + * ring event handler function without any lock held. This function + * can be called from both worker thread context and interrupt + * context. This function also can be called from another thread which + * cleans up the SLI layer objects. + * This function copies the contents of the response iocb to the + * response iocb memory object provided by the caller of + * lpfc_sli_issue_iocb_wait and then wakes up the thread which + * sleeps for the iocb completion. 
+ **/ +static void +lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, + struct lpfc_iocbq *cmdiocbq, + struct lpfc_iocbq *rspiocbq) +{ + struct bsg_job_data *dd_data; + struct fc_bsg_job *job; + IOCB_t *rsp; + struct lpfc_dmabuf *bmp; + struct lpfc_bsg_menlo *menlo; + unsigned long flags; + struct menlo_response *menlo_resp; + int rc = 0; + + spin_lock_irqsave(&phba->ct_ev_lock, flags); + dd_data = cmdiocbq->context1; + if (!dd_data) { + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return; + } + + menlo = &dd_data->context_un.menlo; + job = menlo->set_job; + job->dd_data = NULL; /* so timeout handler does not reply */ + + spin_lock_irqsave(&phba->hbalock, flags); + cmdiocbq->iocb_flag |= LPFC_IO_WAKE; + if (cmdiocbq->context2 && rspiocbq) + memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, + &rspiocbq->iocb, sizeof(IOCB_t)); + spin_unlock_irqrestore(&phba->hbalock, flags); + + bmp = menlo->bmp; + rspiocbq = menlo->rspiocbq; + rsp = &rspiocbq->iocb; + + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + /* always return the xri, this would be used in the case + * of a menlo download to allow the data to be sent as a continuation + * of the exchange. + */ + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + menlo_resp->xri = rsp->ulpContext; + if (rsp->ulpStatus) { + if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) { + switch (rsp->un.ulpWord[4] & 0xff) { + case IOERR_SEQUENCE_TIMEOUT: + rc = -ETIMEDOUT; + break; + case IOERR_INVALID_RPI: + rc = -EFAULT; + break; + default: + rc = -EACCES; + break; + } + } else + rc = -EACCES; + } else + job->reply->reply_payload_rcv_len = + rsp->un.genreq64.bdl.bdeSize; + + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + lpfc_sli_release_iocbq(phba, rspiocbq); + lpfc_sli_release_iocbq(phba, cmdiocbq); + kfree(bmp); + kfree(dd_data); + /* make error code available to userspace */ + job->reply->result = rc; + /* complete the job back to userspace */ + job->job_done(job); + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + return; +} + +/** + * lpfc_menlo_cmd - send an ioctl for menlo hardware + * @job: fc_bsg_job to handle + * + * This function issues a gen request 64 CR ioctl for all menlo cmd requests, + * all the command completions will return the xri for the command. + * For menlo data requests a gen request 64 CX is used to continue the exchange + * supplied in the menlo request header xri field. 
+ **/ +static int +lpfc_menlo_cmd(struct fc_bsg_job *job) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *cmdiocbq, *rspiocbq; + IOCB_t *cmd, *rsp; + int rc = 0; + struct menlo_command *menlo_cmd; + struct menlo_response *menlo_resp; + struct lpfc_dmabuf *bmp = NULL; + int request_nseg; + int reply_nseg; + struct scatterlist *sgel = NULL; + int numbde; + dma_addr_t busaddr; + struct bsg_job_data *dd_data; + struct ulp_bde64 *bpl = NULL; + + /* in case no data is returned return just the return code */ + job->reply->reply_payload_rcv_len = 0; + + if (job->request_len < + sizeof(struct fc_bsg_request) + + sizeof(struct menlo_command)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2784 Received MENLO_CMD request below " + "minimum size\n"); + rc = -ERANGE; + goto no_dd_data; + } + + if (job->reply_len < + sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2785 Received MENLO_CMD reply below " + "minimum size\n"); + rc = -ERANGE; + goto no_dd_data; + } + + if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2786 Adapter does not support menlo " + "commands\n"); + rc = -EPERM; + goto no_dd_data; + } + + menlo_cmd = (struct menlo_command *) + job->request->rqst_data.h_vendor.vendor_cmd; + + menlo_resp = (struct menlo_response *) + job->reply->reply_data.vendor_reply.vendor_rsp; + + /* allocate our bsg tracking structure */ + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, + "2787 Failed allocation of dd_data\n"); + rc = -ENOMEM; + goto no_dd_data; + } + + bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); + if (!bmp) { + rc = -ENOMEM; + goto free_dd; + } + + cmdiocbq = lpfc_sli_get_iocbq(phba); + if (!cmdiocbq) { + rc = -ENOMEM; + goto free_bmp; + } + + rspiocbq = lpfc_sli_get_iocbq(phba); + if (!rspiocbq) { + rc = -ENOMEM; + goto free_cmdiocbq; + } + + rsp = &rspiocbq->iocb; + + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) { + rc = -ENOMEM; + goto free_rspiocbq; + } + + INIT_LIST_HEAD(&bmp->list); + bpl = (struct ulp_bde64 *) bmp->virt; + request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) { + busaddr = sg_dma_address(sgel); + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + bpl->tus.f.bdeSize = sg_dma_len(sgel); + bpl->tus.w = cpu_to_le32(bpl->tus.w); + bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr)); + bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr)); + bpl++; + } + + cmd = &cmdiocbq->iocb; + cmd->un.genreq64.bdl.ulpIoTag32 = 0; + cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys); + cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys); + cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; + cmd->un.genreq64.bdl.bdeSize = + (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); + 
cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA); + cmd->un.genreq64.w5.hcsw.Dfctl = 0; + cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD; + cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */ + cmd->ulpBdeCount = 1; + cmd->ulpClass = CLASS3; + cmd->ulpOwner = OWN_CHIP; + cmd->ulpLe = 1; /* Limited Edition */ + cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; + cmdiocbq->vport = phba->pport; + /* We want the firmware to timeout before we do */ + cmd->ulpTimeout = MENLO_TIMEOUT - 5; + cmdiocbq->context3 = bmp; + cmdiocbq->context2 = rspiocbq; + cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp; + cmdiocbq->context1 = dd_data; + cmdiocbq->context2 = rspiocbq; + if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) { + cmd->ulpCommand = CMD_GEN_REQUEST64_CR; + cmd->ulpPU = MENLO_PU; /* 3 */ + cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */ + cmd->ulpContext = MENLO_CONTEXT; /* 0 */ + } else { + cmd->ulpCommand = CMD_GEN_REQUEST64_CX; + cmd->ulpPU = 1; + cmd->un.ulpWord[4] = 0; + cmd->ulpContext = menlo_cmd->xri; + } + + dd_data->type = TYPE_MENLO; + dd_data->context_un.menlo.cmdiocbq = cmdiocbq; + dd_data->context_un.menlo.rspiocbq = rspiocbq; + dd_data->context_un.menlo.set_job = job; + dd_data->context_un.menlo.bmp = bmp; + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, + MENLO_TIMEOUT - 5); + if (rc == IOCB_SUCCESS) + return 0; /* done for now */ + + /* iocb failed so cleanup */ + pci_unmap_sg(phba->pcidev, job->request_payload.sg_list, + job->request_payload.sg_cnt, DMA_TO_DEVICE); + pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list, + job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + +free_rspiocbq: + lpfc_sli_release_iocbq(phba, rspiocbq); +free_cmdiocbq: + lpfc_sli_release_iocbq(phba, cmdiocbq); +free_bmp: + kfree(bmp); +free_dd: + kfree(dd_data); +no_dd_data: + /* make error code available to userspace */ + job->reply->result = rc; + job->dd_data = NULL; + return rc; +} +/** * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job * @job: fc_bsg_job to handle **/ @@ -2669,6 +3329,10 @@ lpfc_bsg_hst_vendor(struct fc_bsg_job *job) case LPFC_BSG_VENDOR_MBOX: rc = lpfc_bsg_mbox_cmd(job); break; + case LPFC_BSG_VENDOR_MENLO_CMD: + case LPFC_BSG_VENDOR_MENLO_DATA: + rc = lpfc_menlo_cmd(job); + break; default: rc = -EINVAL; job->reply->reply_payload_rcv_len = 0; @@ -2728,6 +3392,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) struct lpfc_bsg_event *evt; struct lpfc_bsg_iocb *iocb; struct lpfc_bsg_mbox *mbox; + struct lpfc_bsg_menlo *menlo; struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; struct bsg_job_data *dd_data; unsigned long flags; @@ -2772,9 +3437,21 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) job->dd_data = NULL; job->reply->reply_payload_rcv_len = 0; job->reply->result = -EAGAIN; + /* the mbox completion handler can now be run */ spin_unlock_irqrestore(&phba->ct_ev_lock, flags); job->job_done(job); break; + case TYPE_MENLO: + menlo = &dd_data->context_un.menlo; + cmdiocb = menlo->cmdiocbq; + /* hint to completion handler that the job timed out */ + job->reply->result = -EAGAIN; + spin_unlock_irqrestore(&phba->ct_ev_lock, flags); + /* this will call our completion handler */ + spin_lock_irq(&phba->hbalock); + lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); + spin_unlock_irq(&phba->hbalock); + break; default: spin_unlock_irqrestore(&phba->ct_ev_lock, flags); break; diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index 6c8f87e39b98..a2c33e7c9152 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ 
b/drivers/scsi/lpfc/lpfc_bsg.h @@ -31,6 +31,8 @@ #define LPFC_BSG_VENDOR_DIAG_TEST 5 #define LPFC_BSG_VENDOR_GET_MGMT_REV 6 #define LPFC_BSG_VENDOR_MBOX 7 +#define LPFC_BSG_VENDOR_MENLO_CMD 8 +#define LPFC_BSG_VENDOR_MENLO_DATA 9 struct set_ct_event { uint32_t command; @@ -89,10 +91,21 @@ struct get_mgmt_rev_reply { struct MgmtRevInfo info; }; +#define BSG_MBOX_SIZE 4096 /* mailbox command plus extended data */ struct dfc_mbox_req { uint32_t command; + uint32_t mbOffset; uint32_t inExtWLen; uint32_t outExtWLen; - uint8_t mbOffset; +}; + +/* Used for menlo command or menlo data. The xri is only used for menlo data */ +struct menlo_command { + uint32_t cmd; + uint32_t xri; +}; + +struct menlo_response { + uint32_t xri; /* return the xri of the iocb exchange */ }; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 6f0fb51eb461..fbc9baeb6048 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -63,7 +63,9 @@ void lpfc_linkdown_port(struct lpfc_vport *); void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *); void lpfc_retry_pport_discovery(struct lpfc_hba *); +void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t); void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -221,6 +223,10 @@ void lpfc_unregister_fcf_rescan(struct lpfc_hba *); void lpfc_unregister_unused_fcf(struct lpfc_hba *); int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *); void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *); +void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); +uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); +int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); +void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); @@ -385,7 +391,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); void lpfc_start_fdiscs(struct lpfc_hba *phba); struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); - +struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index c7e921973f66..463b74902ac4 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -25,6 +25,7 @@ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/interrupt.h> +#include <linux/slab.h> #include <linux/utsname.h> #include <scsi/scsi.h> diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 391584183d81..a80d938fafc9 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -24,6 +24,7 @@ #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/kthread.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/ctype.h> diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 2851d75ffc6f..36257a685509 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -38,6 +38,7 @@ enum lpfc_work_type { LPFC_EVT_ELS_RETRY, 
LPFC_EVT_DEV_LOSS, LPFC_EVT_FASTPATH_MGMT_EVT, + LPFC_EVT_RESET_HBA, }; /* structure used to queue event to the discovery tasklet */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 08b6634cb994..c4c7f0ad7468 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -21,6 +21,7 @@ /* See Fibre Channel protocol T11 FC-LS for details */ #include <linux/blkdev.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi.h> @@ -583,6 +584,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vport, np); } + lpfc_cleanup_pending_mbox(vport); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); @@ -771,6 +773,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->context1; struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; struct serv_parm *sp; + uint16_t fcf_index; int rc; /* Check to see if link went down during discovery */ @@ -788,6 +791,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->port_state); if (irsp->ulpStatus) { + /* + * In case of FIP mode, perform round robin FCF failover + * due to new FCF discovery + */ + if ((phba->hba_flag & HBA_FIP_SUPPORT) && + (phba->fcf.fcf_flag & FCF_DISCOVERY)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, + "2611 FLOGI failed on registered " + "FCF record fcf_index:%d, trying " + "to perform round robin failover\n", + phba->fcf.current_rec.fcf_indx); + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { + /* + * Exhausted the eligible FCF record list, + * fail through to retry FLOGI on current + * FCF record. 
+ */ + lpfc_printf_log(phba, KERN_WARNING, + LOG_FIP | LOG_ELS, + "2760 FLOGI exhausted FCF " + "round robin failover list, " + "retry FLOGI on the current " + "registered FCF index:%d\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + } else { + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, + fcf_index); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, + LOG_FIP | LOG_ELS, + "2761 FLOGI round " + "robin FCF failover " + "read FCF failed " + "rc:x%x, fcf_index:" + "%d\n", rc, + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + } else + goto out; + } + } + /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; @@ -806,15 +857,15 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /* FLOGI failure */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0100 FLOGI failure Data: x%x x%x " - "x%x\n", + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n", irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); goto flogifail; } spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_CVL_RCVD; + vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; spin_unlock_irq(shost->host_lock); /* @@ -842,8 +893,21 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, else rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp); - if (!rc) + if (!rc) { + /* Mark the FCF discovery process done */ + if (phba->hba_flag & HBA_FIP_SUPPORT) + lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | + LOG_ELS, + "2769 FLOGI successful on FCF " + "record: current_fcf_index:" + "x%x, terminate FCF round " + "robin failover process\n", + phba->fcf.current_rec.fcf_indx); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); goto out; + } } flogifail: @@ -981,7 +1045,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * function returns, it does not guarantee all the IOCBs are actually aborted. 
* * Return code - * 0 - Sucessfully issued abort iocb on all outstanding flogis (Always 0) + * 0 - Successfully issued abort iocb on all outstanding flogis (Always 0) **/ int lpfc_els_abort_flogi(struct lpfc_hba *phba) @@ -1409,6 +1473,10 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* PLOGI failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2753 PLOGI failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) rc = NLP_STE_FREED_NODE; @@ -1577,6 +1645,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* PRLI failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2754 PRLI failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) goto out; @@ -1860,6 +1932,10 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } /* ADISC failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2755 ADISC failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (!lpfc_error_lost_link(irsp)) lpfc_disc_state_machine(vport, ndlp, cmdiocb, @@ -2009,6 +2085,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS command is being retried */ goto out; /* LOGO failed */ + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "2756 LOGO failure DID:%06X Status:x%x/x%x\n", + ndlp->nlp_DID, irsp->ulpStatus, + irsp->un.ulpWord[4]); /* Do not call DSM for lpfc_els_abort'ed ELS cmds */ if (lpfc_error_lost_link(irsp)) goto out; @@ -3129,7 +3209,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp && NLP_CHK_NODE_ACT(ndlp) && (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) { /* A LS_RJT associated with Default RPI cleanup has its own - * seperate code path. + * separate code path. */ if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI)) ls_rjt = 1; @@ -5291,7 +5371,7 @@ lpfc_send_els_failure_event(struct lpfc_hba *phba, sizeof(struct lpfc_name)); pcmd = (uint32_t *) (((struct lpfc_dmabuf *) cmdiocbp->context2)->virt); - lsrjt_event.command = *pcmd; + lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0; stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]); lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode; lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp; @@ -5975,7 +6055,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; spin_unlock_irq(shost->host_lock); - if (vport->port_type == LPFC_PHYSICAL_PORT) + if (vport->port_type == LPFC_PHYSICAL_PORT + && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) lpfc_initial_flogi(vport); else lpfc_initial_fdisc(vport); @@ -5989,7 +6070,12 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) lpfc_issue_fabric_reglogin(vport); else { - lpfc_start_fdiscs(phba); + /* + * If the physical port is instantiated using + * FDISC, do not start vport discovery. + */ + if (vport->port_state != LPFC_FDISC) + lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); } } else @@ -6055,21 +6141,18 @@ mbox_err_exit: } /** - * lpfc_retry_pport_discovery - Start timer to retry FLOGI. 
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer * @phba: pointer to lpfc hba data structure. * - * This routine abort all pending discovery commands and - * start a timer to retry FLOGI for the physical port - * discovery. + * This routine cancels the retry delay timers to all the vports. **/ void -lpfc_retry_pport_discovery(struct lpfc_hba *phba) +lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba) { struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; - struct Scsi_Host *shost; - int i; uint32_t link_state; + int i; /* Treat this failure as linkdown for all vports */ link_state = phba->link_state; @@ -6087,13 +6170,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) } lpfc_destroy_vport_work_array(phba, vports); } +} + +/** + * lpfc_retry_pport_discovery - Start timer to retry FLOGI. + * @phba: pointer to lpfc hba data structure. + * + * This routine abort all pending discovery commands and + * start a timer to retry FLOGI for the physical port + * discovery. + **/ +void +lpfc_retry_pport_discovery(struct lpfc_hba *phba) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + + /* Cancel the all vports retry delay retry timers */ + lpfc_cancel_all_vport_retry_delay_timer(phba); /* If fabric require FLOGI, then re-instantiate physical login */ ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); if (!ndlp) return; - shost = lpfc_shost_from_vport(phba->pport); mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); spin_lock_irq(shost->host_lock); @@ -6192,6 +6292,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_CVL_RCVD; + vport->fc_flag &= ~FC_VPORT_LOGO_RCVD; vport->fc_flag |= FC_FABRIC; if (vport->phba->fc_topology == TOPOLOGY_LOOP) vport->fc_flag |= FC_PUBLIC_LOOP; @@ -6216,10 +6317,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vport, np); } + lpfc_cleanup_pending_mbox(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if (phba->sli_rev == LPFC_SLI_REV4) + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + else + vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG; spin_unlock_irq(shost->host_lock); } @@ -6797,21 +6902,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; unsigned long iflag = 0; - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); list_for_each_entry_safe(sglq_entry, sglq_next, &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { if (sglq_entry->sli4_xritag == xri) { list_del(&sglq_entry->list); - spin_unlock_irqrestore( - &phba->sli4_hba.abts_sgl_list_lock, - iflag); - spin_lock_irqsave(&phba->hbalock, iflag); - list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); + sglq_entry->state = SGL_FREED; + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } } - spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + sglq_entry = __lpfc_get_active_sglq(phba, xri); + if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + } + sglq_entry->state = SGL_XRI_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; } diff --git 
a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2359d0bfb734..1f87b4fb8b50 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -20,6 +20,7 @@ *******************************************************************/ #include <linux/blkdev.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/kthread.h> #include <linux/interrupt.h> @@ -474,6 +475,10 @@ lpfc_work_list_done(struct lpfc_hba *phba) lpfc_send_fastpath_evt(phba, evtp); free_evt = 0; break; + case LPFC_EVT_RESET_HBA: + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_reset_hba(phba); + break; } if (free_evt) kfree(evtp); @@ -1481,8 +1486,6 @@ lpfc_match_fcf_conn_list(struct lpfc_hba *phba, int lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) { - LPFC_MBOXQ_t *mbox; - int rc; /* * If the Link is up and no FCoE events while in the * FCF discovery, no need to restart FCF discovery. @@ -1491,86 +1494,100 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) return 0; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2768 Pending link or FCF event during current " + "handling of the previous event: link_state:x%x, " + "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", + phba->link_state, phba->fcoe_eventtag_at_fcf_scan, + phba->fcoe_eventtag); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_AVAILABLE; spin_unlock_irq(&phba->hbalock); - if (phba->link_state >= LPFC_LINK_UP) - lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - else { + if (phba->link_state >= LPFC_LINK_UP) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2780 Restart FCF table scan due to " + "pending FCF event:evt_tag_at_scan:x%x, " + "evt_tag_current:x%x\n", + phba->fcoe_eventtag_at_fcf_scan, + phba->fcoe_eventtag); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); + } else { /* * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS * flag */ spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } + /* Unregister the currently registered FCF if required */ if (unreg_fcf) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, - LOG_DISCOVERY|LOG_MBOX, - "2610 UNREG_FCFI mbox allocation failed\n"); - return 1; - } - lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); - mbox->vport = phba->pport; - mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2611 UNREG_FCFI issue mbox failed\n"); - mempool_free(mbox, phba->mbox_mem_pool); - } + lpfc_sli4_unregister_fcf(phba); } - return 1; } /** + * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record + * @phba: pointer to lpfc hba data structure. + * @fcf_cnt: number of eligible fcf record seen so far. + * + * This function makes an running random selection decision on FCF record to + * use through a sequence of @fcf_cnt eligible FCF records with equal + * probability. 
To perform integer manunipulation of random numbers with + * size unit32_t, the lower 16 bits of the 32-bit random number returned + * from random32() are taken as the random random number generated. + * + * Returns true when outcome is for the newly read FCF record should be + * chosen; otherwise, return false when outcome is for keeping the previously + * chosen FCF record. + **/ +static bool +lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) +{ + uint32_t rand_num; + + /* Get 16-bit uniform random number */ + rand_num = (0xFFFF & random32()); + + /* Decision with probability 1/fcf_cnt */ + if ((fcf_cnt * rand_num) < 0xFFFF) + return true; + else + return false; +} + +/** * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. + * @next_fcf_index: pointer to holder of next fcf index. * - * This function iterate through all the fcf records available in - * HBA and choose the optimal FCF record for discovery. After finding - * the FCF for discovery it register the FCF record and kick start - * discovery. - * If FCF_IN_USE flag is set in currently used FCF, the routine try to - * use a FCF record which match fabric name and mac address of the - * currently used FCF record. - * If the driver support only one FCF, it will try to use the FCF record - * used by BOOT_BIOS. + * This routine parses the non-embedded fcf mailbox command by performing the + * necessarily error checking, non-embedded read FCF record mailbox command + * SGE parsing, and endianness swapping. + * + * Returns the pointer to the new FCF record in the non-embedded mailbox + * command DMA memory if successfully, other NULL. */ -void -lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +static struct fcf_record * +lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, + uint16_t *next_fcf_index) { void *virt_addr; dma_addr_t phys_addr; - uint8_t *bytep; struct lpfc_mbx_sge sge; struct lpfc_mbx_read_fcf_tbl *read_fcf; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; struct fcf_record *new_fcf_record; - uint32_t boot_flag, addr_mode; - uint32_t next_fcf_index; - struct lpfc_fcf_rec *fcf_rec = NULL; - unsigned long iflags; - uint16_t vlan_id; - int rc; - - /* If there is pending FCoE event restart FCF table scan */ - if (lpfc_check_pending_fcoe_event(phba, 0)) { - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return; - } /* Get the first SGE entry from the non-embedded DMA memory. This * routine only uses a single SGE. @@ -1581,59 +1598,187 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "2524 Failed to get the non-embedded SGE " "virtual address\n"); - goto out; + return NULL; } virt_addr = mboxq->sge_array->addr[0]; shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - /* - * The FCF Record was read and there is no reason for the driver - * to maintain the FCF record data or memory. Instead, just need - * to book keeping the FCFIs can be used. 
- */ + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status) { - if (shdr_status == STATUS_FCF_TABLE_EMPTY) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + if (shdr_status == STATUS_FCF_TABLE_EMPTY) + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2726 READ_FCF_RECORD Indicates empty " "FCF table.\n"); - } else { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + else + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2521 READ_FCF_RECORD mailbox failed " - "with status x%x add_status x%x, mbx\n", - shdr_status, shdr_add_status); - } - goto out; + "with status x%x add_status x%x, " + "mbx\n", shdr_status, shdr_add_status); + return NULL; } - /* Interpreting the returned information of FCF records */ + + /* Interpreting the returned information of the FCF record */ read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, sizeof(struct lpfc_mbx_read_fcf_tbl)); - next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); - + *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); new_fcf_record = (struct fcf_record *)(virt_addr + sizeof(struct lpfc_mbx_read_fcf_tbl)); lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, - sizeof(struct fcf_record)); - bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); + offsetof(struct fcf_record, vlan_bitmap)); + new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); + new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); + return new_fcf_record; +} + +/** + * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record + * @phba: pointer to lpfc hba data structure. + * @fcf_record: pointer to the fcf record. + * @vlan_id: the lowest vlan identifier associated to this fcf record. + * @next_fcf_index: the index to the next fcf record in hba's fcf table. + * + * This routine logs the detailed FCF record if the LOG_FIP logging is + * enabled.
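Note how the parse helper above converts byte order in two steps: the word-aligned prefix of struct fcf_record is block-swapped only up to vlan_bitmap (a byte array whose byte order must be left alone), and the two named words after the bitmap are converted individually. A self-contained sketch of that pattern under assumed types; struct demo_rec and its helper are hypothetical, not the driver's layout:

    /* Hypothetical illustration of partial-structure endianness conversion. */
    #include <stdint.h>
    #include <stddef.h>
    #include <endian.h>

    struct demo_rec {
            uint32_t word0;
            uint32_t word1;
            uint8_t  vlan_bitmap[512]; /* byte array: keep byte order as-is */
            uint32_t word137;
            uint32_t word138;
    };

    static void demo_rec_to_cpu(struct demo_rec *r)
    {
            uint32_t *w = (uint32_t *)r;
            size_t i;
            size_t nwords = offsetof(struct demo_rec, vlan_bitmap) / sizeof(uint32_t);

            for (i = 0; i < nwords; i++) /* convert only the leading words */
                    w[i] = le32toh(w[i]);
            r->word137 = le32toh(r->word137); /* trailing words, by name */
            r->word138 = le32toh(r->word138);
    }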
+ **/ +static void +lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, + struct fcf_record *fcf_record, + uint16_t vlan_id, + uint16_t next_fcf_index) +{ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2764 READ_FCF_RECORD:\n" + "\tFCF_Index : x%x\n" + "\tFCF_Avail : x%x\n" + "\tFCF_Valid : x%x\n" + "\tFIP_Priority : x%x\n" + "\tMAC_Provider : x%x\n" + "\tLowest VLANID : x%x\n" + "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" + "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" + "\tNext_FCF_Index: x%x\n", + bf_get(lpfc_fcf_record_fcf_index, fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, fcf_record), + fcf_record->fip_priority, + bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), + vlan_id, + bf_get(lpfc_fcf_record_mac_0, fcf_record), + bf_get(lpfc_fcf_record_mac_1, fcf_record), + bf_get(lpfc_fcf_record_mac_2, fcf_record), + bf_get(lpfc_fcf_record_mac_3, fcf_record), + bf_get(lpfc_fcf_record_mac_4, fcf_record), + bf_get(lpfc_fcf_record_mac_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_0, fcf_record), + bf_get(lpfc_fcf_record_fab_name_1, fcf_record), + bf_get(lpfc_fcf_record_fab_name_2, fcf_record), + bf_get(lpfc_fcf_record_fab_name_3, fcf_record), + bf_get(lpfc_fcf_record_fab_name_4, fcf_record), + bf_get(lpfc_fcf_record_fab_name_5, fcf_record), + bf_get(lpfc_fcf_record_fab_name_6, fcf_record), + bf_get(lpfc_fcf_record_fab_name_7, fcf_record), + bf_get(lpfc_fcf_record_switch_name_0, fcf_record), + bf_get(lpfc_fcf_record_switch_name_1, fcf_record), + bf_get(lpfc_fcf_record_switch_name_2, fcf_record), + bf_get(lpfc_fcf_record_switch_name_3, fcf_record), + bf_get(lpfc_fcf_record_switch_name_4, fcf_record), + bf_get(lpfc_fcf_record_switch_name_5, fcf_record), + bf_get(lpfc_fcf_record_switch_name_6, fcf_record), + bf_get(lpfc_fcf_record_switch_name_7, fcf_record), + next_fcf_index); +} + +/** + * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This function iterates through all the fcf records available in + * HBA and chooses the optimal FCF record for discovery. After finding + * the FCF for discovery it registers the FCF record and kick-starts + * discovery. + * If FCF_IN_USE flag is set in currently used FCF, the routine tries to + * use an FCF record which matches fabric name and mac address of the + * currently used FCF record. + * If the driver supports only one FCF, it will try to use the FCF record + * used by BOOT_BIOS.
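Condensed, the policy this completion handler applies while streaming the table is: a boot-BIOS record always wins, then a strictly lower FIP priority wins, and equal-priority records are tie-broken by the 1/cnt running random selection sketched earlier. A hypothetical decision helper capturing that ordering, with illustrative names and reusing demo_new_fcf_random_select from the first sketch:

    /* Hypothetical condensation of the FCF selection policy described above. */
    struct demo_fcf {
            int boot_enable;       /* record is the boot-BIOS FCF */
            unsigned int priority; /* FIP priority: lower value wins */
    };

    static int demo_should_replace(const struct demo_fcf *cur,
                                   const struct demo_fcf *new_rec,
                                   unsigned int eligible_cnt)
    {
            if (new_rec->boot_enable && !cur->boot_enable)
                    return 1; /* boot-BIOS record always preferred */
            if (!new_rec->boot_enable && cur->boot_enable)
                    return 0; /* never displace the boot record */
            if (new_rec->priority < cur->priority)
                    return 1; /* strictly better FIP priority */
            if (new_rec->priority == cur->priority)
                    return demo_new_fcf_random_select(eligible_cnt); /* 1/cnt tie-break */
            return 0;
    }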
+ */ +void +lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + struct lpfc_fcf_rec *fcf_rec = NULL; + uint16_t vlan_id; + uint32_t seed; + bool select_new_fcf; + int rc; + + /* If there is pending FCoE event restart FCF table scan */ + if (lpfc_check_pending_fcoe_event(phba, 0)) { + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2765 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + /* Let next new FCF event trigger fast failover */ + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCF_DISC_INPROGRESS; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Check the FCF record against the connection list */ rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + /* * If the fcf record does not match with connect list entries - * read the next entry. + * read the next entry; otherwise, this is an eligible FCF + * record for round robin FCF failover. */ - if (!rc) + if (!rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2781 FCF record fcf_index:x%x failed FCF " + "connection list check, fcf_avail:x%x, " + "fcf_valid:x%x\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_avail, + new_fcf_record), + bf_get(lpfc_fcf_record_fcf_valid, + new_fcf_record)); goto read_next_fcf; + } else { + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + if (rc) + goto read_next_fcf; + } + /* * If this is not the first FCF discovery of the HBA, use last * FCF record for the discovery. The condition that a rescan * matches the in-use FCF record: fabric name, switch name, mac * address, and vlan_id. */ - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irq(&phba->hbalock); if (phba->fcf.fcf_flag & FCF_IN_USE) { if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, new_fcf_record) && @@ -1649,8 +1794,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) /* If in fast failover, mark it's completed */ - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; - spin_unlock_irqrestore(&phba->hbalock, iflags); + phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | + FCF_DISCOVERY); + spin_unlock_irq(&phba->hbalock); goto out; } /* @@ -1661,7 +1807,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * next candidate. */ if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } } @@ -1669,14 +1815,9 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * Update on failover FCF record only if it's in FCF fast-failover * period; otherwise, update on current FCF record. 
*/ - if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { - /* Fast FCF failover only to the same fabric name */ - if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name, - new_fcf_record)) - fcf_rec = &phba->fcf.failover_rec; - else - goto read_next_fcf; - } else + if (phba->fcf.fcf_flag & FCF_REDISC_FOV) + fcf_rec = &phba->fcf.failover_rec; + else fcf_rec = &phba->fcf.current_rec; if (phba->fcf.fcf_flag & FCF_AVAILABLE) { @@ -1689,7 +1830,7 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Choose this FCF record */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, BOOT_ENABLE); - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* @@ -1698,20 +1839,31 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) * the next FCF record. */ if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* * If the new hba FCF record has lower priority value * than the driver FCF record, use the new record. */ - if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) && - (new_fcf_record->fip_priority < fcf_rec->priority)) { - /* Choose this FCF record */ + if (new_fcf_record->fip_priority < fcf_rec->priority) { + /* Choose the new FCF record with lower priority */ __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, addr_mode, vlan_id, 0); + /* Reset running random FCF selection count */ + phba->fcf.eligible_fcf_cnt = 1; + } else if (new_fcf_record->fip_priority == fcf_rec->priority) { + /* Update running random FCF selection count */ + phba->fcf.eligible_fcf_cnt++; + select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, + phba->fcf.eligible_fcf_cnt); + if (select_new_fcf) + /* Choose the new FCF by random selection */ + __lpfc_update_fcf_record(phba, fcf_rec, + new_fcf_record, + addr_mode, vlan_id, 0); } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; } /* @@ -1723,8 +1875,13 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) addr_mode, vlan_id, (boot_flag ? BOOT_ENABLE : 0)); phba->fcf.fcf_flag |= FCF_AVAILABLE; + /* Setup initial running random FCF selection count */ + phba->fcf.eligible_fcf_cnt = 1; + /* Seeding the random number generator for random selection */ + seed = (uint32_t)(0xFFFFFFFF & jiffies); + srandom32(seed); } - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); goto read_next_fcf; read_next_fcf: @@ -1740,9 +1897,22 @@ read_next_fcf: * FCF scan inprogress, and do nothing */ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { - spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2782 No suitable FCF record " + "found during this round of " + "post FCF rediscovery scan: " + "fcf_evt_tag:x%x, fcf_index: " + "x%x\n", + phba->fcoe_eventtag_at_fcf_scan, + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record)); + /* + * Let next new FCF event trigger fast + * failover + */ + spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); return; } /* @@ -1754,16 +1924,23 @@ read_next_fcf: * record. 
*/ - /* unregister the current in-use FCF record */ + /* Unregister the current in-use FCF record */ lpfc_unregister_fcf(phba); - /* replace in-use record with the new record */ + + /* Replace in-use record with the new record */ memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, sizeof(struct lpfc_fcf_rec)); /* mark the FCF fast failover completed */ - spin_lock_irqsave(&phba->hbalock, iflags); + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; - spin_unlock_irqrestore(&phba->hbalock, iflags); + spin_unlock_irq(&phba->hbalock); + /* + * Set up the initial registered FCF index for FLOGI + * round robin FCF failover. + */ + phba->fcf.fcf_rr_init_indx = + phba->fcf.failover_rec.fcf_indx; /* Register to the new FCF record */ lpfc_register_fcf(phba); } else { @@ -1776,13 +1953,25 @@ read_next_fcf: return; /* * Otherwise, initial scan or post linkdown rescan, - * register with the best fit FCF record found so - * far through the scanning process. + * register with the best FCF record found so far + * through the FCF scanning process. */ + + /* mark the initial FCF discovery completed */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * Set up the initial registered FCF index for FLOGI + * round robin FCF failover + */ + phba->fcf.fcf_rr_init_indx = + phba->fcf.current_rec.fcf_indx; + /* Register to the new FCF record */ lpfc_register_fcf(phba); } } else - lpfc_sli4_read_fcf_record(phba, next_fcf_index); + lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); return; out: @@ -1793,6 +1982,141 @@ out: } /** + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This is the callback function for FLOGI failure round robin FCF failover + * read FCF record mailbox command from the eligible FCF record bmask for + * performing the failover. If the FCF read back is not valid/available, it + * fails through to retrying FLOGI to the currently registered FCF again. + * Otherwise, if the FCF read back is valid and available, it will set the + * newly read FCF record to the failover FCF record, unregister currently + * registered FCF record, copy the failover FCF record to the current + * FCF record, and then register the current FCF record before proceeding + * to trying FLOGI on the new failover FCF. 
+ */ +void +lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t next_fcf_index; + uint16_t current_fcf_index; + uint16_t vlan_id; + + /* If link state is not up, stop the round robin failover process */ + if (phba->link_state < LPFC_LINK_UP) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + spin_unlock_irq(&phba->hbalock); + lpfc_sli4_mbox_cmd_free(phba, mboxq); + return; + } + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + "2766 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + goto out; + } + + /* Get the needed parameters from FCF record */ + lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + /* Upload new FCF record to the failover FCF record */ + spin_lock_irq(&phba->hbalock); + __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, + new_fcf_record, addr_mode, vlan_id, + (boot_flag ? BOOT_ENABLE : 0)); + spin_unlock_irq(&phba->hbalock); + + current_fcf_index = phba->fcf.current_rec.fcf_indx; + + /* Unregister the current in-use FCF record */ + lpfc_unregister_fcf(phba); + + /* Replace in-use record with the new record */ + memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, + sizeof(struct lpfc_fcf_rec)); + + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2783 FLOGI round robin FCF failover from FCF " + "(index:x%x) to FCF (index:x%x).\n", + current_fcf_index, + bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); + lpfc_register_fcf(phba); +} + +/** + * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. + * @phba: pointer to lpfc hba data structure. + * @mboxq: pointer to mailbox object. + * + * This is the callback function of read FCF record mailbox command for + * updating the eligible FCF bmask for FLOGI failure round robin FCF + * failover when a new FCF event happened. If the FCF read back is + * valid/available and it passes the connection list check, it updates + * the bmask for the eligible FCF record for round robin failover. 
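The eligible-FCF bmask that these two callbacks maintain is an ordinary bitmap indexed by FCF table index: eligible records get their bit set, dead or rejected records get it cleared, and failover walks the bitmap circularly from the last tried index. A minimal sketch of such a bitmap, assuming a small fixed table size; the names below are hypothetical stand-ins for the driver's lpfc_sli4_fcf_rr_* helpers, which this hunk only calls:

    /* Hypothetical round-robin eligibility bitmap, fixed at 64 entries. */
    #include <stdint.h>

    #define DEMO_FCF_TBL_MAX 64
    static uint64_t demo_fcf_rr_bmask; /* one bit per FCF table index */

    static void demo_fcf_rr_index_set(unsigned int idx)
    {
            demo_fcf_rr_bmask |= (uint64_t)1 << idx;
    }

    static void demo_fcf_rr_index_clear(unsigned int idx)
    {
            demo_fcf_rr_bmask &= ~((uint64_t)1 << idx);
    }

    /* Next eligible index strictly after 'last', wrapping; -1 if none set */
    static int demo_fcf_rr_next_index(unsigned int last)
    {
            unsigned int n, idx;

            for (n = 1; n <= DEMO_FCF_TBL_MAX; n++) {
                    idx = (last + n) % DEMO_FCF_TBL_MAX;
                    if (demo_fcf_rr_bmask & ((uint64_t)1 << idx))
                            return (int)idx;
            }
            return -1;
    }

The real driver sizes its bitmap dynamically: the (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG allocation later in this patch is exactly the number of longs needed to hold one bit per possible FCF index.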
+ */ +void +lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + struct fcf_record *new_fcf_record; + uint32_t boot_flag, addr_mode; + uint16_t fcf_index, next_fcf_index; + uint16_t vlan_id; + int rc; + + /* If link state is not up, no need to proceed */ + if (phba->link_state < LPFC_LINK_UP) + goto out; + + /* If FCF discovery period is over, no need to proceed */ + if (phba->fcf.fcf_flag & FCF_DISCOVERY) + goto out; + + /* Parse the FCF record from the non-embedded mailbox command */ + new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, + &next_fcf_index); + if (!new_fcf_record) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2767 Mailbox command READ_FCF_RECORD " + "failed to retrieve a FCF record.\n"); + goto out; + } + + /* Check the connection list for eligibility */ + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); + + /* Log the FCF record information if turned on */ + lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, + next_fcf_index); + + if (!rc) + goto out; + + /* Update the eligible FCF record index bmask */ + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index); + +out: + lpfc_sli4_mbox_cmd_free(phba, mboxq); +} + +/** * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox data structure. @@ -2024,8 +2348,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) int rc; struct fcf_record *fcf_record; - sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - spin_lock_irq(&phba->hbalock); switch (la->UlnkSpeed) { case LA_1GHZ_LINK: @@ -2117,18 +2439,24 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); lpfc_linkup(phba); - if (sparam_mbox) { - lpfc_read_sparam(phba, sparam_mbox, 0); - sparam_mbox->vport = vport; - sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; - rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mp = (struct lpfc_dmabuf *) sparam_mbox->context1; - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - mempool_free(sparam_mbox, phba->mbox_mem_pool); - goto out; - } + sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!sparam_mbox) + goto out; + + rc = lpfc_read_sparam(phba, sparam_mbox, 0); + if (rc) { + mempool_free(sparam_mbox, phba->mbox_mem_pool); + goto out; + } + sparam_mbox->vport = vport; + sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; + rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mp = (struct lpfc_dmabuf *) sparam_mbox->context1; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(sparam_mbox, phba->mbox_mem_pool); + goto out; } if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { @@ -2186,10 +2514,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) spin_unlock_irq(&phba->hbalock); return; } + /* This is the initial FCF discovery scan */ + phba->fcf.fcf_flag |= FCF_INIT_DISC; spin_unlock_irq(&phba->hbalock); - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - if (rc) + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2778 Start FCF table scan at linkup\n"); + + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); goto out; + } } return; @@ -2403,11 
+2741,18 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) switch (mb->mbxStatus) { case 0x0011: case 0x0020: - case 0x9700: lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0911 cmpl_unreg_vpi, mb status = 0x%x\n", mb->mbxStatus); break; + /* If VPI is busy, reset the HBA */ + case 0x9700: + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", + vport->vpi, mb->mbxStatus); + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_workq_post_event(phba, NULL, NULL, + LPFC_EVT_RESET_HBA); } spin_lock_irq(shost->host_lock); vport->vpi_state &= ~LPFC_VPI_REGISTERED; @@ -2682,7 +3027,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { - lpfc_start_fdiscs(phba); + /* when the physical port receives a LOGO, do not start + * vport discovery */ + if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) + lpfc_start_fdiscs(phba); + else + vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG; lpfc_do_scr_ns_plogi(phba, vport); } @@ -2894,7 +3244,6 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); if (new_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; ndlp->nlp_type |= NLP_FC_NODE; } @@ -3379,8 +3728,12 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_VALID) + if (ndlp->nlp_flag & NLP_RPI_VALID) { + /* The mempool_alloc might sleep */ + spin_unlock_irq(shost->host_lock); lpfc_unreg_rpi(vports[i], ndlp); + spin_lock_irq(shost->host_lock); + } } spin_unlock_irq(shost->host_lock); } @@ -4648,6 +5001,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) ndlp = lpfc_findnode_did(vports[i], Fabric_DID); if (ndlp) lpfc_cancel_retry_delay_tmo(vports[i], ndlp); + lpfc_cleanup_pending_mbox(vports[i]); lpfc_mbx_unreg_vpi(vports[i]); shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); @@ -4756,6 +5110,7 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) return; /* Reset HBA FCF states after successful unregister FCF */ phba->fcf.fcf_flag = 0; + phba->fcf.current_rec.flag = 0; /* * If driver is not unloading, check if there is any other @@ -4765,13 +5120,21 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) (phba->link_state < LPFC_LINK_UP)) return; - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + /* This is considered the initial FCF discovery scan */ + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag |= FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); - if (rc) + if (rc) { + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_INIT_DISC; + spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, "2553 lpfc_unregister_unused_fcf failed " "to read FCF record HBA state x%x\n", phba->pport->port_state); + } } /** diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 89ff7c09e298..e654d01dad24 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1565,95 +1565,83 @@ enum lpfc_protgrp_type { }; /* PDE Descriptors */ -#define LPFC_PDE1_DESCRIPTOR 0x81 -#define LPFC_PDE2_DESCRIPTOR 0x82 -#define LPFC_PDE3_DESCRIPTOR 0x83 - -/*
BlockGuard Profiles */ -enum lpfc_bg_prof_codes { - LPFC_PROF_INVALID, - LPFC_PROF_A1 = 128, /* Full Protection */ - LPFC_PROF_A2, /* Disabled Protection Checks:A2~A4 */ - LPFC_PROF_A3, - LPFC_PROF_A4, - LPFC_PROF_B1, /* Embedded DIFs: B1~B3 */ - LPFC_PROF_B2, - LPFC_PROF_B3, - LPFC_PROF_C1, /* Separate DIFs: C1~C3 */ - LPFC_PROF_C2, - LPFC_PROF_C3, - LPFC_PROF_D1, /* Full Protection */ - LPFC_PROF_D2, /* Partial Protection & Check Disabling */ - LPFC_PROF_D3, - LPFC_PROF_E1, /* E1~E4:out - check-only, in - update apptag */ - LPFC_PROF_E2, - LPFC_PROF_E3, - LPFC_PROF_E4, - LPFC_PROF_F1, /* Full Translation - F1 Prot Descriptor */ - /* F1 Translation BDE */ - LPFC_PROF_ANT1, /* TCP checksum, DIF inline with data buffers */ - LPFC_PROF_AST1, /* TCP checksum, DIF split from data buffer */ - LPFC_PROF_ANT2, - LPFC_PROF_AST2 +#define LPFC_PDE5_DESCRIPTOR 0x85 +#define LPFC_PDE6_DESCRIPTOR 0x86 +#define LPFC_PDE7_DESCRIPTOR 0x87 + +/* BlockGuard Opcodes */ +#define BG_OP_IN_NODIF_OUT_CRC 0x0 +#define BG_OP_IN_CRC_OUT_NODIF 0x1 +#define BG_OP_IN_NODIF_OUT_CSUM 0x2 +#define BG_OP_IN_CSUM_OUT_NODIF 0x3 +#define BG_OP_IN_CRC_OUT_CRC 0x4 +#define BG_OP_IN_CSUM_OUT_CSUM 0x5 +#define BG_OP_IN_CRC_OUT_CSUM 0x6 +#define BG_OP_IN_CSUM_OUT_CRC 0x7 + +struct lpfc_pde5 { + uint32_t word0; +#define pde5_type_SHIFT 24 +#define pde5_type_MASK 0x000000ff +#define pde5_type_WORD word0 +#define pde5_rsvd0_SHIFT 0 +#define pde5_rsvd0_MASK 0x00ffffff +#define pde5_rsvd0_WORD word0 + uint32_t reftag; /* Reference Tag Value */ + uint32_t reftagtr; /* Reference Tag Translation Value */ }; -/* BlockGuard error-control defines */ -#define BG_EC_STOP_ERR 0x00 -#define BG_EC_CONT_ERR 0x01 -#define BG_EC_IGN_UNINIT_STOP_ERR 0x10 -#define BG_EC_IGN_UNINIT_CONT_ERR 0x11 - -/* PDE (Protection Descriptor Entry) word 0 bit masks and shifts */ -#define PDE_DESC_TYPE_MASK 0xff000000 -#define PDE_DESC_TYPE_SHIFT 24 -#define PDE_BG_PROFILE_MASK 0x00ff0000 -#define PDE_BG_PROFILE_SHIFT 16 -#define PDE_BLOCK_LEN_MASK 0x0000fffc -#define PDE_BLOCK_LEN_SHIFT 2 -#define PDE_ERR_CTRL_MASK 0x00000003 -#define PDE_ERR_CTRL_SHIFT 0 -/* PDE word 1 bit masks and shifts */ -#define PDE_APPTAG_MASK_MASK 0xffff0000 -#define PDE_APPTAG_MASK_SHIFT 16 -#define PDE_APPTAG_VAL_MASK 0x0000ffff -#define PDE_APPTAG_VAL_SHIFT 0 -struct lpfc_pde { - uint32_t parms; /* bitfields of descriptor, prof, len, and ec */ - uint32_t apptag; /* bitfields of app tag maskand app tag value */ - uint32_t reftag; /* reference tag occupying all 32 bits */ +struct lpfc_pde6 { + uint32_t word0; +#define pde6_type_SHIFT 24 +#define pde6_type_MASK 0x000000ff +#define pde6_type_WORD word0 +#define pde6_rsvd0_SHIFT 0 +#define pde6_rsvd0_MASK 0x00ffffff +#define pde6_rsvd0_WORD word0 + uint32_t word1; +#define pde6_rsvd1_SHIFT 26 +#define pde6_rsvd1_MASK 0x0000003f +#define pde6_rsvd1_WORD word1 +#define pde6_na_SHIFT 25 +#define pde6_na_MASK 0x00000001 +#define pde6_na_WORD word1 +#define pde6_rsvd2_SHIFT 16 +#define pde6_rsvd2_MASK 0x000001FF +#define pde6_rsvd2_WORD word1 +#define pde6_apptagtr_SHIFT 0 +#define pde6_apptagtr_MASK 0x0000ffff +#define pde6_apptagtr_WORD word1 + uint32_t word2; +#define pde6_optx_SHIFT 28 +#define pde6_optx_MASK 0x0000000f +#define pde6_optx_WORD word2 +#define pde6_oprx_SHIFT 24 +#define pde6_oprx_MASK 0x0000000f +#define pde6_oprx_WORD word2 +#define pde6_nr_SHIFT 23 +#define pde6_nr_MASK 0x00000001 +#define pde6_nr_WORD word2 +#define pde6_ce_SHIFT 22 +#define pde6_ce_MASK 0x00000001 +#define pde6_ce_WORD word2 +#define pde6_re_SHIFT 21 
+#define pde6_re_MASK 0x00000001 +#define pde6_re_WORD word2 +#define pde6_ae_SHIFT 20 +#define pde6_ae_MASK 0x00000001 +#define pde6_ae_WORD word2 +#define pde6_ai_SHIFT 19 +#define pde6_ai_MASK 0x00000001 +#define pde6_ai_WORD word2 +#define pde6_bs_SHIFT 16 +#define pde6_bs_MASK 0x00000007 +#define pde6_bs_WORD word2 +#define pde6_apptagval_SHIFT 0 +#define pde6_apptagval_MASK 0x0000ffff +#define pde6_apptagval_WORD word2 }; -/* inline function to set fields in parms of PDE */ -static inline void -lpfc_pde_set_bg_parms(struct lpfc_pde *p, u8 desc, u8 prof, u16 len, u8 ec) -{ - uint32_t *wp = &p->parms; - - /* spec indicates that adapter appends two 0's to length field */ - len = len >> 2; - - *wp &= 0; - *wp |= ((desc << PDE_DESC_TYPE_SHIFT) & PDE_DESC_TYPE_MASK); - *wp |= ((prof << PDE_BG_PROFILE_SHIFT) & PDE_BG_PROFILE_MASK); - *wp |= ((len << PDE_BLOCK_LEN_SHIFT) & PDE_BLOCK_LEN_MASK); - *wp |= ((ec << PDE_ERR_CTRL_SHIFT) & PDE_ERR_CTRL_MASK); - *wp = le32_to_cpu(*wp); -} - -/* inline function to set apptag and reftag fields of PDE */ -static inline void -lpfc_pde_set_dif_parms(struct lpfc_pde *p, u16 apptagmask, u16 apptagval, - u32 reftag) -{ - uint32_t *wp = &p->apptag; - *wp &= 0; - *wp |= ((apptagmask << PDE_APPTAG_MASK_SHIFT) & PDE_APPTAG_MASK_MASK); - *wp |= ((apptagval << PDE_APPTAG_VAL_SHIFT) & PDE_APPTAG_VAL_MASK); - *wp = le32_to_cpu(*wp); - wp = &p->reftag; - *wp = le32_to_cpu(reftag); -} - /* Structure for MB Command LOAD_SM and DOWN_LOAD */ @@ -1744,6 +1732,17 @@ typedef struct { } un; } BIU_DIAG_VAR; +/* Structure for MB command READ_EVENT_LOG (0x38) */ +struct READ_EVENT_LOG_VAR { + uint32_t word1; +#define lpfc_event_log_SHIFT 29 +#define lpfc_event_log_MASK 0x00000001 +#define lpfc_event_log_WORD word1 +#define USE_MAILBOX_RESPONSE 1 + uint32_t offset; + struct ulp_bde64 rcv_bde64; +}; + /* Structure for MB Command INIT_LINK (05) */ typedef struct { @@ -2487,8 +2486,8 @@ typedef struct { #define DMP_VPORT_REGION_SIZE 0x200 #define DMP_MBOX_OFFSET_WORD 0x5 -#define DMP_REGION_23 0x17 /* fcoe param and port state region */ -#define DMP_RGN23_SIZE 0x400 +#define DMP_REGION_23 0x17 /* fcoe param and port state region */ +#define DMP_RGN23_SIZE 0x400 #define WAKE_UP_PARMS_REGION_ID 4 #define WAKE_UP_PARMS_WORD_SIZE 15 @@ -2503,9 +2502,9 @@ struct vport_rec { #define VPORT_INFO_REV 0x1 #define MAX_STATIC_VPORT_COUNT 16 struct static_vport_info { - uint32_t signature; + uint32_t signature; uint32_t rev; - struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; + struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; uint32_t resvd[66]; }; @@ -2934,6 +2933,12 @@ typedef struct { /* Union of all Mailbox Command types */ #define MAILBOX_CMD_WSIZE 32 #define MAILBOX_CMD_SIZE (MAILBOX_CMD_WSIZE * sizeof(uint32_t)) +/* ext_wsize times 4 bytes should not be greater than max xmit size */ +#define MAILBOX_EXT_WSIZE 512 +#define MAILBOX_EXT_SIZE (MAILBOX_EXT_WSIZE * sizeof(uint32_t)) +#define MAILBOX_HBA_EXT_OFFSET 0x100 +/* max mbox xmit size is a page size for sysfs IO operations */ +#define MAILBOX_MAX_XMIT_SIZE PAGE_SIZE typedef union { uint32_t varWords[MAILBOX_CMD_WSIZE - 1]; /* first word is type/ @@ -2972,6 +2977,9 @@ typedef union { REG_VPI_VAR varRegVpi; /* cmd = 0x96 (REG_VPI) */ UNREG_VPI_VAR varUnregVpi; /* cmd = 0x97 (UNREG_VPI) */ ASYNCEVT_ENABLE_VAR varCfgAsyncEvent; /*cmd = x33 (CONFIG_ASYNC) */ + struct READ_EVENT_LOG_VAR varRdEventLog; /* cmd = 0x38 + * (READ_EVENT_LOG) + */ struct config_msi_var varCfgMSI;/* cmd = x30 (CONFIG_MSI) */ } MAILVARIANTS; @@ -3652,7 +3660,8 
@@ typedef struct _IOCB { /* IOCB structure */ /* Maximum IOCBs that will fit in SLI2 slim */ #define MAX_SLI2_IOCB 498 #define MAX_SLIM_IOCB_SIZE (SLI2_SLIM_SIZE - \ - (sizeof(MAILBOX_t) + sizeof(PCB_t))) + (sizeof(MAILBOX_t) + sizeof(PCB_t) + \ + sizeof(uint32_t) * MAILBOX_EXT_WSIZE)) /* HBQ entries are 4 words each = 4k */ #define LPFC_TOTAL_HBQ_SIZE (sizeof(struct lpfc_hbq_entry) * \ @@ -3660,6 +3669,7 @@ typedef struct _IOCB { /* IOCB structure */ struct lpfc_sli2_slim { MAILBOX_t mbx; + uint32_t mbx_ext_words[MAILBOX_EXT_WSIZE]; PCB_t pcb; IOCB_t IOCBs[MAX_SLIM_IOCB_SIZE]; }; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 820015fbc4d6..bbdcf96800f6 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -41,8 +41,14 @@ * Or clear that bit field: * bf_set(example_bit_field, &t1, 0); */ +#define bf_get_le32(name, ptr) \ + ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) #define bf_get(name, ptr) \ (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) +#define bf_set_le32(name, ptr, value) \ + ((ptr)->name##_WORD = cpu_to_le32(((((value) & \ + name##_MASK) << name##_SHIFT) | (le32_to_cpu((ptr)->name##_WORD) & \ + ~(name##_MASK << name##_SHIFT))))) #define bf_set(name, ptr, value) \ ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) @@ -781,6 +787,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 #define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D +#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A /* FCoE Opcodes */ #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 @@ -1102,6 +1109,39 @@ struct lpfc_mbx_mq_create { } u; }; +struct lpfc_mbx_mq_create_ext { + struct mbox_header header; + union { + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_ext_num_pages_SHIFT 0 +#define lpfc_mbx_mq_create_ext_num_pages_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_ext_num_pages_WORD word0 + uint32_t async_evt_bmap; +#define lpfc_mbx_mq_create_ext_async_evt_link_SHIFT LPFC_TRAILER_CODE_LINK +#define lpfc_mbx_mq_create_ext_async_evt_link_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_link_WORD async_evt_bmap +#define lpfc_mbx_mq_create_ext_async_evt_fcfste_SHIFT LPFC_TRAILER_CODE_FCOE +#define lpfc_mbx_mq_create_ext_async_evt_fcfste_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_fcfste_WORD async_evt_bmap +#define lpfc_mbx_mq_create_ext_async_evt_group5_SHIFT LPFC_TRAILER_CODE_GRP5 +#define lpfc_mbx_mq_create_ext_async_evt_group5_MASK 0x00000001 +#define lpfc_mbx_mq_create_ext_async_evt_group5_WORD async_evt_bmap + struct mq_context context; + struct dma_address page[LPFC_MAX_MQ_PAGE]; + } request; + struct { + uint32_t word0; +#define lpfc_mbx_mq_create_q_id_SHIFT 0 +#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF +#define lpfc_mbx_mq_create_q_id_WORD word0 + } response; + } u; +#define LPFC_ASYNC_EVENT_LINK_STATE 0x2 +#define LPFC_ASYNC_EVENT_FCF_STATE 0x4 +#define LPFC_ASYNC_EVENT_GROUP5 0x20 +}; + struct lpfc_mbx_mq_destroy { struct mbox_header header; union { @@ -1428,8 +1468,8 @@ struct lpfc_mbx_reg_vfi { #define lpfc_reg_vfi_fcfi_WORD word2 uint32_t wwn[2]; struct ulp_bde64 bde; - uint32_t word8_rsvd; - uint32_t word9_rsvd; + uint32_t e_d_tov; + uint32_t r_a_tov; uint32_t word10; #define lpfc_reg_vfi_nport_id_SHIFT 0 #define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF @@ -1940,6 +1980,7 @@ struct lpfc_mbx_sli4_params { #define rdma_MASK 0x00000001 #define rdma_WORD word3 uint32_t 
sge_supp_len; +#define SLI4_PAGE_SIZE 4096 uint32_t word5; #define if_page_sz_SHIFT 0 #define if_page_sz_MASK 0x0000ffff @@ -2041,6 +2082,7 @@ struct lpfc_mqe { struct lpfc_mbx_reg_fcfi reg_fcfi; struct lpfc_mbx_unreg_fcfi unreg_fcfi; struct lpfc_mbx_mq_create mq_create; + struct lpfc_mbx_mq_create_ext mq_create_ext; struct lpfc_mbx_eq_create eq_create; struct lpfc_mbx_cq_create cq_create; struct lpfc_mbx_wq_create wq_create; @@ -2099,6 +2141,7 @@ struct lpfc_mcqe { #define LPFC_TRAILER_CODE_LINK 0x1 #define LPFC_TRAILER_CODE_FCOE 0x2 #define LPFC_TRAILER_CODE_DCBX 0x3 +#define LPFC_TRAILER_CODE_GRP5 0x5 }; struct lpfc_acqe_link { @@ -2168,6 +2211,19 @@ struct lpfc_acqe_dcbx { uint32_t trailer; }; +struct lpfc_acqe_grp5 { + uint32_t word0; +#define lpfc_acqe_grp5_pport_SHIFT 0 +#define lpfc_acqe_grp5_pport_MASK 0x000000FF +#define lpfc_acqe_grp5_pport_WORD word0 + uint32_t word1; +#define lpfc_acqe_grp5_llink_spd_SHIFT 16 +#define lpfc_acqe_grp5_llink_spd_MASK 0x0000FFFF +#define lpfc_acqe_grp5_llink_spd_WORD word1 + uint32_t event_tag; + uint32_t trailer; +}; + /* * Define the bootstrap mailbox (bmbx) region used to communicate * mailbox command between the host and port. The mailbox consists diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index d29ac7c317d9..cd9697edf860 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -29,6 +29,7 @@ #include <linux/spinlock.h> #include <linux/ctype.h> #include <linux/aer.h> +#include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -350,7 +351,12 @@ lpfc_config_port_post(struct lpfc_hba *phba) mb = &pmb->u.mb; /* Get login parameters for NID. */ - lpfc_read_sparam(phba, pmb, 0); + rc = lpfc_read_sparam(phba, pmb, 0); + if (rc) { + mempool_free(pmb, phba->mbox_mem_pool); + return -ENOMEM; + } + pmb->vport = vport; if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -359,7 +365,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) mb->mbxCommand, mb->mbxStatus); phba->link_state = LPFC_HBA_ERROR; mp = (struct lpfc_dmabuf *) pmb->context1; - mempool_free( pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); return -EIO; @@ -544,7 +550,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } - } else if (phba->cfg_suppress_link_up == 0) { + } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -571,6 +577,11 @@ lpfc_config_port_post(struct lpfc_hba *phba) } /* MBOX buffer will be freed in mbox compl */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + lpfc_config_async(phba, pmb, LPFC_ELS_RING); pmb->mbox_cmpl = lpfc_config_async_cmpl; pmb->vport = phba->pport; @@ -588,6 +599,11 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Get Option rom version */ pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!pmb) { + phba->link_state = LPFC_HBA_ERROR; + return -ENOMEM; + } + lpfc_dump_wakeup_param(phba, pmb); pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl; pmb->vport = phba->pport; @@ -652,7 +668,7 @@ lpfc_hba_init_link(struct lpfc_hba *phba) mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } - phba->cfg_suppress_link_up = 0; + phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK; return 0; } @@ -807,6 
+823,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) LIST_HEAD(aborts); int ret; unsigned long iflag = 0; + struct lpfc_sglq *sglq_entry = NULL; + ret = lpfc_hba_down_post_s3(phba); if (ret) return ret; @@ -822,6 +840,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) * list. */ spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_for_each_entry(sglq_entry, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) + sglq_entry->state = SGL_FREED; + list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); @@ -2178,8 +2200,10 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) { - /* Clear pending FCF rediscovery wait timer */ - phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; + /* Clear pending FCF rediscovery wait and failover in progress flags */ + phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | + FCF_DEAD_DISC | + FCF_ACVL_DISC); /* Now, try to stop the timer */ del_timer(&phba->fcf.redisc_wait); } @@ -2542,7 +2566,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_cmd_len = 16; if (phba->sli_rev == LPFC_SLI_REV4) { shost->dma_boundary = - phba->sli4_hba.pc_sli4_params.sge_supp_len; + phba->sli4_hba.pc_sli4_params.sge_supp_len-1; shost->sg_tablesize = phba->cfg_sg_seg_cnt; } @@ -2576,7 +2600,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) init_timer(&vport->els_tmofunc); vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; - error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_put_shost; @@ -2912,6 +2935,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) /* FCF rediscovery event to worker thread */ phba->fcf.fcf_flag |= FCF_REDISC_EVT; spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2776 FCF rediscover wait timer expired, post " + "a worker thread event for FCF table scan\n"); /* wake up worker thread */ lpfc_worker_wake_up(phba); } @@ -3183,6 +3209,82 @@ out_free_pmb: } /** + * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport + * @vport: pointer to vport data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on a vport in + * response to a CVL event. + * + * Return the pointer to the ndlp with the vport if successful, otherwise + * return NULL. 
+ **/ +static struct lpfc_nodelist * +lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport) +{ + struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; + struct lpfc_hba *phba; + + if (!vport) + return NULL; + phba = vport->phba; + if (!phba) + return NULL; + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp) { + /* Cannot find existing Fabric ndlp, so allocate a new one */ + ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); + if (!ndlp) + return NULL; + lpfc_nlp_init(vport, ndlp, Fabric_DID); + /* Set the node type */ + ndlp->nlp_type |= NLP_FABRIC; + /* Put ndlp onto node list */ + lpfc_enqueue_node(vport, ndlp); + } else if (!NLP_CHK_NODE_ACT(ndlp)) { + /* re-setup ndlp without removing from node list */ + ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); + if (!ndlp) + return NULL; + } + if (phba->pport->port_state <= LPFC_FLOGI) + return NULL; + /* If virtual link is not yet instantiated ignore CVL */ + if (vport->port_state <= LPFC_FDISC) + return NULL; + shost = lpfc_shost_from_vport(vport); + if (!shost) + return NULL; + lpfc_linkdown_port(vport); + lpfc_cleanup_pending_mbox(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_CVL_RCVD; + spin_unlock_irq(shost->host_lock); + + return ndlp; +} + +/** + * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports + * @phba: pointer to lpfc hba data structure. + * + * This routine is to perform Clear Virtual Link (CVL) on all vports in + * response to a FCF dead event. + **/ +static void +lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba) +{ + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (vports) + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + lpfc_sli4_perform_vport_cvl(vports[i]); + lpfc_destroy_vport_work_array(phba, vports); +} + +/** * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async fcoe completion queue entry. @@ -3198,7 +3300,6 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, struct lpfc_vport *vport; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - uint32_t link_state; int active_vlink_present; struct lpfc_vport **vports; int i; @@ -3208,10 +3309,20 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, switch (event_type) { case LPFC_FCOE_EVENT_TYPE_NEW_FCF: case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2546 New FCF found index 0x%x tag 0x%x\n", - acqe_fcoe->index, - acqe_fcoe->event_tag); + if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_DISCOVERY, + "2546 New FCF found event: " + "evt_tag:x%x, fcf_index:x%x\n", + acqe_fcoe->event_tag, + acqe_fcoe->index); + else + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | + LOG_DISCOVERY, + "2788 FCF parameter modified event: " + "evt_tag:x%x, fcf_index:x%x\n", + acqe_fcoe->event_tag, + acqe_fcoe->index); spin_lock_irq(&phba->hbalock); if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) || (phba->hba_flag & FCF_DISC_INPROGRESS)) { @@ -3222,6 +3333,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, spin_unlock_irq(&phba->hbalock); break; } + if (phba->fcf.fcf_flag & FCF_REDISC_EVT) { /* * If fast FCF failover rescan event is pending, @@ -3232,12 +3344,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, } spin_unlock_irq(&phba->hbalock); - /* Read the FCF table and re-discover SAN.
*/ - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + if ((phba->fcf.fcf_flag & FCF_DISCOVERY) && + !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { + /* + * During period of FCF discovery, read the FCF + * table record indexed by the event to update + * FCF round robin failover eligible FCF bmask. + */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2779 Read new FCF record with " + "fcf_index:x%x for updating FCF " + "round robin failover bmask\n", + acqe_fcoe->index); + rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); + } + + /* Otherwise, scan the entire FCF table and re-discover SAN */ + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2770 Start FCF table scan due to new FCF " + "event: evt_tag:x%x, fcf_index:x%x\n", + acqe_fcoe->event_tag, acqe_fcoe->index); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, + LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2547 Read FCF record failed 0x%x\n", - rc); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2547 Issue FCF scan read FCF mailbox " + "command failed 0x%x\n", rc); break; case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: @@ -3248,47 +3381,63 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, break; case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2549 FCF disconnected from network index 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); /* If the event is not for currently used fcf do nothing */ if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index) break; - /* - * Currently, driver support only one FCF - so treat this as - * a link down, but save the link state because we don't want - * it to be changed to Link Down unless it is already down. + /* We request port to rediscover the entire FCF table for + * a fast recovery in case the current FCF record is no + * longer valid, if we are not already in the middle of + * the FCF failover process. */ - link_state = phba->link_state; - lpfc_linkdown(phba); - phba->link_state = link_state; - /* Unregister FCF if no devices connected to it */ - lpfc_unregister_unused_fcf(phba); + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + spin_unlock_irq(&phba->hbalock); + /* Update FLOGI FCF failover eligible FCF bmask */ + lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index); + break; + } + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2771 Start FCF fast failover process due to " + "FCF DEAD event: evt_tag:x%x, fcf_index:x%x " + "\n", acqe_fcoe->event_tag, acqe_fcoe->index); + rc = lpfc_sli4_redisc_fcf_table(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_DISCOVERY, + "2772 Issue FCF rediscover mailbox " + "command failed, fail through to FCF " + "dead event\n"); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; + spin_unlock_irq(&phba->hbalock); + /* + * Last resort will fail over by treating this + * as a link down to FCF registration. + */ + lpfc_sli4_fcf_dead_failthrough(phba); + } else + /* Handling fast FCF failover to a DEAD FCF event + * is considered equivalent to receiving CVL on all + * vports.
+ */ + lpfc_sli4_perform_all_vport_cvl(phba); break; case LPFC_FCOE_EVENT_TYPE_CVL: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2718 Clear Virtual Link Received for VPI 0x%x" " tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); vport = lpfc_find_vport_by_vpid(phba, acqe_fcoe->index - phba->vpi_base); - if (!vport) - break; - ndlp = lpfc_findnode_did(vport, Fabric_DID); + ndlp = lpfc_sli4_perform_vport_cvl(vport); if (!ndlp) break; - shost = lpfc_shost_from_vport(vport); - if (phba->pport->port_state <= LPFC_FLOGI) - break; - /* If virtual link is not yet instantiated ignore CVL */ - if (vport->port_state <= LPFC_FDISC) - break; - - lpfc_linkdown_port(vport); - lpfc_cleanup_pending_mbox(vport); - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_VPORT_CVL_RCVD; - spin_unlock_irq(shost->host_lock); active_vlink_present = 0; vports = lpfc_create_vport_work_array(phba); @@ -3311,6 +3460,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * re-instantiate the Vlink using FDISC. */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -3321,15 +3471,38 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, * Otherwise, we request port to rediscover * the entire FCF table for a fast recovery * from possible case that the current FCF - * is no longer valid. + * is no longer valid if we are not already + * in the FCF failover process. */ + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + spin_unlock_irq(&phba->hbalock); + break; + } + /* Mark the fast failover process in progress */ + phba->fcf.fcf_flag |= FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | + LOG_DISCOVERY, + "2773 Start FCF fast failover due " + "to CVL event: evt_tag:x%x\n", + acqe_fcoe->event_tag); rc = lpfc_sli4_redisc_fcf_table(phba); - if (rc) + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | + LOG_DISCOVERY, + "2774 Issue FCF rediscover " + "mailbox command failed, " + "fail through to CVL event\n"); + spin_lock_irq(&phba->hbalock); + phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; + spin_unlock_irq(&phba->hbalock); /* * Last resort will be re-try on the * the current registered FCF entry. */ lpfc_retry_pport_discovery(phba); + } } break; default: @@ -3358,6 +3531,32 @@ lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, } /** + * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event + * @phba: pointer to lpfc hba data structure. + * @acqe_link: pointer to the async grp5 completion queue entry. + * + * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event + * is an asynchronous notification of a logical link speed change. The Port + * reports the logical link speed in units of 10Mbps.
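Since GRP5 reports logical link speed in 10 Mbps units, the handler below only has to multiply by ten for display, which is exactly what its log message does. A one-line conversion helper, hypothetical and purely illustrative:

    #include <stdint.h>

    /* GRP5 logical link speed arrives in 10 Mbps units (per the comment
     * above), so a raw value of 1000 means 10000 Mbps, i.e. 10 Gbps. */
    static inline uint32_t demo_grp5_ll_speed_mbps(uint16_t raw)
    {
            return (uint32_t)raw * 10;
    }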
+ **/ +static void +lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba, + struct lpfc_acqe_grp5 *acqe_grp5) +{ + uint16_t prev_ll_spd; + + phba->fc_eventTag = acqe_grp5->event_tag; + phba->fcoe_eventtag = acqe_grp5->event_tag; + prev_ll_spd = phba->sli4_hba.link_state.logical_speed; + phba->sli4_hba.link_state.logical_speed = + (bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)); + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2789 GRP5 Async Event: Updating logical link speed " + "from %dMbps to %dMbps\n", (prev_ll_spd * 10), + (phba->sli4_hba.link_state.logical_speed*10)); +} + +/** * lpfc_sli4_async_event_proc - Process all the pending asynchronous event * @phba: pointer to lpfc hba data structure. * @@ -3393,6 +3592,10 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) lpfc_sli4_async_dcbx_evt(phba, &cq_event->cqe.acqe_dcbx); break; + case LPFC_TRAILER_CODE_GRP5: + lpfc_sli4_async_grp5_evt(phba, + &cq_event->cqe.acqe_grp5); + break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "1804 Invalid asynchrous event code: " @@ -3426,11 +3629,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); /* Scan FCF table from the first entry to re-discover SAN */ - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, + "2777 Start FCF table scan after FCF " + "rediscovery quiescent period over\n"); + rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2747 Post FCF rediscovery read FCF record " - "failed 0x%x\n", rc); + lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, + "2747 Issue FCF scan read FCF mailbox " + "command failed 0x%x\n", rc); } /** @@ -3651,6 +3857,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); + if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) { + phba->menlo_flag |= HBA_MENLO_SUPPORT; + /* check for menlo minimum sg count */ + if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) + phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT; + } + /* * Since the sg_tablesize is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. @@ -3722,6 +3935,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; + int longs; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -3867,6 +4081,43 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (unlikely(rc)) goto out_free_bsmbx; + mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!mboxq) { + rc = -ENOMEM; + goto out_free_bsmbx; + } + + /* Get the Supported Pages. It is always available. */ + lpfc_supported_pages(mboxq); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + if (unlikely(rc)) { + rc = -EIO; + mempool_free(mboxq, phba->mbox_mem_pool); + goto out_free_bsmbx; + } + + mqe = &mboxq->u.mqe; + memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), + LPFC_MAX_SUPPORTED_PAGES); + for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { + switch (pn_page[i]) { + case LPFC_SLI4_PARAMETERS: + phba->sli4_hba.pc_sli4_params.supported = 1; + break; + default: + break; + } + } + + /* Read the port's SLI4 Parameters capabilities if supported. 
*/ + if (phba->sli4_hba.pc_sli4_params.supported) + rc = lpfc_pc_sli4_params_get(phba, mboxq); + mempool_free(mboxq, phba->mbox_mem_pool); + if (rc) { + rc = -EIO; + goto out_free_bsmbx; + } /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); if (rc) @@ -3898,13 +4149,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_active_sgl; } + /* Allocate eligible FCF bmask memory for FCF round robin failover */ + longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; + phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), + GFP_KERNEL); + if (!phba->fcf.fcf_rr_bmask) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2759 Failed allocate memory for FCF round " + "robin failover bmask\n"); + goto out_remove_rpi_hdrs; + } + phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * phba->cfg_fcp_eq_count), GFP_KERNEL); if (!phba->sli4_hba.fcp_eq_hdl) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2572 Failed allocate memory for fast-path " "per-EQ handle array\n"); - goto out_remove_rpi_hdrs; + goto out_free_fcf_rr_bmask; } phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * @@ -3916,47 +4178,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcp_eq_hdl; } - mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL); - if (!mboxq) { - rc = -ENOMEM; - goto out_free_fcp_eq_hdl; - } - - /* Get the Supported Pages. It is always available. */ - lpfc_supported_pages(mboxq); - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (unlikely(rc)) { - rc = -EIO; - mempool_free(mboxq, phba->mbox_mem_pool); - goto out_free_fcp_eq_hdl; - } - - mqe = &mboxq->u.mqe; - memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), - LPFC_MAX_SUPPORTED_PAGES); - for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { - switch (pn_page[i]) { - case LPFC_SLI4_PARAMETERS: - phba->sli4_hba.pc_sli4_params.supported = 1; - break; - default: - break; - } - } - - /* Read the port's SLI4 Parameters capabilities if supported. 
*/ - if (phba->sli4_hba.pc_sli4_params.supported) - rc = lpfc_pc_sli4_params_get(phba, mboxq); - mempool_free(mboxq, phba->mbox_mem_pool); - if (rc) { - rc = -EIO; - goto out_free_fcp_eq_hdl; - } return rc; out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); +out_free_fcf_rr_bmask: + kfree(phba->fcf.fcf_rr_bmask); out_remove_rpi_hdrs: lpfc_sli4_remove_rpi_hdrs(phba); out_free_active_sgl: @@ -4002,6 +4229,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) lpfc_sli4_remove_rpi_hdrs(phba); lpfc_sli4_remove_rpis(phba); + /* Free eligible FCF index bmask */ + kfree(phba->fcf.fcf_rr_bmask); + /* Free the ELS sgl list */ lpfc_free_active_sgl(phba); lpfc_free_sgl_list(phba); @@ -4397,6 +4627,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba) /* The list order is used by later block SGL registraton */ spin_lock_irq(&phba->hbalock); + sglq_entry->state = SGL_FREED; list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; phba->sli4_hba.total_sglq_bufs++; @@ -4870,6 +5101,8 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); + phba->mbox_ext = (phba->slim2p.virt + + offsetof(struct lpfc_sli2_slim, mbx_ext_words)); phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); phba->IOCBs = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, IOCBs)); @@ -7573,21 +7806,23 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev) * @phba: pointer to lpfc hba data structure. * * This routine is called to prepare the SLI3 device for PCI slot recover. It - * aborts and stops all the on-going I/Os on the pci device. + * aborts all the outstanding SCSI I/Os to the pci device. **/ static void lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) { + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2723 PCI channel I/O abort preparing for recovery\n"); - /* Prepare for bringing HBA offline */ - lpfc_offline_prep(phba); - /* Clear sli active flag to prevent sysfs access to HBA */ - spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; - spin_unlock_irq(&phba->hbalock); - /* Stop and flush all I/Os and bring HBA offline */ - lpfc_offline(phba); + + /* + * There may be errored I/Os through HBA, abort all I/Os on txcmplq + * and let the SCSI mid-layer to retry them to recover. + */ + pring = &psli->ring[psli->fcp_ring]; + lpfc_sli_abort_iocb_ring(phba, pring); } /** @@ -7601,21 +7836,20 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba) static void lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2710 PCI channel disable preparing for reset\n"); + + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + + /* stop all timers */ + lpfc_stop_hba_timers(phba); + /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); - /* - * There may be I/Os dropped by the firmware. - * Error iocb (I/O) on txcmplq and let the SCSI layer - * retry it after re-establishing link. 
- */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); } /** @@ -7631,6 +7865,12 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2711 PCI channel permanent disable for failure\n"); + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + + /* stop all timers */ + lpfc_stop_hba_timers(phba); + /* Clean up all driver's outstanding SCSI I/Os */ lpfc_sli_flush_fcp_rings(phba); } @@ -7659,9 +7899,6 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - /* Block all SCSI devices' I/Os on the host */ - lpfc_scsi_dev_block(phba); - switch (state) { case pci_channel_io_normal: /* Non-fatal error, prepare for recovery */ @@ -7768,7 +8005,7 @@ lpfc_io_resume_s3(struct pci_dev *pdev) struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - /* Bring the device online */ + /* Bring device online, it will be no-op for non-fatal error resume */ lpfc_online(phba); /* Clean up Advanced Error Reporting (AER) if needed */ diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 954ba57970a3..bb59e9273126 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -35,6 +35,7 @@ #define LOG_VPORT 0x00004000 /* NPIV events */ #define LOF_SECURITY 0x00008000 /* Security events */ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ +#define LOG_FIP 0x00020000 /* FIP events */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) 
\ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 6c4dce1a30ca..e84dc33ca201 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -21,6 +21,7 @@ #include <linux/blkdev.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi_device.h> @@ -1215,7 +1216,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) phba->pcb->feature = FEATURE_INITIAL_SLI2; /* Setup Mailbox pointers */ - phba->pcb->mailBoxSize = sizeof(MAILBOX_t); + phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE; offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt; pdma_addr = phba->slim2p.phys + offset; phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr); @@ -1271,28 +1272,41 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * */ - if (phba->sli_rev == 3) { - phba->host_gp = &mb_slim->us.s3.host[0]; - phba->hbq_put = &mb_slim->us.s3.hbq_put[0]; - } else { - phba->host_gp = &mb_slim->us.s2.host[0]; + if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) { + phba->host_gp = &phba->mbox->us.s2.host[0]; phba->hbq_put = NULL; - } + offset = (uint8_t *)&phba->mbox->us.s2.host - + (uint8_t *)phba->slim2p.virt; + pdma_addr = phba->slim2p.phys + offset; + phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr); + phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr); + } else { + /* Always Host Group Pointer is in SLIM */ + mb->un.varCfgPort.hps = 1; - /* mask off BAR0's flag bits 0 - 3 */ - phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + - (void __iomem *)phba->host_gp - - (void __iomem *)phba->MBslimaddr; - if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) - phba->pcb->hgpAddrHigh = bar_high; - else - phba->pcb->hgpAddrHigh = 0; - /* write HGP data to SLIM at the required longword offset */ - memset(&hgp, 0, sizeof(struct lpfc_hgp)); + if (phba->sli_rev == 3) { + phba->host_gp = &mb_slim->us.s3.host[0]; + phba->hbq_put = &mb_slim->us.s3.hbq_put[0]; + } else { + phba->host_gp = &mb_slim->us.s2.host[0]; + phba->hbq_put = NULL; + } - for (i=0; i < phba->sli.num_rings; i++) { - lpfc_memcpy_to_slim(phba->host_gp + i, &hgp, + /* mask off BAR0's flag bits 0 - 3 */ + phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + + (void __iomem *)phba->host_gp - + (void __iomem *)phba->MBslimaddr; + if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) + phba->pcb->hgpAddrHigh = bar_high; + else + phba->pcb->hgpAddrHigh = 0; + /* write HGP data to SLIM at the required longword offset */ + memset(&hgp, 0, sizeof(struct lpfc_hgp)); + + for (i = 0; i < phba->sli.num_rings; i++) { + lpfc_memcpy_to_slim(phba->host_gp + i, &hgp, sizeof(*phba->host_gp)); + } } /* Setup Port Group offset */ @@ -1597,7 +1611,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) for (sgentry = 0; sgentry < sgecount; sgentry++) { lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge); phyaddr = getPaddr(sge.pa_hi, sge.pa_lo); - dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, mbox->sge_array->addr[sgentry], phyaddr); } /* Free the sge address array memory */ @@ -1655,7 +1669,7 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, } /* Setup for the none-embedded mbox command */ - pcount = (PAGE_ALIGN(length))/PAGE_SIZE; + pcount = (PAGE_ALIGN(length))/SLI4_PAGE_SIZE; pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? 
LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; /* Allocate record for keeping SGE virtual addresses */ @@ -1670,24 +1684,24 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { /* The DMA memory is always allocated in the length of a * page even though the last SGE might not fill up to a - * page, this is used as a priori size of PAGE_SIZE for + * page, this is used as a priori size of SLI4_PAGE_SIZE for * the later DMA memory free. */ - viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE, + viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE, &phyaddr, GFP_KERNEL); /* In case of malloc fails, proceed with whatever we have */ if (!viraddr) break; - memset(viraddr, 0, PAGE_SIZE); + memset(viraddr, 0, SLI4_PAGE_SIZE); mbox->sge_array->addr[pagen] = viraddr; /* Keep the first page for later sub-header construction */ if (pagen == 0) cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr; resid_len = length - alloc_len; - if (resid_len > PAGE_SIZE) { + if (resid_len > SLI4_PAGE_SIZE) { lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, - PAGE_SIZE); - alloc_len += PAGE_SIZE; + SLI4_PAGE_SIZE); + alloc_len += SLI4_PAGE_SIZE; } else { lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, resid_len); @@ -1748,7 +1762,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) } /** - * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd + * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd * @phba: pointer to lpfc hba data structure. * @fcf_index: index to fcf table. * @@ -1759,9 +1773,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) * NULL. **/ int -lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba, - struct lpfcMboxq *mboxq, - uint16_t fcf_index) +lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba, + struct lpfcMboxq *mboxq, + uint16_t fcf_index) { void *virt_addr; dma_addr_t phys_addr; @@ -1885,6 +1899,8 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name)); reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]); reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]); + reg_vfi->e_d_tov = vport->phba->fc_edtov; + reg_vfi->r_a_tov = vport->phba->fc_ratov; reg_vfi->bde.addrHigh = putPaddrHigh(phys); reg_vfi->bde.addrLow = putPaddrLow(phys); reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index a1b6db6016da..8f879e477e9d 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -20,6 +20,7 @@ *******************************************************************/ #include <linux/mempool.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index d20ae6b3b3cf..b90820a699fd 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -21,6 +21,7 @@ #include <linux/blkdev.h> #include <linux/pci.h> +#include <linux/slab.h> #include <linux/interrupt.h> #include <scsi/scsi.h> @@ -492,6 +493,9 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb, uint32_t els_cmd) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + struct lpfc_hba *phba = vport->phba; + struct lpfc_vport **vports; + int i, active_vlink_present = 0 ; /* Put ndlp in NPR state with 1 sec timeout for plogi, ACC logo */ /* Only 
call LOGO ACC for first LOGO, this avoids sending unnecessary @@ -504,15 +508,44 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_DID == Fabric_DID) && - vport->port_type == LPFC_NPIV_PORT) { + if (ndlp->nlp_DID == Fabric_DID) { + if (vport->port_state <= LPFC_FDISC) + goto out; lpfc_linkdown_port(vport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; + vport->fc_flag |= FC_VPORT_LOGO_RCVD; spin_unlock_irq(shost->host_lock); + vports = lpfc_create_vport_work_array(phba); + if (vports) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; + i++) { + if ((!(vports[i]->fc_flag & + FC_VPORT_LOGO_RCVD)) && + (vports[i]->port_state > LPFC_FDISC)) { + active_vlink_present = 1; + break; + } + } + lpfc_destroy_vport_work_array(phba, vports); + } - ndlp->nlp_last_elscmd = ELS_CMD_FDISC; + if (active_vlink_present) { + /* + * If there are other active VLinks present, + * re-instantiate the Vlink using FDISC. + */ + mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + spin_lock_irq(shost->host_lock); + ndlp->nlp_flag |= NLP_DELAY_TMO; + spin_unlock_irq(shost->host_lock); + ndlp->nlp_last_elscmd = ELS_CMD_FDISC; + vport->port_state = LPFC_FDISC; + } else { + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG; + spin_unlock_irq(shost->host_lock); + lpfc_retry_pport_discovery(phba); + } } else if ((!(ndlp->nlp_type & NLP_FABRIC) && ((ndlp->nlp_type & NLP_FCP_TARGET) || !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || @@ -525,6 +558,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } +out: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -603,11 +637,55 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_unreg_rpi(vport, ndlp); return 0; } +/** + * lpfc_release_rpi - Release an RPI by issuing unreg_login mailbox cmd. + * @phba : Pointer to lpfc_hba structure. + * @vport: Pointer to lpfc_vport structure. + * @rpi : rpi to be released. + * + * This function will send an unreg_login mailbox command to the firmware + * to release an rpi.
+ **/ +void +lpfc_release_rpi(struct lpfc_hba *phba, + struct lpfc_vport *vport, + uint16_t rpi) +{ + LPFC_MBOXQ_t *pmb; + int rc; + + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!pmb) + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "2796 mailbox memory allocation failed \n"); + else { + lpfc_unreg_login(phba, vport->vpi, rpi, pmb); + pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + mempool_free(pmb, phba->mbox_mem_pool); + } +} static uint32_t lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { + struct lpfc_hba *phba; + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + MAILBOX_t *mb; + uint16_t rpi; + + phba = vport->phba; + /* Release the RPI if reglogin completing */ + if (!(phba->pport->load_flag & FC_UNLOADING) && + (evt == NLP_EVT_CMPL_REG_LOGIN) && + (!pmb->u.mb.mbxStatus)) { + mb = &pmb->u.mb; + rpi = pmb->u.mb.un.varWords[0]; + lpfc_release_rpi(phba, vport, rpi); + } lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0271 Illegal State Transition: node x%x " "event x%x, state x%x Data: x%x x%x\n", @@ -943,6 +1021,18 @@ static uint32_t lpfc_cmpl_reglogin_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { + struct lpfc_hba *phba; + LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; + MAILBOX_t *mb = &pmb->u.mb; + uint16_t rpi; + + phba = vport->phba; + /* Release the RPI */ + if (!(phba->pport->load_flag & FC_UNLOADING) && + !mb->mbxStatus) { + rpi = pmb->u.mb.un.varWords[0]; + lpfc_release_rpi(phba, vport, rpi); + } return ndlp->nlp_state; } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 7f21b47db791..f4a3b2e79eea 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -19,6 +19,7 @@ * included with this package. 
* *******************************************************************/ #include <linux/pci.h> +#include <linux/slab.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <asm/unaligned.h> @@ -620,23 +621,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); struct lpfc_scsi_buf *psb, *next_psb; unsigned long iflag = 0; + struct lpfc_iocbq *iocbq; + int i; - spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); list_for_each_entry_safe(psb, next_psb, &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { if (psb->cur_iocbq.sli4_xritag == xri) { list_del(&psb->list); psb->exch_busy = 0; psb->status = IOSTAT_SUCCESS; - spin_unlock_irqrestore( - &phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_unlock( + &phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); lpfc_release_scsi_buf_s4(phba, psb); return; } } - spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + for (i = 1; i <= phba->sli.last_iotag; i++) { + iocbq = phba->sli.iocbq_lookup[i]; + + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || + (iocbq->iocb_flag & LPFC_IO_LIBDFC)) + continue; + if (iocbq->sli4_xritag != xri) + continue; + psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); + psb->exch_busy = 0; + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; + + } + spin_unlock_irqrestore(&phba->hbalock, iflag); } /** @@ -1006,6 +1024,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl; + struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde; dma_addr_t physaddr; @@ -1056,6 +1075,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) physaddr = sg_dma_address(sgel); if (phba->sli_rev == 3 && !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->iocb_flag & DSS_SECURITY_OP) && nseg <= LPFC_EXT_DATA_BDE_COUNT) { data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; data_bde->tus.f.bdeSize = sg_dma_len(sgel); @@ -1082,7 +1102,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * explicitly reinitialized since all iocb memory resources are reused. */ if (phba->sli_rev == 3 && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { + !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + !(iocbq->iocb_flag & DSS_SECURITY_OP)) { if (num_bde > LPFC_EXT_DATA_BDE_COUNT) { /* * The extended IOCB format can only fit 3 BDE or a BPL. 
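Editor's note: the hunk above ends on the constraint that the extended IOCB format can only fit 3 BDEs before the driver must fall back to a BPL (buffer pointer list) that the command merely points at. The following is a minimal userspace sketch of that decision, not the driver's actual structures; struct bde, struct cmd, cmd_attach_sg and MAX_EMBEDDED_BDE are simplified stand-ins for illustration only.

#include <stdio.h>
#include <stdlib.h>

#define MAX_EMBEDDED_BDE 3   /* plays the role of LPFC_EXT_DATA_BDE_COUNT */

struct bde {                 /* one data descriptor: address + length */
    unsigned long long addr;
    unsigned int len;
};

struct cmd {
    int embedded;            /* 1 = descriptors carried inline */
    struct bde inline_bde[MAX_EMBEDDED_BDE];
    struct bde *bpl;         /* external descriptor list otherwise */
    int nseg;
};

static int cmd_attach_sg(struct cmd *c, const struct bde *sg, int nseg)
{
    c->nseg = nseg;
    if (nseg <= MAX_EMBEDDED_BDE) {
        /* small I/O: copy descriptors straight into the command */
        for (int i = 0; i < nseg; i++)
            c->inline_bde[i] = sg[i];
        c->embedded = 1;
        return 0;
    }
    /* large I/O: allocate a buffer pointer list and reference it */
    c->bpl = malloc(sizeof(*sg) * nseg);
    if (!c->bpl)
        return -1;
    for (int i = 0; i < nseg; i++)
        c->bpl[i] = sg[i];
    c->embedded = 0;
    return 0;
}

int main(void)
{
    struct bde sg[5] = { {0x1000, 512}, {0x2000, 512}, {0x3000, 512},
                         {0x4000, 512}, {0x5000, 512} };
    struct cmd c = {0};

    if (cmd_attach_sg(&c, sg, 5) != 0)
        return 1;
    printf("nseg=%d embedded=%d\n", c.nseg, c.embedded);
    free(c.bpl);
    return 0;
}

With five segments the sketch takes the external-list path, mirroring why the hunk above switches from embedded BDEs to a BPL once the segment count exceeds what the IOCB can hold inline.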
@@ -1107,6 +1128,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } else { iocb_cmd->un.fcpi64.bdl.bdeSize = ((num_bde + 2) * sizeof(struct ulp_bde64)); + iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1); } fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); @@ -1119,37 +1141,47 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) } /* - * Given a scsi cmnd, determine the BlockGuard profile to be used - * with the cmd + * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it + * @sc: The SCSI command to examine + * @txopt: (out) BlockGuard operation for transmitted data + * @rxopt: (out) BlockGuard operation for received data + * + * Returns: zero on success; non-zero if tx and/or rx op cannot be determined + * */ static int -lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc) +lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, + uint8_t *txop, uint8_t *rxop) { uint8_t guard_type = scsi_host_get_guard(sc->device->host); - uint8_t ret_prof = LPFC_PROF_INVALID; + uint8_t ret = 0; if (guard_type == SHOST_DIX_GUARD_IP) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_INSERT: case SCSI_PROT_WRITE_STRIP: - ret_prof = LPFC_PROF_AST2; + *txop = BG_OP_IN_CSUM_OUT_NODIF; + *rxop = BG_OP_IN_NODIF_OUT_CSUM; break; case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - ret_prof = LPFC_PROF_A1; + *txop = BG_OP_IN_NODIF_OUT_CRC; + *rxop = BG_OP_IN_CRC_OUT_NODIF; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - ret_prof = LPFC_PROF_AST1; + *txop = BG_OP_IN_CSUM_OUT_CRC; + *rxop = BG_OP_IN_CRC_OUT_CSUM; break; case SCSI_PROT_NORMAL: default: lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9063 BLKGRD:Bad op/guard:%d/%d combination\n", + "9063 BLKGRD: Bad op/guard:%d/%d combination\n", scsi_get_prot_op(sc), guard_type); + ret = 1; break; } @@ -1157,12 +1189,14 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc) switch (scsi_get_prot_op(sc)) { case SCSI_PROT_READ_STRIP: case SCSI_PROT_WRITE_INSERT: - ret_prof = LPFC_PROF_A1; + *txop = BG_OP_IN_NODIF_OUT_CRC; + *rxop = BG_OP_IN_CRC_OUT_NODIF; break; case SCSI_PROT_READ_PASS: case SCSI_PROT_WRITE_PASS: - ret_prof = LPFC_PROF_C1; + *txop = BG_OP_IN_CRC_OUT_CRC; + *rxop = BG_OP_IN_CRC_OUT_CRC; break; case SCSI_PROT_READ_INSERT: @@ -1172,6 +1206,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc) lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9075 BLKGRD: Bad op/guard:%d/%d combination\n", scsi_get_prot_op(sc), guard_type); + ret = 1; break; } } else { @@ -1179,7 +1214,7 @@ lpfc_sc_to_sli_prof(struct lpfc_hba *phba, struct scsi_cmnd *sc) BUG(); } - return ret_prof; + return ret; } struct scsi_dif_tuple { @@ -1244,7 +1279,9 @@ lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask, * The buffer list consists of just one protection group described * below: * +-------------------------+ - * start of prot group --> | PDE_1 | + * start of prot group --> | PDE_5 | + * +-------------------------+ + * | PDE_6 | * +-------------------------+ * | Data BDE | * +-------------------------+ @@ -1262,30 +1299,49 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct ulp_bde64 *bpl, int datasegcnt) { struct scatterlist *sgde = NULL; /* s/g data entry */ - struct lpfc_pde *pde1 = NULL; + struct lpfc_pde5 *pde5 = NULL; + struct lpfc_pde6 *pde6 = NULL; dma_addr_t physaddr; - int i = 0, num_bde = 0; + int i = 0, num_bde = 0, status; int datadir = sc->sc_data_direction; - int prof = 
LPFC_PROF_INVALID; unsigned blksize; uint32_t reftag; uint16_t apptagmask, apptagval; + uint8_t txop, rxop; - pde1 = (struct lpfc_pde *) bpl; - prof = lpfc_sc_to_sli_prof(phba, sc); - - if (prof == LPFC_PROF_INVALID) + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) goto out; - /* extract some info from the scsi command for PDE1*/ + /* extract some info from the scsi command for pde*/ blksize = lpfc_cmd_blksize(sc); lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); - /* setup PDE1 with what we have */ - lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, - BG_EC_STOP_ERR); - lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag); + /* setup PDE5 with what we have */ + pde5 = (struct lpfc_pde5 *) bpl; + memset(pde5, 0, sizeof(struct lpfc_pde5)); + bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); + pde5->reftag = reftag; + /* advance bpl and increment bde count */ + num_bde++; + bpl++; + pde6 = (struct lpfc_pde6 *) bpl; + + /* setup PDE6 with the rest of the info */ + memset(pde6, 0, sizeof(struct lpfc_pde6)); + bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); + bf_set(pde6_optx, pde6, txop); + bf_set(pde6_oprx, pde6, rxop); + if (datadir == DMA_FROM_DEVICE) { + bf_set(pde6_ce, pde6, 1); + bf_set(pde6_re, pde6, 1); + bf_set(pde6_ae, pde6, 1); + } + bf_set(pde6_ai, pde6, 1); + bf_set(pde6_apptagval, pde6, apptagval); + + /* advance bpl and increment bde count */ num_bde++; bpl++; @@ -1320,15 +1376,17 @@ out: * The buffer list for this type consists of one or more of the * protection groups described below: * +-------------------------+ - * start of first prot group --> | PDE_1 | + * start of first prot group --> | PDE_5 | * +-------------------------+ - * | PDE_3 (Prot BDE) | + * | PDE_6 | + * +-------------------------+ + * | PDE_7 (Prot BDE) | * +-------------------------+ * | Data BDE | * +-------------------------+ * |more Data BDE's ... (opt)| * +-------------------------+ - * start of new prot group --> | PDE_1 | + * start of new prot group --> | PDE_5 | * +-------------------------+ * | ... 
| * +-------------------------+ @@ -1347,19 +1405,21 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, { struct scatterlist *sgde = NULL; /* s/g data entry */ struct scatterlist *sgpe = NULL; /* s/g prot entry */ - struct lpfc_pde *pde1 = NULL; + struct lpfc_pde5 *pde5 = NULL; + struct lpfc_pde6 *pde6 = NULL; struct ulp_bde64 *prot_bde = NULL; dma_addr_t dataphysaddr, protphysaddr; unsigned short curr_data = 0, curr_prot = 0; unsigned int split_offset, protgroup_len; unsigned int protgrp_blks, protgrp_bytes; unsigned int remainder, subtotal; - int prof = LPFC_PROF_INVALID; + int status; int datadir = sc->sc_data_direction; unsigned char pgdone = 0, alldone = 0; unsigned blksize; uint32_t reftag; uint16_t apptagmask, apptagval; + uint8_t txop, rxop; int num_bde = 0; sgpe = scsi_prot_sglist(sc); @@ -1372,31 +1432,47 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, return 0; } - prof = lpfc_sc_to_sli_prof(phba, sc); - if (prof == LPFC_PROF_INVALID) + status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); + if (status) goto out; - /* extract some info from the scsi command for PDE1*/ + /* extract some info from the scsi command */ blksize = lpfc_cmd_blksize(sc); lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag); split_offset = 0; do { - /* setup the first PDE_1 */ - pde1 = (struct lpfc_pde *) bpl; - - lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize, - BG_EC_STOP_ERR); - lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag); + /* setup PDE5 with what we have */ + pde5 = (struct lpfc_pde5 *) bpl; + memset(pde5, 0, sizeof(struct lpfc_pde5)); + bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR); + pde5->reftag = reftag; + /* advance bpl and increment bde count */ + num_bde++; + bpl++; + pde6 = (struct lpfc_pde6 *) bpl; + + /* setup PDE6 with the rest of the info */ + memset(pde6, 0, sizeof(struct lpfc_pde6)); + bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); + bf_set(pde6_optx, pde6, txop); + bf_set(pde6_oprx, pde6, rxop); + bf_set(pde6_ce, pde6, 1); + bf_set(pde6_re, pde6, 1); + bf_set(pde6_ae, pde6, 1); + bf_set(pde6_ai, pde6, 1); + bf_set(pde6_apptagval, pde6, apptagval); + + /* advance bpl and increment bde count */ num_bde++; bpl++; /* setup the first BDE that points to protection buffer */ prot_bde = (struct ulp_bde64 *) bpl; protphysaddr = sg_dma_address(sgpe); - prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr)); - prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr)); + prot_bde->addrHigh = le32_to_cpu(putPaddrLow(protphysaddr)); + prot_bde->addrLow = le32_to_cpu(putPaddrHigh(protphysaddr)); protgroup_len = sg_dma_len(sgpe); @@ -1407,10 +1483,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, protgrp_bytes = protgrp_blks * blksize; prot_bde->tus.f.bdeSize = protgroup_len; - if (datadir == DMA_TO_DEVICE) - prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; - else - prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I; + prot_bde->tus.f.bdeFlags = LPFC_PDE7_DESCRIPTOR; prot_bde->tus.w = le32_to_cpu(bpl->tus.w); curr_prot++; @@ -1462,6 +1535,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* Move to the next s/g segment if possible */ sgde = sg_next(sgde); + } /* are we done ? 
*/ @@ -1484,7 +1558,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, out: - return num_bde; } /* @@ -1575,7 +1648,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, case LPFC_PG_TYPE_NO_DIF: num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); - /* we shoud have 2 or more entries in buffer list */ + /* we should have 2 or more entries in buffer list */ if (num_bde < 2) goto err; break; @@ -1612,7 +1685,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); - /* we shoud have 3 or more entries in buffer list */ + /* we should have 3 or more entries in buffer list */ if (num_bde < 3) goto err; break; @@ -1806,8 +1879,8 @@ out: * field of @lpfc_cmd for device with SLI-4 interface spec. * * Return codes: - * 1 - Error - * 0 - Success + * 1 - Error + * 0 - Success **/ static int lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) @@ -1915,8 +1988,8 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * lpfc_hba struct. * * Return codes: - * 1 - Error - * 0 - Success + * 1 - Error + * 0 - Success **/ static inline int lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) @@ -2079,8 +2152,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (resp_info & RSP_LEN_VALID) { rsplen = be32_to_cpu(fcprsp->rspRspLen); - if ((rsplen != 0 && rsplen != 4 && rsplen != 8) || - (fcprsp->rspInfo3 != RSP_NO_FAILURE)) { + if (rsplen != 0 && rsplen != 4 && rsplen != 8) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, "2719 Invalid response length: " "tgt x%x lun x%x cmnd x%x rsplen x%x\n", @@ -2090,6 +2162,17 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, host_status = DID_ERROR; goto out; } + if (fcprsp->rspInfo3 != RSP_NO_FAILURE) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "2757 Protocol failure detected during " + "processing of FCP I/O op: " + "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n", + cmnd->device->id, + cmnd->device->lun, cmnd->cmnd[0], + fcprsp->rspInfo3); + host_status = DID_ERROR; + goto out; + } } if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 35e3b96d4e07..7a61455140b6 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -23,6 +23,7 @@ #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -211,7 +212,7 @@ lpfc_sli4_eq_get(struct lpfc_queue *q) struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; /* If the next EQE is not valid then we are done */ - if (!bf_get(lpfc_eqe_valid, eqe)) + if (!bf_get_le32(lpfc_eqe_valid, eqe)) return NULL; /* If the host has not yet processed the next entry then we are done */ if (((q->hba_index + 1) % q->entry_count) == q->host_index) @@ -246,7 +247,7 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) /* while there are valid entries */ while (q->hba_index != q->host_index) { temp_eqe = q->qe[q->host_index].eqe; - bf_set(lpfc_eqe_valid, temp_eqe, 0); + bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); released++; q->host_index = ((q->host_index + 1) % q->entry_count); } @@ -284,7 +285,7 @@ lpfc_sli4_cq_get(struct lpfc_queue *q) struct lpfc_cqe *cqe; /* If the next CQE is not valid then we are done */ - if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) + if (!bf_get_le32(lpfc_cqe_valid, 
q->qe[q->hba_index].cqe)) return NULL; /* If the host has not yet processed the next entry then we are done */ if (((q->hba_index + 1) % q->entry_count) == q->host_index) @@ -320,7 +321,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) /* while there are valid entries */ while (q->hba_index != q->host_index) { temp_qe = q->qe[q->host_index].cqe; - bf_set(lpfc_cqe_valid, temp_qe, 0); + bf_set_le32(lpfc_cqe_valid, temp_qe, 0); released++; q->host_index = ((q->host_index + 1) % q->entry_count); } @@ -494,7 +495,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) * * Returns sglq ponter = success, NULL = Failure. **/ -static struct lpfc_sglq * +struct lpfc_sglq * __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) { uint16_t adj_xri; @@ -526,6 +527,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba) return NULL; adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; + sglq->state = SGL_ALLOCATED; return sglq; } @@ -580,15 +582,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) else sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); if (sglq) { - if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) { + if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && + (sglq->state != SGL_XRI_ABORTED)) { spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); list_add(&sglq->list, &phba->sli4_hba.lpfc_abts_els_sgl_list); spin_unlock_irqrestore( &phba->sli4_hba.abts_sgl_list_lock, iflag); - } else + } else { + sglq->state = SGL_FREED; list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); + } } @@ -1654,6 +1659,8 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_INIT_VPI: case MBX_INIT_VFI: case MBX_RESUME_RPI: + case MBX_READ_EVENT_LOG_STATUS: + case MBX_READ_EVENT_LOG: ret = mbxCommand; break; default: @@ -2258,41 +2265,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_unlock_irqrestore(&phba->hbalock, iflag); } - if ((phba->sli_rev == LPFC_SLI_REV4) && - (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) { - /* Set cmdiocb flag for the exchange - * busy so sgl (xri) will not be - * released until the abort xri is - * received from hba, clear the - * LPFC_DRIVER_ABORTED bit in case - * it was driver initiated abort. - */ - spin_lock_irqsave(&phba->hbalock, - iflag); - cmdiocbp->iocb_flag &= - ~LPFC_DRIVER_ABORTED; - cmdiocbp->iocb_flag |= - LPFC_EXCHANGE_BUSY; - spin_unlock_irqrestore(&phba->hbalock, - iflag); - cmdiocbp->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - cmdiocbp->iocb.un.ulpWord[4] = - IOERR_ABORT_REQUESTED; - /* - * For SLI4, irsiocb contains NO_XRI - * in sli_xritag, it shall not affect - * releasing sgl (xri) process. - */ - saveq->iocb.ulpStatus = - IOSTAT_LOCAL_REJECT; - saveq->iocb.un.ulpWord[4] = - IOERR_SLI_ABORTED; - spin_lock_irqsave(&phba->hbalock, - iflag); - saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; - spin_unlock_irqrestore(&phba->hbalock, - iflag); + if (phba->sli_rev == LPFC_SLI_REV4) { + if (saveq->iocb_flag & + LPFC_EXCHANGE_BUSY) { + /* Set cmdiocb flag for the + * exchange busy so sgl (xri) + * will not be released until + * the abort xri is received + * from hba. + */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->iocb_flag |= + LPFC_EXCHANGE_BUSY; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } + if (cmdiocbp->iocb_flag & + LPFC_DRIVER_ABORTED) { + /* + * Clear LPFC_DRIVER_ABORTED + * bit in case it was driver + * initiated abort. 
+ */ + spin_lock_irqsave( + &phba->hbalock, iflag); + cmdiocbp->iocb_flag &= + ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + cmdiocbp->iocb.ulpStatus = + IOSTAT_LOCAL_REJECT; + cmdiocbp->iocb.un.ulpWord[4] = + IOERR_ABORT_REQUESTED; + /* + * For SLI4, irsiocb contains + * NO_XRI in sli_xritag, it + * shall not affect releasing + * sgl (xri) process. + */ + saveq->iocb.ulpStatus = + IOSTAT_LOCAL_REJECT; + saveq->iocb.un.ulpWord[4] = + IOERR_SLI_ABORTED; + spin_lock_irqsave( + &phba->hbalock, iflag); + saveq->iocb_flag |= + LPFC_DELAY_MEM_FREE; + spin_unlock_irqrestore( + &phba->hbalock, iflag); + } } } (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); @@ -2515,14 +2537,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq); - if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { - spin_unlock_irqrestore(&phba->hbalock, - iflag); - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, - &rspiocbq); - spin_lock_irqsave(&phba->hbalock, - iflag); - } + if (unlikely(!cmdiocbq)) + break; + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; + if (cmdiocbq->iocb_cmpl) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, + &rspiocbq); + spin_lock_irqsave(&phba->hbalock, iflag); + } break; case LPFC_UNSOL_IOCB: spin_unlock_irqrestore(&phba->hbalock, iflag); @@ -3091,6 +3115,12 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) /* Check to see if any errors occurred during init */ if ((status & HS_FFERM) || (i >= 20)) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2751 Adapter failed to restart, " + "status reg x%x, FW Data: A8 x%x AC x%x\n", + status, + readl(phba->MBslimaddr + 0xa8), + readl(phba->MBslimaddr + 0xac)); phba->link_state = LPFC_HBA_ERROR; retval = 1; } @@ -3278,6 +3308,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) if (retval != MBX_SUCCESS) { if (retval != MBX_BUSY) mempool_free(pmb, phba->mbox_mem_pool); + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2752 KILL_BOARD command failed retval %d\n", + retval); spin_lock_irq(&phba->hbalock); phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); @@ -4035,7 +4068,7 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) lpfc_sli_hba_setup_error: phba->link_state = LPFC_HBA_ERROR; - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0445 Firmware initialization failed\n"); return rc; } @@ -4265,7 +4298,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "2570 Failed to read FCoE parameters\n"); /* Issue READ_REV to collect vpd and FW information. */ - vpd_size = PAGE_SIZE; + vpd_size = SLI4_PAGE_SIZE; vpd = kzalloc(vpd_size, GFP_KERNEL); if (!vpd) { rc = -ENOMEM; @@ -4388,7 +4421,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); /* Read the port's service parameters. 
*/ - lpfc_read_sparam(phba, mboxq, vport->vpi); + rc = lpfc_read_sparam(phba, mboxq, vport->vpi); + if (rc) { + phba->link_state = LPFC_HBA_ERROR; + rc = -ENOMEM; + goto out_free_vpd; + } + mboxq->vport = vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); mp = (struct lpfc_dmabuf *) mboxq->context1; @@ -4483,6 +4522,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Post receive buffers to the device */ lpfc_sli4_rb_setup(phba); + /* Reset HBA FCF states after HBA reset */ + phba->fcf.fcf_flag = 0; + phba->fcf.current_rec.flag = 0; + /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, jiffies + HZ * (phba->fc_ratov * 2)); @@ -4850,9 +4893,34 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, mb->mbxOwner = OWN_CHIP; if (psli->sli_flag & LPFC_SLI_ACTIVE) { - /* First copy command data to host SLIM area */ + /* Populate mbox extension offset word. */ + if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { + *(((uint32_t *)mb) + pmbox->mbox_offset_word) + = (uint8_t *)phba->mbox_ext + - (uint8_t *)phba->mbox; + } + + /* Copy the mailbox extension data */ + if (pmbox->in_ext_byte_len && pmbox->context2) { + lpfc_sli_pcimem_bcopy(pmbox->context2, + (uint8_t *)phba->mbox_ext, + pmbox->in_ext_byte_len); + } + /* Copy command data to host SLIM area */ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); } else { + /* Populate mbox extension offset word. */ + if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) + *(((uint32_t *)mb) + pmbox->mbox_offset_word) + = MAILBOX_HBA_EXT_OFFSET; + + /* Copy the mailbox extension data */ + if (pmbox->in_ext_byte_len && pmbox->context2) { + lpfc_memcpy_to_slim(phba->MBslimaddr + + MAILBOX_HBA_EXT_OFFSET, + pmbox->context2, pmbox->in_ext_byte_len); + + } if (mb->mbxCommand == MBX_CONFIG_PORT) { /* copy command data into host mbox for cmpl */ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); @@ -4962,15 +5030,22 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (psli->sli_flag & LPFC_SLI_ACTIVE) { /* copy results back to user */ lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); + /* Copy the mailbox extension data */ + if (pmbox->out_ext_byte_len && pmbox->context2) { + lpfc_sli_pcimem_bcopy(phba->mbox_ext, + pmbox->context2, + pmbox->out_ext_byte_len); + } } else { /* First copy command data */ lpfc_memcpy_from_slim(mb, phba->MBslimaddr, MAILBOX_CMD_SIZE); - if ((mb->mbxCommand == MBX_DUMP_MEMORY) && - pmbox->context2) { - lpfc_memcpy_from_slim((void *)pmbox->context2, - phba->MBslimaddr + DMP_RSP_OFFSET, - mb->un.varDmp.word_cnt); + /* Copy the mailbox extension data */ + if (pmbox->out_ext_byte_len && pmbox->context2) { + lpfc_memcpy_from_slim(pmbox->context2, + phba->MBslimaddr + + MAILBOX_HBA_EXT_OFFSET, + pmbox->out_ext_byte_len); } } @@ -7063,13 +7138,11 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ list_del_init(&abort_iocb->list); pring->txcmplq_cnt--; - spin_unlock_irq(&phba->hbalock); /* Firmware could still be in progress of DMAing * payload, so don't free data buffer till after * a hbeat. 
*/ - spin_lock_irq(&phba->hbalock); abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; spin_unlock_irq(&phba->hbalock); @@ -7077,7 +7150,8 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED; (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); - } + } else + spin_unlock_irq(&phba->hbalock); } lpfc_sli_release_iocbq(phba, cmdiocb); @@ -7436,6 +7510,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, { wait_queue_head_t *pdone_q; unsigned long iflags; + struct lpfc_scsi_buf *lpfc_cmd; spin_lock_irqsave(&phba->hbalock, iflags); cmdiocbq->iocb_flag |= LPFC_IO_WAKE; @@ -7443,6 +7518,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, &rspiocbq->iocb, sizeof(IOCB_t)); + /* Set the exchange busy flag for task management commands */ + if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && + !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { + lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, + cur_iocbq); + lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; + } + pdone_q = cmdiocbq->context_un.wait_queue; if (pdone_q) wake_up(pdone_q); @@ -8083,6 +8166,12 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) if (pmb->mbox_cmpl) { lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE); + if (pmb->out_ext_byte_len && + pmb->context2) + lpfc_sli_pcimem_bcopy( + phba->mbox_ext, + pmb->context2, + pmb->out_ext_byte_len); } if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; @@ -8933,17 +9022,17 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) int ecount = 0; uint16_t cqid; - if (bf_get(lpfc_eqe_major_code, eqe) != 0) { + if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0359 Not a valid slow-path completion " "event: majorcode=x%x, minorcode=x%x\n", - bf_get(lpfc_eqe_major_code, eqe), - bf_get(lpfc_eqe_minor_code, eqe)); + bf_get_le32(lpfc_eqe_major_code, eqe), + bf_get_le32(lpfc_eqe_minor_code, eqe)); return; } /* Get the reference to the corresponding CQ */ - cqid = bf_get(lpfc_eqe_resource_id, eqe); + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); /* Search for completion queue pointer matching this cqid */ speq = phba->sli4_hba.sp_eq; @@ -9061,6 +9150,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, /* Fake the irspiocb and copy necessary response information */ lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); + if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { + spin_lock_irqsave(&phba->hbalock, iflags); + cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; + spin_unlock_irqrestore(&phba->hbalock, iflags); + } + /* Pass the cmd_iocb and the rsp state to the upper layer */ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); } @@ -9165,12 +9260,12 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, uint16_t cqid; int ecount = 0; - if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0)) { + if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0366 Not a valid fast-path completion " "event: majorcode=x%x, minorcode=x%x\n", - bf_get(lpfc_eqe_major_code, eqe), - bf_get(lpfc_eqe_minor_code, eqe)); + bf_get_le32(lpfc_eqe_major_code, eqe), + bf_get_le32(lpfc_eqe_minor_code, eqe)); return; } @@ -9183,7 +9278,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, } /* Get the reference to 
the corresponding CQ */ - cqid = bf_get(lpfc_eqe_resource_id, eqe); + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); if (unlikely(cqid != cq->queue_id)) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0368 Miss-matched fast-path completion " @@ -9450,7 +9545,7 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue) while (!list_empty(&queue->page_list)) { list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, list); - dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE, + dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, dmabuf->virt, dmabuf->phys); kfree(dmabuf); } @@ -9476,13 +9571,17 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, struct lpfc_dmabuf *dmabuf; int x, total_qe_count; void *dma_pointer; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; queue = kzalloc(sizeof(struct lpfc_queue) + (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); if (!queue) return NULL; - queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE; + queue->page_count = (ALIGN(entry_size * entry_count, + hw_page_size))/hw_page_size; INIT_LIST_HEAD(&queue->list); INIT_LIST_HEAD(&queue->page_list); INIT_LIST_HEAD(&queue->child_list); @@ -9491,19 +9590,19 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, if (!dmabuf) goto out_fail; dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - PAGE_SIZE, &dmabuf->phys, + hw_page_size, &dmabuf->phys, GFP_KERNEL); if (!dmabuf->virt) { kfree(dmabuf); goto out_fail; } - memset(dmabuf->virt, 0, PAGE_SIZE); + memset(dmabuf->virt, 0, hw_page_size); dmabuf->buffer_tag = x; list_add_tail(&dmabuf->list, &queue->page_list); /* initialize queue's entry array */ dma_pointer = dmabuf->virt; for (; total_qe_count < entry_count && - dma_pointer < (PAGE_SIZE + dmabuf->virt); + dma_pointer < (hw_page_size + dmabuf->virt); total_qe_count++, dma_pointer += entry_size) { queue->qe[total_qe_count].address = dma_pointer; } @@ -9548,6 +9647,10 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; uint16_t dmult; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -9597,6 +9700,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) break; } list_for_each_entry(dmabuf, &eq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = @@ -9659,6 +9763,11 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -9696,6 +9805,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, break; } list_for_each_entry(dmabuf, &cq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = @@ -9735,9 +9845,70 @@ out: } /** + * lpfc_mq_create_fb_init - Send 
MCC_CREATE without async events registration + * @phba: HBA structure that indicates port to create a queue on. + * @mq: The queue structure to use to create the mailbox queue. + * @mbox: An allocated pointer to type LPFC_MBOXQ_t + * @cq: The completion queue to associate with this mq. + * + * This function provides failback (fb) functionality when the + * mq_create_ext fails on older FW generations. Its purpose is identical + * to mq_create_ext otherwise. + * + * This routine cannot fail as all attributes were previously accessed and + * initialized in mq_create_ext. + **/ +static void +lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, + LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) +{ + struct lpfc_mbx_mq_create *mq_create; + struct lpfc_dmabuf *dmabuf; + int length; + + length = (sizeof(struct lpfc_mbx_mq_create) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_MQ_CREATE, + length, LPFC_SLI4_MBX_EMBED); + mq_create = &mbox->u.mqe.un.mq_create; + bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, + mq->page_count); + bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, + cq->queue_id); + bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); + switch (mq->entry_count) { + case 16: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_16); + break; + case 32: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_32); + break; + case 64: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_64); + break; + case 128: + bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + LPFC_MQ_CNT_128); + break; + } + list_for_each_entry(dmabuf, &mq->page_list, list) { + mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + putPaddrLow(dmabuf->phys); + mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + putPaddrHigh(dmabuf->phys); + } +} + +/** * lpfc_mq_create - Create a mailbox Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @mq: The queue structure to use to create the mailbox queue. + * @cq: The completion queue to associate with this mq. + * @subtype: The queue's subtype. * * This function creates a mailbox queue, as detailed in @mq, on a port, * described by @phba by sending a MQ_CREATE mailbox command to the HBA. @@ -9753,31 +9924,43 @@ out: * memory this function will return ENOMEM. If the queue create mailbox command * fails this function will return ENXIO.
**/ -uint32_t +int32_t lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, struct lpfc_queue *cq, uint32_t subtype) { struct lpfc_mbx_mq_create *mq_create; + struct lpfc_mbx_mq_create_ext *mq_create_ext; struct lpfc_dmabuf *dmabuf; LPFC_MBOXQ_t *mbox; int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) return -ENOMEM; - length = (sizeof(struct lpfc_mbx_mq_create) - + length = (sizeof(struct lpfc_mbx_mq_create_ext) - sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_MQ_CREATE, + LPFC_MBOX_OPCODE_MQ_CREATE_EXT, length, LPFC_SLI4_MBX_EMBED); - mq_create = &mbox->u.mqe.un.mq_create; - bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, + + mq_create_ext = &mbox->u.mqe.un.mq_create_ext; + bf_set(lpfc_mbx_mq_create_ext_num_pages, &mq_create_ext->u.request, mq->page_count); - bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, - cq->queue_id); - bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); + bf_set(lpfc_mbx_mq_create_ext_async_evt_link, &mq_create_ext->u.request, + 1); + bf_set(lpfc_mbx_mq_create_ext_async_evt_fcfste, + &mq_create_ext->u.request, 1); + bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, + &mq_create_ext->u.request, 1); + bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, + cq->queue_id); + bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); switch (mq->entry_count) { default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, @@ -9787,31 +9970,47 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, return -EINVAL; /* otherwise default to smallest count (drop through) */ case 16: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, LPFC_MQ_CNT_16); break; case 32: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, LPFC_MQ_CNT_32); break; case 64: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, LPFC_MQ_CNT_64); break; case 128: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, + bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, LPFC_MQ_CNT_128); break; } list_for_each_entry(dmabuf, &mq->page_list, list) { - mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = + memset(dmabuf->virt, 0, hw_page_size); + mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); - mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = + mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); } rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; + mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, + &mq_create_ext->u.response); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "2795 MQ_CREATE_EXT failed with " + "status x%x. 
Failback to MQ_CREATE.\n", + rc); + lpfc_mq_create_fb_init(phba, mq, mbox, cq); + mq_create = &mbox->u.mqe.un.mq_create; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); + shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; + mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, + &mq_create->u.response); + } + /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); if (shdr_status || shdr_add_status || rc) { @@ -9822,7 +10021,6 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, status = -ENXIO; goto out; } - mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response); if (mq->queue_id == 0xFFFF) { status = -ENXIO; goto out; @@ -9871,6 +10069,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -9886,6 +10088,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, cq->queue_id); list_for_each_entry(dmabuf, &wq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = @@ -9954,6 +10157,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, int rc, length, status = 0; uint32_t shdr_status, shdr_add_status; union lpfc_sli4_cfg_shdr *shdr; + uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; + + if (!phba->sli4_hba.pc_sli4_params.supported) + hw_page_size = SLI4_PAGE_SIZE; if (hrq->entry_count != drq->entry_count) return -EINVAL; @@ -9998,6 +10205,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, LPFC_HDR_BUF_SIZE); list_for_each_entry(dmabuf, &hrq->page_list, list) { + memset(dmabuf->virt, 0, hw_page_size); rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = @@ -10570,7 +10778,7 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - if (reqlen > PAGE_SIZE) { + if (reqlen > SLI4_PAGE_SIZE) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "2559 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); @@ -10676,7 +10884,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, /* Calculate the requested length of the dma memory */ reqlen = cnt * sizeof(struct sgl_page_pairs) + sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - if (reqlen > PAGE_SIZE) { + if (reqlen > SLI4_PAGE_SIZE) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0217 Block sgl registration required DMA " "size (%d) great than a page\n", reqlen); @@ -11512,8 +11720,8 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, * * This routine is invoked to post rpi header templates to the * HBA consistent with the SLI-4 interface spec. 
This routine - * posts a PAGE_SIZE memory region to the port to hold up to - * PAGE_SIZE modulo 64 rpi context headers. + * posts a SLI4_PAGE_SIZE memory region to the port to hold up to + * SLI4_PAGE_SIZE modulo 64 rpi context headers. * * This routine does not require any locks. It's usage is expected * to be driver load or reset recovery when the driver is @@ -11616,8 +11824,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) * * This routine is invoked to post rpi header templates to the * HBA consistent with the SLI-4 interface spec. This routine - * posts a PAGE_SIZE memory region to the port to hold up to - * PAGE_SIZE modulo 64 rpi context headers. + * posts a SLI4_PAGE_SIZE memory region to the port to hold up to + * SLI4_PAGE_SIZE modulo 64 rpi context headers. * * Returns * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful @@ -11941,15 +12149,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, } /** - * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. + * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. * @phba: pointer to lpfc hba data structure. * @fcf_index: FCF table entry offset. * - * This routine is invoked to read up to @fcf_num of FCF record from the - * device starting with the given @fcf_index. + * This routine is invoked to scan the entire FCF table by reading FCF + * record and processing it one at a time starting from the @fcf_index + * for initial FCF discovery or fast FCF failover rediscovery. + * + * Return 0 if the mailbox command is submitted sucessfully, none 0 + * otherwise. **/ int -lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) +lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) { int rc = 0, error; LPFC_MBOXQ_t *mboxq; @@ -11961,17 +12173,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) "2000 Failed to allocate mbox for " "READ_FCF cmd\n"); error = -ENOMEM; - goto fail_fcfscan; + goto fail_fcf_scan; } /* Construct the read FCF record mailbox command */ - rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index); + rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); if (rc) { error = -EINVAL; - goto fail_fcfscan; + goto fail_fcf_scan; } /* Issue the mailbox command asynchronously */ mboxq->vport = phba->pport; - mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; + mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) error = -EIO; @@ -11979,9 +12191,15 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) spin_lock_irq(&phba->hbalock); phba->hba_flag |= FCF_DISC_INPROGRESS; spin_unlock_irq(&phba->hbalock); + /* Reset FCF round robin index bmask for new scan */ + if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) { + memset(phba->fcf.fcf_rr_bmask, 0, + sizeof(*phba->fcf.fcf_rr_bmask)); + phba->fcf.eligible_fcf_cnt = 0; + } error = 0; } -fail_fcfscan: +fail_fcf_scan: if (error) { if (mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); @@ -11994,6 +12212,181 @@ fail_fcfscan: } /** + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. + * @phba: pointer to lpfc hba data structure. + * @fcf_index: FCF table entry offset. + * + * This routine is invoked to read an FCF record indicated by @fcf_index + * and to use it for FLOGI round robin FCF failover. + * + * Return 0 if the mailbox command is submitted sucessfully, none 0 + * otherwise. 
 /**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI round robin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2763 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for FLOGI round robin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2758 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to get the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals the
+ * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * shall be returned, otherwise, the next eligible FCF record's index
+ * shall be returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+	uint16_t next_fcf_index;
+
+	/* Search from the currently registered FCF index */
+	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+				       LPFC_SLI4_FCF_TBL_INDX_MAX,
+				       phba->fcf.current_rec.fcf_indx);
+	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
+	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+	/* Round robin failover stop condition */
+	if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
+		return LPFC_FCOE_FCF_NEXT_NONE;
+
+	return next_fcf_index;
+}
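lpfc_sli4_fcf_rr_next_index_get() above is a plain circular bitmap search: scan the eligibility bmask from the current index, wrap to bit 0 at the end of the table, and stop with LPFC_FCOE_FCF_NEXT_NONE once the search comes back around to the initial index. A self-contained model of the same search over a 32-entry mask, where find_next_set() stands in for the kernel's find_next_bit():

#include <stdint.h>
#include <stdio.h>

#define TBL_INDX_MAX   32      /* mirrors LPFC_SLI4_FCF_TBL_INDX_MAX */
#define FCF_NEXT_NONE  0xFFFF  /* mirrors LPFC_FCOE_FCF_NEXT_NONE */

/* Userspace stand-in for find_next_bit(): first set bit >= start */
static unsigned int find_next_set(uint32_t mask, unsigned int start)
{
    unsigned int i;

    for (i = start; i < TBL_INDX_MAX; i++)
        if (mask & (1u << i))
            return i;
    return TBL_INDX_MAX;       /* none found */
}

static unsigned int rr_next_index(uint32_t bmask, unsigned int current_indx,
                                  unsigned int init_indx)
{
    unsigned int next = find_next_set(bmask, current_indx);

    if (next >= TBL_INDX_MAX)           /* wrap around to bit 0 */
        next = find_next_set(bmask, 0);
    if (next == init_indx)              /* came full circle: stop */
        return FCF_NEXT_NONE;
    return next;
}

int main(void)
{
    uint32_t bmask = (1u << 3) | (1u << 9); /* indexes 3 and 9 eligible */

    printf("next after 3: %u\n", rr_next_index(bmask, 4, 3));  /* -> 9 */
    printf("next after 9: %u\n", rr_next_index(bmask, 10, 3)); /* -> 65535 */
    return 0;
}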
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine sets the FCF record index into the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit is successfully set, otherwise, it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2610 HBA FCF index reached driver's "
+				"bookkeeping dimension: fcf_index:%d, "
+				"driver_bmask_max:%d\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return -EINVAL;
+	}
+	/* Set the eligible FCF record index bmask */
+	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2762 HBA FCF index goes beyond driver's "
+				"bookkeeping dimension: fcf_index:%d, "
+				"driver_bmask_max:%d\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return;
+	}
+	/* Clear the eligible FCF record index bmask */
+	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+}
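Both helpers above guard the bitmap with the same bounds check against LPFC_SLI4_FCF_TBL_INDX_MAX before touching a bit; setting an out-of-range index is an error while clearing one is a no-op. A compact userspace equivalent using a plain uint32_t in place of the kernel bitmap (names invented for the sketch):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define TBL_INDX_MAX 32        /* mirrors LPFC_SLI4_FCF_TBL_INDX_MAX */

static uint32_t rr_bmask;      /* stands in for phba->fcf.fcf_rr_bmask */

static int rr_index_set(unsigned int idx)
{
    if (idx >= TBL_INDX_MAX) {
        fprintf(stderr, "index %u beyond bmask dimension %u\n",
                idx, TBL_INDX_MAX);
        return -EINVAL;        /* mirror the driver's bounds check */
    }
    rr_bmask |= 1u << idx;
    return 0;
}

static void rr_index_clear(unsigned int idx)
{
    if (idx >= TBL_INDX_MAX)
        return;                /* out-of-range clear is a no-op, as above */
    rr_bmask &= ~(1u << idx);
}

int main(void)
{
    rr_index_set(5);
    rr_index_clear(5);
    return rr_index_set(40) ? 0 : 1;   /* expect the -EINVAL path */
}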
+
+/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 *
@@ -12014,21 +12407,40 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
 				 &redisc_fcf->header.cfg_shdr.response);
 	if (shdr_status || shdr_add_status) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 				"2746 Requesting for FCF rediscovery failed "
 				"status x%x add_status x%x\n",
 				shdr_status, shdr_add_status);
-		/*
-		 * Request failed, last resort to re-try current
-		 * registered FCF entry
-		 */
-		lpfc_retry_pport_discovery(phba);
-	} else
+		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * CVL event triggered FCF rediscover request failed,
+			 * last resort to re-try current registered FCF entry.
+			 */
+			lpfc_retry_pport_discovery(phba);
+		} else {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * DEAD FCF event triggered FCF rediscover request
+			 * failed, last resort to fail over as a link down
+			 * to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		}
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2775 Start FCF rediscovery quiescent period "
+				"wait timer before scanning FCF table\n");
 		/*
 		 * Start FCF rediscovery wait timer for pending FCF
 		 * before rescanning the FCF record table.
 		 */
 		lpfc_fcf_redisc_wait_start_timer(phba);
+	}
 	mempool_free(mbox, phba->mbox_mem_pool);
 }
@@ -12047,6 +12459,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
 	int rc, length;
 
+	/* Cancel retry delay timers to all vports before FCF rediscover */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12078,6 +12493,31 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
 }
 
 /**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine as a last resort to the FCF DEAD
+ * event when driver failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+	uint32_t link_state;
+
+	/*
+	 * Last resort as FCF DEAD event failover will treat this as
+	 * a link down, but save the link state because we don't want
+	 * it to be changed to Link Down unless it is already down.
+	 */
+	link_state = phba->link_state;
+	lpfc_linkdown(phba);
+	phba->link_state = link_state;
+
+	/* Unregister FCF if no devices connected to it */
+	lpfc_unregister_unused_fcf(phba);
+}
+
+/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
@@ -12221,6 +12661,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 	struct lpfc_hba *phba = vport->phba;
 	LPFC_MBOXQ_t *mb, *nextmb;
 	struct lpfc_dmabuf *mp;
+	struct lpfc_nodelist *ndlp;
 
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
@@ -12237,6 +12678,11 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
 				kfree(mp);
 			}
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			if (ndlp) {
+				lpfc_nlp_put(ndlp);
+				mb->context2 = NULL;
+			}
 		}
 		list_del(&mb->list);
 		mempool_free(mb, phba->mbox_mem_pool);
@@ -12246,6 +12692,15 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
 		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
+			ndlp = (struct lpfc_nodelist *) mb->context2;
+			if (ndlp) {
+				lpfc_nlp_put(ndlp);
+				mb->context2 = NULL;
+			}
+			/* Unregister the RPI when mailbox complete */
+			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
+		}
 	}
 	spin_unlock_irq(&phba->hbalock);
 }
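lpfc_sli4_fcf_dead_failthrough() above reuses the link-down path purely for its cleanup side effects, so it snapshots phba->link_state first and restores it afterwards; a link that was up must not end up reported as down just because the FCF failover fell through. A toy model of that save/run/restore shape (stand-in types and helpers, not driver code):

#include <stdio.h>

enum link_state { LINK_DOWN, LINK_UP };

static enum link_state link_state = LINK_UP;

/* Stand-in for lpfc_linkdown(): cleanup that also marks the link down */
static void linkdown_cleanup(void)
{
    link_state = LINK_DOWN;
    /* ... tear down logins, flush outstanding I/O, etc. ... */
}

static void fcf_dead_failthrough(void)
{
    enum link_state saved = link_state;  /* snapshot before cleanup */

    linkdown_cleanup();
    link_state = saved;   /* restore: only the side effects were wanted */
}

int main(void)
{
    fcf_dead_failthrough();
    printf("link state still %s\n", link_state == LINK_UP ? "UP" : "DOWN");
    return 0;
}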
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index dfcf5437d1f5..e3792151ca06 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -36,6 +36,7 @@ struct lpfc_cq_event {
 		struct lpfc_acqe_link		acqe_link;
 		struct lpfc_acqe_fcoe		acqe_fcoe;
 		struct lpfc_acqe_dcbx		acqe_dcbx;
+		struct lpfc_acqe_grp5		acqe_grp5;
 		struct lpfc_rcqe		rcqe_cmpl;
 		struct sli4_wcqe_xri_aborted	wcqe_axri;
 		struct lpfc_wcqe_complete	wcqe_cmpl;
@@ -62,6 +63,7 @@ struct lpfc_iocbq {
#define LPFC_DELAY_MEM_FREE	0x20    /* Defer free'ing of FC data */
#define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
#define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
+#define DSS_SECURITY_OP		0x100   /* security IO */
 
#define LPFC_FIP_ELS_ID_MASK	0xc000  /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT	14
@@ -109,6 +111,9 @@ typedef struct lpfcMboxq {
 	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
 	uint8_t mbox_flag;
+	uint16_t in_ext_byte_len;
+	uint16_t out_ext_byte_len;
+	uint8_t  mbox_offset_word;
 	struct lpfc_mcqe mcqe;
 	struct lpfc_mbx_nembed_sge_virt *sge_array;
 } LPFC_MBOXQ_t;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 86308836600f..58bb4c81b54e 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -153,15 +153,28 @@ struct lpfc_fcf {
#define FCF_REGISTERED	0x02 /* FCF registered with FW */
#define FCF_SCAN_DONE	0x04 /* FCF table scan done */
#define FCF_IN_USE	0x08 /* At least one discovery completed */
-#define FCF_REDISC_PEND	0x10 /* FCF rediscovery pending */
-#define FCF_REDISC_EVT	0x20 /* FCF rediscovery event to worker thread */
-#define FCF_REDISC_FOV	0x40 /* Post FCF rediscovery fast failover */
+#define FCF_INIT_DISC	0x10 /* Initial FCF discovery */
+#define FCF_DEAD_DISC	0x20 /* FCF DEAD fast FCF failover discovery */
+#define FCF_ACVL_DISC	0x40 /* All CVL fast FCF failover discovery */
+#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
+#define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
 	uint32_t addr_mode;
+	uint16_t fcf_rr_init_indx;
+	uint32_t eligible_fcf_cnt;
 	struct lpfc_fcf_rec current_rec;
 	struct lpfc_fcf_rec failover_rec;
 	struct timer_list redisc_wait;
+	unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
 };
 
+/*
+ * Maximum FCF table index, it is for driver internal bookkeeping; it
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX	32
+
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION	1
#define LPFC_REGION23_LAST_REC	0xff
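The new fcf_rr_bmask member is an unsigned long * sized from LPFC_SLI4_FCF_TBL_INDX_MAX, so the allocation rounds the index count up to whole longs and starts out zeroed (no index eligible). A small sketch of how such a bmask could be sized and allocated in userspace; the macros mirror common kernel idioms but are defined locally for the example:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define TBL_INDX_MAX     32    /* mirrors LPFC_SLI4_FCF_TBL_INDX_MAX */
#define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
    /* calloc() gives a zeroed mask: no FCF index eligible yet */
    unsigned long *bmask = calloc(BITS_TO_LONGS(TBL_INDX_MAX),
                                  sizeof(unsigned long));

    if (!bmask)
        return 1;
    printf("%zu long(s) back %d index bits\n",
           (size_t)BITS_TO_LONGS(TBL_INDX_MAX), TBL_INDX_MAX);
    free(bmask);
    return 0;
}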
@@ -431,11 +444,18 @@ enum lpfc_sge_type {
 	SCSI_BUFF_TYPE
 };
 
+enum lpfc_sgl_state {
+	SGL_FREED,
+	SGL_ALLOCATED,
+	SGL_XRI_ABORTED
+};
+
struct lpfc_sglq {
 	/* lpfc_sglqs are used in double linked lists */
 	struct list_head list;
 	struct list_head clist;
 	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+	enum lpfc_sgl_state state;
 	uint16_t iotag;        /* pre-assigned IO tag */
 	uint16_t sli4_xritag;  /* pre-assigned XRI, (OXID) tag. */
 	struct sli4_sge *sgl;  /* pre-assigned SGL */
@@ -463,8 +483,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
			   struct lpfc_mbx_sge *);
-int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
-				  uint16_t);
+int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
+			       uint16_t);
 
void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -473,8 +493,8 @@ void lpfc_sli4_queue_free(struct lpfc_queue *);
uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
			struct lpfc_queue *, uint32_t, uint32_t);
-uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
-			struct lpfc_queue *, uint32_t);
+int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+		       struct lpfc_queue *, uint32_t);
uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
			struct lpfc_queue *, uint32_t);
uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
@@ -523,8 +543,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
-int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
-void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac276aa46fba..5294c3a515a1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
 * included with this package.
 *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.9"
+#define LPFC_DRIVER_VERSION "8.3.12"
#define LPFC_DRIVER_NAME		"lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dc86e873102a..ab91359bde20 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
 
#include <scsi/scsi.h>
@@ -123,7 +124,12 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
 	}
 	mb = &pmb->u.mb;
 
-	lpfc_read_sparam(phba, pmb, vport->vpi);
+	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
 	/*
 	 * Grab buffer pointer and clear context1 so we can use
 	 * lpfc_sli_issue_mbox_wait
@@ -757,7 +763,9 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
 	spin_lock_irq(&phba->hbalock);
 	list_for_each_entry(port_iterator, &phba->port_list, listentry) {
 		if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
-			lpfc_printf_vlog(port_iterator, KERN_WARNING, LOG_VPORT,
+			if (!(port_iterator->load_flag & FC_UNLOADING))
+				lpfc_printf_vlog(port_iterator, KERN_ERR,
+						 LOG_VPORT,
 				"1801 Create vport work array FAILED: "
 				"cannot do scsi_host_get\n");
 			continue;
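The lpfc_create_vport_work_array() change at the end silences an expected failure: while a vport is unloading, scsi_host_get() legitimately fails, so only the unexpected case deserves a KERN_ERR message. A minimal model of that guard (the flag and helpers are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

#define FC_UNLOADING 0x1

struct vport { unsigned int load_flag; };

/* Stand-in for scsi_host_get(): pretend the reference grab failed */
static bool host_get(struct vport *v)
{
    (void)v;
    return false;
}

static void build_work_array(struct vport *v)
{
    if (!host_get(v)) {
        if (!(v->load_flag & FC_UNLOADING))  /* expected during unload: quiet */
            fprintf(stderr, "1801 Create vport work array FAILED\n");
        return;
    }
    /* ... add the vport to the work array ... */
}

int main(void)
{
    struct vport v = { .load_flag = FC_UNLOADING };

    build_work_array(&v);   /* no message: the vport is unloading */
    return 0;
}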