author:    Krishna Gudipati <kgudipat@brocade.com>     2011-06-14 00:52:12 +0200
committer: James Bottomley <JBottomley@Parallels.com>  2011-06-29 22:46:19 +0200
commit:    775c7742adfd7726f05914198bf33eaa3b9f64bb (patch)
tree:      87c9373091ca11b5808dc28a2acb7d4a4efe33a2
parent:    [SCSI] bfa: Changes to support vport disable and enable operations. (diff)
download:  linux-775c7742adfd7726f05914198bf33eaa3b9f64bb.tar.xz
           linux-775c7742adfd7726f05914198bf33eaa3b9f64bb.zip
[SCSI] bfa: IOC and PLL init changes for Brocade-1860 Fabric Adapter.
- Introduced an IOC poll mechanism which replaces the current interrupt
  based FW READY method.
- The timer based poll routine in the IOC queries the ioc_fwstate register
  to see if there is a state change in FW, and sends the READY event
  (a condensed sketch of this flow follows the sign-offs).
- Bug fixes in the new ASIC PLL initialization.
- Added logic to handle CPE/RME queue interrupts before iocfc config is done:
  1. Use the queue_process flag to see if iocfc configuration is done
     in INTX mode.
  2. Split the MSIX handler installation in two - one for the IOC interrupt
     handler and the other for the CPE/RME queue handlers - and delay
     assigning the queue handlers until iocfc config is done in MSIX mode.
Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
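
For orientation, the new polling flow can be read on its own. The function below is condensed from the bfa_ioc.c hunk further down in this patch; the comments are editorial and are not part of the patch itself.

/*
 * Condensed from the bfa_ioc.c changes below: instead of waiting for a
 * BFI_IOC_I2H_READY_EVENT interrupt, the IOCPF state machine re-arms a
 * timer and re-reads ioc_fwstate until the firmware state changes or
 * BFA_IOC_TOV expires.
 */
static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
{
        u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        bfa_trc(ioc, fwstate);

        if (fwstate == BFI_IOC_DISABLED) {
                /* report the state change to the IOCPF state machine */
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
                return;
        }

        if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
                bfa_iocpf_timeout(ioc);         /* give up after BFA_IOC_TOV msecs */
        else {
                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
                bfa_iocpf_poll_timer_start(ioc);        /* re-arm and poll again */
        }
}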
-rw-r--r--   drivers/scsi/bfa/bfa.h          |  15
-rw-r--r--   drivers/scsi/bfa/bfa_core.c     |  28
-rw-r--r--   drivers/scsi/bfa/bfa_hw_cb.c    |  58
-rw-r--r--   drivers/scsi/bfa/bfa_hw_ct.c    |  18
-rw-r--r--   drivers/scsi/bfa/bfa_ioc.c      | 130
-rw-r--r--   drivers/scsi/bfa/bfa_ioc.h      |   3
-rw-r--r--   drivers/scsi/bfa/bfa_ioc_ct.c   |  53
-rw-r--r--   drivers/scsi/bfa/bfa_modules.h  |   2
-rw-r--r--   drivers/scsi/bfa/bfi.h          |  14
-rw-r--r--   drivers/scsi/bfa/bfi_reg.h      |   4
10 files changed, 188 insertions, 137 deletions
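
As a reading aid for the bfa_hw_ct.c hunks in the patch below, the two-stage MSIX installation on Catapult (CT) hardware now looks roughly like this; the code is condensed from the patch itself and only the comments are editorial.

/*
 * Condensed from the bfa_hw_ct.c changes below. bfa_isr_enable() now calls
 * bfa_msix_ctrl_install(), which wires up only the IOC/LPU error vector;
 * the CPE/RME queue vectors are filled in later by bfa_msix_queue_install(),
 * invoked from bfa_iocfc_cfgrsp() once iocfc configuration is done.
 */
void
bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
{
        if (bfa->msix.nvecs == 0)
                return;

        if (bfa->msix.nvecs == 1)
                bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
        else
                bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
}

void
bfa_hwct_msix_queue_install(struct bfa_s *bfa)
{
        int i;

        if (bfa->msix.nvecs == 0)
                return;

        if (bfa->msix.nvecs == 1) {
                for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
                        bfa->msix.handler[i] = bfa_msix_all;
                return;
        }

        for (i = BFI_MSIX_CPE_QMIN_CT; i <= BFI_MSIX_CPE_QMAX_CT; i++)
                bfa->msix.handler[i] = bfa_msix_reqq;

        for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
                bfa->msix.handler[i] = bfa_msix_rspq;
}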
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 708bab093c14..8c73265f977d 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -228,7 +228,8 @@ struct bfa_hwif_s {
 	void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
 	void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
 	void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
-	void (*hw_msix_install)(struct bfa_s *bfa);
+	void (*hw_msix_ctrl_install)(struct bfa_s *bfa);
+	void (*hw_msix_queue_install)(struct bfa_s *bfa);
 	void (*hw_msix_uninstall)(struct bfa_s *bfa);
 	void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
 	void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
@@ -271,8 +272,10 @@ struct bfa_iocfc_s {
 	bfa_ioc_portid(&(__bfa)->ioc)
 #define bfa_msix_init(__bfa, __nvecs)					\
 	((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
-#define bfa_msix_install(__bfa)						\
-	((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
+#define bfa_msix_ctrl_install(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_ctrl_install(__bfa))
+#define bfa_msix_queue_install(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_queue_install(__bfa))
 #define bfa_msix_uninstall(__bfa)					\
 	((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
 #define bfa_isr_mode_set(__bfa, __msix) do {				\
@@ -314,7 +317,8 @@ void bfa_hwcb_reginit(struct bfa_s *bfa);
 void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwcb_msix_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
@@ -326,7 +330,8 @@ void bfa_hwct2_reginit(struct bfa_s *bfa);
 void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwct_msix_install(struct bfa_s *bfa);
+void bfa_hwct_msix_ctrl_install(struct bfa_s *bfa);
+void bfa_hwct_msix_queue_install(struct bfa_s *bfa);
 void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
 void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
 void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 4c9e83803cb6..0dbdd2da5b48 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -248,7 +248,7 @@ bfa_intx(struct bfa_s *bfa)
 		writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
 		for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-			if (intr & (__HFN_INT_RME_Q0 << queue))
+			if ((intr & (__HFN_INT_RME_Q0 << queue)) && bfa->queue_process)
 				bfa_isr_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
 		}
 		intr &= ~qintr;
@@ -262,7 +262,7 @@ bfa_intx(struct bfa_s *bfa)
 		writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
 		for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-			if (intr & (__HFN_INT_CPE_Q0 << queue))
+			if ((intr & (__HFN_INT_CPE_Q0 << queue)) && bfa->queue_process)
 				bfa_isr_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
 		}
 		intr &= ~qintr;
@@ -282,7 +282,7 @@ bfa_isr_enable(struct bfa_s *bfa)
 
 	bfa_trc(bfa, pci_func);
 
-	bfa_msix_install(bfa);
+	bfa_msix_ctrl_install(bfa);
 
 	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
 		umsk = __HFN_INT_ERR_MASK_CT2;
@@ -326,9 +326,6 @@ bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
 void
 bfa_msix_rspq(struct bfa_s *bfa, int vec)
 {
-	if (!bfa->rme_process)
-		return;
-
 	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
 }
 
@@ -512,7 +509,8 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
 		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
 		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
-		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
+		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
+		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
 		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
 		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
 		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
@@ -524,7 +522,8 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
 		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
 		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
-		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
+		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
+		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
 		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
 		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
 		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
@@ -640,7 +639,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa)
 {
 	int i;
 
-	bfa->rme_process = BFA_TRUE;
+	bfa->queue_process = BFA_TRUE;
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
 		bfa->iocfc.hwif.hw_rspq_ack(bfa, i);
 
@@ -743,6 +742,11 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
 	bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
 
 	/*
+	 * Install MSIX queue handlers
+	 */
+	bfa_msix_queue_install(bfa);
+
+	/*
 	 * Configuration is complete - initialize/start submodules
 	 */
 	bfa_fcport_init(bfa);
@@ -813,7 +817,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg)
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	bfa->rme_process = BFA_FALSE;
+	bfa->queue_process = BFA_FALSE;
 	bfa_isr_disable(bfa);
 	bfa_iocfc_disable_submod(bfa);
 
@@ -917,7 +921,7 @@ bfa_iocfc_stop(struct bfa_s *bfa)
 {
 	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
 
-	bfa->rme_process = BFA_FALSE;
+	bfa->queue_process = BFA_FALSE;
 	bfa_ioc_disable(&bfa->ioc);
 }
 
@@ -1017,7 +1021,7 @@ bfa_iocfc_disable(struct bfa_s *bfa)
 		"IOC Disable");
 	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
 
-	bfa->rme_process = BFA_FALSE;
+	bfa->queue_process = BFA_FALSE;
 	bfa_ioc_disable(&bfa->ioc);
 }
 
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index 4ef3cf2e7d98..15fbb13df96c 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -86,43 +86,71 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap,
 }
 
 /*
+ * Dummy interrupt handler for handling spurious interrupts.
+ */
+static void
+bfa_hwcb_msix_dummy(struct bfa_s *bfa, int vec)
+{
+}
+
+/*
  * No special setup required for crossbow -- vector assignments are implicit.
  */
 void
 bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs)
 {
-	int i;
-
 	WARN_ON((nvecs != 1) && (nvecs != __HFN_NUMINTS));
 
 	bfa->msix.nvecs = nvecs;
-	if (nvecs == 1) {
-		for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+	bfa_hwcb_msix_uninstall(bfa);
+}
+
+void
+bfa_hwcb_msix_ctrl_install(struct bfa_s *bfa)
+{
+	int i;
+
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1) {
+		for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
 			bfa->msix.handler[i] = bfa_msix_all;
 		return;
 	}
 
-	for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
-		bfa->msix.handler[i] = bfa_msix_reqq;
-
-	for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
-		bfa->msix.handler[i] = bfa_msix_rspq;
-
-	for (; i < BFI_MSIX_CB_MAX; i++)
+	for (i = BFI_MSIX_RME_QMAX_CB+1; i < BFI_MSIX_CB_MAX; i++)
 		bfa->msix.handler[i] = bfa_msix_lpu_err;
 }
 
-/*
- * Crossbow -- dummy, interrupts are masked
- */
 void
-bfa_hwcb_msix_install(struct bfa_s *bfa)
+bfa_hwcb_msix_queue_install(struct bfa_s *bfa)
 {
+	int i;
+
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1) {
+		for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+			bfa->msix.handler[i] = bfa_msix_all;
+		return;
+	}
+
+	for (i = BFI_MSIX_CPE_QMIN_CB; i <= BFI_MSIX_CPE_QMAX_CB; i++)
+		bfa->msix.handler[i] = bfa_msix_reqq;
+
+	for (i = BFI_MSIX_RME_QMIN_CB; i <= BFI_MSIX_RME_QMAX_CB; i++)
+		bfa->msix.handler[i] = bfa_msix_rspq;
 }
 
 void
 bfa_hwcb_msix_uninstall(struct bfa_s *bfa)
 {
+	int i;
+
+	for (i = 0; i < BFI_MSIX_CB_MAX; i++)
+		bfa->msix.handler[i] = bfa_hwcb_msix_dummy;
 }
 
 /*
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 66e2d29ff45a..989bbce9b296 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -96,7 +96,19 @@ bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs)
 }
 
 void
-bfa_hwct_msix_install(struct bfa_s *bfa)
+bfa_hwct_msix_ctrl_install(struct bfa_s *bfa)
+{
+	if (bfa->msix.nvecs == 0)
+		return;
+
+	if (bfa->msix.nvecs == 1)
+		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_all;
+	else
+		bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
+}
+
+void
+bfa_hwct_msix_queue_install(struct bfa_s *bfa)
 {
 	int i;
 
@@ -104,7 +116,7 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
 		return;
 
 	if (bfa->msix.nvecs == 1) {
-		for (i = 0; i < BFI_MSIX_CT_MAX; i++)
+		for (i = BFI_MSIX_CPE_QMIN_CT; i < BFI_MSIX_CT_MAX; i++)
 			bfa->msix.handler[i] = bfa_msix_all;
 		return;
 	}
@@ -114,8 +126,6 @@ bfa_hwct_msix_install(struct bfa_s *bfa)
 
 	for (i = BFI_MSIX_RME_QMIN_CT; i <= BFI_MSIX_RME_QMAX_CT; i++)
 		bfa->msix.handler[i] = bfa_msix_rspq;
-
-	bfa->msix.handler[BFI_MSIX_LPU_ERR_CT] = bfa_msix_lpu_err;
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 3d336c279c42..9c6e493cb9c7 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -29,8 +29,8 @@ BFA_TRC_FILE(CNA, IOC);
 #define BFA_IOC_TOV		3000	/* msecs */
 #define BFA_IOC_HWSEM_TOV	500	/* msecs */
 #define BFA_IOC_HB_TOV		500	/* msecs */
-#define BFA_IOC_HWINIT_MAX	5
 #define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
+#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
 
 #define bfa_ioc_timer_start(__ioc)					\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
@@ -79,6 +79,7 @@ bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
 static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
 static void bfa_ioc_timeout(void *ioc);
+static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
 static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
@@ -107,11 +108,10 @@ enum ioc_event {
 	IOC_E_ENABLED		= 5,	/* f/w enabled		*/
 	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
 	IOC_E_DISABLED		= 7,	/* f/w disabled		*/
-	IOC_E_INITFAILED	= 8,	/* failure notice by iocpf sm	*/
-	IOC_E_PFFAILED		= 9,	/* failure notice by iocpf sm	*/
-	IOC_E_HBFAIL		= 10,	/* heartbeat failure		*/
-	IOC_E_HWERROR		= 11,	/* hardware error interrupt	*/
-	IOC_E_TIMEOUT		= 12,	/* timeout			*/
+	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
+	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
+	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
+	IOC_E_TIMEOUT		= 11,	/* timeout			*/
 };
 
 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
@@ -145,9 +145,9 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
 	bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
 #define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
 
-#define bfa_iocpf_recovery_timer_start(__ioc)				\
+#define bfa_iocpf_poll_timer_start(__ioc)				\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
-	bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)
+	bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
 
 #define bfa_sem_timer_start(__ioc)					\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
@@ -159,6 +159,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = {
  */
 static void bfa_iocpf_timeout(void *ioc_arg);
 static void bfa_iocpf_sem_timeout(void *ioc_arg);
+static void bfa_iocpf_poll_timeout(void *ioc_arg);
 
 /*
  * IOCPF state machine events
@@ -316,7 +317,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
 		/* !!! fall through !!! */
 	case IOC_E_HWERROR:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 		break;
@@ -368,7 +369,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
 		/* !!! fall through !!! */
 	case IOC_E_TIMEOUT:
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
 		break;
@@ -417,13 +418,13 @@ bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
 		bfa_hb_timer_stop(ioc);
 		/* !!! fall through !!! */
 	case IOC_E_HBFAIL:
-		bfa_ioc_fail_notify(ioc);
-
 		if (ioc->iocpf.auto_recover)
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
 		else
 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 
+		bfa_ioc_fail_notify(ioc);
+
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
 		break;
@@ -528,14 +529,11 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
 		 * Initialization retry failed.
 		 */
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
 		if (event != IOC_E_PFFAILED)
 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
 		break;
 
-	case IOC_E_INITFAILED:
-		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
-		break;
-
 	case IOC_E_ENABLE:
 		break;
 
@@ -603,7 +601,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 static void
 bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
 {
-	iocpf->retry_count = 0;
+	iocpf->fw_mismatch_notified = BFA_FALSE;
 	iocpf->auto_recover = bfa_auto_recover;
 }
 
@@ -653,7 +651,6 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	case IOCPF_E_SEMLOCKED:
 		if (bfa_ioc_firmware_lock(ioc)) {
 			if (bfa_ioc_sync_start(ioc)) {
-				iocpf->retry_count = 0;
 				bfa_ioc_sync_join(ioc);
 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 			} else {
@@ -692,10 +689,10 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
 	/*
 	 * Call only the first time sm enters fwmismatch state.
 	 */
-	if (iocpf->retry_count == 0)
+	if (iocpf->fw_mismatch_notified == BFA_FALSE)
 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
 
-	iocpf->retry_count++;
+	iocpf->fw_mismatch_notified = BFA_TRUE;
 	bfa_iocpf_timer_start(iocpf->ioc);
 }
 
@@ -773,7 +770,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_iocpf_timer_start(iocpf->ioc);
+	iocpf->poll_time = 0;
 	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
 }
 
@@ -790,20 +787,12 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_FWREADY:
-		bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
 		break;
 
-	case IOCPF_E_INITFAIL:
-		bfa_iocpf_timer_stop(ioc);
-		/*
-		 * !!! fall through !!!
-		 */
-
 	case IOCPF_E_TIMEOUT:
 		writel(1, ioc->ioc_regs.ioc_sem_reg);
-		if (event == IOCPF_E_TIMEOUT)
-			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
+		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
 		break;
 
@@ -823,6 +812,10 @@ static void
 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
 {
 	bfa_iocpf_timer_start(iocpf->ioc);
+	/*
+	 * Enable Interrupts before sending fw IOC ENABLE cmd.
+	 */
+	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
 	bfa_ioc_send_enable(iocpf->ioc);
 }
 
@@ -863,10 +856,6 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
 		break;
 
-	case IOCPF_E_FWREADY:
-		bfa_ioc_send_enable(ioc);
-		break;
-
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -898,16 +887,6 @@ bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
 		break;
 
-	case IOCPF_E_FWREADY:
-		if (bfa_ioc_is_operational(ioc)) {
-			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
-		} else {
-			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
-		}
-		break;
-
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -932,7 +911,6 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_FWRSP_DISABLE:
-	case IOCPF_E_FWREADY:
 		bfa_iocpf_timer_stop(ioc);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
 		break;
@@ -1005,7 +983,6 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_ENABLE:
-		iocpf->retry_count = 0;
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
 		break;
 
@@ -1038,20 +1015,10 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
 		bfa_ioc_notify_fail(ioc);
-		bfa_ioc_sync_ack(ioc);
-		iocpf->retry_count++;
-		if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
-			bfa_ioc_sync_leave(ioc);
-			writel(1, ioc->ioc_regs.ioc_sem_reg);
-			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
-		} else {
-			if (bfa_ioc_sync_complete(ioc))
-				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
-			else {
-				writel(1, ioc->ioc_regs.ioc_sem_reg);
-				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
-			}
-		}
+		bfa_ioc_sync_leave(ioc);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
+		writel(1, ioc->ioc_regs.ioc_sem_reg);
+		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
 		break;
 
 	case IOCPF_E_DISABLE:
@@ -1076,7 +1043,6 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 static void
 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
 {
-	bfa_fsm_send_event(iocpf->ioc, IOC_E_INITFAILED);
 }
 
 /*
@@ -1129,11 +1095,11 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
 
 	switch (event) {
 	case IOCPF_E_SEMLOCKED:
-		iocpf->retry_count = 0;
 		bfa_ioc_sync_ack(ioc);
 		bfa_ioc_notify_fail(ioc);
 		if (!iocpf->auto_recover) {
 			bfa_ioc_sync_leave(ioc);
+			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
 			writel(1, ioc->ioc_regs.ioc_sem_reg);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
 		} else {
@@ -1441,7 +1407,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 	 * just wait for an initialization completion interrupt.
 	 */
 	if (ioc_fwstate == BFI_IOC_INITING) {
-		ioc->cbfn->reset_cbfn(ioc->bfa);
+		bfa_ioc_poll_fwinit(ioc);
 		return;
 	}
 
@@ -1460,7 +1426,6 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 	 * be flushed. Otherwise MSI-X interrupts are not delivered.
 	 */
 	bfa_ioc_msgflush(ioc);
-	ioc->cbfn->reset_cbfn(ioc->bfa);
 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
 	return;
 }
@@ -1902,11 +1867,6 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 
 	bfa_ioc_msgflush(ioc);
 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
-
-	/*
-	 * Enable interrupts just before starting LPU
-	 */
-	ioc->cbfn->reset_cbfn(ioc->bfa);
 	bfa_ioc_lpu_start(ioc);
 }
 
@@ -1981,10 +1941,6 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
 	case BFI_IOC_I2H_HBEAT:
 		break;
 
-	case BFI_IOC_I2H_READY_EVENT:
-		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
-		break;
-
 	case BFI_IOC_I2H_ENABLE_REPLY:
 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
 		break;
@@ -2752,6 +2708,34 @@ bfa_iocpf_sem_timeout(void *ioc_arg)
 	bfa_ioc_hw_sem_get(ioc);
 }
 
+static void
+bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
+{
+	u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+	bfa_trc(ioc, fwstate);
+
+	if (fwstate == BFI_IOC_DISABLED) {
+		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
+		return;
+	}
+
+	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
+		bfa_iocpf_timeout(ioc);
+	else {
+		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
+		bfa_iocpf_poll_timer_start(ioc);
+	}
+}
+
+static void
+bfa_iocpf_poll_timeout(void *ioc_arg)
+{
+	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
+
+	bfa_ioc_poll_fwinit(ioc);
+}
+
 /*
  * bfa timer function
  */
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 39382ea21449..8c9bbdf26482 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -224,8 +224,9 @@ struct bfa_ioc_notify_s {
 struct bfa_iocpf_s {
 	bfa_fsm_t		fsm;
 	struct bfa_ioc_s	*ioc;
-	u32			retry_count;
+	bfa_boolean_t		fw_mismatch_notified;
 	bfa_boolean_t		auto_recover;
+	u32			poll_time;
 };
 
 struct bfa_ioc_s {
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index 118ed8c7fc2e..6710a8016601 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -300,8 +300,8 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc_s *ioc)
 	ioc->ioc_regs.ioc_sem_reg = (rb + CT2_HOST_SEM0_REG);
 	ioc->ioc_regs.ioc_usage_sem_reg = (rb + CT2_HOST_SEM1_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + CT2_HOST_SEM2_REG);
-	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
-	ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
+	ioc->ioc_regs.ioc_usage_reg = (rb + CT2_BFA_FW_USE_COUNT);
+	ioc->ioc_regs.ioc_fail_sync = (rb + CT2_BFA_IOC_FAIL_SYNC);
 
 	/*
 	 * sram memory access
@@ -636,10 +636,10 @@ bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 static struct { u32 sclk, speed, half_speed; } ct2_pll[] = {
 	{0},					/* unused */
 	{__APP_PLL_SCLK_CLK_DIV2, 0, 0},	/* FC 8G */
-	{0, __APP_LPU_SPEED, 0},		/* FC 16G */
+	{0, 0, 0},				/* FC 16G */
 	{__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2, 0,	/* ETH */
	 __APP_LPUCLK_HALFSPEED},
-	{0, __APP_LPU_SPEED, 0},		/* COMBO */
+	{0, 0, 0},				/* COMBO */
 };
 
 static void
@@ -664,15 +664,13 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb, enum bfi_asic_mode mode)
 	writel(r32 | ct2_pll[mode].sclk, (rb + CT2_APP_PLL_SCLK_CTL_REG));
 
 	/*
-	 * remove clock gating for ethernet subsystem for ethernet mode
+	 * while doing PLL init dont clock gate ethernet subsystem
 	 */
-	if (mode == BFI_ASIC_MODE_ETH) {
-		r32 = readl((rb + CT2_CHIP_MISC_PRG));
-		writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
+	r32 = readl((rb + CT2_CHIP_MISC_PRG));
+	writel(r32 | __ETH_CLK_ENABLE_PORT0, (rb + CT2_CHIP_MISC_PRG));
 
-		r32 = readl((rb + CT2_PCIE_MISC_REG));
-		writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
-	}
+	r32 = readl((rb + CT2_PCIE_MISC_REG));
+	writel(r32 | __ETH_CLK_ENABLE_PORT1, (rb + CT2_PCIE_MISC_REG));
 
 	/*
 	 * set sclk value
@@ -693,6 +691,19 @@ bfa_ioc_ct2_sclk_init(void __iomem *rb, enum bfi_asic_mode mode)
 	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
 	writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
 		(rb + CT2_APP_PLL_SCLK_CTL_REG));
+
+	/*
+	 * clock gating for ethernet subsystem if not in ethernet mode
+	 */
+	if (mode != BFI_ASIC_MODE_ETH) {
+		r32 = readl((rb + CT2_CHIP_MISC_PRG));
+		writel(r32 & ~__ETH_CLK_ENABLE_PORT0,
+			(rb + CT2_CHIP_MISC_PRG));
+
+		r32 = readl((rb + CT2_PCIE_MISC_REG));
+		writel(r32 & ~__ETH_CLK_ENABLE_PORT1,
+			(rb + CT2_PCIE_MISC_REG));
+	}
 }
 
 static void
@@ -728,7 +739,8 @@ bfa_ioc_ct2_lclk_init(void __iomem *rb, enum bfi_asic_mode mode)
 	 */
 	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
 	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
-	if (mode == BFI_ASIC_MODE_FC || mode == BFI_ASIC_MODE_ETH)
+	if (mode == BFI_ASIC_MODE_FC || mode == BFI_ASIC_MODE_FC16 ||
+	    mode == BFI_ASIC_MODE_ETH)
 		r32 |= 0x20c1731b;
 	else
 		r32 |= 0x2081731b;
@@ -755,8 +767,10 @@ bfa_ioc_ct2_mem_init(void __iomem *rb, enum bfi_asic_mode mode)
 	fcmode = (mode == BFI_ASIC_MODE_FC) || (mode == BFI_ASIC_MODE_FC16);
 
 	if (!fcmode) {
-		writel(__PMM_1T_RESET_P, (rb + CT2_PMM_1T_CONTROL_REG_P0));
-		writel(__PMM_1T_RESET_P, (rb + CT2_PMM_1T_CONTROL_REG_P1));
+		writel(__PMM_1T_PNDB_P | __PMM_1T_RESET_P,
+			(rb + CT2_PMM_1T_CONTROL_REG_P0));
+		writel(__PMM_1T_PNDB_P | __PMM_1T_RESET_P,
+			(rb + CT2_PMM_1T_CONTROL_REG_P1));
 	}
 
 	r32 = readl((rb + PSS_CTL_REG));
@@ -764,6 +778,11 @@ bfa_ioc_ct2_mem_init(void __iomem *rb, enum bfi_asic_mode mode)
 	writel(r32, (rb + PSS_CTL_REG));
 	udelay(1000);
 
+	if (!fcmode) {
+		writel(__PMM_1T_PNDB_P, (rb + CT2_PMM_1T_CONTROL_REG_P0));
+		writel(__PMM_1T_PNDB_P, (rb + CT2_PMM_1T_CONTROL_REG_P1));
+	}
+
 	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
 	udelay(1000);
 	writel(0, (rb + CT2_MBIST_CTL_REG));
@@ -776,6 +795,12 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode mode)
 	bfa_ioc_ct2_lclk_init(rb, mode);
 	bfa_ioc_ct2_mem_init(rb, mode);
 
+	/*
+	 * Disable flash presence to NFC by clearing GPIO 0
+	 */
+	writel(0, (rb + PSS_GPIO_OUT_REG));
+	writel(1, (rb + PSS_GPIO_OE_REG));
+
 	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
 	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
 	return BFA_STATUS_OK;
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index ab79ff6fdeea..4b67ea2ea407 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -109,7 +109,7 @@ struct bfa_s {
 	struct bfa_timer_mod_s	timer_mod;	/*  timer module	*/
 	struct bfa_modules_s	modules;	/*  BFA modules		*/
 	struct list_head	comp_q;		/*  pending completions */
-	bfa_boolean_t		rme_process;	/*  RME processing enabled */
+	bfa_boolean_t		queue_process;	/*  queue processing enabled */
 	struct list_head	reqq_waitq[BFI_IOC_MAX_CQS];
 	bfa_boolean_t		fcs;		/*  FCS is attached to BFA */
 	struct bfa_msix_s	msix;
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 7096c5fd2ba0..e6383f23e065 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -221,8 +221,7 @@ enum bfi_ioc_i2h_msgs {
 	BFI_IOC_I2H_ENABLE_REPLY	= BFA_I2HM(1),
 	BFI_IOC_I2H_DISABLE_REPLY	= BFA_I2HM(2),
 	BFI_IOC_I2H_GETATTR_REPLY	= BFA_I2HM(3),
-	BFI_IOC_I2H_READY_EVENT		= BFA_I2HM(4),
-	BFI_IOC_I2H_HBEAT		= BFA_I2HM(5),
+	BFI_IOC_I2H_HBEAT		= BFA_I2HM(4),
 };
 
 /*
@@ -318,15 +317,6 @@ enum bfi_port_mode {
 	BFI_PORT_MODE_ETH	= 2,
 };
 
-/*
- * BFI_IOC_I2H_READY_EVENT message
- */
-struct bfi_ioc_rdy_event_s {
-	struct bfi_mhdr_s	mh;		/* common msg header */
-	u8			init_status;	/* init event status */
-	u8			rsvd[3];
-};
-
 struct bfi_ioc_hbeat_s {
 	struct bfi_mhdr_s	mh;		/* common msg header	*/
 	u32			hb_count;	/* current heart beat count	*/
@@ -418,7 +408,7 @@ union bfi_ioc_h2i_msg_u {
  */
 union bfi_ioc_i2h_msg_u {
 	struct bfi_mhdr_s	mh;
-	struct bfi_ioc_rdy_event_s	rdy_event;
+	struct bfi_ioc_ctrl_reply_s	rdy_event;
 	u32			mboxmsg[BFI_IOC_MSGSZ];
 };
 
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
index 0e8b68540018..de4db72b39e9 100644
--- a/drivers/scsi/bfa/bfi_reg.h
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -137,6 +137,10 @@
 #define __PSS_LPU0_RESET		0x00000001
 #define PSS_ERR_STATUS_REG		0x00018810	/* cb/ct */
 #define ERR_SET_REG			0x00018818	/* cb/ct */
+#define PSS_GPIO_OUT_REG		0x000188c0	/* cb/ct */
+#define __PSS_GPIO_OUT_REG		0x00000fff
+#define PSS_GPIO_OE_REG			0x000188c8	/* cb/ct */
+#define __PSS_GPIO_OE_REG		0x000000ff
 #define HOSTFN0_LPU_MBOX0_0		0x00019200	/* cb/ct */
 #define HOSTFN1_LPU_MBOX0_8		0x00019260	/* cb/ct */