Diffstat (limited to 'drivers/scsi/bfa/bfa_ioc.c')
-rw-r--r-- | drivers/scsi/bfa/bfa_ioc.c | 520
1 files changed, 493 insertions, 27 deletions
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 8cdb79c2fcdf..0116c1032e25 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -92,7 +92,6 @@ static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
 			enum bfa_ioc_event_e event);
 static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
-static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
 static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
@@ -599,8 +598,9 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
 		break;
 
 	case IOC_E_HWERROR:
+	case IOC_E_HWFAILED:
 		/*
-		 * HB failure notification, ignore.
+		 * HB failure / HW error notification, ignore.
 		 */
 		break;
 
 	default:
@@ -632,6 +632,10 @@ bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
 		break;
 
+	case IOC_E_HWERROR:
+		/* Ignore - already in hwfail state */
+		break;
+
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -727,8 +731,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
 	/*
 	 * Unlock the hw semaphore. Should be here only once per boot.
 	 */
-	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
-	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);
+	bfa_ioc_ownership_reset(iocpf->ioc);
 
 	/*
 	 * unlock init semaphore.
@@ -1455,7 +1458,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
-		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
+		if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
 			bfa_trc(ioc, i);
 			bfa_trc(ioc, fwhdr->md5sum[i]);
 			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
@@ -1480,7 +1483,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
 
-	if (fwhdr.signature != drv_fwhdr->signature) {
+	if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
 		bfa_trc(ioc, fwhdr.signature);
 		bfa_trc(ioc, drv_fwhdr->signature);
 		return BFA_FALSE;
 	}
@@ -1704,7 +1707,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 		 * write smem
 		 */
 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
-			fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
+			cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
 
 		loff += sizeof(u32);
@@ -1747,6 +1750,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
 	attr->card_type = be32_to_cpu(attr->card_type);
 	attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
 	ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
+	attr->mfg_year = be16_to_cpu(attr->mfg_year);
 
 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 }
@@ -2260,6 +2264,12 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc)
 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
+void
+bfa_ioc_suspend(struct bfa_ioc_s *ioc)
+{
+	ioc->dbg_fwsave_once = BFA_TRUE;
+	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
+}
 /*
  * Initialize memory for saving firmware trace. Driver must initialize
@@ -2269,7 +2279,7 @@ void
 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
 {
 	ioc->dbg_fwsave = dbg_fwsave;
-	ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
+	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
 }
 
 /*
@@ -2487,6 +2497,9 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
 	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
 				!bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
+	ad_attr->mfg_day = ioc_attr->mfg_day;
+	ad_attr->mfg_month = ioc_attr->mfg_month;
+	ad_attr->mfg_year = ioc_attr->mfg_year;
 }
 
 enum bfa_ioc_type_e
@@ -2856,7 +2869,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
 /*
  * Save firmware trace if configured.
  */
-static void
+void
 bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
 {
 	int tlen;
@@ -2913,7 +2926,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
 		return;
 	}
 
-	if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
+	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
 		bfa_iocpf_timeout(ioc);
 	else {
 		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
@@ -3006,7 +3019,6 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
 	struct bfa_ablk_cfg_inst_s *cfg_inst;
 	int i, j;
 	u16 be16;
-	u32 be32;
 
 	for (i = 0; i < BFA_ABLK_MAX; i++) {
 		cfg_inst = &cfg->inst[i];
@@ -3017,8 +3029,10 @@ bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
 			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
 			be16 = cfg_inst->pf_cfg[j].num_vectors;
 			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
-			be32 = cfg_inst->pf_cfg[j].bw;
-			cfg_inst->pf_cfg[j].bw = be16_to_cpu(be32);
+			be16 = cfg_inst->pf_cfg[j].bw_min;
+			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
+			be16 = cfg_inst->pf_cfg[j].bw_max;
+			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
 		}
 	}
 }
@@ -3160,7 +3174,8 @@ bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
 
 bfa_status_t
 bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
-		u8 port, enum bfi_pcifn_class personality, int bw,
+		u8 port, enum bfi_pcifn_class personality,
+		u16 bw_min, u16 bw_max,
 		bfa_ablk_cbfn_t cbfn, void *cbarg)
 {
 	struct bfi_ablk_h2i_pf_req_s *m;
@@ -3184,7 +3199,8 @@ bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
 		    bfa_ioc_portid(ablk->ioc));
 	m->pers = cpu_to_be16((u16)personality);
-	m->bw = cpu_to_be32(bw);
+	m->bw_min = cpu_to_be16(bw_min);
+	m->bw_max = cpu_to_be16(bw_max);
 	m->port = port;
 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
@@ -3284,8 +3300,8 @@ bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
 }
 
 bfa_status_t
-bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
-		bfa_ablk_cbfn_t cbfn, void *cbarg)
+bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
+		u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
 {
 	struct bfi_ablk_h2i_pf_req_s *m;
@@ -3307,7 +3323,8 @@ bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
 		    bfa_ioc_portid(ablk->ioc));
 	m->pcifn = (u8)pcifn;
-	m->bw = cpu_to_be32(bw);
+	m->bw_min = cpu_to_be16(bw_min);
+	m->bw_max = cpu_to_be16(bw_max);
 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
 
 	return BFA_STATUS_OK;
@@ -4670,22 +4687,25 @@ diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
 	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
 	diag->tsensor.temp->ts_junc = rsp->ts_junc;
 	diag->tsensor.temp->ts_brd = rsp->ts_brd;
-	diag->tsensor.temp->status = BFA_STATUS_OK;
 
 	if (rsp->ts_brd) {
+		/* tsensor.temp->status is brd_temp status */
+		diag->tsensor.temp->status = rsp->status;
 		if (rsp->status == BFA_STATUS_OK) {
 			diag->tsensor.temp->brd_temp =
 				be16_to_cpu(rsp->brd_temp);
-		} else {
-			bfa_trc(diag, rsp->status);
+		} else
 			diag->tsensor.temp->brd_temp = 0;
-			diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
-		}
 	}
+
+	bfa_trc(diag, rsp->status);
 	bfa_trc(diag, rsp->ts_junc);
 	bfa_trc(diag, rsp->temp);
 	bfa_trc(diag, rsp->ts_brd);
 	bfa_trc(diag, rsp->brd_temp);
+
+	/* tsensor status is always good bcos we always have junction temp */
+	diag->tsensor.status = BFA_STATUS_OK;
 	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
 	diag->tsensor.lock = 0;
 }
@@ -4914,6 +4934,7 @@ bfa_diag_tsensor_query(struct bfa_diag_s *diag,
 	diag->tsensor.temp = result;
 	diag->tsensor.cbfn = cbfn;
 	diag->tsensor.cbarg = cbarg;
+	diag->tsensor.status = BFA_STATUS_OK;
 
 	/* Send msg to fw */
 	diag_tempsensor_send(diag);
@@ -5587,7 +5608,7 @@ static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
 static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
 
 /*
- * Begining state of dconf module. Waiting for an event to start.
+ * Beginning state of dconf module. Waiting for an event to start.
  */
 static void
 bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
@@ -5605,7 +5626,7 @@ bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
 		}
 		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
 		bfa_timer_start(dconf->bfa, &dconf->timer,
-			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
+			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
 		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
 					BFA_FLASH_PART_DRV, dconf->instance,
 					dconf->dconf,
@@ -5645,7 +5666,7 @@ bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
 		break;
 	case BFA_DCONF_SM_TIMEOUT:
 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
-		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
+		bfa_ioc_suspend(&dconf->bfa->ioc);
 		break;
 	case BFA_DCONF_SM_EXIT:
 		bfa_timer_stop(&dconf->timer);
@@ -5843,7 +5864,6 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
 	struct bfa_s *bfa = arg;
 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
 
-	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
 	if (status == BFA_STATUS_OK) {
 		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
 		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
@@ -5851,6 +5871,7 @@ bfa_dconf_init_cb(void *arg, bfa_status_t status)
 		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
 			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
 	}
+	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
 	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
 }
@@ -5935,3 +5956,448 @@ bfa_dconf_modexit(struct bfa_s *bfa)
 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
 	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
 }
+
+/*
+ * FRU specific functions
+ */
+
+#define BFA_FRU_DMA_BUF_SZ		0x02000		/* 8k dma buffer */
+#define BFA_FRU_CHINOOK_MAX_SIZE	0x10000
+#define BFA_FRU_LIGHTNING_MAX_SIZE	0x200
+
+static void
+bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
+{
+	struct bfa_fru_s *fru = cbarg;
+
+	bfa_trc(fru, event);
+
+	switch (event) {
+	case BFA_IOC_E_DISABLED:
+	case BFA_IOC_E_FAILED:
+		if (fru->op_busy) {
+			fru->status = BFA_STATUS_IOC_FAILURE;
+			fru->cbfn(fru->cbarg, fru->status);
+			fru->op_busy = 0;
+		}
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * Send fru write request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+	struct bfa_fru_s *fru = cbarg;
+	struct bfi_fru_write_req_s *msg =
+			(struct bfi_fru_write_req_s *) fru->mb.msg;
+	u32 len;
+
+	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+				fru->residue : BFA_FRU_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+
+	/*
+	 * indicate if it's the last msg of the whole write operation
+	 */
+	msg->last = (len == fru->residue) ? 1 : 0;
+
+	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+
+	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
+	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+
+	fru->residue -= len;
+	fru->offset += len;
+}
+
+/*
+ * Send fru read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
+{
+	struct bfa_fru_s *fru = cbarg;
+	struct bfi_fru_read_req_s *msg =
+			(struct bfi_fru_read_req_s *) fru->mb.msg;
+	u32 len;
+
+	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
+	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
+				fru->residue : BFA_FRU_DMA_BUF_SZ;
+	msg->length = cpu_to_be32(len);
+	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
+	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
+	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
+}
+
+/*
+ * Flash memory info API.
+ *
+ * @param[in] mincfg - minimal cfg variable
+ */
+u32
+bfa_fru_meminfo(bfa_boolean_t mincfg)
+{
+	/* min driver doesn't need fru */
+	if (mincfg)
+		return 0;
+
+	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ * @param[in] trcmod - trace module
+ * @param[in] logmod - log module
+ */
+void
+bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
+	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
+{
+	fru->ioc = ioc;
+	fru->trcmod = trcmod;
+	fru->cbfn = NULL;
+	fru->cbarg = NULL;
+	fru->op_busy = 0;
+
+	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
+	bfa_q_qe_init(&fru->ioc_notify);
+	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
+	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
+
+	/* min driver doesn't need fru */
+	if (mincfg) {
+		fru->dbuf_kva = NULL;
+		fru->dbuf_pa = 0;
+	}
+}
+
+/*
+ * Claim memory for fru
+ *
+ * @param[in] fru - fru structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - frusical memory address
+ * @param[in] mincfg - minimal cfg variable
+ */
+void
+bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
+	bfa_boolean_t mincfg)
+{
+	if (mincfg)
+		return;
+
+	fru->dbuf_kva = dm_kva;
+	fru->dbuf_pa = dm_pa;
+	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
+	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Update fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+		  bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
+		return BFA_STATUS_CMD_NOTSUPP;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+
+	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Read fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+		bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
+		return BFA_STATUS_CMD_NOTSUPP;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Get maximum size fru vpd image.
+ *
+ * @param[in] fru - fru structure
+ * @param[out] size - maximum size of fru vpd data
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
+{
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK)
+		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
+	else
+		return BFA_STATUS_CMD_NOTSUPP;
+	return BFA_STATUS_OK;
+}
+/*
+ * tfru write.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+	       bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+	bfa_trc(fru, *((u8 *) buf));
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+
+	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * tfru read.
+ *
+ * @param[in] fru - fru structure
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+bfa_status_t
+bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
+	      bfa_cb_fru_t cbfn, void *cbarg)
+{
+	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
+	bfa_trc(fru, len);
+	bfa_trc(fru, offset);
+
+	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
+		return BFA_STATUS_FRU_NOT_PRESENT;
+
+	if (!bfa_ioc_is_operational(fru->ioc))
+		return BFA_STATUS_IOC_NON_OP;
+
+	if (fru->op_busy) {
+		bfa_trc(fru, fru->op_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fru->op_busy = 1;
+
+	fru->cbfn = cbfn;
+	fru->cbarg = cbarg;
+	fru->residue = len;
+	fru->offset = 0;
+	fru->addr_off = offset;
+	fru->ubuf = buf;
+	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
+
+	return BFA_STATUS_OK;
+}
+
+/*
+ * Process fru response messages upon receiving interrupts.
+ *
+ * @param[in] fruarg - fru structure
+ * @param[in] msg - message structure
+ */
+void
+bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
+{
+	struct bfa_fru_s *fru = fruarg;
+	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
+	u32 status;
+
+	bfa_trc(fru, msg->mh.msg_id);
+
+	if (!fru->op_busy) {
+		/*
+		 * receiving response after ioc failure
+		 */
+		bfa_trc(fru, 0x9999);
+		return;
+	}
+
+	switch (msg->mh.msg_id) {
+	case BFI_FRUVPD_I2H_WRITE_RSP:
+	case BFI_TFRU_I2H_WRITE_RSP:
+		status = be32_to_cpu(rsp->status);
+		bfa_trc(fru, status);
+
+		if (status != BFA_STATUS_OK || fru->residue == 0) {
+			fru->status = status;
+			fru->op_busy = 0;
+			if (fru->cbfn)
+				fru->cbfn(fru->cbarg, fru->status);
+		} else {
+			bfa_trc(fru, fru->offset);
+			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
+				bfa_fru_write_send(fru,
+					BFI_FRUVPD_H2I_WRITE_REQ);
+			else
+				bfa_fru_write_send(fru,
+					BFI_TFRU_H2I_WRITE_REQ);
+		}
+		break;
+	case BFI_FRUVPD_I2H_READ_RSP:
+	case BFI_TFRU_I2H_READ_RSP:
+		status = be32_to_cpu(rsp->status);
+		bfa_trc(fru, status);
+
+		if (status != BFA_STATUS_OK) {
+			fru->status = status;
+			fru->op_busy = 0;
+			if (fru->cbfn)
+				fru->cbfn(fru->cbarg, fru->status);
+		} else {
+			u32 len = be32_to_cpu(rsp->length);
+
+			bfa_trc(fru, fru->offset);
+			bfa_trc(fru, len);
+
+			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
+			fru->residue -= len;
+			fru->offset += len;
+
+			if (fru->residue == 0) {
+				fru->status = status;
+				fru->op_busy = 0;
+				if (fru->cbfn)
+					fru->cbfn(fru->cbarg, fru->status);
+			} else {
+				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
+					bfa_fru_read_send(fru,
+						BFI_FRUVPD_H2I_READ_REQ);
+				else
+					bfa_fru_read_send(fru,
+						BFI_TFRU_H2I_READ_REQ);
+			}
+		}
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
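For reference, the FRU VPD interface added at the end of this patch is asynchronous: bfa_fruvpd_update() and bfa_fruvpd_read() only validate the request, queue the first mailbox message and return BFA_STATUS_OK, the transfer then proceeds in BFA_FRU_DMA_BUF_SZ (8 KB) chunks driven from bfa_fru_intr(), and the final status is delivered through the bfa_cb_fru_t callback. The sketch below shows one way a caller might wait for that completion. It is illustrative only and not part of the patch: the callback signature is inferred from how fru->cbfn(fru->cbarg, fru->status) is invoked above, and struct fru_wait, fru_update_done() and fru_update_sync() are hypothetical names.

/*
 * Illustrative sketch only (not part of the patch): wait synchronously for a
 * chunked FRU VPD update to finish. Assumes bfa_cb_fru_t takes
 * (void *cbarg, bfa_status_t status), matching how fru->cbfn() is called in
 * bfa_fru_intr(), and that the caller is in a context that may sleep.
 */
#include <linux/completion.h>

struct fru_wait {
	struct completion comp;
	bfa_status_t status;
};

static void
fru_update_done(void *cbarg, bfa_status_t status)
{
	struct fru_wait *wait = cbarg;

	wait->status = status;		/* final status from bfa_fru_intr() */
	complete(&wait->comp);
}

static bfa_status_t
fru_update_sync(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset)
{
	struct fru_wait wait;
	bfa_status_t rc;

	init_completion(&wait.comp);
	wait.status = BFA_STATUS_OK;

	/* Queues the first 8 KB chunk; bfa_fru_intr() sends the rest. */
	rc = bfa_fruvpd_update(fru, buf, len, offset, fru_update_done, &wait);
	if (rc != BFA_STATUS_OK)
		return rc;	/* e.g. BFA_STATUS_DEVBUSY or BFA_STATUS_IOC_NON_OP */

	wait_for_completion(&wait.comp);
	return wait.status;
}

The same pattern would apply to bfa_fruvpd_read() and the bfa_tfru_* variants, which share the op_busy / residue bookkeeping shown in the patch.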