author     Linus Torvalds <torvalds@linux-foundation.org>  2023-06-30 20:57:07 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-06-30 20:57:07 +0200
commit     ca7ce08d6a063e0ccb91dc57f9bc213120d0d1a7 (patch)
tree       99ce2811fdf52befc76d2cad95b16040423ecd10 /drivers/scsi/lpfc
parent     Merge tag 'ata-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/dlem... (diff)
parent     Merge patch series "scsi: fixes for targets with many LUNs, and scsi_target_b... (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"Updates to the usual drivers (ufs, pm80xx, libata-scsi, smartpqi,
lpfc, qla2xxx).
We have a couple of major core changes impacting other systems:
- Command Duration Limits, which spills into block and ATA
- block level Persistent Reservation Operations, which touches block,
nvme, target and dm
Both of these are added with merge commits containing a cover letter
explaining what's going on"
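The block-level Persistent Reservation work mentioned above builds on the PR interface the block layer already exposes to userspace through <linux/pr.h>. For orientation, a minimal sketch against that existing uapi (illustrative only, not code from this merge; /dev/sdX is a placeholder device node):

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	/* Register a reservation key, then take a write-exclusive reservation. */
	struct pr_registration reg = { .old_key = 0, .new_key = 0xABCD };
	struct pr_reservation rsv = { .key = 0xABCD, .type = PR_WRITE_EXCLUSIVE };
	int fd = open("/dev/sdX", O_RDWR); /* placeholder */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, IOC_PR_REGISTER, &reg))
		perror("IOC_PR_REGISTER");
	else if (ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("IOC_PR_RESERVE");
	return 0;
}
```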
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (187 commits)
scsi: core: Improve warning message in scsi_device_block()
scsi: core: Replace scsi_target_block() with scsi_block_targets()
scsi: core: Don't wait for quiesce in scsi_device_block()
scsi: core: Don't wait for quiesce in scsi_stop_queue()
scsi: core: Merge scsi_internal_device_block() and device_block()
scsi: sg: Increase number of devices
scsi: bsg: Increase number of devices
scsi: qla2xxx: Remove unused nvme_ls_waitq wait queue
scsi: ufs: ufs-pci: Add support for Intel Arrow Lake
scsi: sd: sd_zbc: Use PAGE_SECTORS_SHIFT
scsi: ufs: wb: Add explicit flush_threshold sysfs attribute
scsi: ufs: ufs-qcom: Switch to the new ICE API
scsi: ufs: dt-bindings: qcom: Add ICE phandle
scsi: ufs: ufs-mediatek: Set UFSHCD_QUIRK_MCQ_BROKEN_RTC quirk
scsi: ufs: ufs-mediatek: Set UFSHCD_QUIRK_MCQ_BROKEN_INTR quirk
scsi: ufs: core: Add host quirk UFSHCD_QUIRK_MCQ_BROKEN_RTC
scsi: ufs: core: Add host quirk UFSHCD_QUIRK_MCQ_BROKEN_INTR
scsi: ufs: core: Remove dedicated hwq for dev command
scsi: ufs: core: mcq: Fix the incorrect OCS value for the device command
scsi: ufs: dt-bindings: samsung,exynos: Drop unneeded quotes
...
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h          |  65
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c     |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h     |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c       |  92
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c  |   8
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c      |  44
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c  |  59
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h       |  20
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h      |  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c     | 276
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h   |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c     |  61
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c    |   6
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c     |  68
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c      | 446
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h     |   4
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h  |   2
17 files changed, 566 insertions(+), 613 deletions(-)
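The first and largest hunk below collapses six parallel u8 timestamp fields in struct lpfc_cgn_info into a shared struct lpfc_cgn_ts. A minimal sketch of why that swap is layout-safe (field names are from the diff; the static assert is illustrative and not part of the driver):

```c
#include <linux/types.h>
#include <linux/build_bug.h>

struct lpfc_cgn_ts {
	uint8_t month;	/* 1 - 12 */
	uint8_t day;	/* 1 - 31 */
	uint8_t year;	/* relative to 2000 */
	uint8_t hour;
	uint8_t minute;
	uint8_t second;
};

/* Six consecutive u8 members occupy exactly the bytes the old
 * cgn_info_month..cgn_info_second fields did, so the firmware-visible
 * buffer layout is unchanged (illustrative check only).
 */
static_assert(sizeof(struct lpfc_cgn_ts) == 6 * sizeof(uint8_t));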
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 5e3a93d13a91..9a8963684369 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -429,6 +429,15 @@ struct lpfc_cgn_param { /* Max number of days of congestion data */ #define LPFC_MAX_CGN_DAYS 10 +struct lpfc_cgn_ts { + uint8_t month; + uint8_t day; + uint8_t year; + uint8_t hour; + uint8_t minute; + uint8_t second; +}; + /* Format of congestion buffer info * This structure defines memory thats allocated and registered with * the HBA firmware. When adding or removing fields from this structure @@ -442,6 +451,7 @@ struct lpfc_cgn_info { #define LPFC_CGN_INFO_V1 1 #define LPFC_CGN_INFO_V2 2 #define LPFC_CGN_INFO_V3 3 +#define LPFC_CGN_INFO_V4 4 uint8_t cgn_info_mode; /* 0=off 1=managed 2=monitor only */ uint8_t cgn_info_detect; uint8_t cgn_info_action; @@ -450,12 +460,7 @@ struct lpfc_cgn_info { uint8_t cgn_info_level2; /* Start Time */ - uint8_t cgn_info_month; - uint8_t cgn_info_day; - uint8_t cgn_info_year; - uint8_t cgn_info_hour; - uint8_t cgn_info_minute; - uint8_t cgn_info_second; + struct lpfc_cgn_ts base_time; /* minute / hours / daily indices */ uint8_t cgn_index_minute; @@ -496,45 +501,17 @@ struct lpfc_cgn_info { uint8_t cgn_stat_npm; /* Notifications per minute */ /* Start Time */ - uint8_t cgn_stat_month; - uint8_t cgn_stat_day; - uint8_t cgn_stat_year; - uint8_t cgn_stat_hour; - uint8_t cgn_stat_minute; - uint8_t cgn_pad2[2]; + struct lpfc_cgn_ts stat_start; /* Base time */ + uint8_t cgn_pad2; __le32 cgn_notification; __le32 cgn_peer_notification; __le32 link_integ_notification; __le32 delivery_notification; - - uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */ - uint8_t cgn_stat_cgn_day; - uint8_t cgn_stat_cgn_year; - uint8_t cgn_stat_cgn_hour; - uint8_t cgn_stat_cgn_min; - uint8_t cgn_stat_cgn_sec; - - uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */ - uint8_t cgn_stat_peer_day; - uint8_t cgn_stat_peer_year; - uint8_t cgn_stat_peer_hour; - uint8_t cgn_stat_peer_min; - uint8_t cgn_stat_peer_sec; - - uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */ - uint8_t cgn_stat_lnk_day; - uint8_t cgn_stat_lnk_year; - uint8_t cgn_stat_lnk_hour; - uint8_t cgn_stat_lnk_min; - uint8_t cgn_stat_lnk_sec; - - uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */ - uint8_t cgn_stat_del_day; - uint8_t cgn_stat_del_year; - uint8_t cgn_stat_del_hour; - uint8_t cgn_stat_del_min; - uint8_t cgn_stat_del_sec; + struct lpfc_cgn_ts stat_fpin; /* Last congestion notification FPIN */ + struct lpfc_cgn_ts stat_peer; /* Last peer congestion FPIN */ + struct lpfc_cgn_ts stat_lnk; /* Last link integrity FPIN */ + struct lpfc_cgn_ts stat_delivery; /* Last delivery notification FPIN */ ); __le32 cgn_info_crc; @@ -932,8 +909,6 @@ struct lpfc_hba { void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, struct lpfc_iocbq *); int (*lpfc_hba_down_post)(struct lpfc_hba *phba); - void (*lpfc_scsi_cmd_iocb_cmpl) - (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); /* MBOX interface function jump table entries */ int (*lpfc_sli_issue_mbox) @@ -1045,8 +1020,6 @@ struct lpfc_hba { * capability */ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ -#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */ -#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */ #define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ #define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ #define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a 
CONFIG_PORT mbox */ @@ -1529,6 +1502,7 @@ struct lpfc_hba { uint64_t cmf_last_sync_bw; #define LPFC_CMF_BLK_SIZE 512 struct hrtimer cmf_timer; + struct hrtimer cmf_stats_timer; /* 1 minute stats timer */ atomic_t cmf_bw_wait; atomic_t cmf_busy; atomic_t cmf_stop_io; /* To block request and stop IO's */ @@ -1576,12 +1550,11 @@ struct lpfc_hba { atomic_t cgn_sync_alarm_cnt; /* Total alarm events for SYNC wqe */ atomic_t cgn_driver_evt_cnt; /* Total driver cgn events for fmw */ atomic_t cgn_latency_evt_cnt; - struct timespec64 cgn_daily_ts; atomic64_t cgn_latency_evt; /* Avg latency per minute */ unsigned long cgn_evt_timestamp; #define LPFC_CGN_TIMER_TO_MIN 60000 /* ms in a minute */ uint32_t cgn_evt_minute; -#define LPFC_SEC_MIN 60 +#define LPFC_SEC_MIN 60UL #define LPFC_MIN_HOUR 60 #define LPFC_HOUR_DAY 24 #define LPFC_MIN_DAY (LPFC_MIN_HOUR * LPFC_HOUR_DAY) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 3863a5341782..21c7ecd3ede5 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5858,8 +5858,8 @@ int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ module_param(lpfc_fabric_cgn_frequency, int, 0444); MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq"); -int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ -module_param(lpfc_acqe_cgn_frequency, int, 0444); +unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ +module_param(lpfc_acqe_cgn_frequency, byte, 0444); MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq"); int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b833b983e69d..d4e46a08f94d 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -134,7 +134,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb); -int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); void lpfc_disc_list_loopmap(struct lpfc_vport *); void lpfc_disc_start(struct lpfc_vport *); @@ -248,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *); irqreturn_t lpfc_sli_fp_intr_handler(int, void *); irqreturn_t lpfc_sli4_intr_handler(int, void *); irqreturn_t lpfc_sli4_hba_intr_handler(int, void *); +irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id); int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap, uint32_t len); @@ -664,7 +664,7 @@ extern int lpfc_enable_nvmet_cnt; extern unsigned long long lpfc_enable_nvmet[]; extern int lpfc_no_hba_reset_cnt; extern unsigned long lpfc_no_hba_reset[]; -extern int lpfc_acqe_cgn_frequency; +extern unsigned char lpfc_acqe_cgn_frequency; extern int lpfc_fabric_cgn_frequency; extern int lpfc_use_cgn_signal; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index f3bdcebe67f5..474834f313a7 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -287,7 +287,7 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) u32 ulp_status = get_job_ulpstatus(phba, ctiocbq); u32 ulp_word4 = get_job_word4(phba, ctiocbq); u32 did; - u32 mi_cmd; + u16 mi_cmd; did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp); if (ulp_status) { @@ -311,7 +311,7 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) 
ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; - mi_cmd = ct_req->CommandResponse.bits.CmdRsp; + mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "6442 : MI Cmd : x%x Not Supported\n", mi_cmd); lpfc_ct_reject_event(ndlp, ct_req, @@ -486,7 +486,7 @@ lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) } static struct lpfc_dmabuf * -lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl, +lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl, uint32_t size, int *entries) { struct lpfc_dmabuf *mlist = NULL; @@ -507,8 +507,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl, INIT_LIST_HEAD(&mp->list); - if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) || - cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID)) + if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT || + be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID) mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); else mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys)); @@ -671,7 +671,7 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; struct lpfc_dmabuf *outmp; int cnt = 0, status; - int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)-> + __be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)-> CommandResponse.bits.CmdRsp; bpl++; /* Skip past ct request */ @@ -1043,8 +1043,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); - } else if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { @@ -1052,14 +1052,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, LOG_DISCOVERY, "0269 No NameServer Entries " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT no entry cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } else { @@ -1067,14 +1067,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, LOG_DISCOVERY, "0240 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } @@ -1085,14 +1085,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0241 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT rsp err2 cmd:x%x 
rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } @@ -1247,8 +1247,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Good status, continue checking */ CTreq = (struct lpfc_sli_ct_request *)inp->virt; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4105 NameServer Rsp Data: x%x x%x " "x%x x%x sz x%x\n", @@ -1262,8 +1262,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); - } else if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { @@ -1271,7 +1271,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport, KERN_INFO, LOG_DISCOVERY, "4106 No NameServer Entries " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); @@ -1279,7 +1279,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT no entry cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } else { @@ -1287,7 +1287,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport, KERN_INFO, LOG_DISCOVERY, "4107 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); @@ -1295,7 +1295,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } @@ -1304,7 +1304,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4109 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); @@ -1312,7 +1312,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } @@ -1391,8 +1391,8 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ", (fbits & FC4_FEATURE_TARGET) ? 
"Target" : " "); - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) { if ((fbits & FC4_FEATURE_INIT) && !(fbits & FC4_FEATURE_TARGET)) { lpfc_printf_vlog(vport, KERN_INFO, @@ -1631,7 +1631,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "0209 CT Request completes, latt %d, " "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n", latt, ulp_status, - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, @@ -1681,8 +1681,8 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RFT_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1702,8 +1702,8 @@ lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RNN_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1723,8 +1723,8 @@ lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RSPN_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1744,8 +1744,8 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RSNN_NN; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1777,8 +1777,8 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RFF_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -2217,8 +2217,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTcmd = inp->virt; struct lpfc_sli_ct_request *CTrsp = outp->virt; - uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; - uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; + __be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; + __be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; struct lpfc_nodelist *ndlp, *free_ndlp = NULL; uint32_t latt, cmd, err; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); @@ -2278,7 +2278,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for a CT LS_RJT response */ cmd = be16_to_cpu(fdmi_cmd); - if (fdmi_rsp == 
cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) { + if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { /* FDMI rsp failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); @@ -3110,7 +3110,7 @@ lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr) } /* RHBA attribute jump table */ -int (*lpfc_fdmi_hba_action[]) +static int (*lpfc_fdmi_hba_action[]) (struct lpfc_vport *vport, void *attrbuf) = { /* Action routine Mask bit Attribute type */ lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */ @@ -3134,7 +3134,7 @@ int (*lpfc_fdmi_hba_action[]) }; /* RPA / RPRT attribute jump table */ -int (*lpfc_fdmi_port_action[]) +static int (*lpfc_fdmi_port_action[]) (struct lpfc_vport *vport, void *attrbuf) = { /* Action routine Mask bit Attribute type */ lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */ @@ -3570,7 +3570,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *ctcmd = inp->virt; struct lpfc_sli_ct_request *ctrsp = outp->virt; - u16 rsp = ctrsp->CommandResponse.bits.CmdRsp; + __be16 rsp = ctrsp->CommandResponse.bits.CmdRsp; struct app_id_object *app; struct lpfc_nodelist *ndlp = cmdiocb->ndlp; u32 cmd, hash, bucket; @@ -3587,7 +3587,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto free_res; } /* Check for a CT LS_RJT response */ - if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) { if (cmd != SLI_CTAS_DALLAPP_ID) lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "3306 VMID FS_RJT Data: x%x x%x x%x\n", @@ -3748,7 +3748,7 @@ lpfc_vmid_cmd(struct lpfc_vport *vport, rap->obj[0].entity_id_len = vmid->vmid_len; memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); size = RAPP_IDENT_OFFSET + - sizeof(struct lpfc_vmid_rapp_ident_list); + struct_size(rap, obj, be32_to_cpu(rap->no_of_objects)); retry = 1; break; @@ -3767,7 +3767,7 @@ lpfc_vmid_cmd(struct lpfc_vport *vport, dap->obj[0].entity_id_len = vmid->vmid_len; memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); size = DAPP_IDENT_OFFSET + - sizeof(struct lpfc_vmid_dapp_ident_list); + struct_size(dap, obj, be32_to_cpu(dap->no_of_objects)); write_lock(&vport->vmid_lock); vmid->flag &= ~LPFC_VMID_REGISTERED; write_unlock(&vport->vmid_lock); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index bdf34af4ef36..7f9b221e7c34 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -2259,11 +2259,15 @@ lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file) goto out; } spin_unlock_irq(&phba->hbalock); - debug = kmalloc(sizeof(*debug), GFP_KERNEL); + + if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE, + phba->cfg_ras_fwlog_buffsize, &size)) + goto out; + + debug = kzalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; - size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize; debug->buffer = vmalloc(size); if (!debug->buffer) goto free_debug; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 6a15f879e517..2bad9954c355 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -5205,14 +5205,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) * * This routine is the completion callback function to the Logout (LOGO) * Accept (ACC) Response ELS command. 
This routine is invoked to indicate - * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to - * release the ndlp if it has the last reference remaining (reference count - * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp - * field to NULL to inform the following lpfc_els_free_iocb() routine no - * ndlp reference count needs to be decremented. Otherwise, the ndlp - * reference use-count shall be decremented by the lpfc_els_free_iocb() - * routine. Finally, the lpfc_els_free_iocb() is invoked to release the - * IOCB data structure. + * the completion of the LOGO process. If the node has transitioned to NPR, + * this routine unregisters the RPI if it is still registered. The + * lpfc_els_free_iocb() is invoked to release the IOCB data structure. **/ static void lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, @@ -5253,19 +5248,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) goto out; - /* NPort Recovery mode or node is just allocated */ - if (!lpfc_nlp_not_used(ndlp)) { - /* A LOGO is completing and the node is in NPR state. - * Just unregister the RPI because the node is still - * required. - */ + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) lpfc_unreg_rpi(vport, ndlp); - } else { - /* Indicate the node has already released, should - * not reference to it from within lpfc_els_free_iocb. - */ - cmdiocb->ndlp = NULL; - } + } out: /* @@ -5285,9 +5270,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * RPI (Remote Port Index) mailbox command to the @phba. It simply releases * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and * decrements the ndlp reference count held for this completion callback - * function. After that, it invokes the lpfc_nlp_not_used() to check - * whether there is only one reference left on the ndlp. If so, it will - * perform one more decrement and trigger the release of the ndlp. + * function. After that, it invokes the lpfc_drop_node to check + * whether it is appropriate to release the node. **/ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) @@ -5468,9 +5452,19 @@ out: ndlp->nlp_flag &= ~NLP_RELEASE_RPI; spin_unlock_irq(&ndlp->lock); } + lpfc_drop_node(vport, ndlp); + } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + /* Drop ndlp if there is no planned or outstanding + * issued PRLI. + * + * In cases when the ndlp is acting as both an initiator + * and target function, let our issued PRLI determine + * the final ndlp kref drop. + */ + lpfc_drop_node(vport, ndlp); } - - lpfc_drop_node(vport, ndlp); } /* Release the originating I/O reference. */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 5ba3a9ad9501..499849b58ee4 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -458,11 +458,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (ndlp->nlp_type & NLP_FABRIC) { spin_lock_irqsave(&ndlp->lock, iflags); - /* In massive vport configuration settings or when the FLOGI - * completes with a sequence timeout, it's possible - * dev_loss_tmo fired during node recovery. The driver has to - * account for this race to allow for recovery and keep - * the reference counting correct. + /* The driver has to account for a race between any fabric + * node that's in recovery when dev_loss_tmo expires. 
When this + * happens, the driver has to allow node recovery. */ switch (ndlp->nlp_DID) { case Fabric_DID: @@ -489,6 +487,17 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) recovering = true; break; + default: + /* Ensure the nlp_DID at least has the correct prefix. + * The fabric domain controller's last three nibbles + * vary so we handle it in the default case. + */ + if (ndlp->nlp_DID & Fabric_DID_MASK) { + if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) + recovering = true; + } + break; } spin_unlock_irqrestore(&ndlp->lock, iflags); @@ -556,6 +565,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM event. @@ -565,9 +577,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); @@ -4333,13 +4342,14 @@ out: /* If the node is not registered with the scsi or nvme * transport, remove the fabric node. The failed reg_login - * is terminal. + * is terminal and forces the removal of the last node + * reference. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); - lpfc_nlp_not_used(ndlp); + lpfc_nlp_put(ndlp); } if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { @@ -4497,14 +4507,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (vport->load_flag & FC_UNLOADING) return; - /* - * Disassociate any older association between this ndlp and rport - */ - if (ndlp->rport) { - rdata = ndlp->rport->dd_data; - rdata->pnode = NULL; - } - ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); if (!rport) { dev_printk(KERN_WARNING, &phba->pcidev->dev, @@ -4835,7 +4837,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state) }; if (state < NLP_STE_MAX_STATE && states[state]) - strlcpy(buffer, states[state], size); + strscpy(buffer, states[state], size); else snprintf(buffer, size, "unknown (%d)", state); return buffer; @@ -6704,25 +6706,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; } -/* This routine free's the specified nodelist if it is not in use - * by any other discovery thread. This routine returns 1 if the - * ndlp has been freed. A return value of 0 indicates the ndlp is - * not yet been released. - */ -int -lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) -{ - lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node not used: did:x%x flg:x%x refcnt:x%x", - ndlp->nlp_DID, ndlp->nlp_flag, - kref_read(&ndlp->kref)); - - if (kref_read(&ndlp->kref) == 1) - if (lpfc_nlp_put(ndlp)) - return 1; - return 0; -} - /** * lpfc_fcf_inuse - Check if FCF can be unregistered. * @phba: Pointer to hba context object. 
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 19b2d2754f32..663755842e4a 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -86,8 +86,8 @@ union CtRevisionId { union CtCommandResponse { /* Structure is in Big Endian format */ struct { - uint32_t CmdRsp:16; - uint32_t Size:16; + __be16 CmdRsp; + __be16 Size; } bits; uint32_t word; }; @@ -124,7 +124,7 @@ struct lpfc_sli_ct_request { #define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */ union { - uint32_t PortID; + __be32 PortID; struct gid { uint8_t PortType; /* for GID_PT requests */ #define GID_PT_N_PORT 1 @@ -1408,19 +1408,19 @@ struct entity_id_object { }; struct app_id_object { - uint32_t port_id; - uint32_t app_id; + __be32 port_id; + __be32 app_id; struct entity_id_object obj; }; struct lpfc_vmid_rapp_ident_list { - uint32_t no_of_objects; - struct entity_id_object obj[1]; + __be32 no_of_objects; + struct entity_id_object obj[]; }; struct lpfc_vmid_dapp_ident_list { - uint32_t no_of_objects; - struct entity_id_object obj[1]; + __be32 no_of_objects; + struct entity_id_object obj[]; }; #define GALLAPPIA_ID_LAST 0x80 @@ -1512,7 +1512,7 @@ struct lpfc_fdmi_hba_ident { * Registered Port List Format */ struct lpfc_fdmi_reg_port_list { - uint32_t EntryCnt; + __be32 EntryCnt; struct lpfc_fdmi_port_entry pe; } __packed; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 58fa39c403a0..5d4f9f27084d 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -395,9 +395,6 @@ struct lpfc_cqe { #define CQE_STATUS_NEED_BUFF_ENTRY 0xf #define CQE_STATUS_DI_ERROR 0x16 -/* Used when mapping CQE status to IOCB */ -#define LPFC_IOCB_STATUS_MASK 0xf - /* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). 
*/ #define CQE_HW_STATUS_NO_ERR 0x0 #define CQE_HW_STATUS_UNDERRUN 0x1 @@ -536,9 +533,9 @@ struct sli4_wcqe_xri_aborted { /* completion queue entry structure for rqe completion */ struct lpfc_rcqe { uint32_t word0; -#define lpfc_rcqe_bindex_SHIFT 16 -#define lpfc_rcqe_bindex_MASK 0x0000FFF -#define lpfc_rcqe_bindex_WORD word0 +#define lpfc_rcqe_iv_SHIFT 31 +#define lpfc_rcqe_iv_MASK 0x00000001 +#define lpfc_rcqe_iv_WORD word0 #define lpfc_rcqe_status_SHIFT 8 #define lpfc_rcqe_status_MASK 0x000000FF #define lpfc_rcqe_status_WORD word0 @@ -546,6 +543,7 @@ struct lpfc_rcqe { #define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ #define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ #define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ +#define FC_STATUS_RQ_DMA_FAILURE 0x14 /* DMA failure */ uint32_t word1; #define lpfc_rcqe_fcf_id_v1_SHIFT 0 #define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F @@ -4813,8 +4811,8 @@ struct cmf_sync_wqe { #define cmf_sync_cqid_WORD word11 uint32_t read_bytes; uint32_t word13; -#define cmf_sync_period_SHIFT 16 -#define cmf_sync_period_MASK 0x0000ffff +#define cmf_sync_period_SHIFT 24 +#define cmf_sync_period_MASK 0x000000ff #define cmf_sync_period_WORD word13 uint32_t word14; uint32_t word15; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 867b4c788f08..3221a934066b 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -101,6 +101,7 @@ static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); #define LPFC_NVMET_BUF_POST 254 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); +static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts); /** * lpfc_config_port_prep - Perform lpfc initialization prior to config port @@ -1279,7 +1280,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) /* * lpfc_idle_stat_delay_work - idle_stat tracking * - * This routine tracks per-cq idle_stat and determines polling decisions. + * This routine tracks per-eq idle_stat and determines polling decisions. 
* * Return codes: * None @@ -1290,7 +1291,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work) struct lpfc_hba *phba = container_of(to_delayed_work(work), struct lpfc_hba, idle_stat_delay_work); - struct lpfc_queue *cq; + struct lpfc_queue *eq; struct lpfc_sli4_hdw_queue *hdwq; struct lpfc_idle_stat *idle_stat; u32 i, idle_percent; @@ -1306,10 +1307,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work) for_each_present_cpu(i) { hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; - cq = hdwq->io_cq; + eq = hdwq->hba_eq; - /* Skip if we've already handled this cq's primary CPU */ - if (cq->chann != i) + /* Skip if we've already handled this eq's primary CPU */ + if (eq->chann != i) continue; idle_stat = &phba->sli4_hba.idle_stat[i]; @@ -1333,9 +1334,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work) idle_percent = 100 - idle_percent; if (idle_percent < 15) - cq->poll_mode = LPFC_QUEUE_WORK; + eq->poll_mode = LPFC_QUEUE_WORK; else - cq->poll_mode = LPFC_IRQ_POLL; + eq->poll_mode = LPFC_THREADED_IRQ; idle_stat->prev_idle = wall_idle; idle_stat->prev_wall = wall; @@ -3197,6 +3198,7 @@ lpfc_cmf_stop(struct lpfc_hba *phba) "6221 Stop CMF / Cancel Timer\n"); /* Cancel the CMF timer */ + hrtimer_cancel(&phba->cmf_stats_timer); hrtimer_cancel(&phba->cmf_timer); /* Zero CMF counters */ @@ -3283,7 +3285,10 @@ lpfc_cmf_start(struct lpfc_hba *phba) phba->cmf_timer_cnt = 0; hrtimer_start(&phba->cmf_timer, - ktime_set(0, LPFC_CMF_INTERVAL * 1000000), + ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC), + HRTIMER_MODE_REL); + hrtimer_start(&phba->cmf_stats_timer, + ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC), HRTIMER_MODE_REL); /* Setup for latency check in IO cmpl routines */ ktime_get_real_ts64(&phba->cmf_latency); @@ -4357,6 +4362,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_cmd; int idx, cnt; + unsigned long iflags; qp = phba->sli4_hba.hdwq; cnt = 0; @@ -4371,12 +4377,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) lpfc_cmd->hdwq_no = idx; lpfc_cmd->hdwq = qp; lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; - spin_lock(&qp->io_buf_list_put_lock); + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags); list_add_tail(&lpfc_cmd->list, &qp->lpfc_io_buf_list_put); qp->put_io_bufs++; qp->total_io_bufs++; - spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, + iflags); } } return cnt; @@ -5593,81 +5600,74 @@ void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) { struct lpfc_cgn_info *cp; - struct tm broken; - struct timespec64 cur_time; - u32 cnt; u32 value; /* Make sure we have a congestion info buffer */ if (!phba->cgn_i) return; cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; - ktime_get_real_ts64(&cur_time); - time64_to_tm(cur_time.tv_sec, 0, &broken); /* Update congestion statistics */ switch (dtag) { case ELS_DTAG_LNK_INTEGRITY: - cnt = le32_to_cpu(cp->link_integ_notification); - cnt++; - cp->link_integ_notification = cpu_to_le32(cnt); - - cp->cgn_stat_lnk_month = broken.tm_mon + 1; - cp->cgn_stat_lnk_day = broken.tm_mday; - cp->cgn_stat_lnk_year = broken.tm_year - 100; - cp->cgn_stat_lnk_hour = broken.tm_hour; - cp->cgn_stat_lnk_min = broken.tm_min; - cp->cgn_stat_lnk_sec = broken.tm_sec; + le32_add_cpu(&cp->link_integ_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_lnk); break; case ELS_DTAG_DELIVERY: - cnt = le32_to_cpu(cp->delivery_notification); - cnt++; - cp->delivery_notification = cpu_to_le32(cnt); - - 
cp->cgn_stat_del_month = broken.tm_mon + 1; - cp->cgn_stat_del_day = broken.tm_mday; - cp->cgn_stat_del_year = broken.tm_year - 100; - cp->cgn_stat_del_hour = broken.tm_hour; - cp->cgn_stat_del_min = broken.tm_min; - cp->cgn_stat_del_sec = broken.tm_sec; + le32_add_cpu(&cp->delivery_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_delivery); break; case ELS_DTAG_PEER_CONGEST: - cnt = le32_to_cpu(cp->cgn_peer_notification); - cnt++; - cp->cgn_peer_notification = cpu_to_le32(cnt); - - cp->cgn_stat_peer_month = broken.tm_mon + 1; - cp->cgn_stat_peer_day = broken.tm_mday; - cp->cgn_stat_peer_year = broken.tm_year - 100; - cp->cgn_stat_peer_hour = broken.tm_hour; - cp->cgn_stat_peer_min = broken.tm_min; - cp->cgn_stat_peer_sec = broken.tm_sec; + le32_add_cpu(&cp->cgn_peer_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_peer); break; case ELS_DTAG_CONGESTION: - cnt = le32_to_cpu(cp->cgn_notification); - cnt++; - cp->cgn_notification = cpu_to_le32(cnt); - - cp->cgn_stat_cgn_month = broken.tm_mon + 1; - cp->cgn_stat_cgn_day = broken.tm_mday; - cp->cgn_stat_cgn_year = broken.tm_year - 100; - cp->cgn_stat_cgn_hour = broken.tm_hour; - cp->cgn_stat_cgn_min = broken.tm_min; - cp->cgn_stat_cgn_sec = broken.tm_sec; + le32_add_cpu(&cp->cgn_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_fpin); } if (phba->cgn_fpin_frequency && phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; cp->cgn_stat_npm = value; } + value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(value); } /** - * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer + * lpfc_cgn_update_tstamp - Update cmf timestamp * @phba: pointer to lpfc hba data structure. + * @ts: structure to write the timestamp to. + */ +void +lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts) +{ + struct timespec64 cur_time; + struct tm tm_val; + + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &tm_val); + + ts->month = tm_val.tm_mon + 1; + ts->day = tm_val.tm_mday; + ts->year = tm_val.tm_year - 100; + ts->hour = tm_val.tm_hour; + ts->minute = tm_val.tm_min; + ts->second = tm_val.tm_sec; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2646 Updated CMF timestamp : " + "%u/%u/%u %u:%u:%u\n", + ts->day, ts->month, + ts->year, ts->hour, + ts->minute, ts->second); +} + +/** + * lpfc_cmf_stats_timer - Save data into registered congestion buffer + * @timer: Timer cookie to access lpfc private data * * Save the congestion event data every minute. * On the hour collapse all the minute data into hour data. Every day @@ -5675,12 +5675,11 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) * and fabrc congestion event counters that will be saved out * to the registered congestion buffer every minute. 
*/ -static void -lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) +static enum hrtimer_restart +lpfc_cmf_stats_timer(struct hrtimer *timer) { + struct lpfc_hba *phba; struct lpfc_cgn_info *cp; - struct tm broken; - struct timespec64 cur_time; uint32_t i, index; uint16_t value, mvalue; uint64_t bps; @@ -5691,21 +5690,18 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) __le32 *lptr; __le16 *mptr; + phba = container_of(timer, struct lpfc_hba, cmf_stats_timer); /* Make sure we have a congestion info buffer */ if (!phba->cgn_i) - return; + return HRTIMER_NORESTART; cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; - if (time_before(jiffies, phba->cgn_evt_timestamp)) - return; phba->cgn_evt_timestamp = jiffies + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); phba->cgn_evt_minute++; /* We should get to this point in the routine on 1 minute intervals */ - - ktime_get_real_ts64(&cur_time); - time64_to_tm(cur_time.tv_sec, 0, &broken); + lpfc_cgn_update_tstamp(phba, &cp->base_time); if (phba->cgn_fpin_frequency && phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { @@ -5858,31 +5854,6 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) index = 0; } - /* Anytime we overwrite daily index 0, after we wrap, - * we will be overwriting the oldest day, so we must - * update the congestion data start time for that day. - * That start time should have previously been saved after - * we wrote the last days worth of data. - */ - if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { - time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); - - cp->cgn_info_month = broken.tm_mon + 1; - cp->cgn_info_day = broken.tm_mday; - cp->cgn_info_year = broken.tm_year - 100; - cp->cgn_info_hour = broken.tm_hour; - cp->cgn_info_minute = broken.tm_min; - cp->cgn_info_second = broken.tm_sec; - - lpfc_printf_log - (phba, KERN_INFO, LOG_CGN_MGMT, - "2646 CGNInfo idx0 Start Time: " - "%d/%d/%d %d:%d:%d\n", - cp->cgn_info_day, cp->cgn_info_month, - cp->cgn_info_year, cp->cgn_info_hour, - cp->cgn_info_minute, cp->cgn_info_second); - } - dvalue = 0; wvalue = 0; lvalue = 0; @@ -5916,15 +5887,6 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) "2420 Congestion Info - daily (%d): " "%d %d %d %d %d\n", index, dvalue, wvalue, lvalue, mvalue, avalue); - - /* We just wrote LPFC_MAX_CGN_DAYS of data, - * so we are wrapped on any data after this. - * Save this as the start time for the next day. - */ - if (index == (LPFC_MAX_CGN_DAYS - 1)) { - phba->hba_flag |= HBA_CGN_DAY_WRAP; - ktime_get_real_ts64(&phba->cgn_daily_ts); - } } /* Use the frequency found in the last rcv'ed FPIN */ @@ -5935,6 +5897,10 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(lvalue); + + hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC)); + + return HRTIMER_RESTART; } /** @@ -6065,13 +6031,6 @@ lpfc_cmf_timer(struct hrtimer *timer) if (ms && ms < LPFC_CMF_INTERVAL) { cnt = div_u64(total, ms); /* bytes per ms */ cnt *= LPFC_CMF_INTERVAL; /* what total should be */ - - /* If the timeout is scheduled to be shorter, - * this value may skew the data, so cap it at mbpi. 
- */ - if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi) - cnt = mbpi; - extra = cnt - total; } lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); @@ -6141,34 +6100,6 @@ lpfc_cmf_timer(struct hrtimer *timer) } phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ - /* Each minute save Fabric and Driver congestion information */ - lpfc_cgn_save_evt_cnt(phba); - - phba->hba_flag &= ~HBA_SHORT_CMF; - - /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the - * minute, adjust our next timer interval, if needed, to ensure a - * 1 minute granularity when we get the next timer interrupt. - */ - if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), - phba->cgn_evt_timestamp)) { - timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - - jiffies); - if (timer_interval <= 0) - timer_interval = LPFC_CMF_INTERVAL; - else - phba->hba_flag |= HBA_SHORT_CMF; - - /* If we adjust timer_interval, max_bytes_per_interval - * needs to be adjusted as well. - */ - phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * - timer_interval, 1000); - if (phba->cmf_active_mode == LPFC_CFG_MONITOR) - phba->cmf_max_bytes_per_interval = - phba->cmf_link_byte_count; - } - /* Since total_bytes has already been zero'ed, its okay to unblock * after max_bytes_per_interval is setup. */ @@ -8014,6 +7945,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* CMF congestion timer */ hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); phba->cmf_timer.function = lpfc_cmf_timer; + /* CMF 1 minute stats collection timer */ + hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + phba->cmf_stats_timer.function = lpfc_cmf_stats_timer; /* * Control structure for handling external multi-buffer mailbox @@ -13117,8 +13051,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) } eqhdl->irq = rc; - rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0, - name, eqhdl); + rc = request_threaded_irq(eqhdl->irq, + &lpfc_sli4_hba_intr_handler, + &lpfc_sli4_hba_intr_handler_th, + IRQF_ONESHOT, name, eqhdl); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " @@ -13521,6 +13457,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) struct pci_dev *pdev = phba->pcidev; lpfc_stop_hba_timers(phba); + hrtimer_cancel(&phba->cmf_stats_timer); hrtimer_cancel(&phba->cmf_timer); if (phba->pport) @@ -13645,8 +13582,6 @@ void lpfc_init_congestion_buf(struct lpfc_hba *phba) { struct lpfc_cgn_info *cp; - struct timespec64 cmpl_time; - struct tm broken; uint16_t size; uint32_t crc; @@ -13666,11 +13601,10 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba) atomic_set(&phba->cgn_latency_evt_cnt, 0); atomic64_set(&phba->cgn_latency_evt, 0); phba->cgn_evt_minute = 0; - phba->hba_flag &= ~HBA_CGN_DAY_WRAP; memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); - cp->cgn_info_version = LPFC_CGN_INFO_V3; + cp->cgn_info_version = LPFC_CGN_INFO_V4; /* cgn parameters */ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; @@ -13678,22 +13612,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba) cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; - ktime_get_real_ts64(&cmpl_time); - time64_to_tm(cmpl_time.tv_sec, 0, &broken); - - cp->cgn_info_month = broken.tm_mon + 1; - cp->cgn_info_day = broken.tm_mday; - cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ - cp->cgn_info_hour = broken.tm_hour; - cp->cgn_info_minute = 
broken.tm_min; - cp->cgn_info_second = broken.tm_sec; - - lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, - "2643 CGNInfo Init: Start Time " - "%d/%d/%d %d:%d:%d\n", - cp->cgn_info_day, cp->cgn_info_month, - cp->cgn_info_year, cp->cgn_info_hour, - cp->cgn_info_minute, cp->cgn_info_second); + lpfc_cgn_update_tstamp(phba, &cp->base_time); /* Fill in default LUN qdepth */ if (phba->pport) { @@ -13716,8 +13635,6 @@ void lpfc_init_congestion_stat(struct lpfc_hba *phba) { struct lpfc_cgn_info *cp; - struct timespec64 cmpl_time; - struct tm broken; uint32_t crc; lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, @@ -13729,22 +13646,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba) cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); - ktime_get_real_ts64(&cmpl_time); - time64_to_tm(cmpl_time.tv_sec, 0, &broken); - - cp->cgn_stat_month = broken.tm_mon + 1; - cp->cgn_stat_day = broken.tm_mday; - cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */ - cp->cgn_stat_hour = broken.tm_hour; - cp->cgn_stat_minute = broken.tm_min; - - lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, - "2647 CGNstat Init: Start Time " - "%d/%d/%d %d:%d\n", - cp->cgn_stat_day, cp->cgn_stat_month, - cp->cgn_stat_year, cp->cgn_stat_hour, - cp->cgn_stat_minute); - + lpfc_cgn_update_tstamp(phba, &cp->stat_start); crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(crc); } @@ -14743,10 +14645,10 @@ lpfc_write_firmware(const struct firmware *fw, void *context) INIT_LIST_HEAD(&dma_buffer_list); lpfc_decode_firmware_rev(phba, fwrev, 1); if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3023 Updating Firmware, Current Version:%s " - "New Version:%s\n", - fwrev, image->revision); + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3023 Updating Firmware, Current Version:%s " + "New Version:%s\n", + fwrev, image->revision); for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); @@ -14793,10 +14695,10 @@ lpfc_write_firmware(const struct firmware *fw, void *context) } rc = offset; } else - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3029 Skipped Firmware update, Current " - "Version:%s New Version:%s\n", - fwrev, image->revision); + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3029 Skipped Firmware update, Current " + "Version:%s New Version:%s\n", + fwrev, image->revision); release_out: list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { @@ -14808,11 +14710,11 @@ release_out: release_firmware(fw); out: if (rc < 0) - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3062 Firmware update error, status %d.\n", rc); + lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI, + "3062 Firmware update error, status %d.\n", rc); else - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3024 Firmware update success: size %d.\n", rc); + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3024 Firmware update success: size %d.\n", rc); } /** diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index b39cefcd8703..f896ec610433 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. 
The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -55,7 +55,7 @@ void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...); /* generate message by verbose log setting or severity */ #define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \ -{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \ +{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '5')) \ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } @@ -64,7 +64,7 @@ do { \ { uint32_t log_verbose = (phba)->pport ? \ (phba)->pport->cfg_log_verbose : \ (phba)->cfg_log_verbose; \ - if (((mask) & log_verbose) || (level[1] <= '4')) \ + if (((mask) & log_verbose) || (level[1] <= '5')) \ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ fmt, phba->brd_no, ##arg); \ } \ diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index adda70423c77..8db7cb99903d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -310,20 +310,20 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, * for the LS request. **/ void -__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, +__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe) { struct nvmefc_ls_req *pnvme_lsreq; struct lpfc_dmabuf *buf_ptr; struct lpfc_nodelist *ndlp; - uint32_t status; + int status; pnvme_lsreq = cmdwqe->context_un.nvme_lsreq; ndlp = cmdwqe->ndlp; buf_ptr = cmdwqe->bpl_dmabuf; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x " @@ -343,14 +343,17 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, kfree(buf_ptr); cmdwqe->bpl_dmabuf = NULL; } - if (pnvme_lsreq->done) + if (pnvme_lsreq->done) { + if (status != CQE_STATUS_SUCCESS) + status = -ENXIO; pnvme_lsreq->done(pnvme_lsreq, status); - else + } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6046 NVMEx cmpl without done call back? " "Data x%px DID %x Xri: %x status %x\n", pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, cmdwqe->sli4_xritag, status); + } if (ndlp) { lpfc_nlp_put(ndlp); cmdwqe->ndlp = NULL; @@ -367,7 +370,7 @@ lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, uint32_t status; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); if (vport->localport) { lport = (struct lpfc_nvme_lport *)vport->localport->private; @@ -1040,7 +1043,7 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; nCmd->transferred_length = nCmd->payload_length; } else { - lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK); + lpfc_ncmd->status = status; lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK); /* For NVME, the only failure path that results in an @@ -1893,38 +1896,38 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, pnvme_rport->port_id, pnvme_fcreq); - /* If the hba is getting reset, this flag is set. It is - * cleared when the reset is complete and rings reestablished. 
- */ - spin_lock_irqsave(&phba->hbalock, flags); - /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock_irqrestore(&phba->hbalock, flags); - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "6139 Driver in reset cleanup - flushing " - "NVME Req now. hba_flag x%x\n", - phba->hba_flag); - return; - } - lpfc_nbuf = freqpriv->nvme_buf; if (!lpfc_nbuf) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6140 NVME IO req has no matching lpfc nvme " "io buffer. Skipping abort req.\n"); return; } else if (!lpfc_nbuf->nvmeCmd) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6141 lpfc NVME IO req has no nvme_fcreq " "io buffer. Skipping abort req.\n"); return; } - nvmereq_wqe = &lpfc_nbuf->cur_iocbq; /* Guard against IO completion being called at same time */ - spin_lock(&lpfc_nbuf->buf_lock); + spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock(&phba->hbalock); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_IOQ_FLUSH) { + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6139 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x\n", + phba->hba_flag); + return; + } + + nvmereq_wqe = &lpfc_nbuf->cur_iocbq; /* * The lpfc_nbuf and the mapped nvme_fcreq in the driver's @@ -1971,8 +1974,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe, lpfc_nvme_abort_fcreq_cmpl); - spin_unlock(&lpfc_nbuf->buf_lock); - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); @@ -1998,8 +2001,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, return; out_unlock: - spin_unlock(&lpfc_nbuf->buf_lock); - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); return; } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 7517dd55fe91..dff4584d338b 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* @@ -300,7 +300,7 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp; uint32_t status, result; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) { @@ -350,7 +350,7 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, if (!phba->targetport) goto finish; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index e989f130434e..a62e091894f6 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4026,7 +4026,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, struct lpfc_fast_path_event *fast_path_evt; struct Scsi_Host *shost; u32 logit = LOG_FCP; - u32 status, idx; + u32 idx; u32 lat; u8 wait_xb_clr = 0; @@ -4061,8 +4061,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, #endif shost = cmd->device->host; - status = bf_get(lpfc_wcqe_c_status, wcqe); - lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK); + lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe); lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; @@ -4104,11 +4103,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, } #endif if (unlikely(lpfc_cmd->status)) { - if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && - (lpfc_cmd->result & IOERR_DRVR_MASK)) - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; - else if (lpfc_cmd->status >= IOSTAT_CNT) - lpfc_cmd->status = IOSTAT_DEFAULT; if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && !lpfc_cmd->fcp_rsp->rspStatus3 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && @@ -4133,16 +4127,16 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, } switch (lpfc_cmd->status) { - case IOSTAT_SUCCESS: + case CQE_STATUS_SUCCESS: cmd->result = DID_OK << 16; break; - case IOSTAT_FCP_RSP_ERROR: + case CQE_STATUS_FCP_RSP_FAILURE: lpfc_handle_fcp_err(vport, lpfc_cmd, pwqeIn->wqe.fcp_iread.total_xfer_len - wcqe->total_data_placed); break; - case IOSTAT_NPORT_BSY: - case IOSTAT_FABRIC_BSY: + case CQE_STATUS_NPORT_BSY: + case CQE_STATUS_FABRIC_BSY: cmd->result = DID_TRANSPORT_DISRUPTED << 16; fast_path_evt = lpfc_alloc_fast_evt(phba); if (!fast_path_evt) @@ -4185,7 +4179,27 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, wcqe->total_data_placed, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); break; - case IOSTAT_REMOTE_STOP: + case CQE_STATUS_DI_ERROR: + if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) + lpfc_cmd->result = IOERR_RX_DMA_FAILED; + else + lpfc_cmd->result = IOERR_TX_DMA_FAILED; + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG, + "9048 DI Error xri x%x status x%x DI ext " + "status x%x data placed x%x\n", + lpfc_cmd->cur_iocbq.sli4_xritag, + lpfc_cmd->status, wcqe->parameter, + wcqe->total_data_placed); + if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + /* BG enabled cmd. 
Parse BG error */ + lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); + break; + } + cmd->result = DID_ERROR << 16; + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "9040 DI Error on unprotected cmd\n"); + break; + case CQE_STATUS_REMOTE_STOP: if (ndlp) { /* This I/O was aborted by the target, we don't * know the rxid and because we did not send the @@ -4196,7 +4210,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 0, 0); } fallthrough; - case IOSTAT_LOCAL_REJECT: + case CQE_STATUS_LOCAL_REJECT: if (lpfc_cmd->result & IOERR_DRVR_MASK) lpfc_cmd->status = IOSTAT_DRIVER_REJECT; if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || @@ -4217,24 +4231,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, cmd->result = DID_TRANSPORT_DISRUPTED << 16; break; } - if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || - lpfc_cmd->result == IOERR_TX_DMA_FAILED) && - status == CQE_STATUS_DI_ERROR) { - if (scsi_get_prot_op(cmd) != - SCSI_PROT_NORMAL) { - /* - * This is a response for a BG enabled - * cmd. Parse BG error - */ - lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); - break; - } else { - lpfc_printf_vlog(vport, KERN_WARNING, - LOG_BG, - "9040 non-zero BGSTAT " - "on unprotected cmd\n"); - } - } lpfc_printf_vlog(vport, KERN_WARNING, logit, "9036 Local Reject FCP cmd x%x failed" " <%d/%lld> " @@ -4253,10 +4249,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); fallthrough; default: - if (lpfc_cmd->status >= IOSTAT_CNT) - lpfc_cmd->status = IOSTAT_DEFAULT; cmd->result = DID_ERROR << 16; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "9037 FCP Completion Error: xri %x " "status x%x result x%x [x%x] " "placed x%x\n", @@ -4273,7 +4267,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n", cmd->device->id, cmd->device->lun, cmd, cmd->result, *lp, *(lp + 3), - (u64)scsi_get_lba(cmd), + (cmd->device->sector_size) ? 
+ (u64)scsi_get_lba(cmd) : 0, cmd->retries, scsi_get_resid(cmd)); } @@ -5009,7 +5004,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) return -ENODEV; } phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; - phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; return 0; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8693578888f1..58d10f8f75a7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -82,7 +82,8 @@ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, - struct lpfc_eqe *eqe); + struct lpfc_eqe *eqe, + enum lpfc_poll_mode poll_mode); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); @@ -629,7 +630,7 @@ lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) static int lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, - uint8_t rearm) + u8 rearm, enum lpfc_poll_mode poll_mode) { struct lpfc_eqe *eqe; int count = 0, consumed = 0; @@ -639,7 +640,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, eqe = lpfc_sli4_eq_get(eq); while (eqe) { - lpfc_sli4_hba_handle_eqe(phba, eq, eqe); + lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode); __lpfc_sli4_consume_eqe(phba, eq, eqe); consumed++; @@ -1931,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) unsigned long iflags; u32 ret_val; u32 atot, wtot, max; - u16 warn_sync_period = 0; + u8 warn_sync_period = 0; /* First address any alarm / warning activity */ atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); @@ -7957,7 +7958,7 @@ out_rdf: * lpfc_init_idle_stat_hb - Initialize idle_stat tracking * @phba: pointer to lpfc hba data structure. * - * This routine initializes the per-cq idle_stat to dynamically dictate + * This routine initializes the per-eq idle_stat to dynamically dictate * polling decisions. 
* * Return codes: @@ -7967,16 +7968,16 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) { int i; struct lpfc_sli4_hdw_queue *hdwq; - struct lpfc_queue *cq; + struct lpfc_queue *eq; struct lpfc_idle_stat *idle_stat; u64 wall; for_each_present_cpu(i) { hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; - cq = hdwq->io_cq; + eq = hdwq->hba_eq; - /* Skip if we've already handled this cq's primary CPU */ - if (cq->chann != i) + /* Skip if we've already handled this eq's primary CPU */ + if (eq->chann != i) continue; idle_stat = &phba->sli4_hba.idle_stat[i]; @@ -7985,13 +7986,14 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) idle_stat->prev_wall = wall; if (phba->nvmet_support || - phba->cmf_active_mode != LPFC_CFG_OFF) - cq->poll_mode = LPFC_QUEUE_WORK; + phba->cmf_active_mode != LPFC_CFG_OFF || + phba->intr_type != MSIX) + eq->poll_mode = LPFC_QUEUE_WORK; else - cq->poll_mode = LPFC_IRQ_POLL; + eq->poll_mode = LPFC_THREADED_IRQ; } - if (!phba->nvmet_support) + if (!phba->nvmet_support && phba->intr_type == MSIX) schedule_delayed_work(&phba->idle_stat_delay_work, msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); } @@ -9218,7 +9220,8 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) if (mbox_pending) /* process and rearm the EQ */ - lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); + lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_QUEUE_WORK); else /* Always clear and re-arm the EQ */ sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); @@ -11254,7 +11257,8 @@ inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq) * will be handled through a sched from polling timer * function which is currently triggered every 1msec. */ - lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM); + lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM, + LPFC_QUEUE_WORK); } /** @@ -14682,6 +14686,38 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; + case FC_STATUS_RQ_DMA_FAILURE: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2564 RQE DMA Error x%x, x%08x x%08x x%08x " + "x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + + /* If IV set, no further recovery */ + if (bf_get(lpfc_rcqe_iv, rcqe)) + break; + + /* recycle consumed resource */ + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + break; + } + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_in_buf_free(phba, &dma_buf->dbuf); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2565 Unexpected RQE Status x%x, w0-3 x%08x " + "x%08x x%08x x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + break; } out: return workposted; @@ -14803,7 +14839,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, * @cq: Pointer to CQ to be processed * @handler: Routine to process each cqe * @delay: Pointer to usdelay to set in case of rescheduling of the handler - * @poll_mode: Polling mode we were called from * * This routine processes completion queue entries in a CQ. While a valid * queue element is found, the handler is called. 
During processing checks @@ -14821,8 +14856,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, static bool __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_cqe *), unsigned long *delay, - enum lpfc_poll_mode poll_mode) + struct lpfc_cqe *), unsigned long *delay) { struct lpfc_cqe *cqe; bool workposted = false; @@ -14863,10 +14897,6 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, arm = false; } - /* Note: complete the irq_poll softirq before rearming CQ */ - if (poll_mode == LPFC_IRQ_POLL) - irq_poll_complete(&cq->iop); - /* Track the max number of CQEs processed in 1 EQ */ if (count > cq->CQ_max_cqe) cq->CQ_max_cqe = count; @@ -14916,17 +14946,17 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) case LPFC_MCQ: workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_mcqe, - &delay, LPFC_QUEUE_WORK); + &delay); break; case LPFC_WCQ: if (cq->subtype == LPFC_IO) workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, - &delay, LPFC_QUEUE_WORK); + &delay); else workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_cqe, - &delay, LPFC_QUEUE_WORK); + &delay); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -15203,6 +15233,38 @@ drop: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ break; + case FC_STATUS_RQ_DMA_FAILURE: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2575 RQE DMA Error x%x, x%08x x%08x x%08x " + "x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + + /* If IV set, no further recovery */ + if (bf_get(lpfc_rcqe_iv, rcqe)) + break; + + /* recycle consumed resource */ + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_rqbuf_get(phba, hrq); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + break; + } + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_rq_buf_free(phba, &dma_buf->hbuf); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2576 Unexpected RQE Status x%x, w0-3 x%08x " + "x%08x x%08x x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + break; } out: return workposted; @@ -15271,45 +15333,64 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, } /** - * lpfc_sli4_sched_cq_work - Schedules cq work - * @phba: Pointer to HBA context object. - * @cq: Pointer to CQ - * @cqid: CQ ID - * - * This routine checks the poll mode of the CQ corresponding to - * cq->chann, then either schedules a softirq or queue_work to complete - * cq work. + * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry + * @cq: Pointer to CQ to be processed * - * queue_work path is taken if in NVMET mode, or if poll_mode is in - * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken. + * This routine calls the cq processing routine with the handler for + * fast path CQEs. * + * The CQ routine returns two values: the first is the calling status, + * which indicates whether work was queued to the background discovery + * thread. If true, the routine should wakeup the discovery thread; + * the second is the delay parameter. If non-zero, rather than rearming + * the CQ and yet another interrupt, the CQ handler should be queued so + * that it is processed in a subsequent polling action. The value of + * the delay indicates when to reschedule it. 
**/ -static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, - struct lpfc_queue *cq, uint16_t cqid) +static void +__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) { - int ret = 0; + struct lpfc_hba *phba = cq->phba; + unsigned long delay; + bool workposted = false; + int ret; - switch (cq->poll_mode) { - case LPFC_IRQ_POLL: - /* CGN mgmt is mutually exclusive from softirq processing */ - if (phba->cmf_active_mode == LPFC_CFG_OFF) { - irq_poll_sched(&cq->iop); - break; - } - fallthrough; - case LPFC_QUEUE_WORK: - default: + /* process and rearm the CQ */ + workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, + &delay); + + if (delay) { if (is_kdump_kernel()) - ret = queue_work(phba->wq, &cq->irqwork); + ret = queue_delayed_work(phba->wq, &cq->sched_irqwork, + delay); else - ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); + ret = queue_delayed_work_on(cq->chann, phba->wq, + &cq->sched_irqwork, delay); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0383 Cannot schedule queue work " - "for CQ eqcqid=%d, cqid=%d on CPU %d\n", - cqid, cq->queue_id, - raw_smp_processor_id()); + "0367 Cannot schedule queue work " + "for cqid=%d on CPU %d\n", + cq->queue_id, cq->chann); } + + /* wake up worker thread if there are works to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_hba_process_cq - fast-path work handler when started by + * interrupt + * @work: pointer to work element + * + * translates from the work handler and calls the fast-path handler. + **/ +static void +lpfc_sli4_hba_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); + + __lpfc_sli4_hba_process_cq(cq); } /** @@ -15317,6 +15398,7 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, * @phba: Pointer to HBA context object. * @eq: Pointer to the queue structure. * @eqe: Pointer to fast-path event queue entry. + * @poll_mode: poll_mode to execute processing the cq. * * This routine process a event queue entry from the fast-path event queue. * It will check the MajorCode and MinorCode to determine this is for a @@ -15327,11 +15409,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, **/ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, - struct lpfc_eqe *eqe) + struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode) { struct lpfc_queue *cq = NULL; uint32_t qidx = eq->hdwq; uint16_t cqid, id; + int ret; if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -15391,70 +15474,25 @@ work_cq: else cq->isr_timestamp = 0; #endif - lpfc_sli4_sched_cq_work(phba, cq, cqid); -} - -/** - * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry - * @cq: Pointer to CQ to be processed - * @poll_mode: Enum lpfc_poll_state to determine poll mode - * - * This routine calls the cq processing routine with the handler for - * fast path CQEs. - * - * The CQ routine returns two values: the first is the calling status, - * which indicates whether work was queued to the background discovery - * thread. If true, the routine should wakeup the discovery thread; - * the second is the delay parameter. If non-zero, rather than rearming - * the CQ and yet another interrupt, the CQ handler should be queued so - * that it is processed in a subsequent polling action. The value of - * the delay indicates when to reschedule it. 
- **/ -static void -__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq, - enum lpfc_poll_mode poll_mode) -{ - struct lpfc_hba *phba = cq->phba; - unsigned long delay; - bool workposted = false; - int ret = 0; - - /* process and rearm the CQ */ - workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, - &delay, poll_mode); - if (delay) { + switch (poll_mode) { + case LPFC_THREADED_IRQ: + __lpfc_sli4_hba_process_cq(cq); + break; + case LPFC_QUEUE_WORK: + default: if (is_kdump_kernel()) - ret = queue_delayed_work(phba->wq, &cq->sched_irqwork, - delay); + ret = queue_work(phba->wq, &cq->irqwork); else - ret = queue_delayed_work_on(cq->chann, phba->wq, - &cq->sched_irqwork, delay); + ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0367 Cannot schedule queue work " - "for cqid=%d on CPU %d\n", - cq->queue_id, cq->chann); + "0383 Cannot schedule queue work " + "for CQ eqcqid=%d, cqid=%d on CPU %d\n", + cqid, cq->queue_id, + raw_smp_processor_id()); + break; } - - /* wake up worker thread if there are works to be done */ - if (workposted) - lpfc_worker_wake_up(phba); -} - -/** - * lpfc_sli4_hba_process_cq - fast-path work handler when started by - * interrupt - * @work: pointer to work element - * - * translates from the work handler and calls the fast-path handler. - **/ -static void -lpfc_sli4_hba_process_cq(struct work_struct *work) -{ - struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); - - __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); } /** @@ -15469,7 +15507,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work) struct lpfc_queue *cq = container_of(to_delayed_work(work), struct lpfc_queue, sched_irqwork); - __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); + __lpfc_sli4_hba_process_cq(cq); } /** @@ -15495,8 +15533,9 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work) * and returns for these events. This function is called without any lock * held. It gets the hbalock to access and update SLI data structures. * - * This function returns IRQ_HANDLED when interrupt is handled else it - * returns IRQ_NONE. + * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD + * when interrupt is scheduled to be handled from a threaded irq context, or + * else returns IRQ_NONE. 
**/ irqreturn_t lpfc_sli4_hba_intr_handler(int irq, void *dev_id) @@ -15505,8 +15544,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *fpeq; unsigned long iflag; - int ecount = 0; int hba_eqidx; + int ecount = 0; struct lpfc_eq_intr_info *eqi; /* Get the driver's phba structure from the dev_id */ @@ -15535,30 +15574,41 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) return IRQ_NONE; } - eqi = this_cpu_ptr(phba->sli4_hba.eq_info); - eqi->icnt++; - - fpeq->last_cpu = raw_smp_processor_id(); + switch (fpeq->poll_mode) { + case LPFC_THREADED_IRQ: + /* CGN mgmt is mutually exclusive from irq processing */ + if (phba->cmf_active_mode == LPFC_CFG_OFF) + return IRQ_WAKE_THREAD; + fallthrough; + case LPFC_QUEUE_WORK: + default: + eqi = this_cpu_ptr(phba->sli4_hba.eq_info); + eqi->icnt++; - if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && - fpeq->q_flag & HBA_EQ_DELAY_CHK && - phba->cfg_auto_imax && - fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && - phba->sli.sli_flag & LPFC_SLI_USE_EQDR) - lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); + fpeq->last_cpu = raw_smp_processor_id(); - /* process and rearm the EQ */ - ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && + fpeq->q_flag & HBA_EQ_DELAY_CHK && + phba->cfg_auto_imax && + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, + LPFC_MAX_AUTO_EQ_DELAY); - if (unlikely(ecount == 0)) { - fpeq->EQ_no_entry++; - if (phba->intr_type == MSIX) - /* MSI-X treated interrupt served as no EQ share INT */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0358 MSI-X interrupt with no EQE\n"); - else - /* Non MSI-X treated on interrupt as EQ share INT */ - return IRQ_NONE; + /* process and rearm the EQ */ + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_QUEUE_WORK); + + if (unlikely(ecount == 0)) { + fpeq->EQ_no_entry++; + if (phba->intr_type == MSIX) + /* MSI-X treated interrupt served as no EQ share INT */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0358 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X treated on interrupt as EQ share INT */ + return IRQ_NONE; + } } return IRQ_HANDLED; @@ -16115,13 +16165,69 @@ out: return status; } -static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget) +/** + * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within + * threaded irq context. 
+ * + * Returns + * IRQ_HANDLED - interrupt is handled + * IRQ_NONE - otherwise + **/ +irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id) { - struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop); + struct lpfc_hba *phba; + struct lpfc_hba_eq_hdl *hba_eq_hdl; + struct lpfc_queue *fpeq; + int ecount = 0; + int hba_eqidx; + struct lpfc_eq_intr_info *eqi; - __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL); + /* Get the driver's phba structure from the dev_id */ + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; + hba_eqidx = hba_eq_hdl->idx; - return 1; + if (unlikely(!phba)) + return IRQ_NONE; + if (unlikely(!phba->sli4_hba.hdwq)) + return IRQ_NONE; + + /* Get to the EQ struct associated with this vector */ + fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; + if (unlikely(!fpeq)) + return IRQ_NONE; + + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id()); + eqi->icnt++; + + fpeq->last_cpu = raw_smp_processor_id(); + + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && + fpeq->q_flag & HBA_EQ_DELAY_CHK && + phba->cfg_auto_imax && + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); + + /* process and rearm the EQ */ + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_THREADED_IRQ); + + if (unlikely(ecount == 0)) { + fpeq->EQ_no_entry++; + if (phba->intr_type == MSIX) + /* MSI-X treated interrupt served as no EQ share INT */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3358 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X treated on interrupt as EQ share INT */ + return IRQ_NONE; + } + return IRQ_HANDLED; } /** @@ -16265,8 +16371,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, if (cq->queue_id > phba->sli4_hba.cq_max) phba->sli4_hba.cq_max = cq->queue_id; - - irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler); out: mempool_free(mbox, phba->mbox_mem_pool); return status; @@ -20696,23 +20800,23 @@ lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { switch (shdr_add_status_2) { case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "4199 Firmware write failed: " - "image incompatible with flash x%02x\n", - phba->sli4_hba.flash_id); + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4199 Firmware write failed: " + "image incompatible with flash x%02x\n", + phba->sli4_hba.flash_id); break; case LPFC_ADD_STATUS_2_INCORRECT_ASIC: - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "4200 Firmware write failed: " - "image incompatible with ASIC " - "architecture x%02x\n", - phba->sli4_hba.asic_rev); + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4200 Firmware write failed: " + "image incompatible with ASIC " + "architecture x%02x\n", + phba->sli4_hba.asic_rev); break; default: - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "4210 Firmware write failed: " - "add_status_2 x%02x\n", - shdr_add_status_2); + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4210 Firmware write failed: " + "add_status_2 x%02x\n", + shdr_add_status_2); break; } } else if (!shdr_status && !shdr_add_status) { @@ -20725,26 +20829,26 @@ lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, switch (shdr_change_status) { case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3198 Firmware write complete: System " - "reboot 
required to instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3198 Firmware write complete: System " + "reboot required to instantiate\n"); break; case (LPFC_CHANGE_STATUS_FW_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3199 Firmware write complete: " - "Firmware reset required to " - "instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3199 Firmware write complete: " + "Firmware reset required to " + "instantiate\n"); break; case (LPFC_CHANGE_STATUS_PORT_MIGRATION): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3200 Firmware write complete: Port " - "Migration or PCI Reset required to " - "instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3200 Firmware write complete: Port " + "Migration or PCI Reset required to " + "instantiate\n"); break; case (LPFC_CHANGE_STATUS_PCI_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3201 Firmware write complete: PCI " - "Reset required to instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3201 Firmware write complete: PCI " + "Reset required to instantiate\n"); break; default: break; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 2a0864e6d7cd..2541a8fba093 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -140,7 +140,7 @@ struct lpfc_rqb { enum lpfc_poll_mode { LPFC_QUEUE_WORK, - LPFC_IRQ_POLL + LPFC_THREADED_IRQ, }; struct lpfc_idle_stat { @@ -279,8 +279,6 @@ struct lpfc_queue { struct list_head _poll_list; void **q_pgs; /* array to index entries per page */ -#define LPFC_IRQ_POLL_WEIGHT 256 - struct irq_poll iop; enum lpfc_poll_mode poll_mode; }; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c97411b0992e..6f35491aed0f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.11" +#define LPFC_DRIVER_VERSION "14.2.0.13" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */
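The lpfc_sli.c and lpfc_sli4.h hunks above retire the irq_poll softirq path (LPFC_IRQ_POLL, the struct irq_poll iop member, LPFC_IRQ_POLL_WEIGHT) in favor of threaded interrupt handling: when an EQ's poll_mode is LPFC_THREADED_IRQ and congestion management is off, the hard handler returns IRQ_WAKE_THREAD and lpfc_sli4_hba_intr_handler_th() performs the EQ/CQ processing from a schedulable kernel-thread context. As a rough sketch of the underlying genirq pattern only (illustrative code, not lpfc's; the my_* names and struct are made up for the example):

#include <linux/interrupt.h>

/* Hypothetical per-vector context; lpfc passes a struct lpfc_hba_eq_hdl. */
struct my_eq_hdl {
        int idx;
};

static irqreturn_t my_eq_isr(int irq, void *dev_id)
{
        /*
         * Hard handler: keep it short. Returning IRQ_WAKE_THREAD defers
         * the EQE/CQE walk to the thread function, the way
         * lpfc_sli4_hba_intr_handler() defers to
         * lpfc_sli4_hba_intr_handler_th() in the LPFC_THREADED_IRQ case.
         */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t my_eq_isr_thread(int irq, void *dev_id)
{
        /* Walk and rearm the event queue in schedulable thread context. */
        return IRQ_HANDLED;
}

static int my_request_eq_irq(unsigned int irq, struct my_eq_hdl *hdl)
{
        /* A non-NULL thread_fn is what makes the IRQ a threaded one. */
        return request_threaded_irq(irq, my_eq_isr, my_eq_isr_thread,
                                    0, "my-eq", hdl);
}

Compared with the old scheme, the threaded model needs no poll weight and no irq_poll_complete() call before rearming the CQ, which is why lpfc_cq_create() drops its irq_poll_init() call and struct lpfc_queue loses its irq_poll member.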