author     Linus Torvalds <torvalds@linux-foundation.org>   2021-07-03 00:14:36 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-07-03 00:14:36 +0200
commit     bd31b9efbf549d9630bf2f269a3a56dcb29fcac1
tree       677abd40f1f86276199e68c098e48120671ec851 /drivers/scsi/lpfc
parent     Merge tag 'xfs-5.14-merge-6' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
parent     scsi: aha1740: Avoid over-read of sense buffer
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This series consists of the usual driver updates (ufs, ibmvfc,
megaraid_sas, lpfc, elx, mpi3mr, qedi, iscsi, storvsc, mpt3sas) with
elx and mpi3mr being new drivers.
The major core change is a rework to drop the status byte handling
macros and the old bit shifted definitions and the rest of the updates
are minor fixes"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (287 commits)
scsi: aha1740: Avoid over-read of sense buffer
scsi: arcmsr: Avoid over-read of sense buffer
scsi: ips: Avoid over-read of sense buffer
scsi: ufs: ufs-mediatek: Add missing of_node_put() in ufs_mtk_probe()
scsi: elx: libefc: Fix IRQ restore in efc_domain_dispatch_frame()
scsi: elx: libefc: Fix less than zero comparison of a unsigned int
scsi: elx: efct: Fix pointer error checking in debugfs init
scsi: elx: efct: Fix is_originator return code type
scsi: elx: efct: Fix link error for _bad_cmpxchg
scsi: elx: efct: Eliminate unnecessary boolean check in efct_hw_command_cancel()
scsi: elx: efct: Do not use id uninitialized in efct_lio_setup_session()
scsi: elx: efct: Fix error handling in efct_hw_init()
scsi: elx: efct: Remove redundant initialization of variable lun
scsi: elx: efct: Fix spelling mistake "Unexected" -> "Unexpected"
scsi: lpfc: Fix build error in lpfc_scsi.c
scsi: target: iscsi: Remove redundant continue statement
scsi: qla4xxx: Remove redundant continue statement
scsi: ppa: Switch to use module_parport_driver()
scsi: imm: Switch to use module_parport_driver()
scsi: mpt3sas: Fix error return value in _scsih_expander_add()
...
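The ppa and imm entries in the list above replace hand-rolled module_init()/module_exit() registration with the module_parport_driver() helper. A minimal, hypothetical sketch of that pattern follows; the driver name and empty callbacks are placeholders, not the actual ppa/imm code.

```c
#include <linux/module.h>
#include <linux/parport.h>

/* Placeholder callbacks for a hypothetical parallel-port driver. */
static void example_attach(struct parport *port)
{
	/* claim the port and probe for the device here */
}

static void example_detach(struct parport *port)
{
	/* release any resources tied to the departing port here */
}

static struct parport_driver example_parport_driver = {
	.name   = "example",
	.attach = example_attach,
	.detach = example_detach,
};

/*
 * Expands to the module_init()/module_exit() pair that calls
 * parport_register_driver()/parport_unregister_driver(), replacing the
 * boilerplate wrappers the converted drivers used to carry.
 */
module_parport_driver(example_parport_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative module_parport_driver() usage");
```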
Diffstat (limited to 'drivers/scsi/lpfc')
-rw-r--r--  drivers/scsi/lpfc/lpfc.h           | 124
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c      |  59
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h      |  12
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c        | 298
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c   |  11
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h      |   2
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c       | 665
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c   | 229
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h        | 124
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h       |  12
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c      | 109
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c      |   9
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |  40
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c      |  14
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c      | 416
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c       |  66
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h       |  11
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h   |   2
18 files changed, 2083 insertions(+), 120 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index f8de0d10620b..17028861234b 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -266,6 +266,7 @@ struct lpfc_stats { uint32_t elsRcvECHO; uint32_t elsRcvLCB; uint32_t elsRcvRDP; + uint32_t elsRcvRDF; uint32_t elsXmitFLOGI; uint32_t elsXmitFDISC; uint32_t elsXmitPLOGI; @@ -303,6 +304,64 @@ struct lpfc_stats { struct lpfc_hba; +#define LPFC_VMID_TIMER 300 /* timer interval in seconds */ + +#define LPFC_MAX_VMID_SIZE 256 +#define LPFC_COMPRESS_VMID_SIZE 16 + +union lpfc_vmid_io_tag { + u32 app_id; /* App Id vmid */ + u8 cs_ctl_vmid; /* Priority tag vmid */ +}; + +#define JIFFIES_PER_HR (HZ * 60 * 60) + +struct lpfc_vmid { + u8 flag; +#define LPFC_VMID_SLOT_FREE 0x0 +#define LPFC_VMID_SLOT_USED 0x1 +#define LPFC_VMID_REQ_REGISTER 0x2 +#define LPFC_VMID_REGISTERED 0x4 +#define LPFC_VMID_DE_REGISTER 0x8 + char host_vmid[LPFC_MAX_VMID_SIZE]; + union lpfc_vmid_io_tag un; + struct hlist_node hnode; + u64 io_rd_cnt; + u64 io_wr_cnt; + u8 vmid_len; + u8 delete_inactive; /* Delete if inactive flag 0 = no, 1 = yes */ + u32 hash_index; + u64 __percpu *last_io_time; +}; + +#define lpfc_vmid_is_type_priority_tag(vport)\ + (vport->vmid_priority_tagging ? 1 : 0) + +#define LPFC_VMID_HASH_SIZE 256 +#define LPFC_VMID_HASH_MASK 255 +#define LPFC_VMID_HASH_SHIFT 6 + +struct lpfc_vmid_context { + struct lpfc_vmid *vmp; + struct lpfc_nodelist *nlp; + bool instantiated; +}; + +struct lpfc_vmid_priority_range { + u8 low; + u8 high; + u8 qos; +}; + +struct lpfc_vmid_priority_info { + u32 num_descriptors; + struct lpfc_vmid_priority_range *vmid_range; +}; + +#define QFPA_EVEN_ONLY 0x01 +#define QFPA_ODD_ONLY 0x02 +#define QFPA_EVEN_ODD 0x03 + enum discovery_state { LPFC_VPORT_UNKNOWN = 0, /* vport state is unknown */ LPFC_VPORT_FAILED = 1, /* vport has failed */ @@ -442,6 +501,9 @@ struct lpfc_vport { #define WORKER_RAMP_DOWN_QUEUE 0x800 /* hba: Decrease Q depth */ #define WORKER_RAMP_UP_QUEUE 0x1000 /* hba: Increase Q depth */ #define WORKER_SERVICE_TXQ 0x2000 /* hba: IOCBs on the txq */ +#define WORKER_CHECK_INACTIVE_VMID 0x4000 /* hba: check inactive vmids */ +#define WORKER_CHECK_VMID_ISSUE_QFPA 0x8000 /* vport: Check if qfpa needs + * to be issued */ struct timer_list els_tmofunc; struct timer_list delayed_disc_tmo; @@ -452,6 +514,8 @@ struct lpfc_vport { #define FC_LOADING 0x1 /* HBA in process of loading drvr */ #define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */ #define FC_ALLOW_FDMI 0x4 /* port is ready for FDMI requests */ +#define FC_ALLOW_VMID 0x8 /* Allow VMID I/Os */ +#define FC_DEREGISTER_ALL_APP_ID 0x10 /* Deregister all VMIDs */ /* Vport Config Parameters */ uint32_t cfg_scan_down; uint32_t cfg_lun_queue_depth; @@ -470,9 +534,36 @@ struct lpfc_vport { uint32_t cfg_tgt_queue_depth; uint32_t cfg_first_burst_size; uint32_t dev_loss_tmo_changed; + /* VMID parameters */ + u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE]; + u32 max_vmid; /* maximum VMIDs allowed per port */ + u32 cur_vmid_cnt; /* Current VMID count */ +#define LPFC_MIN_VMID 4 +#define LPFC_MAX_VMID 255 + u32 vmid_inactivity_timeout; /* Time after which the VMID */ + /* deregisters from switch */ + u32 vmid_priority_tagging; +#define LPFC_VMID_PRIO_TAG_DISABLE 0 /* Disable */ +#define LPFC_VMID_PRIO_TAG_SUP_TARGETS 1 /* Allow supported targets only */ +#define LPFC_VMID_PRIO_TAG_ALL_TARGETS 2 /* Allow all targets */ + unsigned long *vmid_priority_range; +#define LPFC_VMID_MAX_PRIORITY_RANGE 256 +#define LPFC_VMID_PRIORITY_BITMAP_SIZE 32 + u8 
vmid_flag; +#define LPFC_VMID_IN_USE 0x1 +#define LPFC_VMID_ISSUE_QFPA 0x2 +#define LPFC_VMID_QFPA_CMPL 0x4 +#define LPFC_VMID_QOS_ENABLED 0x8 +#define LPFC_VMID_TIMER_ENBLD 0x10 + struct fc_qfpa_res *qfpa_res; struct fc_vport *fc_vport; + struct lpfc_vmid *vmid; + DECLARE_HASHTABLE(hash_table, 8); + rwlock_t vmid_lock; + struct lpfc_vmid_priority_info vmid_priority; + #ifdef CONFIG_SCSI_LPFC_DEBUG_FS struct dentry *debug_disc_trc; struct dentry *debug_nodelist; @@ -915,6 +1006,7 @@ struct lpfc_hba { uint32_t cfg_request_firmware_upgrade; uint32_t cfg_suppress_link_up; uint32_t cfg_rrq_xri_bitmap_sz; + u32 cfg_fcp_wait_abts_rsp; uint32_t cfg_delay_discovery; uint32_t cfg_sli_mode; #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ @@ -938,6 +1030,13 @@ struct lpfc_hba { struct nvmet_fc_target_port *targetport; lpfc_vpd_t vpd; /* vital product data */ + u32 cfg_max_vmid; /* maximum VMIDs allowed per port */ + u32 cfg_vmid_app_header; +#define LPFC_VMID_APP_HEADER_DISABLE 0 +#define LPFC_VMID_APP_HEADER_ENABLE 1 + u32 cfg_vmid_priority_tagging; + u32 cfg_vmid_inactivity_timeout; /* Time after which the VMID */ + /* deregisters from switch */ struct pci_dev *pcidev; struct list_head work_list; uint32_t work_ha; /* Host Attention Bits for WT */ @@ -1178,6 +1277,7 @@ struct lpfc_hba { struct list_head ct_ev_waiters; struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX]; uint32_t ctx_idx; + struct timer_list inactive_vmid_poll; /* RAS Support */ struct lpfc_ras_fwlog ras_fwlog; @@ -1419,3 +1519,27 @@ static const char *routine(enum enum_name table_key) \ } \ return name; \ } + +/** + * lpfc_is_vmid_enabled - returns if VMID is enabled for either switch types + * @phba: Pointer to HBA context object. + * + * Relationship between the enable, target support and if vmid tag is required + * for the particular combination + * --------------------------------------------------- + * Switch Enable Flag Target Support VMID Needed + * --------------------------------------------------- + * App Id 0 NA N + * App Id 1 0 N + * App Id 1 1 Y + * Pr Tag 0 NA N + * Pr Tag 1 0 N + * Pr Tag 1 1 Y + * Pr Tag 2 * Y + --------------------------------------------------- + * + **/ +static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba) +{ + return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging; +} diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 0975a8b252a0..eb88aaaf36eb 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3449,6 +3449,15 @@ LPFC_ATTR_R(fcf_failover_policy, 1, 1, 2, "FCF Fast failover=1 Priority failover=2"); /* + * lpfc_fcp_wait_abts_rsp: Modifies criteria for reporting completion of + * aborted IO. + * The range is [0,1]. Default value is 0 + * 0, IO completes after ABTS issued (default). + * 1, IO completes after receipt of ABTS response or timeout. + */ +LPFC_ATTR_R(fcp_wait_abts_rsp, 0, 0, 1, "Wait for FCP ABTS completion"); + +/* # lpfc_enable_rrq: Track XRI/OXID reuse after IO failures # 0x0 = disabled, XRI/OXID use not tracked. # 0x1 = XRI/OXID reuse is timed with ratov, RRQ sent. @@ -6153,6 +6162,45 @@ LPFC_ATTR_RW(enable_dpp, 1, 0, 1, "Enable Direct Packet Push"); */ LPFC_ATTR_R(enable_mi, 1, 0, 1, "Enable MI"); +/* + * lpfc_max_vmid: Maximum number of VMs to be tagged. This is valid only if + * either vmid_app_header or vmid_priority_tagging is enabled. + * 4 - 255 = vmid support enabled for 4-255 VMs + * Value range is [4,255]. 
+ */ +LPFC_ATTR_RW(max_vmid, LPFC_MIN_VMID, LPFC_MIN_VMID, LPFC_MAX_VMID, + "Maximum number of VMs supported"); + +/* + * lpfc_vmid_inactivity_timeout: Inactivity timeout duration in hours + * 0 = Timeout is disabled + * Value range is [0,24]. + */ +LPFC_ATTR_RW(vmid_inactivity_timeout, 4, 0, 24, + "Inactivity timeout in hours"); + +/* + * lpfc_vmid_app_header: Enable App Header VMID support + * 0 = Support is disabled (default) + * 1 = Support is enabled + * Value range is [0,1]. + */ +LPFC_ATTR_RW(vmid_app_header, LPFC_VMID_APP_HEADER_DISABLE, + LPFC_VMID_APP_HEADER_DISABLE, LPFC_VMID_APP_HEADER_ENABLE, + "Enable App Header VMID support"); + +/* + * lpfc_vmid_priority_tagging: Enable Priority Tagging VMID support + * 0 = Support is disabled (default) + * 1 = Allow supported targets only + * 2 = Allow all targets + * Value range is [0,2]. + */ +LPFC_ATTR_RW(vmid_priority_tagging, LPFC_VMID_PRIO_TAG_DISABLE, + LPFC_VMID_PRIO_TAG_DISABLE, + LPFC_VMID_PRIO_TAG_ALL_TARGETS, + "Enable Priority Tagging VMID support"); + struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_nvme_info, &dev_attr_scsi_stat, @@ -6205,6 +6253,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_enable_npiv, &dev_attr_lpfc_fcf_failover_policy, &dev_attr_lpfc_enable_rrq, + &dev_attr_lpfc_fcp_wait_abts_rsp, &dev_attr_nport_evt_cnt, &dev_attr_board_mode, &dev_attr_max_vpi, @@ -6271,6 +6320,10 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_enable_bbcr, &dev_attr_lpfc_enable_dpp, &dev_attr_lpfc_enable_mi, + &dev_attr_lpfc_max_vmid, + &dev_attr_lpfc_vmid_inactivity_timeout, + &dev_attr_lpfc_vmid_app_header, + &dev_attr_lpfc_vmid_priority_tagging, NULL, }; @@ -7332,6 +7385,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_npiv_init(phba, lpfc_enable_npiv); lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); lpfc_enable_rrq_init(phba, lpfc_enable_rrq); + lpfc_fcp_wait_abts_rsp_init(phba, lpfc_fcp_wait_abts_rsp); lpfc_fdmi_on_init(phba, lpfc_fdmi_on); lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN); lpfc_use_msi_init(phba, lpfc_use_msi); @@ -7346,6 +7400,11 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); lpfc_EnableXLane_init(phba, lpfc_EnableXLane); + /* VMID Inits */ + lpfc_max_vmid_init(phba, lpfc_max_vmid); + lpfc_vmid_inactivity_timeout_init(phba, lpfc_vmid_inactivity_timeout); + lpfc_vmid_app_header_init(phba, lpfc_vmid_app_header); + lpfc_vmid_priority_tagging_init(phba, lpfc_vmid_priority_tagging); if (phba->sli_rev != LPFC_SLI_REV4) phba->cfg_EnableXLane = 0; lpfc_XLanePriority_init(phba, lpfc_XLanePriority); diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 383abf46fd29..737483c3f01d 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -80,6 +80,7 @@ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb); void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -607,3 +608,14 @@ extern unsigned long lpfc_no_hba_reset[]; extern union lpfc_wqe128 lpfc_iread_cmd_template; extern union lpfc_wqe128 
lpfc_iwrite_cmd_template; extern union lpfc_wqe128 lpfc_icmnd_cmd_template; + +/* vmid interface */ +int lpfc_vmid_uvem(struct lpfc_vport *vport, struct lpfc_vmid *vmid, bool ins); +uint32_t lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport); +int lpfc_vmid_cmd(struct lpfc_vport *vport, + int cmdcode, struct lpfc_vmid *vmid); +int lpfc_vmid_hash_fn(const char *vmid, int len); +struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, + uint32_t hash, uint8_t *buf); +void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport); +int lpfc_issue_els_qfpa(struct lpfc_vport *vport); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 3bbefa225484..610b6dabb3b5 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -75,6 +75,9 @@ static char *lpfc_release_version = LPFC_DRIVER_VERSION; +static void +lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); static void lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, @@ -587,7 +590,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp, void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *), - struct lpfc_nodelist *ndlp, uint32_t usr_flg, uint32_t num_entry, + struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry, uint32_t tmo, uint8_t retry) { struct lpfc_hba *phba = vport->phba; @@ -608,15 +611,14 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, icmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64; icmd->un.genreq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64)); - if (usr_flg) - geniocb->context3 = NULL; - else - geniocb->context3 = (uint8_t *) bmp; + geniocb->context3 = (uint8_t *) bmp; /* Save for completion so we can release these resources */ geniocb->context1 = (uint8_t *) inp; geniocb->context2 = (uint8_t *) outp; + geniocb->event_tag = event_tag; + /* Fill in payload, bp points to frame payload */ icmd->ulpCommand = CMD_GEN_REQUEST64_CR; @@ -707,8 +709,8 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, * lpfc_alloc_ct_rsp. */ cnt += 1; - status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, 0, - cnt, 0, retry); + status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, + phba->fc_eventTag, cnt, 0, retry); if (status) { lpfc_free_ct_rsp(phba, outmp); return -ENOMEM; @@ -957,6 +959,13 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "GID_FT cmpl: status:x%x/x%x rtry:%d", irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry); + /* Ignore response if link flipped after this request was made */ + if (cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9043 Event tag mismatch. Ignoring NS rsp\n"); + goto out; + } + /* Don't bother processing response if vport is being torn down. */ if (vport->load_flag & FC_UNLOADING) { if (vport->fc_flag & FC_RSCN_MODE) @@ -1167,6 +1176,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_ns_retry); + /* Ignore response if link flipped after this request was made */ + if (cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9044 Event tag mismatch. Ignoring NS rsp\n"); + goto out; + } + /* Don't bother processing response if vport is being torn down. 
*/ if (vport->load_flag & FC_UNLOADING) { if (vport->fc_flag & FC_RSCN_MODE) @@ -1366,6 +1382,13 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "GFF_ID cmpl: status:x%x/x%x did:x%x", irsp->ulpStatus, irsp->un.ulpWord[4], did); + /* Ignore response if link flipped after this request was made */ + if (cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9045 Event tag mismatch. Ignoring NS rsp\n"); + goto iocb_free; + } + if (irsp->ulpStatus == IOSTAT_SUCCESS) { /* Good status, continue checking */ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; @@ -1479,6 +1502,7 @@ out: lpfc_disc_start(vport); } +iocb_free: free_ndlp = cmdiocb->context_un.ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); @@ -1506,6 +1530,13 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "GFT_ID cmpl: status:x%x/x%x did:x%x", irsp->ulpStatus, irsp->un.ulpWord[4], did); + /* Ignore response if link flipped after this request was made */ + if ((uint32_t) cmdiocb->event_tag != phba->fc_eventTag) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "9046 Event tag mismatch. Ignoring NS rsp\n"); + goto out; + } + /* Preserve the nameserver node to release the reference. */ ns_ndlp = cmdiocb->context_un.ndlp; @@ -1572,6 +1603,7 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "3065 GFT_ID failed x%08x\n", irsp->ulpStatus); +out: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ns_ndlp); } @@ -3748,3 +3780,255 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) } return; } + +static void +lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct lpfc_dmabuf *inp = cmdiocb->context1; + struct lpfc_dmabuf *outp = cmdiocb->context2; + struct lpfc_sli_ct_request *ctcmd = inp->virt; + struct lpfc_sli_ct_request *ctrsp = outp->virt; + u16 rsp = ctrsp->CommandResponse.bits.CmdRsp; + struct app_id_object *app; + u32 cmd, hash, bucket; + struct lpfc_vmid *vmp, *cur; + u8 *data = outp->virt; + int i; + + cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp); + if (cmd == SLI_CTAS_DALLAPP_ID) + lpfc_ct_free_iocb(phba, cmdiocb); + + if (lpfc_els_chk_latt(vport) || rspiocb->iocb.ulpStatus) { + if (cmd != SLI_CTAS_DALLAPP_ID) + return; + } + /* Check for a CT LS_RJT response */ + if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + if (cmd != SLI_CTAS_DALLAPP_ID) + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "3306 VMID FS_RJT Data: x%x x%x x%x\n", + cmd, ctrsp->ReasonCode, + ctrsp->Explanation); + if ((cmd != SLI_CTAS_DALLAPP_ID) || + (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) || + (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) { + /* If DALLAPP_ID failed retry later */ + if (cmd == SLI_CTAS_DALLAPP_ID) + vport->load_flag |= FC_DEREGISTER_ALL_APP_ID; + return; + } + } + + switch (cmd) { + case SLI_CTAS_RAPP_IDENT: + app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data); + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "6712 RAPP_IDENT app id %d port id x%x id " + "len %d\n", be32_to_cpu(app->app_id), + be32_to_cpu(app->port_id), + app->obj.entity_id_len); + + if (app->obj.entity_id_len == 0 || app->port_id == 0) + return; + + hash = lpfc_vmid_hash_fn(app->obj.entity_id, + app->obj.entity_id_len); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, + app->obj.entity_id); + if (vmp) { + 
write_lock(&vport->vmid_lock); + vmp->un.app_id = be32_to_cpu(app->app_id); + vmp->flag |= LPFC_VMID_REGISTERED; + vmp->flag &= ~LPFC_VMID_REQ_REGISTER; + write_unlock(&vport->vmid_lock); + /* Set IN USE flag */ + vport->vmid_flag |= LPFC_VMID_IN_USE; + } else { + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "6901 No entry found %s hash %d\n", + app->obj.entity_id, hash); + } + break; + case SLI_CTAS_DAPP_IDENT: + app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data); + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "6713 DAPP_IDENT app id %d port id x%x\n", + be32_to_cpu(app->app_id), + be32_to_cpu(app->port_id)); + break; + case SLI_CTAS_DALLAPP_ID: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "8856 Deregistered all app ids\n"); + read_lock(&vport->vmid_lock); + for (i = 0; i < phba->cfg_max_vmid; i++) { + vmp = &vport->vmid[i]; + if (vmp->flag != LPFC_VMID_SLOT_FREE) + memset(vmp, 0, sizeof(struct lpfc_vmid)); + } + read_unlock(&vport->vmid_lock); + /* for all elements in the hash table */ + if (!hash_empty(vport->hash_table)) + hash_for_each(vport->hash_table, bucket, cur, hnode) + hash_del(&cur->hnode); + vport->load_flag |= FC_ALLOW_VMID; + break; + default: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "8857 Invalid command code\n"); + } +} + +/** + * lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort + * @vport: pointer to a host virtual N_Port data structure. + * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID) + * cmdcode: FDMI command to send + * mask: Mask of HBA or PORT Attributes to send + * + * Builds and sends a FDMI command using the CT subsystem. + */ +int +lpfc_vmid_cmd(struct lpfc_vport *vport, + int cmdcode, struct lpfc_vmid *vmid) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_dmabuf *mp, *bmp; + struct lpfc_sli_ct_request *ctreq; + struct ulp_bde64 *bpl; + u32 size; + u32 rsp_size; + u8 *data; + struct lpfc_vmid_rapp_ident_list *rap; + struct lpfc_vmid_dapp_ident_list *dap; + u8 retry = 0; + struct lpfc_nodelist *ndlp; + + void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb); + + ndlp = lpfc_findnode_did(vport, FDMI_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return 0; + + cmpl = lpfc_cmpl_ct_cmd_vmid; + + /* fill in BDEs for command */ + /* Allocate buffer for command payload */ + mp = kmalloc(sizeof(*mp), GFP_KERNEL); + if (!mp) + goto vmid_free_mp_exit; + + mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); + if (!mp->virt) + goto vmid_free_mp_virt_exit; + + /* Allocate buffer for Buffer ptr list */ + bmp = kmalloc(sizeof(*bmp), GFP_KERNEL); + if (!bmp) + goto vmid_free_bmp_exit; + + bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys); + if (!bmp->virt) + goto vmid_free_bmp_virt_exit; + + INIT_LIST_HEAD(&mp->list); + INIT_LIST_HEAD(&bmp->list); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "3275 VMID Request Data: x%x x%x x%x\n", + vport->fc_flag, vport->port_state, cmdcode); + ctreq = (struct lpfc_sli_ct_request *)mp->virt; + data = mp->virt; + /* First populate the CT_IU preamble */ + memset(data, 0, LPFC_BPL_SIZE); + ctreq->RevisionId.bits.Revision = SLI_CT_REVISION; + ctreq->RevisionId.bits.InId = 0; + + ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE; + ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes; + + ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode); + rsp_size = LPFC_BPL_SIZE; + size = 0; + + switch (cmdcode) { + case SLI_CTAS_RAPP_IDENT: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "1329 RAPP_IDENT for %s\n", 
vmid->host_vmid); + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + rap = (struct lpfc_vmid_rapp_ident_list *) + (DAPP_IDENT_OFFSET + data); + rap->no_of_objects = cpu_to_be32(1); + rap->obj[0].entity_id_len = vmid->vmid_len; + memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); + size = RAPP_IDENT_OFFSET + + sizeof(struct lpfc_vmid_rapp_ident_list); + retry = 1; + break; + + case SLI_CTAS_GALLAPPIA_ID: + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + size = GALLAPPIA_ID_SIZE; + break; + + case SLI_CTAS_DAPP_IDENT: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "1469 DAPP_IDENT for %s\n", vmid->host_vmid); + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + dap = (struct lpfc_vmid_dapp_ident_list *) + (DAPP_IDENT_OFFSET + data); + dap->no_of_objects = cpu_to_be32(1); + dap->obj[0].entity_id_len = vmid->vmid_len; + memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); + size = DAPP_IDENT_OFFSET + + sizeof(struct lpfc_vmid_dapp_ident_list); + write_lock(&vport->vmid_lock); + vmid->flag &= ~LPFC_VMID_REGISTERED; + write_unlock(&vport->vmid_lock); + retry = 1; + break; + + case SLI_CTAS_DALLAPP_ID: + ctreq->un.PortID = cpu_to_be32(vport->fc_myDID); + size = DALLAPP_ID_SIZE; + break; + + default: + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "7062 VMID cmdcode x%x not supported\n", + cmdcode); + goto vmid_free_all_mem; + } + + ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size); + + bpl = (struct ulp_bde64 *)bmp->virt; + bpl->addrHigh = putPaddrHigh(mp->phys); + bpl->addrLow = putPaddrLow(mp->phys); + bpl->tus.f.bdeFlags = 0; + bpl->tus.f.bdeSize = size; + + /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count + * to hold ndlp reference for the corresponding callback function. + */ + if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) + return 0; + + vmid_free_all_mem: + lpfc_mbuf_free(phba, bmp->virt, bmp->phys); + vmid_free_bmp_virt_exit: + kfree(bmp); + vmid_free_bmp_exit: + lpfc_mbuf_free(phba, mp->virt, mp->phys); + vmid_free_mp_virt_exit: + kfree(mp); + vmid_free_mp_exit: + + /* Issue CT request failed */ + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, + "3276 VMID CT request failed Data: x%x\n", cmdcode); + return -EIO; +} diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 658a962832b3..6ff85ae57e79 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -863,16 +863,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) len += scnprintf(buf+len, size-len, "%s DID:x%06x ", statep, ndlp->nlp_DID); len += scnprintf(buf+len, size-len, - "WWPN x%llx ", + "WWPN x%016llx ", wwn_to_u64(ndlp->nlp_portname.u.wwn)); len += scnprintf(buf+len, size-len, - "WWNN x%llx ", + "WWNN x%016llx ", wwn_to_u64(ndlp->nlp_nodename.u.wwn)); - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) - len += scnprintf(buf+len, size-len, "RPI:%04d ", - ndlp->nlp_rpi); - else - len += scnprintf(buf+len, size-len, "RPI:none "); + len += scnprintf(buf+len, size-len, "RPI:x%04x ", + ndlp->nlp_rpi); len += scnprintf(buf+len, size-len, "flag:x%08x ", ndlp->nlp_flag); if (!ndlp->nlp_type) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 08999aad6a10..131374a61d7e 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -86,6 +86,7 @@ enum lpfc_fc4_xpt_flags { struct lpfc_nodelist { struct list_head nlp_listp; + struct serv_parm fc_sparam; /* buffer for service params */ struct lpfc_name nlp_portname; struct 
lpfc_name nlp_nodename; @@ -124,6 +125,7 @@ struct lpfc_nodelist { uint8_t nlp_fcp_info; /* class info, bits 0-3 */ #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ u8 nlp_nvme_info; /* NVME NSLER Support */ + uint8_t vmid_support; /* destination VMID support */ #define NLP_NVME_NSLER 0x1 /* NVME NSLER device */ struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 21108f322c99..e481f5fe29d7 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -25,6 +25,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/interrupt.h> +#include <linux/delay.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -55,9 +56,15 @@ static int lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint8_t retry); static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb); +static void lpfc_cmpl_els_uvem(struct lpfc_hba *, struct lpfc_iocbq *, + struct lpfc_iocbq *); static int lpfc_max_els_tries = 3; +static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport); +static void lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max); +static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid); + /** * lpfc_els_chk_latt - Check host link attention event for a vport * @vport: pointer to a host virtual N_Port data structure. @@ -314,10 +321,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " "NPORT x%x I/O tag: x%x, port state:x%x " - "rpi x%x fc_flag:x%x\n", + "rpi x%x fc_flag:x%x nlp_flag:x%x vport:x%p\n", elscmd, did, elsiocb->iotag, vport->port_state, ndlp->nlp_rpi, - vport->fc_flag); + vport->fc_flag, ndlp->nlp_flag, vport); } else { /* Xmit ELS response <elsCmd> to remote NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -1112,11 +1119,15 @@ stop_rr_fcf_flogi: /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0101 FLOGI completes successfully, I/O tag:x%x, " - "xri x%x Data: x%x x%x x%x x%x x%x %x\n", + "xri x%x Data: x%x x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, cmdiocb->sli4_xritag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, - vport->port_state, vport->fc_flag); + vport->port_state, vport->fc_flag, + sp->cmn.priority_tagging); + + if (sp->cmn.priority_tagging) + vport->vmid_flag |= LPFC_VMID_ISSUE_QFPA; if (vport->port_state == LPFC_FLOGI) { /* @@ -1175,6 +1186,15 @@ stop_rr_fcf_flogi: phba->fcf.fcf_redisc_attempted = 0; /* reset */ goto out; } + } else if (vport->port_state > LPFC_FLOGI && + vport->fc_flag & FC_PT2PT) { + /* + * In a p2p topology, it is possible that discovery has + * already progressed, and this completion can be ignored. + * Recheck the indicated topology. 
+ */ + if (!sp->cmn.fPort) + goto out; } flogifail: @@ -1299,6 +1319,18 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; + /* Determine if switch supports priority tagging */ + if (phba->cfg_vmid_priority_tagging) { + sp->cmn.priority_tagging = 1; + /* lpfc_vmid_host_uuid is combination of wwpn and wwnn */ + if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) { + memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn, + sizeof(phba->wwpn)); + memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn, + sizeof(phba->wwnn)); + } + } + if (phba->sli_rev == LPFC_SLI_REV4) { if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == LPFC_SLI_INTF_IF_TYPE_0) { @@ -1925,6 +1957,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp, *free_ndlp; struct lpfc_dmabuf *prsp; int disc; + struct serv_parm *sp = NULL; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; @@ -1998,9 +2031,20 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); - /* As long as this node is not registered with the scsi or nvme - * transport, it is no longer an active node. Otherwise - * devloss handles the final cleanup. + /* If a PLOGI collision occurred, the node needs to continue + * with the reglogin process. + */ + spin_lock_irq(&ndlp->lock); + if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && + ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { + spin_unlock_irq(&ndlp->lock); + goto out; + } + spin_unlock_irq(&ndlp->lock); + + /* No PLOGI collision and the node is not registered with the + * scsi or nvme transport. It is no longer an active node. Just + * start the device remove process. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { spin_lock_irq(&ndlp->lock); @@ -2015,6 +2059,23 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, cmdiocb->context2)->list.next, struct lpfc_dmabuf, list); ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp); + + sp = (struct serv_parm *)((u8 *)prsp->virt + + sizeof(u32)); + + ndlp->vmid_support = 0; + if ((phba->cfg_vmid_app_header && sp->cmn.app_hdr_support) || + (phba->cfg_vmid_priority_tagging && + sp->cmn.priority_tagging)) { + lpfc_printf_log(phba, KERN_DEBUG, LOG_ELS, + "4018 app_hdr_support %d tagging %d DID x%x\n", + sp->cmn.app_hdr_support, + sp->cmn.priority_tagging, + ndlp->nlp_DID); + /* if the dest port supports VMID, mark it in ndlp */ + ndlp->vmid_support = 1; + } + lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_CMPL_PLOGI); } @@ -2137,6 +2198,14 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion)); sp->cmn.bbRcvSizeMsb &= 0xF; + /* Check if the destination port supports VMID */ + ndlp->vmid_support = 0; + if (vport->vmid_priority_tagging) + sp->cmn.priority_tagging = 1; + else if (phba->cfg_vmid_app_header && + bf_get(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags)) + sp->cmn.app_hdr_support = 1; + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, "Issue PLOGI: did:x%x", did, 0, 0); @@ -2869,6 +2938,11 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * log into the remote port. 
*/ if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { + spin_lock_irq(&ndlp->lock); + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_flag |= NLP_RELEASE_RPI; + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + spin_unlock_irq(&ndlp->lock); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); lpfc_els_free_iocb(phba, cmdiocb); @@ -3061,6 +3135,95 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /** + * lpfc_reg_fab_ctrl_node - RPI register the fabric controller node. + * @vport: pointer to lpfc_vport data structure. + * @fc_ndlp: pointer to the fabric controller (0xfffffd) node. + * + * This routine registers the rpi assigned to the fabric controller + * NPort_ID (0xfffffd) with the port and moves the node to UNMAPPED + * state triggering a registration with the SCSI transport. + * + * This routine is single out because the fabric controller node + * does not receive a PLOGI. This routine is consumed by the + * SCR and RDF ELS commands. Callers are expected to qualify + * with SLI4 first. + **/ +static int +lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) +{ + int rc = 0; + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ns_ndlp; + LPFC_MBOXQ_t *mbox; + struct lpfc_dmabuf *mp; + + if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) + return rc; + + ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (!ns_ndlp) + return -ENODEV; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0935 %s: Reg FC RPI x%x on FC DID x%x NSSte: x%x\n", + __func__, fc_ndlp->nlp_rpi, fc_ndlp->nlp_DID, + ns_ndlp->nlp_state); + if (ns_ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENODEV; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!mbox) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "0936 %s: no memory for reg_login " + "Data: x%x x%x x%x x%x\n", __func__, + fc_ndlp->nlp_DID, fc_ndlp->nlp_state, + fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); + return -ENOMEM; + } + rc = lpfc_reg_rpi(phba, vport->vpi, fc_ndlp->nlp_DID, + (u8 *)&vport->fc_sparam, mbox, fc_ndlp->nlp_rpi); + if (rc) { + rc = -EACCES; + goto out; + } + + fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; + mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); + if (!mbox->ctx_ndlp) { + rc = -ENOMEM; + goto out_mem; + } + + mbox->vport = vport; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + rc = -ENODEV; + lpfc_nlp_put(fc_ndlp); + goto out_mem; + } + /* Success path. Exit. */ + lpfc_nlp_set_state(vport, fc_ndlp, + NLP_STE_REG_LOGIN_ISSUE); + return 0; + + out_mem: + fc_ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + mp = (struct lpfc_dmabuf *)mbox->ctx_buf; + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + + out: + mempool_free(mbox, phba->mbox_mem_pool); + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "0938 %s: failed to format reg_login " + "Data: x%x x%x x%x x%x\n", __func__, + fc_ndlp->nlp_DID, fc_ndlp->nlp_state, + fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); + return rc; +} + +/** * lpfc_cmpl_els_disc_cmd - Completion callback function for Discovery ELS cmd * @phba: pointer to lpfc hba data structure. * @cmdiocb: pointer to lpfc command iocb data structure. 
@@ -3206,10 +3369,18 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint8_t retry) elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_SCR); - if (!elsiocb) return 1; + if (phba->sli_rev == LPFC_SLI_REV4) { + rc = lpfc_reg_fab_ctrl_node(vport, ndlp); + if (rc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "0937 %s: Failed to reg fc node, rc %d\n", + __func__, rc); + return 1; + } + } pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); *((uint32_t *) (pcmd)) = ELS_CMD_SCR; @@ -3497,6 +3668,17 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) if (!elsiocb) return -ENOMEM; + if (phba->sli_rev == LPFC_SLI_REV4 && + !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, + "0939 %s: FC_NODE x%x RPI x%x flag x%x " + "ste x%x type x%x Not registered\n", + __func__, ndlp->nlp_DID, ndlp->nlp_rpi, + ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_type); + return -ENODEV; + } + /* Configure the payload for the supported FPIN events. */ prdf = (struct lpfc_els_rdf_req *) (((struct lpfc_dmabuf *)elsiocb->context2)->virt); @@ -3537,6 +3719,43 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry) return 0; } + /** + * lpfc_els_rcv_rdf - Receive RDF ELS request from the fabric. + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * A received RDF implies a possible change to fabric supported diagnostic + * functions. This routine sends LS_ACC and then has the Nx_Port issue a new + * RDF request to reregister for supported diagnostic functions. + * + * Return code + * 0 - Success + * -EIO - Failed to process received RDF + **/ +static int +lpfc_els_rcv_rdf(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + /* Send LS_ACC */ + if (lpfc_els_rsp_acc(vport, ELS_CMD_RDF, cmdiocb, ndlp, NULL)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "1623 Failed to RDF_ACC from x%x for x%x\n", + ndlp->nlp_DID, vport->fc_myDID); + return -EIO; + } + + /* Issue new RDF for reregistering */ + if (lpfc_issue_els_rdf(vport, 0)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2623 Failed to re register RDF for x%x\n", + vport->fc_myDID); + return -EIO; + } + + return 0; +} + /** * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry * @vport: pointer to a host virtual N_Port data structure. @@ -4383,12 +4602,27 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); + /* This clause allows the LOGO ACC to complete and free resources + * for the Fabric Domain Controller. It does deliberately skip + * the unreg_rpi and release rpi because some fabrics send RDP + * requests after logging out from the initiator. + */ + if (ndlp->nlp_type & NLP_FABRIC && + ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) + goto out; + if (ndlp->nlp_state == NLP_STE_NPR_NODE) { /* NPort Recovery mode or node is just allocated */ if (!lpfc_nlp_not_used(ndlp)) { - /* If the ndlp is being used by another discovery - * thread, just unregister the RPI. + /* A LOGO is completing and the node is in NPR state. + * If this a fabric node that cleared its transport + * registration, release the rpi. 
*/ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_flag |= NLP_RELEASE_RPI; + spin_unlock_irq(&ndlp->lock); lpfc_unreg_rpi(vport, ndlp); } else { /* Indicate the node has already released, should @@ -4397,7 +4631,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, cmdiocb->context1 = NULL; } } - + out: /* * The driver received a LOGO from the rport and has ACK'd it. * At this point, the driver is done so release the IOCB @@ -4424,28 +4658,37 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + u32 mbx_flag = pmb->mbox_flag; + u32 mbx_cmd = pmb->u.mb.mbxCommand; pmb->ctx_buf = NULL; pmb->ctx_ndlp = NULL; - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - mempool_free(pmb, phba->mbox_mem_pool); if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0006 rpi x%x DID:%x flg:%x %d x%px\n", + "0006 rpi x%x DID:%x flg:%x %d x%px " + "mbx_cmd x%x mbx_flag x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, - kref_read(&ndlp->kref), - ndlp); - /* This is the end of the default RPI cleanup logic for - * this ndlp and it could get released. Clear the nlp_flags to - * prevent any further processing. + kref_read(&ndlp->kref), ndlp, mbx_cmd, + mbx_flag, pmb); + + /* This ends the default/temporary RPI cleanup logic for this + * ndlp and the node and rpi needs to be released. Free the rpi + * first on an UNREG_LOGIN and then release the final + * references. */ + spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + if (mbx_cmd == MBX_UNREG_LOGIN) + ndlp->nlp_flag &= ~NLP_UNREG_INP; + spin_unlock_irq(&ndlp->lock); lpfc_nlp_put(ndlp); - lpfc_nlp_not_used(ndlp); + lpfc_drop_node(ndlp->vport, ndlp); } + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(pmb, phba->mbox_mem_pool); return; } @@ -4503,11 +4746,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS response tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0110 ELS response tag x%x completes " - "Data: x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%px\n", cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus, rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_rpi); + ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox); if (mbox) { if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { @@ -4587,6 +4830,20 @@ out: spin_unlock_irq(&ndlp->lock); } + /* An SLI4 NPIV instance wants to drop the node at this point under + * these conditions and release the RPI. + */ + if (phba->sli_rev == LPFC_SLI_REV4 && + (vport && vport->port_type == LPFC_NPIV_PORT) && + ndlp->nlp_flag & NLP_RELEASE_RPI) { + lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); + spin_lock_irq(&ndlp->lock); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + spin_unlock_irq(&ndlp->lock); + lpfc_drop_node(vport, ndlp); + } + /* Release the originating I/O reference. 
*/ lpfc_els_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); @@ -4632,6 +4889,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, uint16_t cmdsize; int rc; ELS_PKT *els_pkt_ptr; + struct fc_els_rdf_resp *rdf_resp; oldcmd = &oldiocb->iocb; @@ -4743,6 +5001,29 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, "Issue ACC PRLO: did:x%x flg:x%x", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; + case ELS_CMD_RDF: + cmdsize = sizeof(*rdf_resp); + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, + ndlp, ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + icmd = &elsiocb->iocb; + icmd->ulpContext = oldcmd->ulpContext; /* Xri / rx_id */ + icmd->unsli3.rcvsli3.ox_id = oldcmd->unsli3.rcvsli3.ox_id; + pcmd = (((struct lpfc_dmabuf *)elsiocb->context2)->virt); + rdf_resp = (struct fc_els_rdf_resp *)pcmd; + memset(rdf_resp, 0, sizeof(*rdf_resp)); + rdf_resp->acc_hdr.la_cmd = ELS_LS_ACC; + + /* FC-LS-5 specifies desc_list_len shall be set to 12 */ + rdf_resp->desc_list_len = cpu_to_be32(12); + + /* FC-LS-5 specifies LS REQ Information descriptor */ + rdf_resp->lsri.desc_tag = cpu_to_be32(1); + rdf_resp->lsri.desc_len = cpu_to_be32(sizeof(u32)); + rdf_resp->lsri.rqst_w0.cmd = ELS_RDF; + break; default: return 1; } @@ -4775,10 +5056,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " - "RPI: x%x, fc_flag x%x\n", + "RPI: x%x, fc_flag x%x refcnt %d\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, - ndlp->nlp_rpi, vport->fc_flag); + ndlp->nlp_rpi, vport->fc_flag, kref_read(&ndlp->kref)); return 0; } @@ -4856,6 +5137,17 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 1; } + /* The NPIV instance is rejecting this unsolicited ELS. Make sure the + * node's assigned RPI needs to be released as this node will get + * freed. 
+ */ + if (phba->sli_rev == LPFC_SLI_REV4 && + vport->port_type == LPFC_NPIV_PORT) { + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= NLP_RELEASE_RPI; + spin_unlock_irq(&ndlp->lock); + } + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -8845,6 +9137,20 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* There are no replies, so no rjt codes */ break; + case ELS_CMD_RDF: + phba->fc_stat.elsRcvRDF++; + /* Accept RDF only from fabric controller */ + if (did != Fabric_Cntl_DID) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "1115 Received RDF from invalid DID " + "x%x\n", did); + rjt_err = LSRJT_PROTOCOL_ERR; + rjt_exp = LSEXP_NOTHING_MORE; + goto lsrjt; + } + + lpfc_els_rcv_rdf(vport, elsiocb, ndlp); + break; default: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", @@ -10208,3 +10514,312 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, lpfc_unreg_rpi(vport, ndlp); } +static void lpfc_init_cs_ctl_bitmap(struct lpfc_vport *vport) +{ + bitmap_zero(vport->vmid_priority_range, LPFC_VMID_MAX_PRIORITY_RANGE); +} + +static void +lpfc_vmid_set_cs_ctl_range(struct lpfc_vport *vport, u32 min, u32 max) +{ + u32 i; + + if ((min > max) || (max > LPFC_VMID_MAX_PRIORITY_RANGE)) + return; + + for (i = min; i <= max; i++) + set_bit(i, vport->vmid_priority_range); +} + +static void lpfc_vmid_put_cs_ctl(struct lpfc_vport *vport, u32 ctcl_vmid) +{ + set_bit(ctcl_vmid, vport->vmid_priority_range); +} + +u32 lpfc_vmid_get_cs_ctl(struct lpfc_vport *vport) +{ + u32 i; + + i = find_first_bit(vport->vmid_priority_range, + LPFC_VMID_MAX_PRIORITY_RANGE); + + if (i == LPFC_VMID_MAX_PRIORITY_RANGE) + return 0; + + clear_bit(i, vport->vmid_priority_range); + return i; +} + +#define MAX_PRIORITY_DESC 255 + +static void +lpfc_cmpl_els_qfpa(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + struct priority_range_desc *desc; + struct lpfc_dmabuf *prsp = NULL; + struct lpfc_vmid_priority_range *vmid_range = NULL; + u32 *data; + struct lpfc_dmabuf *dmabuf = cmdiocb->context2; + IOCB_t *irsp = &rspiocb->iocb; + u8 *pcmd, max_desc; + u32 len, i; + struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)cmdiocb->context1; + + prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + + pcmd = prsp->virt; + data = (u32 *)pcmd; + if (data[0] == ELS_CMD_LS_RJT) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, + "3277 QFPA LS_RJT x%x x%x\n", + data[0], data[1]); + goto out; + } + if (irsp->ulpStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, + "6529 QFPA failed with status x%x x%x\n", + irsp->ulpStatus, irsp->un.ulpWord[4]); + goto out; + } + + if (!vport->qfpa_res) { + max_desc = FCELSSIZE / sizeof(*vport->qfpa_res); + vport->qfpa_res = kcalloc(max_desc, sizeof(*vport->qfpa_res), + GFP_KERNEL); + if (!vport->qfpa_res) + goto out; + } + + len = *((u32 *)(pcmd + 4)); + len = be32_to_cpu(len); + memcpy(vport->qfpa_res, pcmd, len + 8); + len = len / LPFC_PRIORITY_RANGE_DESC_SIZE; + + desc = (struct priority_range_desc *)(pcmd + 8); + vmid_range = vport->vmid_priority.vmid_range; + if (!vmid_range) { + vmid_range = kcalloc(MAX_PRIORITY_DESC, sizeof(*vmid_range), + GFP_KERNEL); + if (!vmid_range) { + kfree(vport->qfpa_res); + goto out; + } + vport->vmid_priority.vmid_range = vmid_range; + } + vport->vmid_priority.num_descriptors = len; + + for (i = 0; i < len; i++, 
vmid_range++, desc++) { + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, + "6539 vmid values low=%d, high=%d, qos=%d, " + "local ve id=%d\n", desc->lo_range, + desc->hi_range, desc->qos_priority, + desc->local_ve_id); + + vmid_range->low = desc->lo_range << 1; + if (desc->local_ve_id == QFPA_ODD_ONLY) + vmid_range->low++; + if (desc->qos_priority) + vport->vmid_flag |= LPFC_VMID_QOS_ENABLED; + vmid_range->qos = desc->qos_priority; + + vmid_range->high = desc->hi_range << 1; + if ((desc->local_ve_id == QFPA_ODD_ONLY) || + (desc->local_ve_id == QFPA_EVEN_ODD)) + vmid_range->high++; + } + lpfc_init_cs_ctl_bitmap(vport); + for (i = 0; i < vport->vmid_priority.num_descriptors; i++) { + lpfc_vmid_set_cs_ctl_range(vport, + vport->vmid_priority.vmid_range[i].low, + vport->vmid_priority.vmid_range[i].high); + } + + vport->vmid_flag |= LPFC_VMID_QFPA_CMPL; + out: + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); +} + +int lpfc_issue_els_qfpa(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *elsiocb; + u8 *pcmd; + int ret; + + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENXIO; + + elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_QFPA_SIZE, 2, ndlp, + ndlp->nlp_DID, ELS_CMD_QFPA); + if (!elsiocb) + return -ENOMEM; + + pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + *((u32 *)(pcmd)) = ELS_CMD_QFPA; + pcmd += 4; + + elsiocb->iocb_cmpl = lpfc_cmpl_els_qfpa; + + elsiocb->context1 = lpfc_nlp_get(ndlp); + if (!elsiocb->context1) { + lpfc_els_free_iocb(vport->phba, elsiocb); + return -ENXIO; + } + + ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 2); + if (ret != IOCB_SUCCESS) { + lpfc_els_free_iocb(phba, elsiocb); + lpfc_nlp_put(ndlp); + return -EIO; + } + vport->vmid_flag &= ~LPFC_VMID_QOS_ENABLED; + return 0; +} + +int +lpfc_vmid_uvem(struct lpfc_vport *vport, + struct lpfc_vmid *vmid, bool instantiated) +{ + struct lpfc_vem_id_desc *vem_id_desc; + struct lpfc_nodelist *ndlp; + struct lpfc_iocbq *elsiocb; + struct instantiated_ve_desc *inst_desc; + struct lpfc_vmid_context *vmid_context; + u8 *pcmd; + u32 *len; + int ret = 0; + + ndlp = lpfc_findnode_did(vport, Fabric_DID); + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + return -ENXIO; + + vmid_context = kmalloc(sizeof(*vmid_context), GFP_KERNEL); + if (!vmid_context) + return -ENOMEM; + elsiocb = lpfc_prep_els_iocb(vport, 1, LPFC_UVEM_SIZE, 2, + ndlp, Fabric_DID, ELS_CMD_UVEM); + if (!elsiocb) + goto out; + + lpfc_printf_vlog(vport, KERN_DEBUG, LOG_ELS, + "3427 Host vmid %s %d\n", + vmid->host_vmid, instantiated); + vmid_context->vmp = vmid; + vmid_context->nlp = ndlp; + vmid_context->instantiated = instantiated; + elsiocb->vmid_tag.vmid_context = vmid_context; + pcmd = (u8 *)(((struct lpfc_dmabuf *)elsiocb->context2)->virt); + + if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) + memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid, + LPFC_COMPRESS_VMID_SIZE); + + *((u32 *)(pcmd)) = ELS_CMD_UVEM; + len = (u32 *)(pcmd + 4); + *len = cpu_to_be32(LPFC_UVEM_SIZE - 8); + + vem_id_desc = (struct lpfc_vem_id_desc *)(pcmd + 8); + vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG); + vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE); + memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid, + LPFC_COMPRESS_VMID_SIZE); + + inst_desc = (struct instantiated_ve_desc *)(pcmd + 32); + inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); + inst_desc->length = 
be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE); + memcpy(inst_desc->global_vem_id, vmid->host_vmid, + LPFC_COMPRESS_VMID_SIZE); + + bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID); + bf_set(lpfc_instantiated_local_id, inst_desc, + vmid->un.cs_ctl_vmid); + if (instantiated) { + inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG); + } else { + inst_desc->tag = be32_to_cpu(DEINSTANTIATED_VE_DESC_TAG); + lpfc_vmid_put_cs_ctl(vport, vmid->un.cs_ctl_vmid); + } + inst_desc->word6 = cpu_to_be32(inst_desc->word6); + + elsiocb->iocb_cmpl = lpfc_cmpl_els_uvem; + + elsiocb->context1 = lpfc_nlp_get(ndlp); + if (!elsiocb->context1) { + lpfc_els_free_iocb(vport->phba, elsiocb); + goto out; + } + + ret = lpfc_sli_issue_iocb(vport->phba, LPFC_ELS_RING, elsiocb, 0); + if (ret != IOCB_SUCCESS) { + lpfc_els_free_iocb(vport->phba, elsiocb); + lpfc_nlp_put(ndlp); + goto out; + } + + return 0; + out: + kfree(vmid_context); + return -EIO; +} + +static void +lpfc_cmpl_els_uvem(struct lpfc_hba *phba, struct lpfc_iocbq *icmdiocb, + struct lpfc_iocbq *rspiocb) +{ + struct lpfc_vport *vport = icmdiocb->vport; + struct lpfc_dmabuf *prsp = NULL; + struct lpfc_vmid_context *vmid_context = + icmdiocb->vmid_tag.vmid_context; + struct lpfc_nodelist *ndlp = icmdiocb->context1; + u8 *pcmd; + u32 *data; + IOCB_t *irsp = &rspiocb->iocb; + struct lpfc_dmabuf *dmabuf = icmdiocb->context2; + struct lpfc_vmid *vmid; + + vmid = vmid_context->vmp; + if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) + ndlp = NULL; + + prsp = list_get_first(&dmabuf->list, struct lpfc_dmabuf, list); + if (!prsp) + goto out; + pcmd = prsp->virt; + data = (u32 *)pcmd; + if (data[0] == ELS_CMD_LS_RJT) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, + "4532 UVEM LS_RJT %x %x\n", data[0], data[1]); + goto out; + } + if (irsp->ulpStatus) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, + "4533 UVEM error status %x: %x\n", + irsp->ulpStatus, irsp->un.ulpWord[4]); + goto out; + } + spin_lock(&phba->hbalock); + /* Set IN USE flag */ + vport->vmid_flag |= LPFC_VMID_IN_USE; + phba->pport->vmid_flag |= LPFC_VMID_IN_USE; + spin_unlock(&phba->hbalock); + + if (vmid_context->instantiated) { + write_lock(&vport->vmid_lock); + vmid->flag |= LPFC_VMID_REGISTERED; + vmid->flag &= ~LPFC_VMID_REQ_REGISTER; + write_unlock(&vport->vmid_lock); + } + + out: + kfree(vmid_context); + lpfc_els_free_iocb(phba, icmdiocb); + lpfc_nlp_put(ndlp); +} diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index f5a898c2c904..7cc5920979f8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -72,14 +72,14 @@ static void lpfc_disc_flush_list(struct lpfc_vport *vport); static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); static int lpfc_fcf_inuse(struct lpfc_hba *); static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); +static void lpfc_check_inactive_vmid(struct lpfc_hba *phba); +static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba); static int lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp) { if (ndlp->nlp_fc4_type || - ndlp->nlp_DID == Fabric_DID || - ndlp->nlp_DID == NameServer_DID || - ndlp->nlp_DID == FDMI_DID) + ndlp->nlp_type & NLP_FABRIC) return 1; return 0; } @@ -237,6 +237,110 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) } /** + * lpfc_check_inactive_vmid_one - VMID inactivity checker for a vport + * @vport: Pointer to vport context object. + * + * This function checks for idle VMID entries related to a particular vport. 
If + * found unused/idle, free them accordingly. + **/ +static void lpfc_check_inactive_vmid_one(struct lpfc_vport *vport) +{ + u16 keep; + u32 difftime = 0, r, bucket; + u64 *lta; + int cpu; + struct lpfc_vmid *vmp; + + write_lock(&vport->vmid_lock); + + if (!vport->cur_vmid_cnt) + goto out; + + /* iterate through the table */ + hash_for_each(vport->hash_table, bucket, vmp, hnode) { + keep = 0; + if (vmp->flag & LPFC_VMID_REGISTERED) { + /* check if the particular VMID is in use */ + /* for all available per cpu variable */ + for_each_possible_cpu(cpu) { + /* if last access time is less than timeout */ + lta = per_cpu_ptr(vmp->last_io_time, cpu); + if (!lta) + continue; + difftime = (jiffies) - (*lta); + if ((vport->vmid_inactivity_timeout * + JIFFIES_PER_HR) > difftime) { + keep = 1; + break; + } + } + + /* if none of the cpus have been used by the vm, */ + /* remove the entry if already registered */ + if (!keep) { + /* mark the entry for deregistration */ + vmp->flag = LPFC_VMID_DE_REGISTER; + write_unlock(&vport->vmid_lock); + if (vport->vmid_priority_tagging) + r = lpfc_vmid_uvem(vport, vmp, false); + else + r = lpfc_vmid_cmd(vport, + SLI_CTAS_DAPP_IDENT, + vmp); + + /* decrement number of active vms and mark */ + /* entry in slot as free */ + write_lock(&vport->vmid_lock); + if (!r) { + struct lpfc_vmid *ht = vmp; + + vport->cur_vmid_cnt--; + ht->flag = LPFC_VMID_SLOT_FREE; + free_percpu(ht->last_io_time); + ht->last_io_time = NULL; + hash_del(&ht->hnode); + } + } + } + } + out: + write_unlock(&vport->vmid_lock); +} + +/** + * lpfc_check_inactive_vmid - VMID inactivity checker + * @phba: Pointer to hba context object. + * + * This function is called from the worker thread to determine if an entry in + * the VMID table can be released since there was no I/O activity seen from that + * particular VM for the specified time. When this happens, the entry in the + * table is released and also the resources on the switch cleared. + **/ + +static void lpfc_check_inactive_vmid(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (!vports) + return; + + for (i = 0; i <= phba->max_vports; i++) { + if ((!vports[i]) && (i == 0)) + vport = phba->pport; + else + vport = vports[i]; + if (!vport) + break; + + lpfc_check_inactive_vmid_one(vport); + } + lpfc_destroy_vport_work_array(phba, vports); +} + +/** * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler * @ndlp: Pointer to remote node object. * @@ -325,6 +429,32 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } +static void lpfc_check_vmid_qfpa_issue(struct lpfc_hba *phba) +{ + struct lpfc_vport *vport; + struct lpfc_vport **vports; + int i; + + vports = lpfc_create_vport_work_array(phba); + if (!vports) + return; + + for (i = 0; i <= phba->max_vports; i++) { + if ((!vports[i]) && (i == 0)) + vport = phba->pport; + else + vport = vports[i]; + if (!vport) + break; + + if (vport->vmid_flag & LPFC_VMID_ISSUE_QFPA) { + if (!lpfc_issue_els_qfpa(vport)) + vport->vmid_flag &= ~LPFC_VMID_ISSUE_QFPA; + } + } + lpfc_destroy_vport_work_array(phba, vports); +} + /** * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler * @phba: Pointer to hba context object. 
@@ -645,6 +775,22 @@ lpfc_work_done(struct lpfc_hba *phba) if (ha_copy & HA_LATT) lpfc_handle_latt(phba); + /* Handle VMID Events */ + if (lpfc_is_vmid_enabled(phba)) { + if (phba->pport->work_port_events & + WORKER_CHECK_VMID_ISSUE_QFPA) { + lpfc_check_vmid_qfpa_issue(phba); + phba->pport->work_port_events &= + ~WORKER_CHECK_VMID_ISSUE_QFPA; + } + if (phba->pport->work_port_events & + WORKER_CHECK_INACTIVE_VMID) { + lpfc_check_inactive_vmid(phba); + phba->pport->work_port_events &= + ~WORKER_CHECK_INACTIVE_VMID; + } + } + /* Process SLI4 events */ if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { if (phba->hba_flag & HBA_RRQ_ACTIVE) @@ -826,7 +972,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || ((vport->port_type == LPFC_NPIV_PORT) && ((ndlp->nlp_DID == NameServer_DID) || - (ndlp->nlp_DID == FDMI_DID)))) + (ndlp->nlp_DID == FDMI_DID) || + (ndlp->nlp_DID == Fabric_Cntl_DID)))) lpfc_unreg_rpi(vport, ndlp); /* Leave Fabric nodes alone on link down */ @@ -4160,6 +4307,53 @@ out: return; } +/* + * This routine handles processing a Fabric Controller REG_LOGIN mailbox + * command upon completion. It is setup in the LPFC_MBOXQ + * as the completion routine when the command is handed off to the SLI layer. + */ +void +lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + MAILBOX_t *mb = &pmb->u.mb; + struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); + struct lpfc_nodelist *ndlp; + + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + pmb->ctx_ndlp = NULL; + pmb->ctx_buf = NULL; + + if (mb->mbxStatus) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "0933 %s: Register FC login error: 0x%x\n", + __func__, mb->mbxStatus); + goto out; + } + + if (phba->sli_rev < LPFC_SLI_REV4) + ndlp->nlp_rpi = mb->un.varWords[0]; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0934 %s: Complete FC x%x RegLogin rpi x%x ste x%x\n", + __func__, ndlp->nlp_DID, ndlp->nlp_rpi, + ndlp->nlp_state); + + ndlp->nlp_flag |= NLP_RPI_REGISTERED; + ndlp->nlp_type |= NLP_FABRIC; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); + + out: + lpfc_mbuf_free(phba, mp->virt, mp->phys); + kfree(mp); + mempool_free(pmb, phba->mbox_mem_pool); + + /* Drop the reference count from the mbox at the end after + * all the current reference to the ndlp have been done. + */ + lpfc_nlp_put(ndlp); +} + static void lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { @@ -4789,12 +4983,17 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { + /* NLP_RELEASE_RPI is only set for SLI4 ports. 
*/ if (ndlp->nlp_flag & NLP_RELEASE_RPI) { lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_RELEASE_RPI; ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + spin_unlock_irq(&ndlp->lock); } + spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_UNREG_INP; + spin_unlock_irq(&ndlp->lock); } } @@ -5129,8 +5328,10 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->dev_loss_evt.evt_listp); list_del_init(&ndlp->recovery_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4) ndlp->nlp_flag |= NLP_RELEASE_RPI; + return 0; } @@ -6176,8 +6377,23 @@ lpfc_nlp_release(struct kref *kref) lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_cleanup_node(vport, ndlp); - /* Clear Node key fields to give other threads notice - * that this node memory is not valid anymore. + /* Not all ELS transactions have registered the RPI with the port. + * In these cases the rpi usage is temporary and the node is + * released when the WQE is completed. Catch this case to free the + * RPI to the pool. Because this node is in the release path, a lock + * is unnecessary. All references are gone and the node has been + * dequeued. + */ + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { + if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && + !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + } + } + + /* The node is not freed back to memory, it is released to a pool so + * the node fields need to be cleaned up. */ ndlp->vport = NULL; ndlp->nlp_state = NLP_STE_FREED_NODE; @@ -6257,6 +6473,7 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) "node not used: did:x%x flg:x%x refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); + if (kref_read(&ndlp->kref) == 1) if (lpfc_nlp_put(ndlp)) return 1; diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 42682d95af52..4a5a85ed42ec 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -275,6 +275,7 @@ struct lpfc_sli_ct_request { #define SLI_CT_ACCESS_DENIED 0x10 #define SLI_CT_INVALID_PORT_ID 0x11 #define SLI_CT_DATABASE_EMPTY 0x12 +#define SLI_CT_APP_ID_NOT_AVAILABLE 0x40 /* * Name Server Command Codes @@ -400,16 +401,16 @@ struct csp { uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ uint16_t multicast:1; /* FC Word 1, bit 25 */ - uint16_t broadcast:1; /* FC Word 1, bit 24 */ + uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */ - uint16_t huntgroup:1; /* FC Word 1, bit 23 */ + uint16_t priority_tagging:1; /* FC Word 1, bit 23 */ uint16_t simplex:1; /* FC Word 1, bit 22 */ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ uint16_t dhd:1; /* FC Word 1, bit 18 */ uint16_t contIncSeqCnt:1; /* FC Word 1, bit 17 */ uint16_t payloadlength:1; /* FC Word 1, bit 16 */ #else /* __LITTLE_ENDIAN_BITFIELD */ - uint16_t broadcast:1; /* FC Word 1, bit 24 */ + uint16_t app_hdr_support:1; /* FC Word 1, bit 24 */ uint16_t multicast:1; /* FC Word 1, bit 25 */ uint16_t edtovResolution:1; /* FC Word 1, bit 26 */ uint16_t altBbCredit:1; /* FC Word 1, bit 27 */ @@ -423,7 +424,7 @@ struct csp { uint16_t dhd:1; /* FC Word 1, bit 18 */ uint16_t word1Reserved1:3; /* FC Word 1, bit 21:19 */ uint16_t simplex:1; /* FC Word 1, bit 22 */ - uint16_t huntgroup:1; /* FC Word 1, bit 23 */ + uint16_t priority_tagging:1; /* FC Word 1, bit 23 */ #endif uint8_t bbRcvSizeMsb; /* Upper nibble is reserved */ @@ 
-607,6 +608,8 @@ struct fc_vft_header { #define ELS_CMD_LIRR 0x7A000000 #define ELS_CMD_LCB 0x81000000 #define ELS_CMD_FPIN 0x16000000 +#define ELS_CMD_QFPA 0xB0000000 +#define ELS_CMD_UVEM 0xB1000000 #else /* __LITTLE_ENDIAN_BITFIELD */ #define ELS_CMD_MASK 0xffff #define ELS_RSP_MASK 0xff @@ -649,6 +652,8 @@ struct fc_vft_header { #define ELS_CMD_LIRR 0x7A #define ELS_CMD_LCB 0x81 #define ELS_CMD_FPIN ELS_FPIN +#define ELS_CMD_QFPA 0xB0 +#define ELS_CMD_UVEM 0xB1 #endif /* @@ -1317,6 +1322,117 @@ struct fc_rdp_res_frame { }; +/* UVEM */ + +#define LPFC_UVEM_SIZE 60 +#define LPFC_UVEM_VEM_ID_DESC_SIZE 16 +#define LPFC_UVEM_VE_MAP_DESC_SIZE 20 + +#define VEM_ID_DESC_TAG 0x0001000A +struct lpfc_vem_id_desc { + uint32_t tag; + uint32_t length; + uint8_t vem_id[16]; +}; + +#define LPFC_QFPA_SIZE 4 + +#define INSTANTIATED_VE_DESC_TAG 0x0001000B +struct instantiated_ve_desc { + uint32_t tag; + uint32_t length; + uint8_t global_vem_id[16]; + uint32_t word6; +#define lpfc_instantiated_local_id_SHIFT 0 +#define lpfc_instantiated_local_id_MASK 0x000000ff +#define lpfc_instantiated_local_id_WORD word6 +#define lpfc_instantiated_nport_id_SHIFT 8 +#define lpfc_instantiated_nport_id_MASK 0x00ffffff +#define lpfc_instantiated_nport_id_WORD word6 +}; + +#define DEINSTANTIATED_VE_DESC_TAG 0x0001000C +struct deinstantiated_ve_desc { + uint32_t tag; + uint32_t length; + uint8_t global_vem_id[16]; + uint32_t word6; +#define lpfc_deinstantiated_nport_id_SHIFT 0 +#define lpfc_deinstantiated_nport_id_MASK 0x000000ff +#define lpfc_deinstantiated_nport_id_WORD word6 +#define lpfc_deinstantiated_local_id_SHIFT 24 +#define lpfc_deinstantiated_local_id_MASK 0x00ffffff +#define lpfc_deinstantiated_local_id_WORD word6 +}; + +/* Query Fabric Priority Allocation Response */ +#define LPFC_PRIORITY_RANGE_DESC_SIZE 12 + +struct priority_range_desc { + uint32_t tag; + uint32_t length; + uint8_t lo_range; + uint8_t hi_range; + uint8_t qos_priority; + uint8_t local_ve_id; +}; + +struct fc_qfpa_res { + uint32_t reply_sequence; /* LS_ACC or LS_RJT */ + uint32_t length; /* FC Word 1 */ + struct priority_range_desc desc[1]; +}; + +/* Application Server command code */ +/* VMID */ + +#define SLI_CT_APP_SEV_Subtypes 0x20 /* Application Server subtype */ + +#define SLI_CTAS_GAPPIA_ENT 0x0100 /* Get Application Identifier */ +#define SLI_CTAS_GALLAPPIA 0x0101 /* Get All Application Identifier */ +#define SLI_CTAS_GALLAPPIA_ID 0x0102 /* Get All Application Identifier */ + /* for Nport */ +#define SLI_CTAS_GAPPIA_IDAPP 0x0103 /* Get Application Identifier */ + /* for Nport */ +#define SLI_CTAS_RAPP_IDENT 0x0200 /* Register Application Identifier */ +#define SLI_CTAS_DAPP_IDENT 0x0300 /* Deregister Application */ + /* Identifier */ +#define SLI_CTAS_DALLAPP_ID 0x0301 /* Deregister All Application */ + /* Identifier */ + +struct entity_id_object { + uint8_t entity_id_len; + uint8_t entity_id[255]; /* VM UUID */ +}; + +struct app_id_object { + uint32_t port_id; + uint32_t app_id; + struct entity_id_object obj; +}; + +struct lpfc_vmid_rapp_ident_list { + uint32_t no_of_objects; + struct entity_id_object obj[1]; +}; + +struct lpfc_vmid_dapp_ident_list { + uint32_t no_of_objects; + struct entity_id_object obj[1]; +}; + +#define GALLAPPIA_ID_LAST 0x80 +struct lpfc_vmid_gallapp_ident_list { + uint8_t control; + uint8_t reserved[3]; + struct app_id_object app_id; +}; + +#define RAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4) +#define DAPP_IDENT_OFFSET (offsetof(struct lpfc_sli_ct_request, un) + 4) +#define GALLAPPIA_ID_SIZE 
(offsetof(struct lpfc_sli_ct_request, un) + 4) +#define DALLAPP_ID_SIZE (offsetof(struct lpfc_sli_ct_request, un) + 4) + /******** FDMI ********/ /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index f77e71e6dbbd..eb8c735a243b 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -273,6 +273,9 @@ struct lpfc_sli4_flags { #define lpfc_vfi_rsrc_rdy_MASK 0x00000001 #define lpfc_vfi_rsrc_rdy_WORD word0 #define LPFC_VFI_RSRC_RDY 1 +#define lpfc_ftr_ashdr_SHIFT 4 +#define lpfc_ftr_ashdr_MASK 0x00000001 +#define lpfc_ftr_ashdr_WORD word0 }; struct sli4_bls_rsp { @@ -2944,6 +2947,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16 #define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2 +#define lpfc_mbx_rq_ftr_rq_ashdr_SHIFT 17 +#define lpfc_mbx_rq_ftr_rq_ashdr_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_ashdr_WORD word2 uint32_t word3; #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 #define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 @@ -2975,6 +2981,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16 #define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_ashdr_SHIFT 17 +#define lpfc_mbx_rq_ftr_rsp_ashdr_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_ashdr_WORD word3 }; struct lpfc_mbx_memory_dump_type3 { @@ -4219,6 +4228,9 @@ struct wqe_common { #define wqe_xchg_WORD word10 #define LPFC_SCSI_XCHG 0x0 #define LPFC_NVME_XCHG 0x1 +#define wqe_appid_SHIFT 5 +#define wqe_appid_MASK 0x00000001 +#define wqe_appid_WORD word10 #define wqe_oas_SHIFT 6 #define wqe_oas_MASK 0x00000001 #define wqe_oas_WORD word10 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 5f018d02bf56..f3032e30c3e4 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -98,6 +98,7 @@ static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); #define LPFC_NVMET_BUF_POST 254 +static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); /** * lpfc_config_port_prep - Perform lpfc initialization prior to config port @@ -2888,6 +2889,10 @@ lpfc_cleanup(struct lpfc_vport *vport) if (phba->link_state > LPFC_LINK_DOWN) lpfc_port_link_failure(vport); + /* Clean up VMID resources */ + if (lpfc_is_vmid_enabled(phba)) + lpfc_vmid_vport_cleanup(vport); + list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (vport->port_type != LPFC_PHYSICAL_PORT && ndlp->nlp_DID == Fabric_DID) { @@ -3532,13 +3537,6 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) { - /* Driver must assume RPI is invalid for - * any unused or inactive node. - */ - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - continue; - } spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_ADISC; @@ -4315,6 +4313,55 @@ lpfc_get_wwpn(struct lpfc_hba *phba) } /** + * lpfc_vmid_res_alloc - Allocates resources for VMID + * @phba: pointer to lpfc hba data structure. + * @vport: pointer to vport data structure + * + * This routine allocated the resources needed for the VMID. 
+ * + * Return codes + * 0 on Success + * Non-0 on Failure + */ +static int +lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport) +{ + /* VMID feature is supported only on SLI4 */ + if (phba->sli_rev == LPFC_SLI_REV3) { + phba->cfg_vmid_app_header = 0; + phba->cfg_vmid_priority_tagging = 0; + } + + if (lpfc_is_vmid_enabled(phba)) { + vport->vmid = + kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid), + GFP_KERNEL); + if (!vport->vmid) + return -ENOMEM; + + rwlock_init(&vport->vmid_lock); + + /* Set the VMID parameters for the vport */ + vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging; + vport->vmid_inactivity_timeout = + phba->cfg_vmid_inactivity_timeout; + vport->max_vmid = phba->cfg_max_vmid; + vport->cur_vmid_cnt = 0; + + vport->vmid_priority_range = bitmap_zalloc + (LPFC_VMID_MAX_PRIORITY_RANGE, GFP_KERNEL); + + if (!vport->vmid_priority_range) { + kfree(vport->vmid); + return -ENOMEM; + } + + hash_init(vport->hash_table); + } + return 0; +} + +/** * lpfc_create_port - Create an FC port * @phba: pointer to lpfc hba data structure. * @instance: a unique integer ID to this FC port. @@ -4466,6 +4513,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) vport->port_type, shost->sg_tablesize, phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt); + /* Allocate the resources for VMID */ + rc = lpfc_vmid_res_alloc(phba, vport); + + if (rc) + goto out; + /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); INIT_LIST_HEAD(&vport->rcv_buffer_list); @@ -4490,6 +4543,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) return vport; out_put_shost: + kfree(vport->vmid); + bitmap_free(vport->vmid_priority_range); scsi_host_put(shost); out: return NULL; @@ -4789,6 +4844,42 @@ lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) } /** + * lpfc_vmid_poll - VMID timeout detection + * @ptr: Map to lpfc_hba data structure pointer. + * + * This routine is invoked when there is no I/O on by a VM for the specified + * amount of time. When this situation is detected, the VMID has to be + * deregistered from the switch and all the local resources freed. The VMID + * will be reassigned to the VM once the I/O begins. + **/ +static void +lpfc_vmid_poll(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll); + u32 wake_up = 0; + + /* check if there is a need to issue QFPA */ + if (phba->pport->vmid_priority_tagging) { + wake_up = 1; + phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; + } + + /* Is the vmid inactivity timer enabled */ + if (phba->pport->vmid_inactivity_timeout || + phba->pport->load_flag & FC_DEREGISTER_ALL_APP_ID) { + wake_up = 1; + phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID; + } + + if (wake_up) + lpfc_worker_wake_up(phba); + + /* restart the timer for the next iteration */ + mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * + LPFC_VMID_TIMER)); +} + +/** * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code * @phba: pointer to lpfc hba data structure. * @acqe_link: pointer to the async link completion queue entry. 
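The lpfc_vmid_poll() callback added above does no VMID work itself: it only ORs WORKER_CHECK_VMID_ISSUE_QFPA and/or WORKER_CHECK_INACTIVE_VMID into work_port_events, wakes the worker thread, and re-arms itself for another LPFC_VMID_TIMER (300 s) interval, leaving the ELS/CT traffic to lpfc_work_done() in the lpfc_hbadisc.c hunk earlier. A minimal user-space sketch of that flag handshake (flag values are the ones from lpfc.h; locking, wake-up and timer re-arm are omitted):

#include <stdint.h>
#include <stdio.h>

#define WORKER_CHECK_INACTIVE_VMID   0x4000   /* from lpfc.h */
#define WORKER_CHECK_VMID_ISSUE_QFPA 0x8000   /* from lpfc.h */

static uint32_t work_port_events;

/* timer side: only post work; the driver then wakes the worker */
static void vmid_poll(int priority_tagging, int inactivity_timeout)
{
        if (priority_tagging)
                work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
        if (inactivity_timeout)
                work_port_events |= WORKER_CHECK_INACTIVE_VMID;
        /* driver also calls lpfc_worker_wake_up() and mod_timer() here */
}

/* worker side: consume and clear the posted bits */
static void work_done(void)
{
        if (work_port_events & WORKER_CHECK_VMID_ISSUE_QFPA) {
                puts("would issue QFPA");
                work_port_events &= ~WORKER_CHECK_VMID_ISSUE_QFPA;
        }
        if (work_port_events & WORKER_CHECK_INACTIVE_VMID) {
                puts("would scan for inactive VMIDs");
                work_port_events &= ~WORKER_CHECK_INACTIVE_VMID;
        }
}

int main(void)
{
        vmid_poll(1, 1);
        work_done();
        return 0;
}

Keeping the timer callback this thin is what lets the inactivity scan and the QFPA issue run from worker context rather than timer context.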
@@ -6636,6 +6727,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; + /* for VMID idle timeout if VMID is enabled */ + if (lpfc_is_vmid_enabled(phba)) + timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0); + /* * Initialize the SLI Layer to run with lpfc SLI4 HBAs. */ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 1b40a3bbd1cd..84bc373190d8 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -522,7 +522,8 @@ lpfc_init_link(struct lpfc_hba * phba, } /* Enable asynchronous ABTS responses from firmware */ - mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; + if (phba->sli_rev == LPFC_SLI_REV3 && !phba->cfg_fcp_wait_abts_rsp) + mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; /* NEW_FEATURE * Setting up the link speed @@ -2100,6 +2101,12 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) bf_set(lpfc_mbx_rq_ftr_rq_iaab, &mboxq->u.mqe.un.req_ftrs, 0); bf_set(lpfc_mbx_rq_ftr_rq_iaar, &mboxq->u.mqe.un.req_ftrs, 0); } + + /* Enable Application Services Header for appheader VMID */ + if (phba->cfg_vmid_app_header) { + bf_set(lpfc_mbx_rq_ftr_rq_ashdr, &mboxq->u.mqe.un.req_ftrs, 1); + bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 1); + } return; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index bb4e65a32ecc..e12f83fb795c 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -567,15 +567,24 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* no deferred ACC */ kfree(save_iocb); - /* In order to preserve RPIs, we want to cleanup - * the default RPI the firmware created to rcv - * this ELS request. The only way to do this is - * to register, then unregister the RPI. + /* This is an NPIV SLI4 instance that does not need to register + * a default RPI. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | - NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + if (phba->sli_rev == LPFC_SLI_REV4) { + mempool_free(login_mbox, phba->mbox_mem_pool); + login_mbox = NULL; + } else { + /* In order to preserve RPIs, we want to cleanup + * the default RPI the firmware created to rcv + * this ELS request. The only way to do this is + * to register, then unregister the RPI. + */ + spin_lock_irq(&ndlp->lock); + ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | + NLP_RCV_PLOGI); + spin_unlock_irq(&ndlp->lock); + } + stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, @@ -653,6 +662,10 @@ lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb, ndlp, NULL); } + + /* This nlp_put pairs with lpfc_sli4_resume_rpi */ + lpfc_nlp_put(ndlp); + kfree(elsiocb); mempool_free(mboxq, phba->mbox_mem_pool); } @@ -772,6 +785,15 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); + /* This clause allows the initiator to ACC the LOGO back to the + * Fabric Domain Controller. It does deliberately skip all other + * steps because some fabrics send RDP requests after logging out + * from the initiator. 
+ */ + if (ndlp->nlp_type & NLP_FABRIC && + ((ndlp->nlp_DID & WELL_KNOWN_DID_MASK) != WELL_KNOWN_DID_MASK)) + return 0; + /* Notify transport of connectivity loss to trigger cleanup. */ if (phba->nvmet_support && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) @@ -1410,6 +1432,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, switch (ndlp->nlp_DID) { case NameServer_DID: mbox->mbox_cmpl = lpfc_mbx_cmpl_ns_reg_login; + /* Fabric Controller Node needs these parameters. */ + memcpy(&ndlp->fc_sparam, sp, sizeof(struct serv_parm)); break; case FDMI_DID: mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 41e49f61fac2..bcc804cefd30 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1049,9 +1049,19 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, nCmd->transferred_length = wcqe->total_data_placed; nCmd->rcv_rsplen = wcqe->parameter; nCmd->status = 0; - /* Sanity check */ - if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) + + /* Check if this is really an ERSP */ + if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { + lpfc_ncmd->status = IOSTAT_SUCCESS; + lpfc_ncmd->result = 0; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, + "6084 NVME Completion ERSP: " + "xri %x placed x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + wcqe->total_data_placed); break; + } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6081 NVME Completion Protocol Error: " "xri %x status x%x result x%x " diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index eefbb9b22798..1b248c237be1 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -28,6 +28,7 @@ #include <asm/unaligned.h> #include <linux/t10-pi.h> #include <linux/crc-t10dif.h> +#include <linux/blk-cgroup.h> #include <net/checksum.h> #include <scsi/scsi.h> @@ -86,6 +87,14 @@ static void lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); +static void +lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, + struct lpfc_vmid *vmp); +static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd + *cmd, struct lpfc_vmid *vmp, + union lpfc_vmid_io_tag *tag); +static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, + struct lpfc_vmid *vmid); static inline unsigned lpfc_cmd_blksize(struct scsi_cmnd *sc) @@ -518,6 +527,7 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp; int rrq_empty = 0; struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring; + struct scsi_cmnd *cmd; if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) return; @@ -553,6 +563,31 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, psb->cur_iocbq.sli4_lxritag, rxid, 1); lpfc_sli4_abts_err_handler(phba, ndlp, axri); } + + if (phba->cfg_fcp_wait_abts_rsp) { + spin_lock_irqsave(&psb->buf_lock, iflag); + cmd = psb->pCmd; + psb->pCmd = NULL; + spin_unlock_irqrestore(&psb->buf_lock, iflag); + + /* The sdev is not guaranteed to be valid post + * scsi_done upcall. + */ + if (cmd) + cmd->scsi_done(cmd); + + /* + * We expect there is an abort thread waiting + * for command completion wake up the thread. 
+ */ + spin_lock_irqsave(&psb->buf_lock, iflag); + psb->cur_iocbq.iocb_flag &= + ~LPFC_DRIVER_ABORTED; + if (psb->waitq) + wake_up(psb->waitq); + spin_unlock_irqrestore(&psb->buf_lock, iflag); + } + lpfc_release_scsi_buf_s4(phba, psb); if (rrq_empty) lpfc_worker_wake_up(phba); @@ -780,7 +815,8 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) qp = psb->hdwq; if (psb->flags & LPFC_SBUF_XBUSY) { spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); - psb->pCmd = NULL; + if (!phba->cfg_fcp_wait_abts_rsp) + psb->pCmd = NULL; list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); qp->abts_scsi_io_bufs++; spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); @@ -2869,10 +2905,8 @@ skipit: } out: if (err_type == BGS_GUARD_ERR_MASK) { - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x1); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); + set_host_byte(cmd, DID_ABORT); phba->bg_guard_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9069 BLKGRD: reftag %x grd_tag err %x != %x\n", @@ -2880,10 +2914,8 @@ out: sum, guard_tag); } else if (err_type == BGS_REFTAG_ERR_MASK) { - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x3); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); + set_host_byte(cmd, DID_ABORT); phba->bg_reftag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, @@ -2892,10 +2924,8 @@ out: ref_tag, start_ref_tag); } else if (err_type == BGS_APPTAG_ERR_MASK) { - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x2); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); + set_host_byte(cmd, DID_ABORT); phba->bg_apptag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, @@ -2954,10 +2984,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, if (lpfc_bgs_get_guard_err(bgstat)) { ret = 1; - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x1); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); + set_host_byte(cmd, DID_ABORT); phba->bg_guard_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9059 BLKGRD: Guard Tag error in cmd" @@ -2970,10 +2998,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, if (lpfc_bgs_get_reftag_err(bgstat)) { ret = 1; - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x3); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); + set_host_byte(cmd, DID_ABORT); phba->bg_reftag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, @@ -2987,10 +3013,8 @@ lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, if (lpfc_bgs_get_apptag_err(bgstat)) { ret = 1; - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x2); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); + set_host_byte(cmd, DID_ABORT); phba->bg_apptag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, @@ -3100,10 +3124,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct 
lpfc_io_buf *lpfc_cmd, if (lpfc_bgs_get_guard_err(bgstat)) { ret = 1; - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x1); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); + set_host_byte(cmd, DID_ABORT); phba->bg_guard_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, "9055 BLKGRD: Guard Tag error in cmd " @@ -3116,10 +3138,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, if (lpfc_bgs_get_reftag_err(bgstat)) { ret = 1; - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x3); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); + set_host_byte(cmd, DID_ABORT); phba->bg_reftag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, @@ -3133,10 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, if (lpfc_bgs_get_apptag_err(bgstat)) { ret = 1; - scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, - 0x10, 0x2); - cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | - SAM_STAT_CHECK_CONDITION; + scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); + set_host_byte(cmd, DID_ABORT); phba->bg_apptag_err_cnt++; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, @@ -4045,6 +4063,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, u32 logit = LOG_FCP; u32 status, idx; unsigned long iflags = 0; + u8 wait_xb_clr = 0; /* Sanity check on return of outstanding command */ if (!lpfc_cmd) { @@ -4096,8 +4115,11 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; - if (bf_get(lpfc_wcqe_c_xb, wcqe)) + if (bf_get(lpfc_wcqe_c_xb, wcqe)) { lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + if (phba->cfg_fcp_wait_abts_rsp) + wait_xb_clr = 1; + } #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->prot_data_type) { @@ -4329,6 +4351,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_io_ktime(phba, lpfc_cmd); } #endif + if (wait_xb_clr) + goto out; lpfc_cmd->pCmd = NULL; spin_unlock(&lpfc_cmd->buf_lock); @@ -4343,8 +4367,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; if (lpfc_cmd->waitq) wake_up(lpfc_cmd->waitq); +out: spin_unlock(&lpfc_cmd->buf_lock); - lpfc_release_scsi_buf(phba, lpfc_cmd); } @@ -4398,11 +4422,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK); lpfc_cmd->status = pIocbOut->iocb.ulpStatus; - /* pick up SLI4 exhange busy status from HBA */ + /* pick up SLI4 exchange busy status from HBA */ + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY) lpfc_cmd->flags |= LPFC_SBUF_XBUSY; - else - lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->prot_data_type) { @@ -4601,6 +4624,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_io_ktime(phba, lpfc_cmd); } #endif + /* The sdev is not guaranteed to be valid post scsi_done upcall. 
*/ cmd->scsi_done(cmd); @@ -5145,6 +5169,269 @@ void lpfc_poll_timeout(struct timer_list *t) } } +/* + * lpfc_get_vmid_from_hashtable - search the UUID in the hash table + * @vport: The virtual port for which this call is being executed. + * @hash: calculated hash value + * @buf: uuid associated with the VE + * Return the VMID entry associated with the UUID + * Make sure to acquire the appropriate lock before invoking this routine. + */ +struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport, + u32 hash, u8 *buf) +{ + struct lpfc_vmid *vmp; + + hash_for_each_possible(vport->hash_table, vmp, hnode, hash) { + if (memcmp(&vmp->host_vmid[0], buf, 16) == 0) + return vmp; + } + return NULL; +} + +/* + * lpfc_put_vmid_in_hashtable - put the VMID in the hash table + * @vport: The virtual port for which this call is being executed. + * @hash - calculated hash value + * @vmp: Pointer to a VMID entry representing a VM sending I/O + * + * This routine will insert the newly acquired VMID entity in the hash table. + * Make sure to acquire the appropriate lock before invoking this routine. + */ +static void +lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash, + struct lpfc_vmid *vmp) +{ + hash_add(vport->hash_table, &vmp->hnode, hash); +} + +/* + * lpfc_vmid_hash_fn - create a hash value of the UUID + * @vmid: uuid associated with the VE + * @len: length of the VMID string + * Returns the calculated hash value + */ +int lpfc_vmid_hash_fn(const char *vmid, int len) +{ + int c; + int hash = 0; + + if (len == 0) + return 0; + while (len--) { + c = *vmid++; + if (c >= 'A' && c <= 'Z') + c += 'a' - 'A'; + + hash = (hash + (c << LPFC_VMID_HASH_SHIFT) + + (c >> LPFC_VMID_HASH_SHIFT)) * 19; + } + + return hash & LPFC_VMID_HASH_MASK; +} + +/* + * lpfc_vmid_update_entry - update the vmid entry in the hash table + * @vport: The virtual port for which this call is being executed. + * @cmd: address of scsi cmd descriptor + * @vmp: Pointer to a VMID entry representing a VM sending I/O + * @tag: VMID tag + */ +static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd + *cmd, struct lpfc_vmid *vmp, + union lpfc_vmid_io_tag *tag) +{ + u64 *lta; + + if (vport->vmid_priority_tagging) + tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid; + else + tag->app_id = vmp->un.app_id; + + if (cmd->sc_data_direction == DMA_TO_DEVICE) + vmp->io_wr_cnt++; + else + vmp->io_rd_cnt++; + + /* update the last access timestamp in the table */ + lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id()); + *lta = jiffies; +} + +static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport, + struct lpfc_vmid *vmid) +{ + u32 hash; + struct lpfc_vmid *pvmid; + + if (vport->port_type == LPFC_PHYSICAL_PORT) { + vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); + } else { + hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len); + pvmid = + lpfc_get_vmid_from_hashtable(vport->phba->pport, hash, + vmid->host_vmid); + if (pvmid) + vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid; + else + vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport); + } +} + +/* + * lpfc_vmid_get_appid - get the VMID associated with the UUID + * @vport: The virtual port for which this call is being executed. 
+ * @uuid: UUID associated with the VE + * @cmd: address of scsi_cmd descriptor + * @tag: VMID tag + * Returns status of the function + */ +static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct + scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag) +{ + struct lpfc_vmid *vmp = NULL; + int hash, len, rc, i; + + /* check if QFPA is complete */ + if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag & + LPFC_VMID_QFPA_CMPL)) { + vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA; + return -EAGAIN; + } + + /* search if the UUID has already been mapped to the VMID */ + len = strlen(uuid); + hash = lpfc_vmid_hash_fn(uuid, len); + + /* search for the VMID in the table */ + read_lock(&vport->vmid_lock); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); + + /* if found, check if its already registered */ + if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { + read_unlock(&vport->vmid_lock); + lpfc_vmid_update_entry(vport, cmd, vmp, tag); + rc = 0; + } else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER || + vmp->flag & LPFC_VMID_DE_REGISTER)) { + /* else if register or dereg request has already been sent */ + /* Hence VMID tag will not be added for this I/O */ + read_unlock(&vport->vmid_lock); + rc = -EBUSY; + } else { + /* The VMID was not found in the hashtable. At this point, */ + /* drop the read lock first before proceeding further */ + read_unlock(&vport->vmid_lock); + /* start the process to obtain one as per the */ + /* type of the VMID indicated */ + write_lock(&vport->vmid_lock); + vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid); + + /* while the read lock was released, in case the entry was */ + /* added by other context or is in process of being added */ + if (vmp && vmp->flag & LPFC_VMID_REGISTERED) { + lpfc_vmid_update_entry(vport, cmd, vmp, tag); + write_unlock(&vport->vmid_lock); + return 0; + } else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) { + write_unlock(&vport->vmid_lock); + return -EBUSY; + } + + /* else search and allocate a free slot in the hash table */ + if (vport->cur_vmid_cnt < vport->max_vmid) { + for (i = 0; i < vport->max_vmid; i++) { + vmp = vport->vmid + i; + if (vmp->flag == LPFC_VMID_SLOT_FREE) + break; + } + if (i == vport->max_vmid) + vmp = NULL; + } else { + vmp = NULL; + } + + if (!vmp) { + write_unlock(&vport->vmid_lock); + return -ENOMEM; + } + + /* Add the vmid and register */ + lpfc_put_vmid_in_hashtable(vport, hash, vmp); + vmp->vmid_len = len; + memcpy(vmp->host_vmid, uuid, vmp->vmid_len); + vmp->io_rd_cnt = 0; + vmp->io_wr_cnt = 0; + vmp->flag = LPFC_VMID_SLOT_USED; + + vmp->delete_inactive = + vport->vmid_inactivity_timeout ? 
1 : 0; + + /* if type priority tag, get next available VMID */ + if (lpfc_vmid_is_type_priority_tag(vport)) + lpfc_vmid_assign_cs_ctl(vport, vmp); + + /* allocate the per cpu variable for holding */ + /* the last access time stamp only if VMID is enabled */ + if (!vmp->last_io_time) + vmp->last_io_time = __alloc_percpu(sizeof(u64), + __alignof__(struct + lpfc_vmid)); + if (!vmp->last_io_time) { + hash_del(&vmp->hnode); + vmp->flag = LPFC_VMID_SLOT_FREE; + write_unlock(&vport->vmid_lock); + return -EIO; + } + + write_unlock(&vport->vmid_lock); + + /* complete transaction with switch */ + if (lpfc_vmid_is_type_priority_tag(vport)) + rc = lpfc_vmid_uvem(vport, vmp, true); + else + rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp); + if (!rc) { + write_lock(&vport->vmid_lock); + vport->cur_vmid_cnt++; + vmp->flag |= LPFC_VMID_REQ_REGISTER; + write_unlock(&vport->vmid_lock); + } else { + write_lock(&vport->vmid_lock); + hash_del(&vmp->hnode); + vmp->flag = LPFC_VMID_SLOT_FREE; + free_percpu(vmp->last_io_time); + write_unlock(&vport->vmid_lock); + return -EIO; + } + + /* finally, enable the idle timer once */ + if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { + mod_timer(&vport->phba->inactive_vmid_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); + vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; + } + } + return rc; +} + +/* + * lpfc_is_command_vm_io - get the UUID from blk cgroup + * @cmd: Pointer to scsi_cmnd data structure + * Returns UUID if present, otherwise NULL + */ +static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd) +{ + char *uuid = NULL; + + if (cmd->request) { + if (cmd->request->bio) + uuid = blkcg_get_fc_appid(cmd->request->bio); + } + return uuid; +} + /** * lpfc_queuecommand - scsi_host_template queuecommand entry point * @shost: kernel scsi host pointer. @@ -5168,6 +5455,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) struct lpfc_io_buf *lpfc_cmd; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); int err, idx; + u8 *uuid = NULL; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS uint64_t start = 0L; @@ -5297,6 +5585,25 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) } + /* check the necessary and sufficient condition to support VMID */ + if (lpfc_is_vmid_enabled(phba) && + (ndlp->vmid_support || + phba->pport->vmid_priority_tagging == + LPFC_VMID_PRIO_TAG_ALL_TARGETS)) { + /* is the I/O generated by a VM, get the associated virtual */ + /* entity id */ + uuid = lpfc_is_command_vm_io(cmnd); + + if (uuid) { + err = lpfc_vmid_get_appid(vport, uuid, cmnd, + (union lpfc_vmid_io_tag *) + &lpfc_cmd->cur_iocbq.vmid_tag); + if (!err) + lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID; + } + } + + atomic_inc(&ndlp->cmd_pending); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO)) this_cpu_inc(phba->sli4_hba.c_stat->xmt_io); @@ -5384,6 +5691,31 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) return 0; } +/* + * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport + * @vport: The virtual port for which this call is being executed. 
+ */ +void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport) +{ + u32 bucket; + struct lpfc_vmid *cur; + + if (vport->port_type == LPFC_PHYSICAL_PORT) + del_timer_sync(&vport->phba->inactive_vmid_poll); + + kfree(vport->qfpa_res); + kfree(vport->vmid_priority.vmid_range); + kfree(vport->vmid); + + if (!hash_empty(vport->hash_table)) + hash_for_each(vport->hash_table, bucket, cur, hnode) + hash_del(&cur->hnode); + + vport->qfpa_res = NULL; + vport->vmid_priority.vmid_range = NULL; + vport->vmid = NULL; + vport->cur_vmid_cnt = 0; +} /** * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index fc3682f15f50..f530d8fe7a8c 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2679,6 +2679,12 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } } + /* This nlp_put pairs with lpfc_sli4_resume_rpi */ + if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) { + ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; + lpfc_nlp_put(ndlp); + } + /* Check security permission status on INIT_LINK mailbox command */ if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) @@ -2749,7 +2755,6 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } else { __lpfc_sli_rpi_release(vport, ndlp); } - lpfc_nlp_put(ndlp); } } @@ -7694,6 +7699,15 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + /* Disable VMID if app header is not supported */ + if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr, + &mqe->un.req_ftrs))) { + bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0); + phba->cfg_vmid_app_header = 0; + lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI, + "1242 vmid feature not supported\n"); + } + /* * The port must support FCP initiator mode as this is the * only mode running in the host. 
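lpfc_vmid_get_appid() in the lpfc_scsi.c hunk above first searches the hash table under the read side of vmid_lock and, if the UUID is absent, retakes the lock for writing and searches again before claiming a free slot, because another context may have inserted the same UUID while the lock was dropped. A compact illustration of that re-check-under-write-lock pattern, using POSIX rwlocks and a hypothetical linked-list table rather than the driver's kernel hashtable and lpfc_vmid slots:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
        char uuid[37];
        struct entry *next;
};

static struct entry *table;
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct entry *lookup(const char *uuid)
{
        struct entry *e;

        for (e = table; e; e = e->next)
                if (!strcmp(e->uuid, uuid))
                        return e;
        return NULL;
}

static struct entry *get_or_add(const char *uuid)
{
        struct entry *e;

        pthread_rwlock_rdlock(&table_lock);
        e = lookup(uuid);
        pthread_rwlock_unlock(&table_lock);
        if (e)
                return e;

        pthread_rwlock_wrlock(&table_lock);
        e = lookup(uuid);       /* re-check: another context may have won */
        if (!e) {
                e = calloc(1, sizeof(*e));
                if (e) {
                        strncpy(e->uuid, uuid, sizeof(e->uuid) - 1);
                        e->next = table;
                        table = e;
                }
        }
        pthread_rwlock_unlock(&table_lock);
        return e;
}

int main(void)
{
        return get_or_add("11111111-2222-3333-4444-555555555555") ? 0 : 1;
}

The same discipline is why the driver re-tests LPFC_VMID_REGISTERED and LPFC_VMID_REQ_REGISTER after reacquiring vmid_lock for writing.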
@@ -7959,7 +7973,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "0393 Error %d during rpi post operation\n", rc); rc = -ENODEV; - goto out_destroy_queue; + goto out_free_iocblist; } lpfc_sli4_node_prep(phba); @@ -8125,8 +8139,9 @@ out_io_buff_free: out_unset_queue: /* Unset all the queues set up in this routine when error out */ lpfc_sli4_queue_unset(phba); -out_destroy_queue: +out_free_iocblist: lpfc_free_iocb_list(phba); +out_destroy_queue: lpfc_sli4_queue_destroy(phba); out_stop_timers: lpfc_stop_hba_timers(phba); @@ -9751,6 +9766,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, *pcmd == ELS_CMD_RSCN_XMT || *pcmd == ELS_CMD_FDISC || *pcmd == ELS_CMD_LOGO || + *pcmd == ELS_CMD_QFPA || + *pcmd == ELS_CMD_UVEM || *pcmd == ELS_CMD_PLOGI)) { bf_set(els_req64_sp, &wqe->els_req, 1); bf_set(els_req64_sid, &wqe->els_req, @@ -10313,6 +10330,18 @@ __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number, bf_set(wqe_wqes, &wqe->generic.wqe_com, 0); } + /* add the VMID tags as per switch response */ + if (unlikely(piocb->iocb_flag & LPFC_IO_VMID)) { + if (phba->pport->vmid_priority_tagging) { + bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, + (piocb->vmid_tag.cs_ctl_vmid)); + } else { + bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); + wqe->words[31] = piocb->vmid_tag.app_id; + } + } rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb); return rc; } @@ -13625,9 +13654,15 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) if (mcqe_status == MB_CQE_STATUS_SUCCESS) { mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; - /* Reg_LOGIN of dflt RPI was successful. Now lets get - * RID of the PPI using the same mbox buffer. + + /* Reg_LOGIN of dflt RPI was successful. Mark the + * node as having an UNREG_LOGIN in progress to stop + * an unsolicited PLOGI from the same NPortId from + * starting another mailbox transaction. */ + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag |= NLP_UNREG_INP; + spin_unlock_irqrestore(&ndlp->lock, iflags); lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; @@ -17943,7 +17978,6 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) seq_dmabuf->time_stamp = jiffies; lpfc_update_rcv_time_stamp(vport); if (list_empty(&seq_dmabuf->dbuf.list)) { - temp_hdr = dmabuf->hbuf.virt; list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); return seq_dmabuf; } @@ -19032,14 +19066,28 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, if (!mboxq) return -ENOMEM; + /* If cmpl assigned, then this nlp_get pairs with + * lpfc_mbx_cmpl_resume_rpi. + * + * Else cmpl is NULL, then this nlp_get pairs with + * lpfc_sli_def_mbox_cmpl. + */ + if (!lpfc_nlp_get(ndlp)) { + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2122 %s: Failed to get nlp ref\n", + __func__); + mempool_free(mboxq, phba->mbox_mem_pool); + return -EIO; + } + /* Post all rpi memory regions to the port. 
*/ lpfc_resume_rpi(mboxq, ndlp); if (cmpl) { mboxq->mbox_cmpl = cmpl; mboxq->ctx_buf = arg; - mboxq->ctx_ndlp = ndlp; } else mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mboxq->ctx_ndlp = ndlp; mboxq->vport = ndlp->vport; rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { @@ -19047,6 +19095,7 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, "2010 Resume RPI Mailbox failed " "status %d, mbxStatus x%x\n", rc, bf_get(lpfc_mqe_status, &mboxq->u.mqe)); + lpfc_nlp_put(ndlp); mempool_free(mboxq, phba->mbox_mem_pool); return -EIO; } @@ -20136,8 +20185,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) (mb->u.mb.mbxCommand != MBX_REG_VPI)) continue; - list_del(&mb->list); - list_add_tail(&mb->list, &mbox_cmd_list); + list_move_tail(&mb->list, &mbox_cmd_list); } /* Clean up active mailbox command with the vport */ mb = phba->sli.mbox_active; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 4f6936014ff5..dde8eb9d796d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -35,6 +35,12 @@ typedef enum _lpfc_ctx_cmd { LPFC_CTX_HOST } lpfc_ctx_cmd; +union lpfc_vmid_iocb_tag { + uint32_t app_id; + uint8_t cs_ctl_vmid; + struct lpfc_vmid_context *vmid_context; /* UVEM context information */ +}; + struct lpfc_cq_event { struct list_head list; uint16_t hdwq; @@ -100,12 +106,14 @@ struct lpfc_iocbq { #define LPFC_IO_NVME 0x200000 /* NVME FCP command */ #define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */ #define LPFC_IO_NVMET 0x800000 /* NVMET command */ +#define LPFC_IO_VMID 0x1000000 /* VMID tagged IO */ uint32_t drvrTimeout; /* driver timeout in seconds */ struct lpfc_vport *vport;/* virtual port pointer */ void *context1; /* caller context information */ void *context2; /* caller context information */ void *context3; /* caller context information */ + uint32_t event_tag; /* LA Event tag */ union { wait_queue_head_t *wait_queue; struct lpfc_iocbq *rsp_iocb; @@ -114,6 +122,7 @@ struct lpfc_iocbq { struct lpfc_node_rrq *rrq; } context_un; + union lpfc_vmid_iocb_tag vmid_tag; void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *, diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4b8e89375644..2d62fd2a9824 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.8.0.9" +#define LPFC_DRIVER_VERSION "12.8.0.10" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ |
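Finally, for a feel of how a VM UUID maps into one of the 256 buckets of vport->hash_table, the hash routine introduced in the lpfc_scsi.c hunk can be compiled stand-alone; the constants match lpfc.h and the sample UUID below is made up:

#include <stdio.h>
#include <string.h>

#define LPFC_VMID_HASH_MASK  255
#define LPFC_VMID_HASH_SHIFT 6

/* same arithmetic as lpfc_vmid_hash_fn() in the patch */
static int vmid_hash(const char *vmid, int len)
{
        int c;
        int hash = 0;

        while (len--) {
                c = *vmid++;
                if (c >= 'A' && c <= 'Z')
                        c += 'a' - 'A';         /* case-fold the UUID */
                hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
                        (c >> LPFC_VMID_HASH_SHIFT)) * 19;
        }
        return hash & LPFC_VMID_HASH_MASK;
}

int main(void)
{
        const char *uuid = "12345678-1234-1234-1234-123456789abc"; /* made up */

        printf("bucket %d\n", vmid_hash(uuid, (int)strlen(uuid)));
        return 0;
}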