author     Thomas Gleixner <tglx@linutronix.de>   2017-03-17 20:34:30 +0100
committer  Thomas Gleixner <tglx@linutronix.de>   2017-03-17 20:34:30 +0100
commit     79a21d572cf66968a2272fdf9711f835518256d9 (patch)
tree       5fe3e4692fb8375faf8e1aeea1c2eae38c342250 /drivers/scsi
parent     efi/arm: Fix boot crash with CONFIG_CPUMASK_OFFSTACK=y (diff)
parent     efi/esrt: Cleanup bad memory map log messages (diff)
download   linux-79a21d572cf66968a2272fdf9711f835518256d9.tar.xz
           linux-79a21d572cf66968a2272fdf9711f835518256d9.zip
Merge tag 'efi-urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi into efi/urgent
Pull a single UEFI fix from Ard:

 - Reduce the severity of the notice that appears when the ESRT table
   points to memory that is not covered by the memory map. It is
   scaring our users and interfering with their nice splash screens.
   Note that the ESRT may still be perfectly usable, and is currently
   (to my knowledge) not widely used to begin with.
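For context, "reducing the severity" of a kernel notice simply means switching the printk log level the message is emitted at. A minimal illustrative sketch in C (the message text and the exact levels shown here are assumptions for illustration, not the actual drivers/firmware/efi/esrt.c change):

	/* Before: emitted at error severity, which alarms users during boot */
	pr_err("ESRT header is not in the memory map.\n");

	/* After: the same diagnostic, demoted to a quieter severity */
	pr_info("ESRT header is not in the memory map.\n");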
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/Kconfig | 4
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 59
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 107
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 2
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 2
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 118
-rw-r--r--  drivers/scsi/aacraid/linit.c | 47
-rw-r--r--  drivers/scsi/aacraid/rx.c | 2
-rw-r--r--  drivers/scsi/aacraid/src.c | 48
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 2
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h | 2
-rw-r--r--  drivers/scsi/cxlflash/main.c | 4
-rw-r--r--  drivers/scsi/cxlflash/main.h | 1
-rw-r--r--  drivers/scsi/cxlflash/superpipe.c | 8
-rw-r--r--  drivers/scsi/cxlflash/vlun.c | 4
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 16
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_emc.c | 9
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_hp_sw.c | 10
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 7
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 2
-rw-r--r--  drivers/scsi/libfc/fc_rport.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 1
-rw-r--r--  drivers/scsi/lpfc/Makefile | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 162
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 570
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_compat.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 68
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 388
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 2300
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.h | 291
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 24
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 366
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 363
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 86
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 506
-rw-r--r--  drivers/scsi/lpfc/lpfc_ids.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 2831
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 114
-rw-r--r--  drivers/scsi/lpfc/lpfc_mem.c | 278
-rw-r--r--  drivers/scsi/lpfc/lpfc_nl.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 257
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 2464
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.h | 103
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 1986
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.h | 116
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 111
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.h | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 2189
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 42
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 98
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 22
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.h | 4
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 105
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 2
-rw-r--r--  drivers/scsi/osd/osd_uld.c | 3
-rw-r--r--  drivers/scsi/osst.c | 2
-rw-r--r--  drivers/scsi/qedf/Kconfig | 11
-rw-r--r--  drivers/scsi/qedf/Makefile | 5
-rw-r--r--  drivers/scsi/qedf/qedf.h | 545
-rw-r--r--  drivers/scsi/qedf/qedf_attr.c | 165
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.c | 195
-rw-r--r--  drivers/scsi/qedf/qedf_dbg.h | 154
-rw-r--r--  drivers/scsi/qedf/qedf_debugfs.c | 460
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 949
-rw-r--r--  drivers/scsi/qedf/qedf_fip.c | 269
-rw-r--r--  drivers/scsi/qedf/qedf_hsi.h | 422
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 2282
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 3336
-rw-r--r--  drivers/scsi/qedf/qedf_version.h | 15
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 3
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 306
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 106
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 72
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 726
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 1596
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 167
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 324
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 232
-rw-r--r--  drivers/scsi/qla2xxx/qla_mr.c | 48
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 346
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2356
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 252
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 258
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h | 4
-rw-r--r--  drivers/scsi/scsi_common.c | 4
-rw-r--r--  drivers/scsi/scsi_dh.c | 22
-rw-r--r--  drivers/scsi/scsi_ioctl.c | 3
-rw-r--r--  drivers/scsi/scsi_lib.c | 113
-rw-r--r--  drivers/scsi/scsi_transport_spi.c | 24
-rw-r--r--  drivers/scsi/sd.c | 75
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 8
-rw-r--r--  drivers/scsi/sr_ioctl.c | 19
-rw-r--r--  drivers/scsi/st.c | 2
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 12
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 10
-rw-r--r--  drivers/scsi/virtio_scsi.c | 127
107 files changed, 26655 insertions, 5842 deletions
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d4023bf1e739..230043c1c90f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1235,11 +1235,13 @@ config SCSI_QLOGICPTI
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
source "drivers/scsi/qedi/Kconfig"
+source "drivers/scsi/qedf/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
+ depends on NVME_FC && NVME_TARGET_FC
select CRC_T10DIF
help
This lpfc driver supports the Emulex LightPulse
@@ -1478,7 +1480,7 @@ config ATARI_SCSI
config MAC_SCSI
tristate "Macintosh NCR5380 SCSI"
- depends on MAC && SCSI=y
+ depends on MAC && SCSI
select SCSI_SPI_ATTRS
help
This is the NCR 5380 SCSI controller included on most of the 68030
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 736b77414a4b..fc2855565a51 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_FCOE_FNIC) += fnic/
obj-$(CONFIG_SCSI_SNIC) += snic/
obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/
+obj-$(CONFIG_QEDF) += qedf/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 907f1e80665b..e3e93def722b 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -294,6 +294,10 @@ MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
"deregistering them. This is typically adjusted for heavily burdened"
" systems.");
+int aac_fib_dump;
+module_param(aac_fib_dump, int, 0644);
+MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");
+
int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
@@ -311,7 +315,7 @@ module_param(update_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
" updates issued to adapter.");
-int check_interval = 24 * 60 * 60;
+int check_interval = 60;
module_param(check_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
" checks.");
@@ -483,7 +487,7 @@ int aac_get_containers(struct aac_dev *dev)
if (status >= 0) {
dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
- if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ if (fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_SUPPORTED_240_VOLUMES) {
maximum_num_containers =
le32_to_cpu(dresp->MaxSimpleVolumes);
@@ -639,13 +643,16 @@ static void _aac_probe_container2(void * context, struct fib * fibptr)
fsa_dev_ptr = fibptr->dev->fsa_dev;
if (fsa_dev_ptr) {
struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
+ __le32 sup_options2;
+
fsa_dev_ptr += scmd_id(scsicmd);
+ sup_options2 =
+ fibptr->dev->supplement_adapter_info.supported_options2;
if ((le32_to_cpu(dresp->status) == ST_OK) &&
(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
(le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
- if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
- AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
+ if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
fsa_dev_ptr->block_size = 0x200;
} else {
@@ -688,7 +695,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
int status;
dresp = (struct aac_mount *) fib_data(fibptr);
- if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ if (!(fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE))
dresp->mnt[0].capacityhigh = 0;
if ((le32_to_cpu(dresp->status) != ST_OK) ||
@@ -705,7 +712,7 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
dinfo = (struct aac_query_mount *)fib_data(fibptr);
- if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ if (fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)
dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
else
@@ -745,7 +752,7 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru
dinfo = (struct aac_query_mount *)fib_data(fibptr);
- if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
+ if (fibptr->dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_VARIABLE_BLOCK_SIZE)
dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
else
@@ -896,12 +903,14 @@ char * get_container_type(unsigned tindex)
static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
struct scsi_inq *str;
+ struct aac_supplement_adapter_info *sup_adap_info;
+ sup_adap_info = &dev->supplement_adapter_info;
str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
memset(str, ' ', sizeof(*str));
- if (dev->supplement_adapter_info.AdapterTypeText[0]) {
- char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ if (sup_adap_info->adapter_type_text[0]) {
+ char *cp = sup_adap_info->adapter_type_text;
int c;
if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
inqstrcpy("SMC", str->vid);
@@ -911,8 +920,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
++cp;
c = *cp;
*cp = '\0';
- inqstrcpy (dev->supplement_adapter_info.AdapterTypeText,
- str->vid);
+ inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
*cp = c;
while (*cp && *cp != ' ')
++cp;
@@ -1675,8 +1683,8 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
if (!identify_resp)
goto fib_free_ptr;
- vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
- vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
+ vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
+ vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
aac_fib_init(fibptr);
@@ -1815,9 +1823,9 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
}
vbus = (u32) le16_to_cpu(
- dev->supplement_adapter_info.VirtDeviceBus);
+ dev->supplement_adapter_info.virt_device_bus);
vid = (u32) le16_to_cpu(
- dev->supplement_adapter_info.VirtDeviceTarget);
+ dev->supplement_adapter_info.virt_device_target);
aac_fib_init(fibptr);
@@ -1893,7 +1901,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
memcpy(&dev->adapter_info, info, sizeof(*info));
- dev->supplement_adapter_info.VirtDeviceBus = 0xffff;
+ dev->supplement_adapter_info.virt_device_bus = 0xffff;
if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
struct aac_supplement_adapter_info * sinfo;
@@ -1961,7 +1969,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if (!dev->sync_mode && dev->sa_firmware &&
- dev->supplement_adapter_info.VirtDeviceBus != 0xffff) {
+ dev->supplement_adapter_info.virt_device_bus != 0xffff) {
/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT);
}
@@ -1976,8 +1984,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
(tmp>>16)&0xff,
tmp&0xff,
le32_to_cpu(dev->adapter_info.kernelbuild),
- (int)sizeof(dev->supplement_adapter_info.BuildDate),
- dev->supplement_adapter_info.BuildDate);
+ (int)sizeof(dev->supplement_adapter_info.build_date),
+ dev->supplement_adapter_info.build_date);
tmp = le32_to_cpu(dev->adapter_info.monitorrev);
printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
dev->name, dev->id,
@@ -1993,14 +2001,15 @@ int aac_get_adapter_info(struct aac_dev* dev)
shost_to_class(dev->scsi_host_ptr), buffer))
printk(KERN_INFO "%s%d: serial %s",
dev->name, dev->id, buffer);
- if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
+ if (dev->supplement_adapter_info.vpd_info.tsid[0]) {
printk(KERN_INFO "%s%d: TSID %.*s\n",
dev->name, dev->id,
- (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
- dev->supplement_adapter_info.VpdInfo.Tsid);
+ (int)sizeof(dev->supplement_adapter_info
+ .vpd_info.tsid),
+ dev->supplement_adapter_info.vpd_info.tsid);
}
if (!aac_check_reset || ((aac_check_reset == 1) &&
- (dev->supplement_adapter_info.SupportedOptions2 &
+ (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_IGNORE_RESET))) {
printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
dev->name, dev->id);
@@ -2008,7 +2017,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
dev->cache_protected = 0;
- dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
+ dev->jbod = ((dev->supplement_adapter_info.feature_bits &
AAC_FEATURE_JBOD) != 0);
dev->nondasd_support = 0;
dev->raid_scsi_mode = 0;
@@ -2631,7 +2640,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
struct scsi_device *sdev = scsicmd->device;
struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
- if (!(aac->supplement_adapter_info.SupportedOptions2 &
+ if (!(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)) {
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
SAM_STAT_GOOD;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index f2344971e3cb..d036a806f31c 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -97,7 +97,7 @@ enum {
#define PMC_GLOBAL_INT_BIT0 0x00000001
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50740
+# define AAC_DRIVER_BUILD 50792
# define AAC_DRIVER_BRANCH "-custom"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -1380,57 +1380,57 @@ struct aac_adapter_info
struct aac_supplement_adapter_info
{
- u8 AdapterTypeText[17+1];
- u8 Pad[2];
- __le32 FlashMemoryByteSize;
- __le32 FlashImageId;
- __le32 MaxNumberPorts;
- __le32 Version;
- __le32 FeatureBits;
- u8 SlotNumber;
- u8 ReservedPad0[3];
- u8 BuildDate[12];
- __le32 CurrentNumberPorts;
+ u8 adapter_type_text[17+1];
+ u8 pad[2];
+ __le32 flash_memory_byte_size;
+ __le32 flash_image_id;
+ __le32 max_number_ports;
+ __le32 version;
+ __le32 feature_bits;
+ u8 slot_number;
+ u8 reserved_pad0[3];
+ u8 build_date[12];
+ __le32 current_number_ports;
struct {
- u8 AssemblyPn[8];
- u8 FruPn[8];
- u8 BatteryFruPn[8];
- u8 EcVersionString[8];
- u8 Tsid[12];
- } VpdInfo;
- __le32 FlashFirmwareRevision;
- __le32 FlashFirmwareBuild;
- __le32 RaidTypeMorphOptions;
- __le32 FlashFirmwareBootRevision;
- __le32 FlashFirmwareBootBuild;
- u8 MfgPcbaSerialNo[12];
- u8 MfgWWNName[8];
- __le32 SupportedOptions2;
- __le32 StructExpansion;
+ u8 assembly_pn[8];
+ u8 fru_pn[8];
+ u8 battery_fru_pn[8];
+ u8 ec_version_string[8];
+ u8 tsid[12];
+ } vpd_info;
+ __le32 flash_firmware_revision;
+ __le32 flash_firmware_build;
+ __le32 raid_type_morph_options;
+ __le32 flash_firmware_boot_revision;
+ __le32 flash_firmware_boot_build;
+ u8 mfg_pcba_serial_no[12];
+ u8 mfg_wwn_name[8];
+ __le32 supported_options2;
+ __le32 struct_expansion;
/* StructExpansion == 1 */
- __le32 FeatureBits3;
- __le32 SupportedPerformanceModes;
- u8 HostBusType; /* uses HOST_BUS_TYPE_xxx defines */
- u8 HostBusWidth; /* actual width in bits or links */
- u16 HostBusSpeed; /* actual bus speed/link rate in MHz */
- u8 MaxRRCDrives; /* max. number of ITP-RRC drives/pool */
- u8 MaxDiskXtasks; /* max. possible num of DiskX Tasks */
-
- u8 CpldVerLoaded;
- u8 CpldVerInFlash;
-
- __le64 MaxRRCCapacity;
- __le32 CompiledMaxHistLogLevel;
- u8 CustomBoardName[12];
- u16 SupportedCntlrMode; /* identify supported controller mode */
- u16 ReservedForFuture16;
- __le32 SupportedOptions3; /* reserved for future options */
-
- __le16 VirtDeviceBus; /* virt. SCSI device for Thor */
- __le16 VirtDeviceTarget;
- __le16 VirtDeviceLUN;
- __le16 Unused;
- __le32 ReservedForFutureGrowth[68];
+ __le32 feature_bits3;
+ __le32 supported_performance_modes;
+ u8 host_bus_type; /* uses HOST_BUS_TYPE_xxx defines */
+ u8 host_bus_width; /* actual width in bits or links */
+ u16 host_bus_speed; /* actual bus speed/link rate in MHz */
+ u8 max_rrc_drives; /* max. number of ITP-RRC drives/pool */
+ u8 max_disk_xtasks; /* max. possible num of DiskX Tasks */
+
+ u8 cpld_ver_loaded;
+ u8 cpld_ver_in_flash;
+
+ __le64 max_rrc_capacity;
+ __le32 compiled_max_hist_log_level;
+ u8 custom_board_name[12];
+ u16 supported_cntlr_mode; /* identify supported controller mode */
+ u16 reserved_for_future16;
+ __le32 supported_options3; /* reserved for future options */
+
+ __le16 virt_device_bus; /* virt. SCSI device for Thor */
+ __le16 virt_device_target;
+ __le16 virt_device_lun;
+ __le16 unused;
+ __le32 reserved_for_future_growth[68];
};
#define AAC_FEATURE_FALCON cpu_to_le32(0x00000010)
@@ -1444,6 +1444,10 @@ struct aac_supplement_adapter_info
#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000)
/* 240 simple volume support */
#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000)
+/*
+ * Supports FIB dump sync command send prior to IOP_RESET
+ */
+#define AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP cpu_to_le32(0x00004000)
#define AAC_SIS_VERSION_V3 3
#define AAC_SIS_SLOT_UNKNOWN 0xFF
@@ -2483,6 +2487,7 @@ struct aac_hba_info {
#define GET_DRIVER_BUFFER_PROPERTIES 0x00000023
#define RCV_TEMP_READINGS 0x00000025
#define GET_COMM_PREFERRED_SETTINGS 0x00000026
+#define IOP_RESET_FW_FIB_DUMP 0x00000034
#define IOP_RESET 0x00001000
#define IOP_RESET_ALWAYS 0x00001001
#define RE_INIT_ADAPTER 0x000000ee
@@ -2639,6 +2644,7 @@ void aac_hba_callback(void *context, struct fib *fibptr);
#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
struct aac_dev *aac_init_adapter(struct aac_dev *dev);
void aac_src_access_devreg(struct aac_dev *dev, int mode);
+void aac_set_intx_mode(struct aac_dev *dev);
int aac_get_config_status(struct aac_dev *dev, int commit_flag);
int aac_get_containers(struct aac_dev *dev);
int aac_scsi_cmd(struct scsi_cmnd *cmd);
@@ -2685,4 +2691,5 @@ extern int aac_commit;
extern int update_interval;
extern int check_interval;
extern int aac_check_reset;
+extern int aac_fib_dump;
#endif
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 614842a9eb07..f6afd50579c0 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -580,7 +580,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
- chn = aac_logical_to_phys(user_srbcmd->channel);
+ chn = user_srbcmd->channel;
if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
dev->hba_map[chn][user_srbcmd->id].devtype ==
AAC_DEVTYPE_NATIVE_RAW) {
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 40bfc57b6849..35607005f7e1 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -330,7 +330,7 @@ int aac_send_shutdown(struct aac_dev * dev)
dev->pdev->device == PMC_DEVICE_S8 ||
dev->pdev->device == PMC_DEVICE_S9) &&
dev->msi_enabled)
- aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+ aac_set_intx_mode(dev);
return status;
}
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 969727b67cdd..a3ad04293487 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -95,12 +95,20 @@ static int fib_map_alloc(struct aac_dev *dev)
void aac_fib_map_free(struct aac_dev *dev)
{
- if (dev->hw_fib_va && dev->max_cmd_size) {
- pci_free_consistent(dev->pdev,
- (dev->max_cmd_size *
- (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
- dev->hw_fib_va, dev->hw_fib_pa);
- }
+ size_t alloc_size;
+ size_t fib_size;
+ int num_fibs;
+
+ if(!dev->hw_fib_va || !dev->max_cmd_size)
+ return;
+
+ num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
+ fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+ alloc_size = fib_size * num_fibs + ALIGN32 - 1;
+
+ pci_free_consistent(dev->pdev, alloc_size, dev->hw_fib_va,
+ dev->hw_fib_pa);
+
dev->hw_fib_va = NULL;
dev->hw_fib_pa = 0;
}
@@ -153,22 +161,20 @@ int aac_fib_setup(struct aac_dev * dev)
if (i<0)
return -ENOMEM;
- /* 32 byte alignment for PMC */
- hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
- dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
- (hw_fib_pa - dev->hw_fib_pa));
- dev->hw_fib_pa = hw_fib_pa;
memset(dev->hw_fib_va, 0,
(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+ /* 32 byte alignment for PMC */
+ hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+ hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ (hw_fib_pa - dev->hw_fib_pa));
+
/* add Xport header */
- dev->hw_fib_va = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+ hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
sizeof(struct aac_fib_xporthdr));
- dev->hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+ hw_fib_pa += sizeof(struct aac_fib_xporthdr);
- hw_fib = dev->hw_fib_va;
- hw_fib_pa = dev->hw_fib_pa;
/*
* Initialise the fibs
*/
@@ -461,6 +467,35 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
return 0;
}
+#ifdef CONFIG_EEH
+static inline int aac_check_eeh_failure(struct aac_dev *dev)
+{
+ /* Check for an EEH failure for the given
+ * device node. Function eeh_dev_check_failure()
+ * returns 0 if there has not been an EEH error
+ * otherwise returns a non-zero value.
+ *
+ * Need to be called before any PCI operation,
+ * i.e.,before aac_adapter_check_health()
+ */
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(dev->pdev);
+
+ if (eeh_dev_check_failure(edev)) {
+ /* The EEH mechanisms will handle this
+ * error and reset the device if
+ * necessary.
+ */
+ return 1;
+ }
+ return 0;
+}
+#else
+static inline int aac_check_eeh_failure(struct aac_dev *dev)
+{
+ return 0;
+}
+#endif
+
/*
* Define the highest level of host to adapter communication routines.
* These routines will support host to adapter FS commuication. These
@@ -496,9 +531,12 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
unsigned long mflags = 0;
unsigned long sflags = 0;
-
if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
return -EBUSY;
+
+ if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
+ return -EINVAL;
+
/*
* There are 5 cases with the wait and response requested flags.
* The only invalid cases are if the caller requests to wait and
@@ -662,6 +700,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
return -ETIMEDOUT;
}
+
+ if (aac_check_eeh_failure(dev))
+ return -EFAULT;
+
if ((blink = aac_adapter_check_health(dev)) > 0) {
if (wait == -1) {
printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
@@ -755,7 +797,12 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
FIB_COUNTER_INCREMENT(aac_config.NativeSent);
if (wait) {
+
spin_unlock_irqrestore(&fibptr->event_lock, flags);
+
+ if (aac_check_eeh_failure(dev))
+ return -EFAULT;
+
/* Only set for first known interruptable command */
if (down_interruptible(&fibptr->event_wait)) {
fibptr->done = 2;
@@ -1590,11 +1637,29 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
command->scsi_done(command);
}
+ /*
+ * Any Device that was already marked offline needs to be cleaned up
+ */
+ __shost_for_each_device(dev, host) {
+ if (!scsi_device_online(dev)) {
+ sdev_printk(KERN_INFO, dev, "Removing offline device\n");
+ scsi_remove_device(dev);
+ scsi_device_put(dev);
+ }
+ }
retval = 0;
out:
aac->in_reset = 0;
scsi_unblock_requests(host);
+ /*
+ * Issue bus rescan to catch any configuration that might have
+ * occurred
+ */
+ if (!retval) {
+ dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
+ scsi_scan_host(host);
+ }
if (jafo) {
spin_lock_irq(host->host_lock);
}
@@ -1815,7 +1880,7 @@ int aac_check_health(struct aac_dev * aac)
printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
if (!aac_check_reset || ((aac_check_reset == 1) &&
- (aac->supplement_adapter_info.SupportedOptions2 &
+ (aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_IGNORE_RESET)))
goto out;
host = aac->scsi_host_ptr;
@@ -1843,9 +1908,6 @@ static void aac_resolve_luns(struct aac_dev *dev)
for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
for (target = 0; target < AAC_MAX_TARGETS; target++) {
- if (aac_phys_to_logical(bus) == ENCLOSURE_CHANNEL)
- continue;
-
if (bus == CONTAINER_CHANNEL)
channel = CONTAINER_CHANNEL;
else
@@ -1857,7 +1919,7 @@ static void aac_resolve_luns(struct aac_dev *dev)
sdev = scsi_device_lookup(dev->scsi_host_ptr, channel,
target, 0);
- if (!sdev && devtype)
+ if (!sdev && new_devtype)
scsi_add_device(dev->scsi_host_ptr, channel,
target, 0);
else if (sdev && new_devtype != devtype)
@@ -2150,7 +2212,7 @@ static void aac_process_events(struct aac_dev *dev)
/* Thor AIF */
aac_handle_sa_aif(dev, fib);
aac_fib_adapter_complete(fib, (u16)sizeof(u32));
- continue;
+ goto free_fib;
}
/*
* We will process the FIB here or pass it to a
@@ -2264,8 +2326,8 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
aac_fib_init(fibptr);
- vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceBus);
- vid = (u32)le16_to_cpu(dev->supplement_adapter_info.VirtDeviceTarget);
+ vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
+ vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
srbcmd = (struct aac_srb *)fib_data(fibptr);
@@ -2434,7 +2496,7 @@ int aac_command_thread(void *data)
/* Don't even try to talk to adapter if its sick */
ret = aac_check_health(dev);
- if (!dev->queues)
+ if (ret || !dev->queues)
break;
next_check_jiffies = jiffies
+ ((long)(unsigned)check_interval)
@@ -2446,8 +2508,7 @@ int aac_command_thread(void *data)
&& (now.tv_usec > (1000000 / HZ)))
difference = (((1000000 - now.tv_usec) * HZ)
+ 500000) / 1000000;
- else if (ret == 0) {
-
+ else {
if (now.tv_usec > 500000)
++now.tv_sec;
@@ -2458,9 +2519,6 @@ int aac_command_thread(void *data)
ret = aac_send_hosttime(dev, &now);
difference = (long)(unsigned)update_interval*HZ;
- } else {
- /* retry shortly */
- difference = 10 * HZ;
}
next_jiffies = jiffies + difference;
if (time_before(next_check_jiffies,next_jiffies))
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 838347c44f32..520ada8266af 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -891,13 +891,13 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
* Adapters that support a register, instead of a commanded,
* reset.
*/
- if (((aac->supplement_adapter_info.SupportedOptions2 &
+ if (((aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_MU_RESET) ||
- (aac->supplement_adapter_info.SupportedOptions2 &
+ (aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_DOORBELL_RESET)) &&
aac_check_reset &&
((aac_check_reset != 1) ||
- !(aac->supplement_adapter_info.SupportedOptions2 &
+ !(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_IGNORE_RESET))) {
/* Bypass wait for command quiesce */
aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
@@ -1029,8 +1029,8 @@ static ssize_t aac_show_model(struct device *device,
struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
int len;
- if (dev->supplement_adapter_info.AdapterTypeText[0]) {
- char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ if (dev->supplement_adapter_info.adapter_type_text[0]) {
+ char *cp = dev->supplement_adapter_info.adapter_type_text;
while (*cp && *cp != ' ')
++cp;
while (*cp == ' ')
@@ -1046,18 +1046,20 @@ static ssize_t aac_show_vendor(struct device *device,
struct device_attribute *attr, char *buf)
{
struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+ struct aac_supplement_adapter_info *sup_adap_info;
int len;
- if (dev->supplement_adapter_info.AdapterTypeText[0]) {
- char * cp = dev->supplement_adapter_info.AdapterTypeText;
+ sup_adap_info = &dev->supplement_adapter_info;
+ if (sup_adap_info->adapter_type_text[0]) {
+ char *cp = sup_adap_info->adapter_type_text;
while (*cp && *cp != ' ')
++cp;
len = snprintf(buf, PAGE_SIZE, "%.*s\n",
- (int)(cp - (char *)dev->supplement_adapter_info.AdapterTypeText),
- dev->supplement_adapter_info.AdapterTypeText);
+ (int)(cp - (char *)sup_adap_info->adapter_type_text),
+ sup_adap_info->adapter_type_text);
} else
len = snprintf(buf, PAGE_SIZE, "%s\n",
- aac_drivers[dev->cardtype].vname);
+ aac_drivers[dev->cardtype].vname);
return len;
}
@@ -1078,7 +1080,7 @@ static ssize_t aac_show_flags(struct device *cdev,
"SAI_READ_CAPACITY_16\n");
if (dev->jbod)
len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
- if (dev->supplement_adapter_info.SupportedOptions2 &
+ if (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)
len += snprintf(buf + len, PAGE_SIZE - len,
"SUPPORTED_POWER_MANAGEMENT\n");
@@ -1129,6 +1131,13 @@ static ssize_t aac_show_bios_version(struct device *device,
return len;
}
+static ssize_t aac_show_driver_version(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
+}
+
static ssize_t aac_show_serial_number(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -1139,12 +1148,12 @@ static ssize_t aac_show_serial_number(struct device *device,
len = snprintf(buf, 16, "%06X\n",
le32_to_cpu(dev->adapter_info.serial[0]));
if (len &&
- !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
- sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
+ !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[
+ sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len],
buf, len-1))
len = snprintf(buf, 16, "%.*s\n",
- (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
- dev->supplement_adapter_info.MfgPcbaSerialNo);
+ (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no),
+ dev->supplement_adapter_info.mfg_pcba_serial_no);
return min(len, 16);
}
@@ -1239,6 +1248,13 @@ static struct device_attribute aac_bios_version = {
},
.show = aac_show_bios_version,
};
+static struct device_attribute aac_lld_version = {
+ .attr = {
+ .name = "driver_version",
+ .mode = 0444,
+ },
+ .show = aac_show_driver_version,
+};
static struct device_attribute aac_serial_number = {
.attr = {
.name = "serial_number",
@@ -1276,6 +1292,7 @@ static struct device_attribute *aac_attrs[] = {
&aac_kernel_version,
&aac_monitor_version,
&aac_bios_version,
+ &aac_lld_version,
&aac_serial_number,
&aac_max_channel,
&aac_max_id,
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 0e69a80c3275..5d19c31e3bba 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -475,7 +475,7 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
{
u32 var = 0;
- if (!(dev->supplement_adapter_info.SupportedOptions2 &
+ if (!(dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
if (bled)
printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 8e4e2ddbafd7..2e5338dec621 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -437,16 +437,23 @@ static int aac_src_check_health(struct aac_dev *dev)
u32 status = src_readl(dev, MUnit.OMR);
/*
+ * Check to see if the board panic'd.
+ */
+ if (unlikely(status & KERNEL_PANIC))
+ goto err_blink;
+
+ /*
* Check to see if the board failed any self tests.
*/
if (unlikely(status & SELF_TEST_FAILED))
- return -1;
+ goto err_out;
/*
- * Check to see if the board panic'd.
+ * Check to see if the board failed any self tests.
*/
- if (unlikely(status & KERNEL_PANIC))
- return (status >> 16) & 0xFF;
+ if (unlikely(status & MONITOR_PANIC))
+ goto err_out;
+
/*
* Wait for the adapter to be up and running.
*/
@@ -456,6 +463,12 @@ static int aac_src_check_health(struct aac_dev *dev)
* Everything is OK
*/
return 0;
+
+err_out:
+ return -1;
+
+err_blink:
+ return (status > 16) & 0xFF;
}
static inline u32 aac_get_vector(struct aac_dev *dev)
@@ -657,7 +670,7 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
return 0;
}
-static void aac_set_intx_mode(struct aac_dev *dev)
+void aac_set_intx_mode(struct aac_dev *dev)
{
if (dev->msi_enabled) {
aac_src_access_devreg(dev, AAC_ENABLE_INTX);
@@ -666,10 +679,27 @@ static void aac_set_intx_mode(struct aac_dev *dev)
}
}
+static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
+{
+ __le32 supported_options3;
+
+ if (!aac_fib_dump)
+ return;
+
+ supported_options3 = dev->supplement_adapter_info.supported_options3;
+ if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
+ return;
+
+ aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
+ 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
static void aac_send_iop_reset(struct aac_dev *dev, int bled)
{
u32 var, reset_mask;
+ aac_dump_fw_fib_iop_reset(dev);
+
bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
0, 0, 0, 0, 0, 0, &var,
&reset_mask, NULL, NULL, NULL);
@@ -684,7 +714,7 @@ static void aac_send_iop_reset(struct aac_dev *dev, int bled)
aac_set_intx_mode(dev);
- if (!bled && (dev->supplement_adapter_info.SupportedOptions2 &
+ if (!bled && (dev->supplement_adapter_info.supported_options2 &
AAC_OPTION_DOORBELL_RESET)) {
src_writel(dev, MUnit.IDR, reset_mask);
} else {
@@ -714,6 +744,12 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
pr_err("%s%d: adapter kernel panic'd %x.\n",
dev->name, dev->id, bled);
+ /*
+ * When there is a BlinkLED, IOP_RESET has not effect
+ */
+ if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
+ reset_type &= ~HW_IOP_RESET;
+
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
switch (reset_type) {
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 109e2c99e6c1..95d8f25cbcca 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
* does not disable its parity logic prior to
* the start of the reset. This may cause a
* parity error to be detected and thus a
- * spurious SERR or PERR assertion. Disble
+ * spurious SERR or PERR assertion. Disable
* PERR and SERR responses during the CHIPRST.
*/
mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index fdd4eb4e41b2..4fc8ed5fe067 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -39,7 +39,7 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/io.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index ed7f3228e234..89ef1a1678d1 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -25,7 +25,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/in.h>
#include <linux/kfifo.h>
#include <linux/netdevice.h>
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 7069639e92bc..3061d8045382 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -2259,6 +2259,8 @@ static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
CXLFLASH_NOTIFY_SHUTDOWN };
+static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
+ CXLFLASH_NOTIFY_SHUTDOWN };
/*
* PCI device binding table
@@ -2268,6 +2270,8 @@ static struct pci_device_id cxlflash_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
+ {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
{}
};
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index e43545c86bcf..0be2261e6312 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -25,6 +25,7 @@
#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
+#define PCI_DEVICE_ID_IBM_BRIARD 0x0624
/* Since there is only one target, make it 0 */
#define CXLFLASH_TARGET 0
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ef5bf55f08a4..b46fd2f45628 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -305,6 +305,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
struct cxlflash_cfg *cfg = shost_priv(sdev->host);
struct device *dev = &cfg->dev->dev;
struct glun_info *gli = lli->parent;
+ struct scsi_sense_hdr sshdr;
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
u8 *sense_buf = NULL;
@@ -332,7 +333,8 @@ retry:
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL);
+ CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES,
+ 0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
@@ -345,10 +347,6 @@ retry:
if (driver_byte(result) == DRIVER_SENSE) {
result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
if (result & SAM_STAT_CHECK_CONDITION) {
- struct scsi_sense_hdr sshdr;
-
- scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
- &sshdr);
switch (sshdr.sense_key) {
case NO_SENSE:
case RECOVERED_ERROR:
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 8fcc804dbef9..7aa06ef229fd 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -453,8 +453,8 @@ static int write_same16(struct scsi_device *sdev,
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
- CMD_BUFSIZE, sense_buf, to, CMD_RETRIES,
- 0, NULL);
+ CMD_BUFSIZE, sense_buf, NULL, to,
+ CMD_RETRIES, 0, 0, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index d704752b6332..48e200102221 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -151,11 +151,9 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
cdb[1] = MI_REPORT_TARGET_PGS;
put_unaligned_be32(bufflen, &cdb[6]);
- return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
- buff, bufflen, sshdr,
- ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, NULL,
- req_flags, 0);
+ return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL,
+ sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
}
/*
@@ -185,11 +183,9 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
cdb[1] = MO_SET_TARGET_PGS;
put_unaligned_be32(stpg_len, &cdb[6]);
- return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
- stpg_data, stpg_len,
- sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, NULL,
- req_flags, 0);
+ return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL,
+ sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
}
static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 4a7679f6c73d..8654e940e1a8 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -276,10 +276,9 @@ static int send_trespass_cmd(struct scsi_device *sdev,
BUG_ON((len > CLARIION_BUFFER_SIZE));
memcpy(csdev->buffer, page22, len);
- err = scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
- csdev->buffer, len, &sshdr,
- CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
- NULL, req_flags, 0);
+ err = scsi_execute(sdev, cdb, DMA_TO_DEVICE, csdev->buffer, len, NULL,
+ &sshdr, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
+ req_flags, 0, NULL);
if (err) {
if (scsi_sense_valid(&sshdr))
res = trespass_endio(sdev, &sshdr);
@@ -358,7 +357,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
static int clariion_std_inquiry(struct scsi_device *sdev,
struct clariion_dh_data *csdev)
{
- int err;
+ int err = SCSI_DH_OK;
char *sp_model;
sp_model = parse_sp_model(sdev, sdev->inquiry);
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index be43c940636d..62d314e07d11 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -100,9 +100,8 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
REQ_FAILFAST_DRIVER;
retry:
- res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
- HP_SW_TIMEOUT, HP_SW_RETRIES,
- NULL, req_flags, 0);
+ res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
if (res) {
if (scsi_sense_valid(&sshdr))
ret = tur_done(sdev, h, &sshdr);
@@ -139,9 +138,8 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
REQ_FAILFAST_DRIVER;
retry:
- res = scsi_execute_req_flags(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
- HP_SW_TIMEOUT, HP_SW_RETRIES,
- NULL, req_flags, 0);
+ res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
if (res) {
if (!scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_WARNING, sdev,
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b64eaae8533d..3cbab8710e58 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -555,10 +555,9 @@ static void send_mode_select(struct work_struct *work)
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
- if (scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
- &h->ctlr->mode_select, data_size, &sshdr,
- RDAC_TIMEOUT * HZ,
- RDAC_RETRIES, NULL, req_flags, 0)) {
+ if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select,
+ data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ,
+ RDAC_RETRIES, req_flags, 0, NULL)) {
err = mode_select_handle_sense(sdev, &sshdr);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 6103231104da..fd501f8dbb11 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -36,6 +36,8 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
+#include <linux/rculist.h>
+
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index c991f3b822f8..b44c3136eb51 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -65,6 +65,8 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/export.h>
+#include <linux/rculist.h>
+
#include <asm/unaligned.h>
#include <scsi/libfc.h>
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 834d1212b6d5..07c08ce68d70 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/sched/signal.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index e2516ba8ebfa..cb6aa802c48e 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -1,9 +1,11 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
+# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
-# * www.emulex.com *
+# * www.broadcom.com *
# * *
# * This program is free software; you can redistribute it and/or *
# * modify it under the terms of version 2 of the GNU General *
@@ -28,6 +30,7 @@ endif
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
-lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
- lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
- lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
+lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
+ lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
+ lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
+ lpfc_nvme.o lpfc_nvmet.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 6593b073c524..0bba2e30b4f0 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -20,6 +22,7 @@
*******************************************************************/
#include <scsi/scsi_host.h>
+#include <linux/ktime.h>
#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
@@ -53,6 +56,7 @@ struct lpfc_sli2_slim;
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */
#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */
+#define LPFC_MIN_NVME_SEG_CNT 254
#define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */
#define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. */
@@ -114,6 +118,20 @@ enum lpfc_polling_flags {
DISABLE_FCP_RING_INT = 0x2
};
+struct perf_prof {
+ uint16_t cmd_cpu[40];
+ uint16_t rsp_cpu[40];
+ uint16_t qh_cpu[40];
+ uint16_t wqidx[40];
+};
+
+/*
+ * Provide for FC4 TYPE x28 - NVME. The
+ * bit mask for FCP and NVME is 0x8 identically
+ * because they are 32 bit positions distance.
+ */
+#define LPFC_FC4_TYPE_BITMASK 0x00000100
+
/* Provide DMA memory definitions the driver uses per port instance. */
struct lpfc_dmabuf {
struct list_head list;
@@ -131,10 +149,24 @@ struct lpfc_dma_pool {
struct hbq_dmabuf {
struct lpfc_dmabuf hbuf;
struct lpfc_dmabuf dbuf;
- uint32_t size;
+ uint16_t total_size;
+ uint16_t bytes_recv;
uint32_t tag;
struct lpfc_cq_event cq_event;
unsigned long time_stamp;
+ void *context;
+};
+
+struct rqb_dmabuf {
+ struct lpfc_dmabuf hbuf;
+ struct lpfc_dmabuf dbuf;
+ uint16_t total_size;
+ uint16_t bytes_recv;
+ void *context;
+ struct lpfc_iocbq *iocbq;
+ struct lpfc_sglq *sglq;
+ struct lpfc_queue *hrq; /* ptr to associated Header RQ */
+ struct lpfc_queue *drq; /* ptr to associated Data RQ */
};
/* Priority bit. Set value to exceed low water mark in lpfc_mem. */
@@ -367,7 +399,8 @@ struct lpfc_vport {
int32_t stopped; /* HBA has not been restarted since last ERATT */
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
- uint32_t num_disc_nodes; /*in addition to hba_state */
+ uint32_t num_disc_nodes; /* in addition to hba_state */
+ uint32_t gidft_inp; /* cnt of outstanding GID_FTs */
uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
@@ -420,7 +453,6 @@ struct lpfc_vport {
uint32_t cfg_max_scsicmpl_time;
uint32_t cfg_tgt_queue_depth;
uint32_t cfg_first_burst_size;
-
uint32_t dev_loss_tmo_changed;
struct fc_vport *fc_vport;
@@ -428,6 +460,9 @@ struct lpfc_vport {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
+ struct dentry *debug_nvmestat;
+ struct dentry *debug_nvmektime;
+ struct dentry *debug_cpucheck;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@@ -442,6 +477,11 @@ struct lpfc_vport {
uint16_t fdmi_num_disc;
uint32_t fdmi_hba_mask;
uint32_t fdmi_port_mask;
+
+ /* There is a single nvme instance per vport. */
+ struct nvme_fc_local_port *localport;
+ uint8_t nvmei_support; /* driver supports NVME Initiator */
+ uint32_t last_fcp_wqidx;
};
struct hbq_s {
@@ -459,10 +499,9 @@ struct hbq_s {
struct hbq_dmabuf *);
};
-#define LPFC_MAX_HBQS 4
/* this matches the position in the lpfc_hbq_defs array */
#define LPFC_ELS_HBQ 0
-#define LPFC_EXTRA_HBQ 1
+#define LPFC_MAX_HBQS 1
enum hba_temp_state {
HBA_NORMAL_TEMP,
@@ -652,6 +691,8 @@ struct lpfc_hba {
* Firmware supports Forced Link Speed
* capability
*/
+#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
+
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -700,6 +741,9 @@ struct lpfc_hba {
uint8_t wwpn[8];
uint32_t RandomData[7];
uint8_t fcp_embed_io;
+ uint8_t nvme_support; /* Firmware supports NVME */
+ uint8_t nvmet_support; /* driver supports NVMET */
+#define LPFC_NVMET_MAX_PORTS 32
uint8_t mds_diags_support;
/* HBA Config Parameters */
@@ -725,6 +769,14 @@ struct lpfc_hba {
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_io_channel;
+ uint32_t cfg_suppress_rsp;
+ uint32_t cfg_nvme_oas;
+ uint32_t cfg_nvme_io_channel;
+ uint32_t cfg_nvmet_mrq;
+ uint32_t cfg_nvmet_mrq_post;
+ uint32_t cfg_enable_nvmet;
+ uint32_t cfg_nvme_enable_fb;
+ uint32_t cfg_nvmet_fb_size;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
@@ -770,6 +822,13 @@ struct lpfc_hba {
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
uint32_t cfg_enable_SmartSAN;
uint32_t cfg_enable_mds_diags;
+ uint32_t cfg_enable_fc4_type;
+ uint32_t cfg_xri_split;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+ uint32_t io_channel_irqs; /* number of irqs for io channels */
+ struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
struct pci_dev *pcidev;
@@ -784,11 +843,11 @@ struct lpfc_hba {
unsigned long data_flags;
uint32_t hbq_in_use; /* HBQs in use flag */
- struct list_head rb_pend_list; /* Received buffers to be processed */
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
- atomic_t fcp_qidx; /* next work queue to post work to */
+ atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */
+ atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */
phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
@@ -843,9 +902,17 @@ struct lpfc_hba {
/*
* stat counters
*/
- uint64_t fc4InputRequests;
- uint64_t fc4OutputRequests;
- uint64_t fc4ControlRequests;
+ uint64_t fc4ScsiInputRequests;
+ uint64_t fc4ScsiOutputRequests;
+ uint64_t fc4ScsiControlRequests;
+ uint64_t fc4ScsiIoCmpls;
+ uint64_t fc4NvmeInputRequests;
+ uint64_t fc4NvmeOutputRequests;
+ uint64_t fc4NvmeControlRequests;
+ uint64_t fc4NvmeIoCmpls;
+ uint64_t fc4NvmeLsRequests;
+ uint64_t fc4NvmeLsCmpls;
+
uint64_t bg_guard_err_cnt;
uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt;
@@ -856,17 +923,23 @@ struct lpfc_hba {
struct list_head lpfc_scsi_buf_list_get;
struct list_head lpfc_scsi_buf_list_put;
uint32_t total_scsi_bufs;
+ spinlock_t nvme_buf_list_get_lock; /* NVME buf alloc list lock */
+ spinlock_t nvme_buf_list_put_lock; /* NVME buf free list lock */
+ struct list_head lpfc_nvme_buf_list_get;
+ struct list_head lpfc_nvme_buf_list_put;
+ uint32_t total_nvme_bufs;
struct list_head lpfc_iocb_list;
uint32_t total_iocbq_bufs;
struct list_head active_rrq_list;
spinlock_t hbalock;
/* pci_mem_pools */
- struct pci_pool *lpfc_scsi_dma_buf_pool;
+ struct pci_pool *lpfc_sg_dma_buf_pool;
struct pci_pool *lpfc_mbuf_pool;
struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
struct pci_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */
+ struct pci_pool *txrdy_payload_pool;
struct lpfc_dma_pool lpfc_mbuf_safety_pool;
mempool_t *mbox_mem_pool;
@@ -878,8 +951,6 @@ struct lpfc_hba {
enum intr_type_t intr_type;
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
- struct msix_entry msix_entries[LPFC_MSIX_VECTORS];
-
struct list_head port_list;
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
@@ -925,6 +996,12 @@ struct lpfc_hba {
struct dentry *debug_readApp; /* inject read app_tag errors */
struct dentry *debug_readRef; /* inject read ref_tag errors */
+ struct dentry *debug_nvmeio_trc;
+ struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
+ atomic_t nvmeio_trc_cnt;
+ uint32_t nvmeio_trc_size;
+ uint32_t nvmeio_trc_output_idx;
+
/* T10 DIF error injection */
uint32_t lpfc_injerr_wgrd_cnt;
uint32_t lpfc_injerr_wapp_cnt;
@@ -950,7 +1027,9 @@ struct lpfc_hba {
struct dentry *idiag_ctl_acc;
struct dentry *idiag_mbx_acc;
struct dentry *idiag_ext_acc;
+ uint8_t lpfc_idiag_last_eq;
#endif
+ uint16_t nvmeio_trc_on;
/* Used for deferred freeing of ELS data buffers */
struct list_head elsbuf;
@@ -1023,6 +1102,53 @@ struct lpfc_hba {
#define LPFC_TRANSGRESSION_LOW_RXPOWER 0x4000
uint16_t sfp_alarm;
uint16_t sfp_warning;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+#define LPFC_CHECK_CPU_CNT 32
+ uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
+ uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
+ uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
+ uint32_t cpucheck_ccmpl_io[LPFC_CHECK_CPU_CNT];
+ uint16_t cpucheck_on;
+#define LPFC_CHECK_OFF 0
+#define LPFC_CHECK_NVME_IO 1
+#define LPFC_CHECK_NVMET_RCV 2
+#define LPFC_CHECK_NVMET_IO 4
+ uint16_t ktime_on;
+ uint64_t ktime_data_samples;
+ uint64_t ktime_status_samples;
+ uint64_t ktime_last_cmd;
+ uint64_t ktime_seg1_total;
+ uint64_t ktime_seg1_min;
+ uint64_t ktime_seg1_max;
+ uint64_t ktime_seg2_total;
+ uint64_t ktime_seg2_min;
+ uint64_t ktime_seg2_max;
+ uint64_t ktime_seg3_total;
+ uint64_t ktime_seg3_min;
+ uint64_t ktime_seg3_max;
+ uint64_t ktime_seg4_total;
+ uint64_t ktime_seg4_min;
+ uint64_t ktime_seg4_max;
+ uint64_t ktime_seg5_total;
+ uint64_t ktime_seg5_min;
+ uint64_t ktime_seg5_max;
+ uint64_t ktime_seg6_total;
+ uint64_t ktime_seg6_min;
+ uint64_t ktime_seg6_max;
+ uint64_t ktime_seg7_total;
+ uint64_t ktime_seg7_min;
+ uint64_t ktime_seg7_max;
+ uint64_t ktime_seg8_total;
+ uint64_t ktime_seg8_min;
+ uint64_t ktime_seg8_max;
+ uint64_t ktime_seg9_total;
+ uint64_t ktime_seg9_min;
+ uint64_t ktime_seg9_max;
+ uint64_t ktime_seg10_total;
+ uint64_t ktime_seg10_min;
+ uint64_t ktime_seg10_max;
+#endif
};
static inline struct Scsi_Host *
@@ -1093,3 +1219,11 @@ lpfc_sli_read_hs(struct lpfc_hba *phba)
return 0;
}
+
+static inline struct lpfc_sli_ring *
+lpfc_phba_elsring(struct lpfc_hba *phba)
+{
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return phba->sli4_hba.els_wq->pring;
+ return &phba->sli.sli3_ring[LPFC_ELS_RING];
+}
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 03cb05abc821..5c783ef7f260 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -35,14 +37,18 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
+#include <linux/nvme-fc-driver.h>
+
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_version.h"
#include "lpfc_compat.h"
@@ -50,9 +56,13 @@
#include "lpfc_vport.h"
#include "lpfc_attr.h"
-#define LPFC_DEF_DEVLOSS_TMO 30
-#define LPFC_MIN_DEVLOSS_TMO 1
-#define LPFC_MAX_DEVLOSS_TMO 255
+#define LPFC_DEF_DEVLOSS_TMO 30
+#define LPFC_MIN_DEVLOSS_TMO 1
+#define LPFC_MAX_DEVLOSS_TMO 255
+
+#define LPFC_DEF_MRQ_POST 256
+#define LPFC_MIN_MRQ_POST 32
+#define LPFC_MAX_MRQ_POST 512
/*
* Write key size should be multiple of 4. If write key is changed
@@ -130,6 +140,211 @@ lpfc_enable_fip_show(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport;
+ struct nvme_fc_remote_port *nrport;
+ char *statep;
+ int len = 0;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+ len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n");
+ return len;
+ }
+ if (phba->nvmet_support) {
+ if (!phba->targetport) {
+ len = snprintf(buf, PAGE_SIZE,
+ "NVME Target: x%llx is not allocated\n",
+ wwn_to_u64(vport->fc_portname.u.wwn));
+ return len;
+ }
+ /* Port state is only one of two values for now. */
+ if (phba->targetport->port_id)
+ statep = "REGISTERED";
+ else
+ statep = "INIT";
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "NVME Target: Enabled State %s\n",
+ statep);
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
+ "NVME Target: lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ phba->targetport->port_id);
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "\nNVME Target: Statistics\n");
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "LS: Rcv %08x Drop %08x Abort %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_drop),
+ atomic_read(&tgtp->xmt_ls_abort));
+ if (atomic_read(&tgtp->rcv_ls_req_in) !=
+ atomic_read(&tgtp->rcv_ls_req_out)) {
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "Rcv LS: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_out));
+ }
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp),
+ atomic_read(&tgtp->xmt_ls_drop),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_error));
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP: Rcv %08x Drop %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_drop));
+
+ if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
+ atomic_read(&tgtp->rcv_fcp_cmd_out)) {
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "Rcv FCP: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out));
+ }
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x\n",
+ atomic_read(&tgtp->xmt_fcp_read),
+ atomic_read(&tgtp->xmt_fcp_read_rsp),
+ atomic_read(&tgtp->xmt_fcp_write),
+ atomic_read(&tgtp->xmt_fcp_rsp));
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp: abort %08x drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_abort),
+ atomic_read(&tgtp->xmt_fcp_drop));
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
+ atomic_read(&tgtp->xmt_fcp_rsp_error),
+ atomic_read(&tgtp->xmt_fcp_rsp_drop));
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "ABORT: Xmt %08x Err %08x Cmpl %08x",
+ atomic_read(&tgtp->xmt_abort_rsp),
+ atomic_read(&tgtp->xmt_abort_rsp_error),
+ atomic_read(&tgtp->xmt_abort_cmpl));
+
+ len += snprintf(buf+len, PAGE_SIZE-len, "\n");
+ return len;
+ }
+
+ localport = vport->localport;
+ if (!localport) {
+ len = snprintf(buf, PAGE_SIZE,
+ "NVME Initiator x%llx is not allocated\n",
+ wwn_to_u64(vport->fc_portname.u.wwn));
+ return len;
+ }
+ len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
+
+ spin_lock_irq(shost->host_lock);
+ lport = (struct lpfc_nvme_lport *)localport->private;
+
+ /* Port state is only one of two values for now. */
+ if (localport->port_id)
+ statep = "ONLINE";
+ else
+ statep = "UNKNOWN ";
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
+ "NVME LPORT lpfc",
+ phba->brd_no,
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ localport->port_id, statep);
+
+ list_for_each_entry(rport, &lport->rport_list, list) {
+ /* local short-hand pointer. */
+ nrport = rport->remoteport;
+
+ /* Port state is only one of two values for now. */
+ switch (nrport->port_state) {
+ case FC_OBJSTATE_ONLINE:
+ statep = "ONLINE";
+ break;
+ case FC_OBJSTATE_UNKNOWN:
+ statep = "UNKNOWN ";
+ break;
+ default:
+ statep = "UNSUPPORTED";
+ break;
+ }
+
+ /* Tab in to show lport ownership. */
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "NVME RPORT ");
+ if (phba->brd_no >= 10)
+ len += snprintf(buf + len, PAGE_SIZE - len, " ");
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ",
+ nrport->port_name);
+ len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ",
+ nrport->node_name);
+ len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
+ nrport->port_id);
+
+ switch (nrport->port_role) {
+ case FC_PORT_ROLE_NVME_INITIATOR:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "INITIATOR ");
+ break;
+ case FC_PORT_ROLE_NVME_TARGET:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "TARGET ");
+ break;
+ case FC_PORT_ROLE_NVME_DISCOVERY:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "DISCOVERY ");
+ break;
+ default:
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "UNKNOWN_ROLE x%x",
+ nrport->port_role);
+ break;
+ }
+ len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
+ /* Terminate the string. */
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ }
+ spin_unlock_irq(shost->host_lock);
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "LS: Xmt %016llx Cmpl %016llx\n",
+ phba->fc4NvmeLsRequests,
+ phba->fc4NvmeLsCmpls);
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ "FCP: Rd %016llx Wr %016llx IO %016llx\n",
+ phba->fc4NvmeInputRequests,
+ phba->fc4NvmeOutputRequests,
+ phba->fc4NvmeControlRequests);
+
+ len += snprintf(buf+len, PAGE_SIZE-len,
+ " Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+
+ return len;
+}
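lpfc_nvme_info_show() builds its report by repeatedly appending to the single sysfs page. A minimal sketch of that append pattern (the example_show() name is made up; note how each call starts at buf + len and is bounded by the space remaining):

        static ssize_t example_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                int len = 0;

                /* Append at buf + len, never past the PAGE_SIZE sysfs buffer. */
                len += snprintf(buf + len, PAGE_SIZE - len, "line one\n");
                len += snprintf(buf + len, PAGE_SIZE - len, "line two\n");
                return len;
        }

Assuming the usual SCSI host sysfs layout, the nvme_info attribute registered below would typically be read from /sys/class/scsi_host/hostN/nvme_info (N is host-dependent).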
+
+static ssize_t
lpfc_bg_info_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -675,6 +890,28 @@ lpfc_issue_lip(struct Scsi_Host *shost)
return 0;
}
+int
+lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock)
+{
+ int cnt = 0;
+
+ spin_lock_irq(lock);
+ while (!list_empty(q)) {
+ spin_unlock_irq(lock);
+ msleep(20);
+ if (cnt++ > 250) { /* 5 secs */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0466 %s %s\n",
+ "Outstanding IO when ",
+ "bringing Adapter offline\n");
+ return 0;
+ }
+ spin_lock_irq(lock);
+ }
+ spin_unlock_irq(lock);
+ return 1;
+}
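lpfc_emptyq_wait() polls the given list every 20 ms under the supplied lock and gives up after roughly 5 seconds (250 iterations), returning 1 when the list drained and 0 if I/O is still outstanding. A hedged sketch of the calling convention used by lpfc_do_offline() and lpfc_bsg_diag_mode_enter() below:

        /* Illustrative only: give a ring's completion queue ~5 s to drain
         * before continuing with the offline.
         */
        if (!lpfc_emptyq_wait(phba, &pring->txcmplq, &pring->ring_lock))
                goto out;       /* a warning was already logged; carry on */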
+
/**
* lpfc_do_offline - Issues a mailbox command to bring the link down
* @phba: lpfc_hba pointer.
@@ -694,10 +931,10 @@ static int
lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
{
struct completion online_compl;
+ struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
struct lpfc_sli *psli;
int status = 0;
- int cnt = 0;
int i;
int rc;
@@ -717,20 +954,24 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
/* Wait a little for things to settle down, but not
* long enough for dev loss timeout to expire.
*/
- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
- while (!list_empty(&pring->txcmplq)) {
- msleep(10);
- if (cnt++ > 500) { /* 5 secs */
- lpfc_printf_log(phba,
- KERN_WARNING, LOG_INIT,
- "0466 Outstanding IO when "
- "bringing Adapter offline\n");
- break;
- }
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->sli3_ring[i];
+ if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
+ &phba->hbalock))
+ goto out;
+ }
+ } else {
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
+ continue;
+ if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
+ &pring->ring_lock))
+ goto out;
}
}
-
+out:
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl, type);
if (rc == 0)
@@ -1945,6 +2186,7 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \
}
+static DEVICE_ATTR(nvme_info, 0444, lpfc_nvme_info_show, NULL);
static DEVICE_ATTR(bg_info, S_IRUGO, lpfc_bg_info_show, NULL);
static DEVICE_ATTR(bg_guard_err, S_IRUGO, lpfc_bg_guard_err_show, NULL);
static DEVICE_ATTR(bg_apptag_err, S_IRUGO, lpfc_bg_apptag_err_show, NULL);
@@ -2751,6 +2993,13 @@ lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR,
lpfc_oas_lun_show, lpfc_oas_lun_store);
+int lpfc_enable_nvmet_cnt;
+unsigned long long lpfc_enable_nvmet[LPFC_NVMET_MAX_PORTS] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_enable_nvmet, ullong, &lpfc_enable_nvmet_cnt, 0444);
+MODULE_PARM_DESC(lpfc_enable_nvmet, "Enable HBA port(s) WWPN as a NVME Target");
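Per the description above, lpfc_enable_nvmet is an array of port WWPNs that the driver should bring up in NVME target mode. A hypothetical load line (the WWPN below is made up, not a real adapter):

        modprobe lpfc lpfc_enable_nvmet=0x10000090fa942779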
+
static int lpfc_poll = 0;
module_param(lpfc_poll, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
@@ -2816,9 +3065,9 @@ lpfc_txq_hw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+ struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- phba->sli.ring[LPFC_ELS_RING].txq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n", pring->txq_max);
}
static DEVICE_ATTR(txq_hw, S_IRUGO,
@@ -2829,9 +3078,9 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr,
{
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_hba *phba = ((struct lpfc_vport *) shost->hostdata)->phba;
+ struct lpfc_sli_ring *pring = lpfc_phba_elsring(phba);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- phba->sli.ring[LPFC_ELS_RING].txcmplq_max);
+ return snprintf(buf, PAGE_SIZE, "%d\n", pring->txcmplq_max);
}
static DEVICE_ATTR(txcmplq_hw, S_IRUGO,
@@ -3030,6 +3279,59 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
lpfc_devloss_tmo_show, lpfc_devloss_tmo_store);
/*
+ * lpfc_suppress_rsp: Enable suppress rsp feature if firmware supports it
+ * lpfc_suppress_rsp = 0 Disable
+ * lpfc_suppress_rsp = 1 Enable (default)
+ *
+ */
+LPFC_ATTR_R(suppress_rsp, 1, 0, 1,
+ "Enable suppress rsp feature is firmware supports it");
+
+/*
+ * lpfc_nvmet_mrq: Specify number of RQ pairs for processing NVMET cmds
+ * lpfc_nvmet_mrq = 1 use a single RQ pair
+ * lpfc_nvmet_mrq >= 2 use specified RQ pairs for MRQ
+ *
+ */
+LPFC_ATTR_R(nvmet_mrq,
+ 1, 1, 16,
+ "Specify number of RQ pairs for processing NVMET cmds");
+
+/*
+ * lpfc_nvmet_mrq_post: Specify number of buffers to post on every MRQ
+ *
+ */
+LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
+ LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
+ "Specify number of buffers to post on every MRQ");
+
+/*
+ * lpfc_enable_fc4_type: Defines what FC4 types are supported.
+ * Supported Values: 1 - register just FCP
+ * 3 - register both FCP and NVME
+ * Supported values are [1,3]. Default value is 3
+ */
+LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
+ LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+ "Define fc4 type to register with fabric.");
+
+/*
+ * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
+ * This parameter is only used if:
+ * lpfc_enable_fc4_type is 3 - register both FCP and NVME and
+ * port is not configured for NVMET.
+ *
+ * ELS/CT always gets 10% of the XRIs, up to a maximum of 250.
+ * The remaining XRIs are split based on lpfc_xri_split per port:
+ *
+ * Supported values are percentages.
+ * The xri_split value is the percentage the SCSI (FCP) side will get; the
+ * remaining percentage goes to NVME.
+ */
+LPFC_ATTR_R(xri_split, 50, 10, 90,
+ "Division of XRI resources between SCSI and NVME");
+
+/*
# lpfc_log_verbose: Only turn this flag on if you are willing to risk being
# deluged with LOTS of information.
# You can set a bit mask to record specific types of verbose messages:
@@ -4143,13 +4445,14 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
/*
* Value range for the HBA is [5000,5000000]
* The value for each EQ depends on how many EQs are configured.
+ * Allow value == 0
*/
- if (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX)
+ if (val && (val < LPFC_MIN_IMAX || val > LPFC_MAX_IMAX))
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
- for (i = 0; i < phba->cfg_fcp_io_channel; i += LPFC_MAX_EQ_DELAY)
- lpfc_modify_fcp_eq_delay(phba, i);
+ for (i = 0; i < phba->io_channel_irqs; i++)
+ lpfc_modify_hba_eq_delay(phba, i);
return strlen(buf);
}
@@ -4187,7 +4490,8 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
return 0;
}
- if (val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) {
+ if ((val >= LPFC_MIN_IMAX && val <= LPFC_MAX_IMAX) ||
+ (val == 0)) {
phba->cfg_fcp_imax = val;
return 0;
}
@@ -4377,6 +4681,32 @@ LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
"First burst size for Targets that support first burst");
/*
+* lpfc_nvmet_fb_size: NVME Target mode supported first burst size.
+* When the driver is configured as an NVME target, this value is
+* communicated to the NVME initiator in the PRLI response. It is
+* used only when the lpfc_nvme_enable_fb and lpfc_nvmet_support
+* parameters are set and the target is sending the PRLI RSP.
+* Parameter supported on physical port only - no NPIV support.
+* Value range is [0,65536]. Default value is 0.
+*/
+LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536,
+ "NVME Target mode first burst size in 512B increments.");
+
+/*
+ * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions.
+ * For the Initiator (I), enabling this parameter means that an NVMET
+ * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be
+ * processed by the initiator for subsequent NVME FCP IO. For the target
+ * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size
+ * driver parameter as the target function's first burst size returned to the
+ * initiator in the target's NVME PRLI response. Parameter supported on physical
+ * port only - no NPIV support.
+ * Value range is [0,1]. Default value is 0 (disabled).
+ */
+LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1,
+ "Enable First Burst feature on I and T functions.");
+
+/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
# depth. Default value is 0. When the value of this parameter is zero the
# SCSI command completion time is not used for controlling I/O queue depth. When
@@ -4423,17 +4753,25 @@ static DEVICE_ATTR(lpfc_max_scsicmpl_time, S_IRUGO | S_IWUSR,
LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support");
/*
-# lpfc_fcp_io_sched: Determine scheduling algrithmn for issuing FCP cmds
-# range is [0,1]. Default value is 0.
-# For [0], FCP commands are issued to Work Queues ina round robin fashion.
-# For [1], FCP commands are issued to a Work Queue associated with the
-# current CPU.
-# It would be set to 1 by the driver if it's able to set up cpu affinity
-# for FCP I/Os through Work Queue associated with the current CPU. Otherwise,
-# roundrobin scheduling of FCP I/Os through WQs will be used.
-*/
-LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
- "issuing commands [0] - Round Robin, [1] - Current CPU");
+ * lpfc_fcp_io_sched: Determine scheduling algorithm for issuing FCP cmds
+ * range is [0,1]. Default value is 0.
+ * For [0], FCP commands are issued to Work Queues in a round robin fashion.
+ * For [1], FCP commands are issued to a Work Queue associated with the
+ * current CPU.
+ *
+ * LPFC_FCP_SCHED_ROUND_ROBIN == 0
+ * LPFC_FCP_SCHED_BY_CPU == 1
+ *
+ * The driver dynamically sets this to 1 (BY_CPU) if it's able to set up cpu
+ * affinity for FCP/NVME I/Os through Work Queues associated with the current
+ * CPU. Otherwise, the default 0 (Round Robin) scheduling of FCP/NVME I/Os
+ * through WQs will be used.
+ */
+LPFC_ATTR_RW(fcp_io_sched, LPFC_FCP_SCHED_ROUND_ROBIN,
+ LPFC_FCP_SCHED_ROUND_ROBIN,
+ LPFC_FCP_SCHED_BY_CPU,
+ "Determine scheduling algorithm for "
+ "issuing commands [0] - Round Robin, [1] - Current CPU");
/*
# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
@@ -4560,15 +4898,54 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
-# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_io_channel, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
- LPFC_FCP_IO_CHAN_MAX,
+ * lpfc_nvme_oas: Use the oas bit when sending NVME/NVMET IOs
+ *
+ * 0 = NVME OAS disabled
+ * 1 = NVME OAS enabled
+ *
+ * Value range is [0,1]. Default value is 0.
+ */
+LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
+ "Use OAS bit on NVME IOs");
+
+/*
+ * lpfc_fcp_io_channel: Set the number of FCP IO channels the driver
+ * will advertise it supports to the SCSI layer. This also will map to
+ * the number of WQs the driver will create.
+ *
+ * 0 = Configure the number of io channels to the number of active CPUs.
+ * 1,32 = Manually specify how many io channels to use.
+ *
+ * Value range is [0,32]. Default value is 4.
+ */
+LPFC_ATTR_R(fcp_io_channel,
+ LPFC_FCP_IO_CHAN_DEF,
+ LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
"Set the number of FCP I/O channels");
/*
+ * lpfc_nvme_io_channel: Set the number of IO hardware queues the driver
+ * will advertise it supports to the NVME layer. This also will map to
+ * the number of WQs the driver will create.
+ *
+ * This module parameter is valid when lpfc_enable_fc4_type is set
+ * to support NVME.
+ *
+ * The NVME Layer will try to create this many, plus 1 administrative
+ * hardware queue. The administrative queue will always map to WQ 0.
+ * A hardware IO queue maps (qidx) to a specific driver WQ.
+ *
+ * 0 = Configure the number of io channels to the number of active CPUs.
+ * 1,32 = Manually specify how many io channels to use.
+ *
+ * Value range is [0,32]. Default value is 0.
+ */
+LPFC_ATTR_R(nvme_io_channel,
+ LPFC_NVME_IO_CHAN_DEF,
+ LPFC_HBA_IO_CHAN_MIN, LPFC_HBA_IO_CHAN_MAX,
+ "Set the number of NVME I/O channels");
+
+/*
# lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
# 0 = HBA resets disabled
# 1 = HBA resets enabled (default)
@@ -4692,6 +5069,7 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT,
LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
struct device_attribute *lpfc_hba_attrs[] = {
+ &dev_attr_nvme_info,
&dev_attr_bg_info,
&dev_attr_bg_guard_err,
&dev_attr_bg_apptag_err,
@@ -4718,6 +5096,8 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_peer_port_login,
&dev_attr_lpfc_nodev_tmo,
&dev_attr_lpfc_devloss_tmo,
+ &dev_attr_lpfc_enable_fc4_type,
+ &dev_attr_lpfc_xri_split,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
&dev_attr_lpfc_first_burst_size,
@@ -4752,9 +5132,16 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_poll_tmo,
&dev_attr_lpfc_task_mgmt_tmo,
&dev_attr_lpfc_use_msi,
+ &dev_attr_lpfc_nvme_oas,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_io_channel,
+ &dev_attr_lpfc_suppress_rsp,
+ &dev_attr_lpfc_nvme_io_channel,
+ &dev_attr_lpfc_nvmet_mrq,
+ &dev_attr_lpfc_nvmet_mrq_post,
+ &dev_attr_lpfc_nvme_enable_fb,
+ &dev_attr_lpfc_nvmet_fb_size,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
&dev_attr_lpfc_soft_wwpn,
@@ -5764,15 +6151,17 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_fdmi_on_init(phba, lpfc_fdmi_on);
lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
lpfc_use_msi_init(phba, lpfc_use_msi);
+ lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
- lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
+
lpfc_EnableXLane_init(phba, lpfc_EnableXLane);
if (phba->sli_rev != LPFC_SLI_REV4)
phba->cfg_EnableXLane = 0;
lpfc_XLanePriority_init(phba, lpfc_XLanePriority);
+
memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t)));
memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t)));
phba->cfg_oas_lun_state = 0;
@@ -5786,9 +6175,48 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_poll = 0;
else
phba->cfg_poll = lpfc_poll;
+ lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp);
+
+ lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
+ lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
+ lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
+
+ /* Initialize first burst. Target vs Initiator are different. */
+ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
+ lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
+ lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
+ lpfc_nvme_io_channel_init(phba, lpfc_nvme_io_channel);
+
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ /* NVME only supported on SLI4 */
+ phba->nvmet_support = 0;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+ } else {
+ /* We MUST have FCP support */
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
+ }
+
+ /* A value of 0 means use the number of CPUs found in the system */
+ if (phba->cfg_fcp_io_channel == 0)
+ phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
+ if (phba->cfg_nvme_io_channel == 0)
+ phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
+
+ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+ phba->cfg_fcp_io_channel = 0;
+
+ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
+ phba->cfg_nvme_io_channel = 0;
+
+ if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
+ phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+ else
+ phba->io_channel_irqs = phba->cfg_nvme_io_channel;
phba->cfg_soft_wwnn = 0L;
phba->cfg_soft_wwpn = 0L;
+ lpfc_xri_split_init(phba, lpfc_xri_split);
lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
@@ -5805,6 +6233,60 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
}
/**
+ * lpfc_nvme_mod_param_dep - Adjust module parameter value based on
+ * dependencies between protocols and roles.
+ * @phba: lpfc_hba pointer.
+ **/
+void
+lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
+{
+ if (phba->cfg_nvme_io_channel > phba->sli4_hba.num_present_cpu)
+ phba->cfg_nvme_io_channel = phba->sli4_hba.num_present_cpu;
+
+ if (phba->cfg_fcp_io_channel > phba->sli4_hba.num_present_cpu)
+ phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
+ phba->nvmet_support) {
+ phba->cfg_enable_fc4_type &= ~LPFC_ENABLE_FCP;
+ phba->cfg_fcp_io_channel = 0;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6013 %s x%x fb_size x%x, fb_max x%x\n",
+ "NVME Target PRLI ACC enable_fb ",
+ phba->cfg_nvme_enable_fb,
+ phba->cfg_nvmet_fb_size,
+ LPFC_NVMET_FB_SZ_MAX);
+
+ if (phba->cfg_nvme_enable_fb == 0)
+ phba->cfg_nvmet_fb_size = 0;
+ else {
+ if (phba->cfg_nvmet_fb_size > LPFC_NVMET_FB_SZ_MAX)
+ phba->cfg_nvmet_fb_size = LPFC_NVMET_FB_SZ_MAX;
+ }
+
+ /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */
+ if (phba->cfg_nvmet_mrq > phba->cfg_nvme_io_channel) {
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ "6018 Adjust lpfc_nvmet_mrq to %d\n",
+ phba->cfg_nvmet_mrq);
+ }
+ } else {
+ /* Not NVME Target mode. Turn off Target parameters. */
+ phba->nvmet_support = 0;
+ phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvmet_mrq_post = 0;
+ phba->cfg_nvmet_fb_size = 0;
+ }
+
+ if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
+ phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+ else
+ phba->io_channel_irqs = phba->cfg_nvme_io_channel;
+}
+
+/**
* lpfc_get_vport_cfgparam - Used during port create, init the vport structure
* @vport: lpfc_vport pointer.
**/
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h
index b2bd28e965fa..d56dafcdd563 100644
--- a/drivers/scsi/lpfc/lpfc_attr.h
+++ b/drivers/scsi/lpfc/lpfc_attr.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 7dca4d6a8883..18157d2840a3 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -1704,6 +1706,7 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
struct lpfc_vport **vports;
struct Scsi_Host *shost;
struct lpfc_sli *psli;
+ struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
int i = 0;
@@ -1711,9 +1714,6 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
if (!psli)
return -ENODEV;
- pring = &psli->ring[LPFC_FCP_RING];
- if (!pring)
- return -ENODEV;
if ((phba->link_state == LPFC_HBA_ERROR) ||
(psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
@@ -1732,10 +1732,18 @@ lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
scsi_block_requests(shost);
}
- while (!list_empty(&pring->txcmplq)) {
- if (i++ > 500) /* wait up to 5 seconds */
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ pring = &psli->sli3_ring[LPFC_FCP_RING];
+ lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
+ return 0;
+ }
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring || (pring->ringno != LPFC_FCP_RING))
+ continue;
+ if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
+ &pring->ring_lock))
break;
- msleep(10);
}
return 0;
}
@@ -2703,7 +2711,7 @@ err_get_xri_exit:
* lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
* @phba: Pointer to HBA context object
*
- * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and.
+ * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
* returns the pointer to the buffer.
**/
static struct lpfc_dmabuf *
@@ -2875,8 +2883,7 @@ out:
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
size_t len)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
IOCB_t *cmd = NULL;
struct list_head head, *curr, *next;
@@ -2890,6 +2897,8 @@ static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
int iocb_stat;
int i = 0;
+ pring = lpfc_phba_elsring(phba);
+
cmdiocbq = lpfc_sli_get_iocbq(phba);
rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (rxbmp != NULL) {
@@ -5403,13 +5412,15 @@ lpfc_bsg_timeout(struct bsg_job *job)
struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *cmdiocb;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct bsg_job_data *dd_data;
unsigned long flags;
int rc = 0;
LIST_HEAD(completions);
struct lpfc_iocbq *check_iocb, *next_iocb;
+ pring = lpfc_phba_elsring(phba);
+
/* if job's driver data is NULL, the command completed or is in the
* the process of completing. In this case, return status to request
* so the timeout is retried. This avoids double completion issues
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index f2247aa4fa17..e7d95a4e8042 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2010-2015 Emulex. All rights reserved. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index c88e556ea62e..6b32b0ae7506 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 309643a2c55c..843dd73004da 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -21,6 +23,7 @@
typedef int (*node_filter)(struct lpfc_nodelist *, void *);
struct fc_rport;
+struct fc_frame_header;
void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
@@ -167,6 +170,8 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
+int lpfc_issue_gidft(struct lpfc_vport *vport);
+int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t);
void lpfc_fdmi_num_disc_check(struct lpfc_vport *);
@@ -186,6 +191,8 @@ void lpfc_unblock_mgmt_io(struct lpfc_hba *);
void lpfc_offline_prep(struct lpfc_hba *, int);
void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *);
+int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd,
+ spinlock_t *slock);
int lpfc_fof_queue_create(struct lpfc_hba *);
int lpfc_fof_queue_setup(struct lpfc_hba *);
@@ -193,7 +200,11 @@ int lpfc_fof_queue_destroy(struct lpfc_hba *);
irqreturn_t lpfc_sli4_fof_intr_handler(int, void *);
int lpfc_sli_setup(struct lpfc_hba *);
-int lpfc_sli_queue_setup(struct lpfc_hba *);
+int lpfc_sli4_setup(struct lpfc_hba *phba);
+void lpfc_sli_queue_init(struct lpfc_hba *phba);
+void lpfc_sli4_queue_init(struct lpfc_hba *phba);
+struct lpfc_sli_ring *lpfc_sli4_calc_ring(struct lpfc_hba *phba,
+ struct lpfc_iocbq *iocbq);
void lpfc_handle_eratt(struct lpfc_hba *);
void lpfc_handle_latt(struct lpfc_hba *);
@@ -220,6 +231,7 @@ void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
void lpfc_unreg_vfi(struct lpfcMboxq *, struct lpfc_vport *);
void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode);
void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
@@ -231,8 +243,15 @@ struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
+void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
+int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+ struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
+int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
+ struct lpfc_queue *dq, int count);
+int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
void lpfc_unregister_fcf(struct lpfc_hba *);
void lpfc_unregister_fcf_rescan(struct lpfc_hba *);
void lpfc_unregister_unused_fcf(struct lpfc_hba *);
@@ -287,6 +306,11 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
+int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
+ struct lpfc_iocbq *iocbq);
+struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
+struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
+ struct lpfc_iocbq *piocbq);
void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -336,8 +360,13 @@ void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *);
void *lpfc_mbuf_alloc(struct lpfc_hba *, int, dma_addr_t *);
void __lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
void lpfc_mbuf_free(struct lpfc_hba *, void *, dma_addr_t);
+void *lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int flags,
+ dma_addr_t *handle);
+void lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virtp, dma_addr_t dma);
void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
+void lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp);
+
/* Function prototypes. */
const char* lpfc_info(struct Scsi_Host *);
int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
@@ -356,6 +385,7 @@ extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
@@ -375,9 +405,11 @@ void lpfc_host_attrib_init(struct Scsi_Host *);
extern void lpfc_debugfs_initialize(struct lpfc_vport *);
extern void lpfc_debugfs_terminate(struct lpfc_vport *);
extern void lpfc_debugfs_disc_trc(struct lpfc_vport *, int, char *, uint32_t,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern void lpfc_debugfs_slow_ring_trc(struct lpfc_hba *, char *, uint32_t,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
+extern void lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt,
+ uint16_t data1, uint16_t data2, uint32_t data3);
extern struct lpfc_hbq_init *lpfc_hbq_defs[];
/* SLI4 if_type 2 externs. */
@@ -471,7 +503,10 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
int lpfc_selective_reset(struct lpfc_hba *);
int lpfc_sli4_read_config(struct lpfc_hba *);
void lpfc_sli4_node_prep(struct lpfc_hba *);
-int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
+int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
@@ -496,3 +531,26 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
uint32_t *, uint32_t *);
int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
+
+/* NVME interfaces. */
+void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
+int lpfc_nvme_register_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp);
+int lpfc_nvme_create_localport(struct lpfc_vport *vport);
+void lpfc_nvme_destroy_localport(struct lpfc_vport *vport);
+void lpfc_nvme_update_localport(struct lpfc_vport *vport);
+int lpfc_nvmet_create_targetport(struct lpfc_hba *phba);
+int lpfc_nvmet_update_targetport(struct lpfc_hba *phba);
+void lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba);
+void lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb);
+void lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct rqb_dmabuf *nvmebuf, uint64_t isr_ts);
+void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
+void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
+ struct lpfc_iocbq *cmdiocb,
+ struct lpfc_wcqe_complete *abts_cmpl);
+extern int lpfc_enable_nvmet_cnt;
+extern unsigned long long lpfc_enable_nvmet[];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 4ac03b16d17f..c22bb3f887e1 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -40,8 +42,9 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_version.h"
@@ -453,8 +456,90 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
return NULL;
}
+static void
+lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
+{
+ struct lpfc_nodelist *ndlp;
+
+ if ((vport->port_type != LPFC_NPIV_PORT) ||
+ !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
+
+ ndlp = lpfc_setup_disc_node(vport, Did);
+
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "Parse GID_FTrsp: did:x%x flg:x%x x%x",
+ Did, ndlp->nlp_flag, vport->fc_flag);
+
+ /* By default, the driver expects to support FCP FC4 */
+ if (fc4_type == FC_TYPE_FCP)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+
+ if (fc4_type == FC_TYPE_NVME)
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0238 Process x%06x NameServer Rsp "
+ "Data: x%x x%x x%x x%x\n", Did,
+ ndlp->nlp_flag, ndlp->nlp_fc4_type,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ } else {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0239 Skip x%06x NameServer Rsp "
+ "Data: x%x x%x\n", Did,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+ } else {
+ if (!(vport->fc_flag & FC_RSCN_MODE) ||
+ lpfc_rscn_payload_check(vport, Did)) {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "Query GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+ /*
+ * This NPortID was previously a FCP target,
+ * Don't even bother to send GFF_ID.
+ */
+ ndlp = lpfc_findnode_did(vport, Did);
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ ndlp->nlp_fc4_type = fc4_type;
+
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
+ ndlp->nlp_fc4_type = fc4_type;
+
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ lpfc_setup_disc_node(vport, Did);
+
+ else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+ 0, Did) == 0)
+ vport->num_disc_nodes++;
+
+ else
+ lpfc_setup_disc_node(vport, Did);
+ }
+ } else {
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
+ Did, vport->fc_flag, vport->fc_rscn_id_cnt);
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "0245 Skip x%06x NameServer Rsp "
+ "Data: x%x x%x\n", Did,
+ vport->fc_flag,
+ vport->fc_rscn_id_cnt);
+ }
+ }
+}
+
static int
-lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
+lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
+ uint32_t Size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ct_request *Response =
@@ -499,97 +584,12 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint32_t Size)
*/
if ((Did != vport->fc_myDID) &&
((lpfc_find_vport_by_did(phba, Did) == NULL) ||
- vport->cfg_peer_port_login)) {
- if ((vport->port_type != LPFC_NPIV_PORT) ||
- (!(vport->ct_flags & FC_CT_RFF_ID)) ||
- (!vport->cfg_restrict_login)) {
- ndlp = lpfc_setup_disc_node(vport, Did);
- if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
- lpfc_debugfs_disc_trc(vport,
- LPFC_DISC_TRC_CT,
- "Parse GID_FTrsp: "
- "did:x%x flg:x%x x%x",
- Did, ndlp->nlp_flag,
- vport->fc_flag);
-
- lpfc_printf_vlog(vport,
- KERN_INFO,
- LOG_DISCOVERY,
- "0238 Process "
- "x%x NameServer Rsp"
- "Data: x%x x%x x%x\n",
- Did, ndlp->nlp_flag,
- vport->fc_flag,
- vport->fc_rscn_id_cnt);
- } else {
- lpfc_debugfs_disc_trc(vport,
- LPFC_DISC_TRC_CT,
- "Skip1 GID_FTrsp: "
- "did:x%x flg:x%x cnt:%d",
- Did, vport->fc_flag,
- vport->fc_rscn_id_cnt);
-
- lpfc_printf_vlog(vport,
- KERN_INFO,
- LOG_DISCOVERY,
- "0239 Skip x%x "
- "NameServer Rsp Data: "
- "x%x x%x\n",
- Did, vport->fc_flag,
- vport->fc_rscn_id_cnt);
- }
-
- } else {
- if (!(vport->fc_flag & FC_RSCN_MODE) ||
- (lpfc_rscn_payload_check(vport, Did))) {
- lpfc_debugfs_disc_trc(vport,
- LPFC_DISC_TRC_CT,
- "Query GID_FTrsp: "
- "did:x%x flg:x%x cnt:%d",
- Did, vport->fc_flag,
- vport->fc_rscn_id_cnt);
-
- /* This NPortID was previously
- * a FCP target, * Don't even
- * bother to send GFF_ID.
- */
- ndlp = lpfc_findnode_did(vport,
- Did);
- if (ndlp &&
- NLP_CHK_NODE_ACT(ndlp)
- && (ndlp->nlp_type &
- NLP_FCP_TARGET))
- lpfc_setup_disc_node
- (vport, Did);
- else if (lpfc_ns_cmd(vport,
- SLI_CTNS_GFF_ID,
- 0, Did) == 0)
- vport->num_disc_nodes++;
- else
- lpfc_setup_disc_node
- (vport, Did);
- }
- else {
- lpfc_debugfs_disc_trc(vport,
- LPFC_DISC_TRC_CT,
- "Skip2 GID_FTrsp: "
- "did:x%x flg:x%x cnt:%d",
- Did, vport->fc_flag,
- vport->fc_rscn_id_cnt);
-
- lpfc_printf_vlog(vport,
- KERN_INFO,
- LOG_DISCOVERY,
- "0245 Skip x%x "
- "NameServer Rsp Data: "
- "x%x x%x\n",
- Did, vport->fc_flag,
- vport->fc_rscn_id_cnt);
- }
- }
- }
+ vport->cfg_peer_port_login))
+ lpfc_prep_node_fc4type(vport, Did, fc4_type);
+
if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
goto nsout1;
+
Cnt -= sizeof(uint32_t);
}
ctptr = NULL;
@@ -609,16 +609,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
IOCB_t *irsp;
struct lpfc_dmabuf *outp;
+ struct lpfc_dmabuf *inp;
struct lpfc_sli_ct_request *CTrsp;
+ struct lpfc_sli_ct_request *CTreq;
struct lpfc_nodelist *ndlp;
- int rc;
+ int rc, type;
/* First save ndlp, before we overwrite it */
ndlp = cmdiocb->context_un.ndlp;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
-
+ inp = (struct lpfc_dmabuf *) cmdiocb->context1;
outp = (struct lpfc_dmabuf *) cmdiocb->context2;
irsp = &rspiocb->iocb;
@@ -656,9 +658,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
+ type = lpfc_get_gidft_type(vport, cmdiocb);
+ if (type == 0)
+ goto out;
+
/* CT command is being retried */
+ vport->gidft_inp--;
rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
- vport->fc_ns_retry, 0);
+ vport->fc_ns_retry, type);
if (rc == 0)
goto out;
}
@@ -670,13 +677,18 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp->ulpStatus, vport->fc_ns_retry);
} else {
/* Good status, continue checking */
+ CTreq = (struct lpfc_sli_ct_request *) inp->virt;
CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
if (CTrsp->CommandResponse.bits.CmdRsp ==
cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
- "0208 NameServer Rsp Data: x%x\n",
- vport->fc_flag);
- lpfc_ns_rsp(vport, outp,
+ "0208 NameServer Rsp Data: x%x x%x\n",
+ vport->fc_flag,
+ CTreq->un.gid.Fc4Type);
+
+ lpfc_ns_rsp(vport,
+ outp,
+ CTreq->un.gid.Fc4Type,
(uint32_t) (irsp->un.genreq64.bdl.bdeSize));
} else if (CTrsp->CommandResponse.bits.CmdRsp ==
be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) {
@@ -731,9 +743,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
(uint32_t) CTrsp->ReasonCode,
(uint32_t) CTrsp->Explanation);
}
+ vport->gidft_inp--;
}
/* Link up / RSCN discovery */
- if (vport->num_disc_nodes == 0) {
+ if ((vport->num_disc_nodes == 0) &&
+ (vport->gidft_inp == 0)) {
/*
* The driver has cycled through all Nports in the RSCN payload.
* Complete the handling by cleaning up and marking the
@@ -881,6 +895,60 @@ out:
return;
}
+static void
+lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_iocbq *rspiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ IOCB_t *irsp = &rspiocb->iocb;
+ struct lpfc_dmabuf *inp = (struct lpfc_dmabuf *)cmdiocb->context1;
+ struct lpfc_dmabuf *outp = (struct lpfc_dmabuf *)cmdiocb->context2;
+ struct lpfc_sli_ct_request *CTrsp;
+ int did;
+ struct lpfc_nodelist *ndlp;
+ uint32_t fc4_data_0, fc4_data_1;
+
+ did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
+ did = be32_to_cpu(did);
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
+ "GFT_ID cmpl: status:x%x/x%x did:x%x",
+ irsp->ulpStatus, irsp->un.ulpWord[4], did);
+
+ if (irsp->ulpStatus == IOSTAT_SUCCESS) {
+ /* Good status, continue checking */
+ CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
+ fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
+ fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "3062 DID x%06x GFT Wd0 x%08x Wd1 x%08x\n",
+ did, fc4_data_0, fc4_data_1);
+
+ ndlp = lpfc_findnode_did(vport, did);
+ if (ndlp) {
+ /* The bitmask value for FCP and NVME FCP types is
+ * the same because they are 32 bits distant from
+ * each other, in word0 and word1.
+ */
+ if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+ if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "3064 Setting ndlp %p, DID x%06x with "
+ "FC4 x%08x, Data: x%08x x%08x\n",
+ ndlp, did, ndlp->nlp_fc4_type,
+ FC_TYPE_FCP, FC_TYPE_NVME);
+ }
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ } else
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
+ "3065 GFT_ID failed x%08x\n", irsp->ulpStatus);
+
+ lpfc_ct_free_iocb(phba, cmdiocb);
+}
static void
lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
@@ -1071,31 +1139,27 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
return;
}
+/*
+ * Although the symbolic port name is thought to be an integer
+ * as of January 18, 2016, leave it as a string until more of
+ * the record state becomes defined.
+ */
int
lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
size_t size)
{
int n;
- uint8_t *wwn = vport->phba->wwpn;
- n = snprintf(symbol, size,
- "Emulex PPN-%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- wwn[0], wwn[1], wwn[2], wwn[3],
- wwn[4], wwn[5], wwn[6], wwn[7]);
-
- if (vport->port_type == LPFC_PHYSICAL_PORT)
- return n;
-
- if (n < size)
- n += snprintf(symbol + n, size - n, " VPort-%d", vport->vpi);
-
- if (n < size &&
- strlen(vport->fc_vport->symbolic_name))
- n += snprintf(symbol + n, size - n, " VName-%s",
- vport->fc_vport->symbolic_name);
+ /*
+ * Use the lpfc board number as the Symbolic Port
+ * Name object. NPIV is not in play so this integer
+ * value is sufficient and unique per FC-ID.
+ */
+ n = snprintf(symbol, size, "%d", vport->phba->brd_no);
return n;
}
+
int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
size_t size)
@@ -1106,24 +1170,26 @@ lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
n = snprintf(symbol, size, "Emulex %s", vport->phba->ModelName);
-
if (size < n)
return n;
- n += snprintf(symbol + n, size - n, " FV%s", fwrev);
+ n += snprintf(symbol + n, size - n, " FV%s", fwrev);
if (size < n)
return n;
- n += snprintf(symbol + n, size - n, " DV%s", lpfc_release_version);
+ n += snprintf(symbol + n, size - n, " DV%s.",
+ lpfc_release_version);
if (size < n)
return n;
- n += snprintf(symbol + n, size - n, " HN:%s", init_utsname()->nodename);
- /* Note :- OS name is "Linux" */
+ n += snprintf(symbol + n, size - n, " HN:%s.",
+ init_utsname()->nodename);
if (size < n)
return n;
- n += snprintf(symbol + n, size - n, " OS:%s", init_utsname()->sysname);
+ /* Note :- OS name is "Linux" */
+ n += snprintf(symbol + n, size - n, " OS:%s\n",
+ init_utsname()->sysname);
return n;
}
@@ -1148,6 +1214,27 @@ lpfc_find_map_node(struct lpfc_vport *vport)
}
/*
+ * This routine will return the FC4 Type associated with the CT
+ * GID_FT command.
+ */
+int
+lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_sli_ct_request *CtReq;
+ struct lpfc_dmabuf *mp;
+ uint32_t type;
+
+ mp = cmdiocb->context1;
+ if (mp == NULL)
+ return 0;
+ CtReq = (struct lpfc_sli_ct_request *)mp->virt;
+ type = (uint32_t)CtReq->un.gid.Fc4Type;
+ if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME))
+ return 0;
+ return type;
+}
+
+/*
* lpfc_ns_cmd
* Description:
* Issue Cmd to NameServer
@@ -1207,8 +1294,9 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
/* NameServer Req */
lpfc_printf_vlog(vport, KERN_INFO ,LOG_DISCOVERY,
- "0236 NameServer Req Data: x%x x%x x%x\n",
- cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt);
+ "0236 NameServer Req Data: x%x x%x x%x x%x\n",
+ cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
+ context);
bpl = (struct ulp_bde64 *) bmp->virt;
memset(bpl, 0, sizeof(struct ulp_bde64));
@@ -1219,6 +1307,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
bpl->tus.f.bdeSize = GID_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_GFF_ID)
bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
+ else if (cmdcode == SLI_CTNS_GFT_ID)
+ bpl->tus.f.bdeSize = GFT_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RFT_ID)
bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
else if (cmdcode == SLI_CTNS_RNN_ID)
@@ -1246,7 +1336,8 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
case SLI_CTNS_GID_FT:
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_GID_FT);
- CtReq->un.gid.Fc4Type = SLI_CTPT_FCP;
+ CtReq->un.gid.Fc4Type = context;
+
if (vport->port_state < LPFC_NS_QRY)
vport->port_state = LPFC_NS_QRY;
lpfc_set_disctmo(vport);
@@ -1261,12 +1352,32 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cmpl = lpfc_cmpl_ct_cmd_gff_id;
break;
+ case SLI_CTNS_GFT_ID:
+ CtReq->CommandResponse.bits.CmdRsp =
+ cpu_to_be16(SLI_CTNS_GFT_ID);
+ CtReq->un.gft.PortId = cpu_to_be32(context);
+ cmpl = lpfc_cmpl_ct_cmd_gft_id;
+ break;
+
case SLI_CTNS_RFT_ID:
vport->ct_flags &= ~FC_CT_RFT_ID;
CtReq->CommandResponse.bits.CmdRsp =
cpu_to_be16(SLI_CTNS_RFT_ID);
CtReq->un.rft.PortId = cpu_to_be32(vport->fc_myDID);
- CtReq->un.rft.fcpReg = 1;
+
+ /* Register FC4 FCP type if enabled. */
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+ CtReq->un.rft.fcpReg = 1;
+
+ /* Register NVME type if enabled. Defined LE and swapped.
+ * rsvd[0] is used as word1 because of the hard-coded
+ * word0 usage in the ct_request data structure.
+ */
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+ CtReq->un.rft.rsvd[0] = cpu_to_be32(0x00000100);
+
cmpl = lpfc_cmpl_ct_cmd_rft_id;
break;
@@ -1316,7 +1427,31 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
cpu_to_be16(SLI_CTNS_RFF_ID);
CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
CtReq->un.rff.fbits = FC4_FEATURE_INIT;
- CtReq->un.rff.type_code = FC_TYPE_FCP;
+
+ /* The driver always supports FC_TYPE_FCP. However, the
+ * caller can specify NVME (type x28) as well. But only
+ * if that FC4 type is supported.
+ */
+ if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
+ (context == FC_TYPE_NVME)) {
+ if ((vport == phba->pport) && phba->nvmet_support) {
+ CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
+ FC4_FEATURE_NVME_DISC);
+ lpfc_nvmet_update_targetport(phba);
+ } else {
+ lpfc_nvme_update_localport(vport);
+ }
+ CtReq->un.rff.type_code = context;
+
+ } else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
+ (context == FC_TYPE_FCP))
+ CtReq->un.rff.type_code = context;
+
+ else
+ goto ns_cmd_free_bmpvirt;
+
cmpl = lpfc_cmpl_ct_cmd_rff_id;
break;
}
@@ -1337,6 +1472,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
*/
lpfc_nlp_put(ndlp);
+ns_cmd_free_bmpvirt:
lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
kfree(bmp);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index caa7a7b0ec53..9f4798e9d938 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2007-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -34,6 +36,9 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -41,8 +46,10 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -99,6 +106,12 @@ module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
"Set debugfs slow ring trace depth");
+/* This MUST be a power of 2 */
+static int lpfc_debugfs_max_nvmeio_trc;
+module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444);
+MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc,
+ "Set debugfs NVME IO trace depth");
+
static int lpfc_debugfs_mask_disc_trc;
module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
@@ -484,20 +497,23 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
off += (8 * sizeof(uint32_t));
}
- for (i = 0; i < 4; i++) {
- pgpp = &phba->port_gp[i];
- pring = &psli->ring[i];
- len += snprintf(buf+len, size-len,
- "Ring %d: CMD GetInx:%d (Max:%d Next:%d "
- "Local:%d flg:x%x) RSP PutInx:%d Max:%d\n",
- i, pgpp->cmdGetInx, pring->sli.sli3.numCiocb,
- pring->sli.sli3.next_cmdidx,
- pring->sli.sli3.local_getidx,
- pring->flag, pgpp->rspPutInx,
- pring->sli.sli3.numRiocb);
- }
-
if (phba->sli_rev <= LPFC_SLI_REV3) {
+ for (i = 0; i < 4; i++) {
+ pgpp = &phba->port_gp[i];
+ pring = &psli->sli3_ring[i];
+ len += snprintf(buf+len, size-len,
+ "Ring %d: CMD GetInx:%d "
+ "(Max:%d Next:%d "
+ "Local:%d flg:x%x) "
+ "RSP PutInx:%d Max:%d\n",
+ i, pgpp->cmdGetInx,
+ pring->sli.sli3.numCiocb,
+ pring->sli.sli3.next_cmdidx,
+ pring->sli.sli3.local_getidx,
+ pring->flag, pgpp->rspPutInx,
+ pring->sli.sli3.numRiocb);
+ }
+
word0 = readl(phba->HAregaddr);
word1 = readl(phba->CAregaddr);
word2 = readl(phba->HSregaddr);
@@ -530,11 +546,18 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
int len = 0;
int cnt;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp;
- unsigned char *statep, *name;
+ unsigned char *statep;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport;
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct nvme_fc_remote_port *nrport;
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
+ len += snprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
spin_lock_irq(shost->host_lock);
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!cnt) {
@@ -574,36 +597,32 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
default:
statep = "UNKNOWN";
}
- len += snprintf(buf+len, size-len, "%s DID:x%06x ",
- statep, ndlp->nlp_DID);
- name = (unsigned char *)&ndlp->nlp_portname;
- len += snprintf(buf+len, size-len,
- "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
- *name, *(name+1), *(name+2), *(name+3),
- *(name+4), *(name+5), *(name+6), *(name+7));
- name = (unsigned char *)&ndlp->nlp_nodename;
- len += snprintf(buf+len, size-len,
- "WWNN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x ",
- *name, *(name+1), *(name+2), *(name+3),
- *(name+4), *(name+5), *(name+6), *(name+7));
+ len += snprintf(buf+len, size-len, "%s DID:x%06x ",
+ statep, ndlp->nlp_DID);
+ len += snprintf(buf+len, size-len,
+ "WWPN x%llx ",
+ wwn_to_u64(ndlp->nlp_portname.u.wwn));
+ len += snprintf(buf+len, size-len,
+ "WWNN x%llx ",
+ wwn_to_u64(ndlp->nlp_nodename.u.wwn));
if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
- len += snprintf(buf+len, size-len, "RPI:%03d ",
- ndlp->nlp_rpi);
+ len += snprintf(buf+len, size-len, "RPI:%03d ",
+ ndlp->nlp_rpi);
else
- len += snprintf(buf+len, size-len, "RPI:none ");
+ len += snprintf(buf+len, size-len, "RPI:none ");
len += snprintf(buf+len, size-len, "flag:x%08x ",
ndlp->nlp_flag);
if (!ndlp->nlp_type)
- len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
+ len += snprintf(buf+len, size-len, "UNKNOWN_TYPE ");
if (ndlp->nlp_type & NLP_FC_NODE)
- len += snprintf(buf+len, size-len, "FC_NODE ");
+ len += snprintf(buf+len, size-len, "FC_NODE ");
if (ndlp->nlp_type & NLP_FABRIC)
- len += snprintf(buf+len, size-len, "FABRIC ");
+ len += snprintf(buf+len, size-len, "FABRIC ");
if (ndlp->nlp_type & NLP_FCP_TARGET)
- len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
+ len += snprintf(buf+len, size-len, "FCP_TGT sid:%d ",
ndlp->nlp_sid);
if (ndlp->nlp_type & NLP_FCP_INITIATOR)
- len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+ len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
len += snprintf(buf+len, size-len, "usgmap:%x ",
ndlp->nlp_usg_map);
len += snprintf(buf+len, size-len, "refcnt:%x",
@@ -611,8 +630,592 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf+len, size-len, "\n");
}
spin_unlock_irq(shost->host_lock);
+
+ if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ len += snprintf(buf + len, size - len,
+ "\nNVME Targetport Entry ...\n");
+
+ /* Port state is only one of two values for now. */
+ if (phba->targetport->port_id)
+ statep = "REGISTERED";
+ else
+ statep = "INIT";
+ len += snprintf(buf + len, size - len,
+ "TGT WWNN x%llx WWPN x%llx State %s\n",
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ wwn_to_u64(vport->fc_portname.u.wwn),
+ statep);
+ len += snprintf(buf + len, size - len,
+ " Targetport DID x%06x\n",
+ phba->targetport->port_id);
+ goto out_exit;
+ }
+
+ len += snprintf(buf + len, size - len,
+ "\nNVME Lport/Rport Entries ...\n");
+
+ localport = vport->localport;
+ if (!localport)
+ goto out_exit;
+
+ spin_lock_irq(shost->host_lock);
+ lport = (struct lpfc_nvme_lport *)localport->private;
+
+ /* Port state is only one of two values for now. */
+ if (localport->port_id)
+ statep = "ONLINE";
+ else
+ statep = "UNKNOWN ";
+
+ len += snprintf(buf + len, size - len,
+ "Lport DID x%06x PortState %s\n",
+ localport->port_id, statep);
+
+ len += snprintf(buf + len, size - len, "\tRport List:\n");
+ list_for_each_entry(rport, &lport->rport_list, list) {
+ /* local short-hand pointer. */
+ nrport = rport->remoteport;
+
+ /* Port state is only one of two values for now. */
+ switch (nrport->port_state) {
+ case FC_OBJSTATE_ONLINE:
+ statep = "ONLINE";
+ break;
+ case FC_OBJSTATE_UNKNOWN:
+ statep = "UNKNOWN ";
+ break;
+ default:
+ statep = "UNSUPPORTED";
+ break;
+ }
+
+ /* Tab in to show lport ownership. */
+ len += snprintf(buf + len, size - len,
+ "\t%s Port ID:x%06x ",
+ statep, nrport->port_id);
+ len += snprintf(buf + len, size - len, "WWPN x%llx ",
+ nrport->port_name);
+ len += snprintf(buf + len, size - len, "WWNN x%llx ",
+ nrport->node_name);
+ switch (nrport->port_role) {
+ case FC_PORT_ROLE_NVME_INITIATOR:
+ len += snprintf(buf + len, size - len,
+ "NVME INITIATOR ");
+ break;
+ case FC_PORT_ROLE_NVME_TARGET:
+ len += snprintf(buf + len, size - len,
+ "NVME TARGET ");
+ break;
+ case FC_PORT_ROLE_NVME_DISCOVERY:
+ len += snprintf(buf + len, size - len,
+ "NVME DISCOVERY ");
+ break;
+ default:
+ len += snprintf(buf + len, size - len,
+ "UNKNOWN ROLE x%x",
+ nrport->port_role);
+ break;
+ }
+
+ /* Terminate the string. */
+ len += snprintf(buf + len, size - len, "\n");
+ }
+
+ spin_unlock_irq(shost->host_lock);
+ out_exit:
+ return len;
+}
+
+/**
+ * lpfc_debugfs_nvmestat_data - Dump NVME statistics to a buffer
+ * @vport: The vport to gather the NVME statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME initiator or target statistics associated
+ * with @vport.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nvmet_tgtport *tgtp;
+ int len = 0;
+
+ if (phba->nvmet_support) {
+ if (!phba->targetport)
+ return len;
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ len += snprintf(buf+len, size-len,
+ "\nNVME Targetport Statistics\n");
+
+ len += snprintf(buf+len, size-len,
+ "LS: Rcv %08x Drop %08x Abort %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_drop),
+ atomic_read(&tgtp->xmt_ls_abort));
+ if (atomic_read(&tgtp->rcv_ls_req_in) !=
+ atomic_read(&tgtp->rcv_ls_req_out)) {
+ len += snprintf(buf+len, size-len,
+ "Rcv LS: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_ls_req_in),
+ atomic_read(&tgtp->rcv_ls_req_out));
+ }
+
+ len += snprintf(buf+len, size-len,
+ "LS: Xmt %08x Drop %08x Cmpl %08x Err %08x\n",
+ atomic_read(&tgtp->xmt_ls_rsp),
+ atomic_read(&tgtp->xmt_ls_drop),
+ atomic_read(&tgtp->xmt_ls_rsp_cmpl),
+ atomic_read(&tgtp->xmt_ls_rsp_error));
+
+ len += snprintf(buf+len, size-len,
+ "FCP: Rcv %08x Drop %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_drop));
+
+ if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
+ atomic_read(&tgtp->rcv_fcp_cmd_out)) {
+ len += snprintf(buf+len, size-len,
+ "Rcv FCP: in %08x != out %08x\n",
+ atomic_read(&tgtp->rcv_fcp_cmd_in),
+ atomic_read(&tgtp->rcv_fcp_cmd_out));
+ }
+
+ len += snprintf(buf+len, size-len,
+ "FCP Rsp: read %08x readrsp %08x write %08x rsp %08x\n",
+ atomic_read(&tgtp->xmt_fcp_read),
+ atomic_read(&tgtp->xmt_fcp_read_rsp),
+ atomic_read(&tgtp->xmt_fcp_write),
+ atomic_read(&tgtp->xmt_fcp_rsp));
+
+ len += snprintf(buf+len, size-len,
+ "FCP Rsp: abort %08x drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_abort),
+ atomic_read(&tgtp->xmt_fcp_drop));
+
+ len += snprintf(buf+len, size-len,
+ "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
+ atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
+ atomic_read(&tgtp->xmt_fcp_rsp_error),
+ atomic_read(&tgtp->xmt_fcp_rsp_drop));
+
+ len += snprintf(buf+len, size-len,
+ "ABORT: Xmt %08x Err %08x Cmpl %08x",
+ atomic_read(&tgtp->xmt_abort_rsp),
+ atomic_read(&tgtp->xmt_abort_rsp_error),
+ atomic_read(&tgtp->xmt_abort_cmpl));
+
+ len += snprintf(buf+len, size-len, "\n");
+ } else {
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return len;
+
+ len += snprintf(buf + len, size - len,
+ "\nNVME Lport Statistics\n");
+
+ len += snprintf(buf + len, size - len,
+ "LS: Xmt %016llx Cmpl %016llx\n",
+ phba->fc4NvmeLsRequests,
+ phba->fc4NvmeLsCmpls);
+
+ len += snprintf(buf + len, size - len,
+ "FCP: Rd %016llx Wr %016llx IO %016llx\n",
+ phba->fc4NvmeInputRequests,
+ phba->fc4NvmeOutputRequests,
+ phba->fc4NvmeControlRequests);
+
+ len += snprintf(buf + len, size - len,
+ " Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+ }
+
return len;
}
+
+
+/**
+ * lpfc_debugfs_nvmektime_data - Dump NVME ktime statistics to a buffer
+ * @vport: The vport to gather the ktime statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME ktime (I/O latency segment) statistics
+ * associated with @vport.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int len = 0;
+
+ if (phba->nvmet_support == 0) {
+ /* NVME Initiator */
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "ktime %s: Total Samples: %lld\n",
+ (phba->ktime_on ? "Enabled" : "Disabled"),
+ phba->ktime_data_samples);
+ if (phba->ktime_data_samples == 0)
+ return len;
+
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "Segment 1: Last NVME Cmd cmpl "
+			"done -to- Start of next NVME cmd (in driver)\n");
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg1_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg1_min,
+ phba->ktime_seg1_max);
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "Segment 2: Driver start of NVME cmd "
+ "-to- Firmware WQ doorbell\n");
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg2_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg2_min,
+ phba->ktime_seg2_max);
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "Segment 3: Firmware WQ doorbell -to- "
+ "MSI-X ISR cmpl\n");
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg3_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg3_min,
+ phba->ktime_seg3_max);
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "Segment 4: MSI-X ISR cmpl -to- "
+ "NVME cmpl done\n");
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg4_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg4_min,
+ phba->ktime_seg4_max);
+ len += snprintf(
+ buf + len, PAGE_SIZE - len,
+ "Total IO avg time: %08lld\n",
+ div_u64(phba->ktime_seg1_total +
+ phba->ktime_seg2_total +
+ phba->ktime_seg3_total +
+ phba->ktime_seg4_total,
+ phba->ktime_data_samples));
+ return len;
+ }
+
+ /* NVME Target */
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "ktime %s: Total Samples: %lld %lld\n",
+ (phba->ktime_on ? "Enabled" : "Disabled"),
+ phba->ktime_data_samples,
+ phba->ktime_status_samples);
+ if (phba->ktime_data_samples == 0)
+ return len;
+
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 1: MSI-X ISR Rcv cmd -to- "
+ "cmd pass to NVME Layer\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg1_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg1_min,
+ phba->ktime_seg1_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 2: cmd pass to NVME Layer- "
+ "-to- Driver rcv cmd OP (action)\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg2_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg2_min,
+ phba->ktime_seg2_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 3: Driver rcv cmd OP -to- "
+ "Firmware WQ doorbell: cmd\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg3_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg3_min,
+ phba->ktime_seg3_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 4: Firmware WQ doorbell: cmd "
+ "-to- MSI-X ISR for cmd cmpl\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg4_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg4_min,
+ phba->ktime_seg4_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 5: MSI-X ISR for cmd cmpl "
+ "-to- NVME layer passed cmd done\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg5_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg5_min,
+ phba->ktime_seg5_max);
+
+ if (phba->ktime_status_samples == 0) {
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Total: cmd received by MSI-X ISR "
+ "-to- cmd completed on wire\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld "
+ "max %08lld\n",
+ div_u64(phba->ktime_seg10_total,
+ phba->ktime_data_samples),
+ phba->ktime_seg10_min,
+ phba->ktime_seg10_max);
+ return len;
+ }
+
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 6: NVME layer passed cmd done "
+ "-to- Driver rcv rsp status OP\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg6_total,
+ phba->ktime_status_samples),
+ phba->ktime_seg6_min,
+ phba->ktime_seg6_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 7: Driver rcv rsp status OP "
+ "-to- Firmware WQ doorbell: status\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg7_total,
+ phba->ktime_status_samples),
+ phba->ktime_seg7_min,
+ phba->ktime_seg7_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 8: Firmware WQ doorbell: status"
+ " -to- MSI-X ISR for status cmpl\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg8_total,
+ phba->ktime_status_samples),
+ phba->ktime_seg8_min,
+ phba->ktime_seg8_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Segment 9: MSI-X ISR for status cmpl "
+ "-to- NVME layer passed status done\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg9_total,
+ phba->ktime_status_samples),
+ phba->ktime_seg9_min,
+ phba->ktime_seg9_max);
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "Total: cmd received by MSI-X ISR -to- "
+ "cmd completed on wire\n");
+ len += snprintf(buf + len, PAGE_SIZE-len,
+ "avg:%08lld min:%08lld max %08lld\n",
+ div_u64(phba->ktime_seg10_total,
+ phba->ktime_status_samples),
+ phba->ktime_seg10_min,
+ phba->ktime_seg10_max);
+ return len;
+}
+
+/**
+ * lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer
+ * @phba: The phba to gather the NVME IO trace from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the NVME IO trace associated with @phba.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
+{
+ struct lpfc_debugfs_nvmeio_trc *dtp;
+ int i, state, index, skip;
+ int len = 0;
+
+ state = phba->nvmeio_trc_on;
+
+ index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) &
+ (phba->nvmeio_trc_size - 1);
+ skip = phba->nvmeio_trc_output_idx;
+
+ len += snprintf(buf + len, size - len,
+ "%s IO Trace %s: next_idx %d skip %d size %d\n",
+			(phba->nvmet_support ? "NVMET" : "NVME"),
+ (state ? "Enabled" : "Disabled"),
+ index, skip, phba->nvmeio_trc_size);
+
+ if (!phba->nvmeio_trc || state)
+ return len;
+
+	/* trace MUST be off to continue */
+
+ for (i = index; i < phba->nvmeio_trc_size; i++) {
+ if (skip) {
+ skip--;
+ continue;
+ }
+ dtp = phba->nvmeio_trc + i;
+ phba->nvmeio_trc_output_idx++;
+
+ if (!dtp->fmt)
+ continue;
+
+ len += snprintf(buf + len, size - len, dtp->fmt,
+ dtp->data1, dtp->data2, dtp->data3);
+
+ if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
+ phba->nvmeio_trc_output_idx = 0;
+ len += snprintf(buf + len, size - len,
+ "Trace Complete\n");
+ goto out;
+ }
+
+ if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += snprintf(buf + len, size - len,
+ "Trace Continue (%d of %d)\n",
+ phba->nvmeio_trc_output_idx,
+ phba->nvmeio_trc_size);
+ goto out;
+ }
+ }
+ for (i = 0; i < index; i++) {
+ if (skip) {
+ skip--;
+ continue;
+ }
+ dtp = phba->nvmeio_trc + i;
+ phba->nvmeio_trc_output_idx++;
+
+ if (!dtp->fmt)
+ continue;
+
+ len += snprintf(buf + len, size - len, dtp->fmt,
+ dtp->data1, dtp->data2, dtp->data3);
+
+ if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
+ phba->nvmeio_trc_output_idx = 0;
+ len += snprintf(buf + len, size - len,
+ "Trace Complete\n");
+ goto out;
+ }
+
+ if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += snprintf(buf + len, size - len,
+ "Trace Continue (%d of %d)\n",
+ phba->nvmeio_trc_output_idx,
+ phba->nvmeio_trc_size);
+ goto out;
+ }
+ }
+
+ len += snprintf(buf + len, size - len,
+ "Trace Done\n");
+out:
+ return len;
+}
+
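
Not part of the patch: the two loops above walk the trace ring oldest-entry-first, from the slot just past the current write index to the end of the array and then from the start back up to the write index. Stripped of the skip/continuation bookkeeping, the shape of that walk is roughly:

#include <stdio.h>

#define RING_SIZE 8	/* stands in for phba->nvmeio_trc_size */

static void dump_ring_oldest_first(const int *ring, unsigned int next_idx)
{
	unsigned int i;

	for (i = next_idx; i < RING_SIZE; i++)	/* older entries */
		printf("%d\n", ring[i]);
	for (i = 0; i < next_idx; i++)		/* newer entries, up to the write index */
		printf("%d\n", ring[i]);
}
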
+/**
+ * lpfc_debugfs_cpucheck_data - Dump per-CPU NVME I/O statistics to a buffer
+ * @vport: The vport to gather the statistics from.
+ * @buf: The buffer to dump log into.
+ * @size: The maximum amount of data to process.
+ *
+ * Description:
+ * This routine dumps the per-CPU NVME I/O check statistics associated
+ * with @vport.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into @buf and will
+ * not exceed @size.
+ **/
+static int
+lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int i;
+ int len = 0;
+ uint32_t tot_xmt = 0;
+ uint32_t tot_rcv = 0;
+ uint32_t tot_cmpl = 0;
+ uint32_t tot_ccmpl = 0;
+
+ if (phba->nvmet_support == 0) {
+ /* NVME Initiator */
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "CPUcheck %s\n",
+ (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
+ "Enabled" : "Disabled"));
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ if (i >= LPFC_CHECK_CPU_CNT)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%02d: xmit x%08x cmpl x%08x\n",
+ i, phba->cpucheck_xmt_io[i],
+ phba->cpucheck_cmpl_io[i]);
+ tot_xmt += phba->cpucheck_xmt_io[i];
+ tot_cmpl += phba->cpucheck_cmpl_io[i];
+ }
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "tot:xmit x%08x cmpl x%08x\n",
+ tot_xmt, tot_cmpl);
+ return len;
+ }
+
+ /* NVME Target */
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "CPUcheck %s ",
+ (phba->cpucheck_on & LPFC_CHECK_NVMET_IO ?
+ "IO Enabled - " : "IO Disabled - "));
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s\n",
+ (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
+ "Rcv Enabled\n" : "Rcv Disabled\n"));
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ if (i >= LPFC_CHECK_CPU_CNT)
+ break;
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%02d: xmit x%08x ccmpl x%08x "
+ "cmpl x%08x rcv x%08x\n",
+ i, phba->cpucheck_xmt_io[i],
+ phba->cpucheck_ccmpl_io[i],
+ phba->cpucheck_cmpl_io[i],
+ phba->cpucheck_rcv_io[i]);
+ tot_xmt += phba->cpucheck_xmt_io[i];
+ tot_rcv += phba->cpucheck_rcv_io[i];
+ tot_cmpl += phba->cpucheck_cmpl_io[i];
+ tot_ccmpl += phba->cpucheck_ccmpl_io[i];
+ }
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "tot:xmit x%08x ccmpl x%08x cmpl x%08x rcv x%08x\n",
+ tot_xmt, tot_ccmpl, tot_cmpl, tot_rcv);
+ return len;
+}
+
#endif
/**
@@ -697,6 +1300,40 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
return;
}
+/**
+ * lpfc_debugfs_nvme_trc - Store NVME/NVMET trace log
+ * @phba: The phba to associate this trace string with for retrieval.
+ * @fmt: Format string to be displayed when dumping the log.
+ * @data1: 1st data parameter to be applied to @fmt.
+ * @data2: 2nd data parameter to be applied to @fmt.
+ * @data3: 3rd data parameter to be applied to @fmt.
+ *
+ * Description:
+ * This routine is used by the driver code to add a debugfs log entry to the
+ * nvme trace buffer associated with @phba. @fmt, @data1, @data2, and
+ * @data3 are used like printf when displaying the log.
+ **/
+inline void
+lpfc_debugfs_nvme_trc(struct lpfc_hba *phba, char *fmt,
+ uint16_t data1, uint16_t data2, uint32_t data3)
+{
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_debugfs_nvmeio_trc *dtp;
+ int index;
+
+ if (!phba->nvmeio_trc_on || !phba->nvmeio_trc)
+ return;
+
+ index = atomic_inc_return(&phba->nvmeio_trc_cnt) &
+ (phba->nvmeio_trc_size - 1);
+ dtp = phba->nvmeio_trc + index;
+ dtp->fmt = fmt;
+ dtp->data1 = data1;
+ dtp->data2 = data2;
+ dtp->data3 = data3;
+#endif
+}
+
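
Not part of the patch: both lpfc_debugfs_nvme_trc() above and the dump routine derive the ring slot by masking the running counter with (size - 1), which is why the trace depth must be a power of two. The idiom in isolation, with an illustrative depth:

#include <stdint.h>

#define TRC_DEPTH 1024	/* must be a power of two */

static uint32_t trc_slot(uint32_t running_count)
{
	/* Equivalent to running_count % TRC_DEPTH only when TRC_DEPTH is 2^n */
	return running_count & (TRC_DEPTH - 1);
}
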
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
/**
* lpfc_debugfs_disc_trc_open - Open the discovery trace log
@@ -938,7 +1575,7 @@ lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- printk(KERN_ERR "9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
+ pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n",
__func__, _dump_buf_data);
debug->buffer = _dump_buf_data;
if (!debug->buffer) {
@@ -968,8 +1605,8 @@ lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- printk(KERN_ERR "9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
- __func__, _dump_buf_dif, file);
+ pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n",
+ __func__, _dump_buf_dif, file);
debug->buffer = _dump_buf_dif;
if (!debug->buffer) {
kfree(debug);
@@ -1229,6 +1866,422 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file)
return 0;
}
+
+static int
+lpfc_debugfs_nvmestat_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+	/* Allocate a buffer to hold the nvmestat snapshot */
+ debug->buffer = kmalloc(LPFC_NVMESTAT_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_nvmestat_data(vport, debug->buffer,
+ LPFC_NVMESTAT_SIZE);
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_nvmestat_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nvmet_tgtport *tgtp;
+ char mybuf[64];
+ char *pbuf;
+
+ if (!phba->targetport)
+ return -ENXIO;
+
+ if (nbytes > 64)
+ nbytes = 64;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+
+ if (copy_from_user(mybuf, buf, nbytes))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if ((strncmp(pbuf, "reset", strlen("reset")) == 0) ||
+ (strncmp(pbuf, "zero", strlen("zero")) == 0)) {
+ atomic_set(&tgtp->rcv_ls_req_in, 0);
+ atomic_set(&tgtp->rcv_ls_req_out, 0);
+ atomic_set(&tgtp->rcv_ls_req_drop, 0);
+ atomic_set(&tgtp->xmt_ls_abort, 0);
+ atomic_set(&tgtp->xmt_ls_rsp, 0);
+ atomic_set(&tgtp->xmt_ls_drop, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
+
+ atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
+ atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
+ atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_abort, 0);
+ atomic_set(&tgtp->xmt_fcp_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
+ atomic_set(&tgtp->xmt_fcp_read, 0);
+ atomic_set(&tgtp->xmt_fcp_write, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+
+ atomic_set(&tgtp->xmt_abort_rsp, 0);
+ atomic_set(&tgtp->xmt_abort_rsp_error, 0);
+ atomic_set(&tgtp->xmt_abort_cmpl, 0);
+ }
+ return nbytes;
+}
+
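
Not part of the patch: the write handler above accepts "reset" or "zero" only when a targetport exists, so initiator-side counters cannot be cleared this way. A user-space sketch that clears the target counters might look like the following; the debugfs path is illustrative and depends on the function/vport instance:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path; lpfc creates one debugfs directory per function/vport. */
	int fd = open("/sys/kernel/debug/lpfc/fn0/vport0/nvmestat", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "reset", 5) != 5) {
		close(fd);
		return 1;
	}
	return close(fd);
}
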
+static int
+lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+	/* Allocate a buffer to hold the ktime statistics snapshot */
+ debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer,
+ LPFC_NVMEKTIME_SIZE);
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+ struct lpfc_hba *phba = vport->phba;
+ char mybuf[64];
+ char *pbuf;
+
+ if (nbytes > 64)
+ nbytes = 64;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+
+ if (copy_from_user(mybuf, buf, nbytes))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+
+ if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+ phba->ktime_data_samples = 0;
+ phba->ktime_status_samples = 0;
+ phba->ktime_seg1_total = 0;
+ phba->ktime_seg1_max = 0;
+ phba->ktime_seg1_min = 0xffffffff;
+ phba->ktime_seg2_total = 0;
+ phba->ktime_seg2_max = 0;
+ phba->ktime_seg2_min = 0xffffffff;
+ phba->ktime_seg3_total = 0;
+ phba->ktime_seg3_max = 0;
+ phba->ktime_seg3_min = 0xffffffff;
+ phba->ktime_seg4_total = 0;
+ phba->ktime_seg4_max = 0;
+ phba->ktime_seg4_min = 0xffffffff;
+ phba->ktime_seg5_total = 0;
+ phba->ktime_seg5_max = 0;
+ phba->ktime_seg5_min = 0xffffffff;
+ phba->ktime_seg6_total = 0;
+ phba->ktime_seg6_max = 0;
+ phba->ktime_seg6_min = 0xffffffff;
+ phba->ktime_seg7_total = 0;
+ phba->ktime_seg7_max = 0;
+ phba->ktime_seg7_min = 0xffffffff;
+ phba->ktime_seg8_total = 0;
+ phba->ktime_seg8_max = 0;
+ phba->ktime_seg8_min = 0xffffffff;
+ phba->ktime_seg9_total = 0;
+ phba->ktime_seg9_max = 0;
+ phba->ktime_seg9_min = 0xffffffff;
+ phba->ktime_seg10_total = 0;
+ phba->ktime_seg10_max = 0;
+ phba->ktime_seg10_min = 0xffffffff;
+
+ phba->ktime_on = 1;
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "off",
+ sizeof("off") - 1) == 0)) {
+ phba->ktime_on = 0;
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "zero",
+ sizeof("zero") - 1) == 0)) {
+ phba->ktime_data_samples = 0;
+ phba->ktime_status_samples = 0;
+ phba->ktime_seg1_total = 0;
+ phba->ktime_seg1_max = 0;
+ phba->ktime_seg1_min = 0xffffffff;
+ phba->ktime_seg2_total = 0;
+ phba->ktime_seg2_max = 0;
+ phba->ktime_seg2_min = 0xffffffff;
+ phba->ktime_seg3_total = 0;
+ phba->ktime_seg3_max = 0;
+ phba->ktime_seg3_min = 0xffffffff;
+ phba->ktime_seg4_total = 0;
+ phba->ktime_seg4_max = 0;
+ phba->ktime_seg4_min = 0xffffffff;
+ phba->ktime_seg5_total = 0;
+ phba->ktime_seg5_max = 0;
+ phba->ktime_seg5_min = 0xffffffff;
+ phba->ktime_seg6_total = 0;
+ phba->ktime_seg6_max = 0;
+ phba->ktime_seg6_min = 0xffffffff;
+ phba->ktime_seg7_total = 0;
+ phba->ktime_seg7_max = 0;
+ phba->ktime_seg7_min = 0xffffffff;
+ phba->ktime_seg8_total = 0;
+ phba->ktime_seg8_max = 0;
+ phba->ktime_seg8_min = 0xffffffff;
+ phba->ktime_seg9_total = 0;
+ phba->ktime_seg9_max = 0;
+ phba->ktime_seg9_min = 0xffffffff;
+ phba->ktime_seg10_total = 0;
+ phba->ktime_seg10_max = 0;
+ phba->ktime_seg10_min = 0xffffffff;
+ return strlen(pbuf);
+ }
+ return -EINVAL;
+}
+
+static int
+lpfc_debugfs_nvmeio_trc_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_hba *phba = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+	/* Allocate a buffer to hold the NVME IO trace dump */
+ debug->buffer = kmalloc(LPFC_NVMEIO_TRC_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_nvmeio_trc_data(phba, debug->buffer,
+ LPFC_NVMEIO_TRC_SIZE);
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ int i;
+ unsigned long sz;
+ char mybuf[64];
+ char *pbuf;
+
+ if (nbytes > 64)
+ nbytes = 64;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+
+ if (copy_from_user(mybuf, buf, nbytes))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+
+ if ((strncmp(pbuf, "off", sizeof("off") - 1) == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0570 nvmeio_trc_off\n");
+ phba->nvmeio_trc_output_idx = 0;
+ phba->nvmeio_trc_on = 0;
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0571 nvmeio_trc_on\n");
+ phba->nvmeio_trc_output_idx = 0;
+ phba->nvmeio_trc_on = 1;
+ return strlen(pbuf);
+ }
+
+ /* We must be off to allocate the trace buffer */
+ if (phba->nvmeio_trc_on != 0)
+ return -EINVAL;
+
+ /* If not on or off, the parameter is the trace buffer size */
+ i = kstrtoul(pbuf, 0, &sz);
+ if (i)
+ return -EINVAL;
+ phba->nvmeio_trc_size = (uint32_t)sz;
+
+ /* It must be a power of 2 - round down */
+ i = 0;
+ while (sz > 1) {
+ sz = sz >> 1;
+ i++;
+ }
+ sz = (1 << i);
+ if (phba->nvmeio_trc_size != sz)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0572 nvmeio_trc_size changed to %ld\n",
+ sz);
+ phba->nvmeio_trc_size = (uint32_t)sz;
+
+ /* If one previously exists, free it */
+ kfree(phba->nvmeio_trc);
+
+ /* Allocate new trace buffer and initialize */
+ phba->nvmeio_trc = kmalloc((sizeof(struct lpfc_debugfs_nvmeio_trc) *
+ sz), GFP_KERNEL);
+ if (!phba->nvmeio_trc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0573 Cannot create debugfs "
+ "nvmeio_trc buffer\n");
+ return -ENOMEM;
+ }
+ memset(phba->nvmeio_trc, 0,
+ (sizeof(struct lpfc_debugfs_nvmeio_trc) * sz));
+ atomic_set(&phba->nvmeio_trc_cnt, 0);
+ phba->nvmeio_trc_on = 0;
+ phba->nvmeio_trc_output_idx = 0;
+
+ return strlen(pbuf);
+}
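
Not part of the patch: when the value written is neither "on" nor "off", the shift loop above rounds the requested trace size down to a power of two (for example, writing 1000 allocates 512 entries). The same computation in isolation, assuming a non-zero request:

static unsigned long round_down_pow2(unsigned long sz)
{
	int i = 0;

	while (sz > 1) {
		sz >>= 1;
		i++;
	}
	return 1UL << i;	/* round_down_pow2(1000) == 512 */
}
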
+
+static int
+lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
+{
+ struct lpfc_vport *vport = inode->i_private;
+ struct lpfc_debug *debug;
+ int rc = -ENOMEM;
+
+ debug = kmalloc(sizeof(*debug), GFP_KERNEL);
+ if (!debug)
+ goto out;
+
+	/* Allocate a buffer to hold the cpucheck statistics snapshot */
+ debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+ }
+
+ debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
+				LPFC_CPUCHECK_SIZE);
+
+ debug->i_private = inode->i_private;
+ file->private_data = debug;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static ssize_t
+lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct lpfc_debug *debug = file->private_data;
+ struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
+ struct lpfc_hba *phba = vport->phba;
+ char mybuf[64];
+ char *pbuf;
+ int i;
+
+ if (nbytes > 64)
+ nbytes = 64;
+
+ /* Protect copy from user */
+ if (!access_ok(VERIFY_READ, buf, nbytes))
+ return -EFAULT;
+
+ memset(mybuf, 0, sizeof(mybuf));
+
+ if (copy_from_user(mybuf, buf, nbytes))
+ return -EFAULT;
+ pbuf = &mybuf[0];
+
+ if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
+ if (phba->nvmet_support)
+ phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ else
+ phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "rcv",
+ sizeof("rcv") - 1) == 0)) {
+ if (phba->nvmet_support)
+ phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV;
+ else
+ return -EINVAL;
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "off",
+ sizeof("off") - 1) == 0)) {
+ phba->cpucheck_on = LPFC_CHECK_OFF;
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "zero",
+ sizeof("zero") - 1) == 0)) {
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ if (i >= LPFC_CHECK_CPU_CNT)
+ break;
+ phba->cpucheck_rcv_io[i] = 0;
+ phba->cpucheck_xmt_io[i] = 0;
+ phba->cpucheck_cmpl_io[i] = 0;
+ phba->cpucheck_ccmpl_io[i] = 0;
+ }
+ return strlen(pbuf);
+ }
+ return -EINVAL;
+}
+
/*
* ---------------------------------
* iDiag debugfs file access methods
@@ -1974,6 +3027,203 @@ error_out:
return -EINVAL;
}
+static int
+__lpfc_idiag_print_wq(struct lpfc_queue *qp, char *wqtype,
+ char *pbuffer, int len)
+{
+ if (!qp)
+ return len;
+
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\t\t%s WQ info: ", wqtype);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "AssocCQID[%04d]: WQ-STAT[oflow:x%x posted:x%llx]\n",
+ qp->assoc_qid, qp->q_cnt_1,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\t\tWQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+ len += snprintf(pbuffer + len,
+ LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+ return len;
+}
+
+static int
+lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer,
+ int *len, int max_cnt, int cq_id)
+{
+ struct lpfc_queue *qp;
+ int qidx;
+
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
+ qp = phba->sli4_hba.fcp_wq[qidx];
+ if (qp->assoc_qid != cq_id)
+ continue;
+ *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
+ if (*len >= max_cnt)
+ return 1;
+ }
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+ qp = phba->sli4_hba.nvme_wq[qidx];
+ if (qp->assoc_qid != cq_id)
+ continue;
+ *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len);
+ if (*len >= max_cnt)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+__lpfc_idiag_print_cq(struct lpfc_queue *qp, char *cqtype,
+ char *pbuffer, int len)
+{
+ if (!qp)
+ return len;
+
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\t%s CQ info: ", cqtype);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "AssocEQID[%02d]: CQ STAT[max:x%x relw:x%x "
+ "xabt:x%x wq:x%llx]\n",
+ qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\tCQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count,
+ qp->entry_size, qp->host_index,
+ qp->hba_index);
+
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+
+ return len;
+}
+
+static int
+__lpfc_idiag_print_rqpair(struct lpfc_queue *qp, struct lpfc_queue *datqp,
+ char *rqtype, char *pbuffer, int len)
+{
+ if (!qp || !datqp)
+ return len;
+
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\t\t%s RQ info: ", rqtype);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "AssocCQID[%02d]: RQ-STAT[nopost:x%x nobuf:x%x "
+ "trunc:x%x rcv:x%llx]\n",
+ qp->assoc_qid, qp->q_cnt_1, qp->q_cnt_2,
+ qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\t\tHQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+ qp->queue_id, qp->entry_count, qp->entry_size,
+ qp->host_index, qp->hba_index);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\t\tDQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]\n",
+ datqp->queue_id, datqp->entry_count,
+ datqp->entry_size, datqp->host_index,
+ datqp->hba_index);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+
+ return len;
+}
+
+static int
+lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer,
+ int *len, int max_cnt, int eqidx, int eq_id)
+{
+ struct lpfc_queue *qp;
+ int qidx, rc;
+
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
+ qp = phba->sli4_hba.fcp_cq[qidx];
+ if (qp->assoc_qid != eq_id)
+ continue;
+
+ *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ if (*len >= max_cnt)
+ return 1;
+
+ rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len,
+ max_cnt, qp->queue_id);
+ if (rc)
+ return 1;
+ }
+
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+ qp = phba->sli4_hba.nvme_cq[qidx];
+ if (qp->assoc_qid != eq_id)
+ continue;
+
+ *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ if (*len >= max_cnt)
+ return 1;
+
+ rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len,
+ max_cnt, qp->queue_id);
+ if (rc)
+ return 1;
+ }
+
+ if (eqidx < phba->cfg_nvmet_mrq) {
+ /* NVMET CQset */
+ qp = phba->sli4_hba.nvmet_cqset[eqidx];
+ *len = __lpfc_idiag_print_cq(qp, "NVMET CQset", pbuffer, *len);
+
+ /* Reset max counter */
+ qp->CQ_max_cqe = 0;
+
+ if (*len >= max_cnt)
+ return 1;
+
+ /* RQ header */
+ qp = phba->sli4_hba.nvmet_mrq_hdr[eqidx];
+ *len = __lpfc_idiag_print_rqpair(qp,
+ phba->sli4_hba.nvmet_mrq_data[eqidx],
+ "NVMET MRQ", pbuffer, *len);
+
+ if (*len >= max_cnt)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+__lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
+ char *pbuffer, int len)
+{
+ if (!qp)
+ return len;
+
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
+ "bs:x%x proc:x%llx]\n",
+ eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
+ (unsigned long long)qp->q_cnt_4);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "EQID[%02d], QE-CNT[%04d], QE-SIZE[%04d], "
+ "HOST-IDX[%04d], PORT-IDX[%04d]",
+ qp->queue_id, qp->entry_count, qp->entry_size,
+ qp->host_index, qp->hba_index);
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len, "\n");
+
+ return len;
+}
+
/**
* lpfc_idiag_queinfo_read - idiag debugfs read queue information
* @file: The file pointer to read from.
@@ -1984,6 +3234,9 @@ error_out:
* Description:
* This routine reads data from the @phba SLI4 PCI function queue information,
* and copies to user @buf.
+ * This routine only returns one EQ's worth of information. It remembers the
+ * last EQ read and jumps to the next EQ. Thus subsequent calls to queInfo
+ * will retrieve all EQs allocated for the phba.
*
* Returns:
* This function returns the amount of data that was read (this could be less
@@ -1995,19 +3248,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
- int len = 0;
char *pbuffer;
- int x, cnt;
- int max_cnt;
+ int max_cnt, rc, x, len = 0;
struct lpfc_queue *qp = NULL;
-
if (!debug->buffer)
debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL);
if (!debug->buffer)
return 0;
pbuffer = debug->buffer;
- max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 128;
+ max_cnt = LPFC_QUE_INFO_GET_BUF_SIZE - 256;
if (*ppos)
return 0;
@@ -2015,375 +3265,134 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
spin_lock_irq(&phba->hbalock);
/* Fast-path event queue */
- if (phba->sli4_hba.hba_eq && phba->cfg_fcp_io_channel) {
- cnt = phba->cfg_fcp_io_channel;
+ if (phba->sli4_hba.hba_eq && phba->io_channel_irqs) {
- for (x = 0; x < cnt; x++) {
+ x = phba->lpfc_idiag_last_eq;
+ if (phba->cfg_fof && (x >= phba->io_channel_irqs)) {
+ phba->lpfc_idiag_last_eq = 0;
+ goto fof;
+ }
+ phba->lpfc_idiag_last_eq++;
+ if (phba->lpfc_idiag_last_eq >= phba->io_channel_irqs)
+ if (phba->cfg_fof == 0)
+ phba->lpfc_idiag_last_eq = 0;
- /* Fast-path EQ */
- qp = phba->sli4_hba.hba_eq[x];
- if (!qp)
- goto proc_cq;
+ len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
+ "EQ %d out of %d HBA EQs\n",
+ x, phba->io_channel_irqs);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\nHBA EQ info: "
- "EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx]\n",
- qp->q_cnt_1, qp->q_cnt_2,
- qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
+ /* Fast-path EQ */
+ qp = phba->sli4_hba.hba_eq[x];
+ if (!qp)
+ goto out;
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "EQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id,
- qp->entry_count,
- qp->entry_size,
- qp->host_index,
- qp->hba_index);
-
-
- /* Reset max counter */
- qp->EQ_max_eqe = 0;
+ len = __lpfc_idiag_print_eq(qp, "HBA", pbuffer, len);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
-proc_cq:
- /* Fast-path FCP CQ */
- qp = phba->sli4_hba.fcp_cq[x];
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tFCP CQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocEQID[%02d]: "
- "CQ STAT[max:x%x relw:x%x "
- "xabt:x%x wq:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1, qp->q_cnt_2,
- qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tCQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id, qp->entry_count,
- qp->entry_size, qp->host_index,
- qp->hba_index);
+ /* Reset max counter */
+ qp->EQ_max_eqe = 0;
+ if (len >= max_cnt)
+ goto too_big;
- /* Reset max counter */
- qp->CQ_max_cqe = 0;
+ /* will dump both fcp and nvme cqs/wqs for the eq */
+ rc = lpfc_idiag_cqs_for_eq(phba, pbuffer, &len,
+ max_cnt, x, qp->queue_id);
+ if (rc)
+ goto too_big;
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
+ /* Only EQ 0 has slow path CQs configured */
+ if (x)
+ goto out;
- /* Fast-path FCP WQ */
- qp = phba->sli4_hba.fcp_wq[x];
+ /* Slow-path mailbox CQ */
+ qp = phba->sli4_hba.mbx_cq;
+ len = __lpfc_idiag_print_cq(qp, "MBX", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tFCP WQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocCQID[%02d]: "
- "WQ-STAT[oflow:x%x posted:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tWQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id,
- qp->entry_count,
- qp->entry_size,
- qp->host_index,
- qp->hba_index);
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
-
- if (x)
- continue;
-
- /* Only EQ 0 has slow path CQs configured */
-
- /* Slow-path mailbox CQ */
- qp = phba->sli4_hba.mbx_cq;
- if (qp) {
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tMBX CQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocEQID[%02d]: "
- "CQ-STAT[mbox:x%x relw:x%x "
- "xabt:x%x wq:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1, qp->q_cnt_2,
- qp->q_cnt_3,
- (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tCQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id, qp->entry_count,
- qp->entry_size, qp->host_index,
- qp->hba_index);
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
- }
+ /* Slow-path MBOX MQ */
+ qp = phba->sli4_hba.mbx_wq;
+ len = __lpfc_idiag_print_wq(qp, "MBX", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
- /* Slow-path MBOX MQ */
- qp = phba->sli4_hba.mbx_wq;
- if (qp) {
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tMBX MQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocCQID[%02d]:\n",
- phba->sli4_hba.mbx_wq->assoc_qid);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tWQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id, qp->entry_count,
- qp->entry_size, qp->host_index,
- qp->hba_index);
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
- }
+ /* Slow-path ELS response CQ */
+ qp = phba->sli4_hba.els_cq;
+ len = __lpfc_idiag_print_cq(qp, "ELS", pbuffer, len);
+ /* Reset max counter */
+ if (qp)
+ qp->CQ_max_cqe = 0;
+ if (len >= max_cnt)
+ goto too_big;
- /* Slow-path ELS response CQ */
- qp = phba->sli4_hba.els_cq;
- if (qp) {
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tELS CQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocEQID[%02d]: "
- "CQ-STAT[max:x%x relw:x%x "
- "xabt:x%x wq:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1, qp->q_cnt_2,
- qp->q_cnt_3,
- (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tCQID [%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id, qp->entry_count,
- qp->entry_size, qp->host_index,
- qp->hba_index);
-
- /* Reset max counter */
- qp->CQ_max_cqe = 0;
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
- }
+ /* Slow-path ELS WQ */
+ qp = phba->sli4_hba.els_wq;
+ len = __lpfc_idiag_print_wq(qp, "ELS", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
- /* Slow-path ELS WQ */
- qp = phba->sli4_hba.els_wq;
- if (qp) {
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tELS WQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocCQID[%02d]: "
- " WQ-STAT[oflow:x%x "
- "posted:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1,
- (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tWQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id, qp->entry_count,
- qp->entry_size, qp->host_index,
- qp->hba_index);
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
- }
+ /* Slow-path NVME LS response CQ */
+ qp = phba->sli4_hba.nvmels_cq;
+ len = __lpfc_idiag_print_cq(qp, "NVME LS",
+ pbuffer, len);
+ /* Reset max counter */
+ if (qp)
+ qp->CQ_max_cqe = 0;
+ if (len >= max_cnt)
+ goto too_big;
- if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
- /* Slow-path RQ header */
- qp = phba->sli4_hba.hdr_rq;
+ /* Slow-path NVME LS WQ */
+ qp = phba->sli4_hba.nvmels_wq;
+ len = __lpfc_idiag_print_wq(qp, "NVME LS",
+ pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tRQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocCQID[%02d]: "
- "RQ-STAT[nopost:x%x nobuf:x%x "
- "trunc:x%x rcv:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1, qp->q_cnt_2,
- qp->q_cnt_3,
- (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tHQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]\n",
- qp->queue_id,
- qp->entry_count,
- qp->entry_size,
- qp->host_index,
- qp->hba_index);
-
- /* Slow-path RQ data */
- qp = phba->sli4_hba.dat_rq;
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tDQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]\n",
- qp->queue_id,
- qp->entry_count,
- qp->entry_size,
- qp->host_index,
- qp->hba_index);
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- }
- }
+ qp = phba->sli4_hba.hdr_rq;
+ len = __lpfc_idiag_print_rqpair(qp, phba->sli4_hba.dat_rq,
+ "RQpair", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
+
+ goto out;
}
+fof:
if (phba->cfg_fof) {
/* FOF EQ */
qp = phba->sli4_hba.fof_eq;
- if (!qp)
- goto out;
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\nFOF EQ info: "
- "EQ-STAT[max:x%x noE:x%x "
- "bs:x%x proc:x%llx]\n",
- qp->q_cnt_1, qp->q_cnt_2,
- qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "EQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id,
- qp->entry_count,
- qp->entry_size,
- qp->host_index,
- qp->hba_index);
+ len = __lpfc_idiag_print_eq(qp, "FOF", pbuffer, len);
/* Reset max counter */
- qp->EQ_max_eqe = 0;
+ if (qp)
+ qp->EQ_max_eqe = 0;
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
if (len >= max_cnt)
goto too_big;
- }
-
- if (phba->cfg_fof) {
/* OAS CQ */
qp = phba->sli4_hba.oas_cq;
- if (qp) {
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tOAS CQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocEQID[%02d]: "
- "CQ STAT[max:x%x relw:x%x "
- "xabt:x%x wq:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1, qp->q_cnt_2,
- qp->q_cnt_3, (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\tCQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id, qp->entry_count,
- qp->entry_size, qp->host_index,
- qp->hba_index);
-
- /* Reset max counter */
+ len = __lpfc_idiag_print_cq(qp, "OAS", pbuffer, len);
+ /* Reset max counter */
+ if (qp)
qp->CQ_max_cqe = 0;
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
- }
+ if (len >= max_cnt)
+ goto too_big;
/* OAS WQ */
qp = phba->sli4_hba.oas_wq;
- if (qp) {
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tOAS WQ info: ");
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "AssocCQID[%02d]: "
- "WQ-STAT[oflow:x%x posted:x%llx]\n",
- qp->assoc_qid,
- qp->q_cnt_1, (unsigned long long)qp->q_cnt_4);
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len,
- "\t\tWQID[%02d], "
- "QE-CNT[%04d], QE-SIZE[%04d], "
- "HOST-IDX[%04d], PORT-IDX[%04d]",
- qp->queue_id,
- qp->entry_count,
- qp->entry_size,
- qp->host_index,
- qp->hba_index);
-
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
- if (len >= max_cnt)
- goto too_big;
- }
+ len = __lpfc_idiag_print_wq(qp, "OAS", pbuffer, len);
+ if (len >= max_cnt)
+ goto too_big;
}
-out:
+
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
too_big:
- len += snprintf(pbuffer+len,
- LPFC_QUE_INFO_GET_BUF_SIZE-len, "Truncated ...\n");
+ len += snprintf(pbuffer + len,
+ LPFC_QUE_INFO_GET_BUF_SIZE - len, "Truncated ...\n");
+out:
spin_unlock_irq(&phba->hbalock);
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
@@ -2559,7 +3568,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
uint32_t qidx, quetp, queid, index, count, offset, value;
uint32_t *pentry;
- struct lpfc_queue *pque;
+ struct lpfc_queue *pque, *qp;
int rc;
/* This is a user write operation */
@@ -2595,19 +3604,15 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
case LPFC_IDIAG_EQ:
/* HBA event queue */
if (phba->sli4_hba.hba_eq) {
- for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
- qidx++) {
- if (phba->sli4_hba.hba_eq[qidx] &&
- phba->sli4_hba.hba_eq[qidx]->queue_id ==
- queid) {
+ for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
+ qp = phba->sli4_hba.hba_eq[qidx];
+ if (qp && qp->queue_id == queid) {
/* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.hba_eq[qidx],
+ rc = lpfc_idiag_que_param_check(qp,
index, count);
if (rc)
goto error_out;
- idiag.ptr_private =
- phba->sli4_hba.hba_eq[qidx];
+ idiag.ptr_private = qp;
goto pass_check;
}
}
@@ -2637,24 +3642,62 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.els_cq;
goto pass_check;
}
+ /* NVME LS complete queue */
+ if (phba->sli4_hba.nvmels_cq &&
+ phba->sli4_hba.nvmels_cq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.nvmels_cq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.nvmels_cq;
+ goto pass_check;
+ }
/* FCP complete queue */
if (phba->sli4_hba.fcp_cq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+ qidx++) {
+ qp = phba->sli4_hba.fcp_cq[qidx];
+ if (qp && qp->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ qp, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = qp;
+ goto pass_check;
+ }
+ }
+ }
+ /* NVME complete queue */
+ if (phba->sli4_hba.nvme_cq) {
qidx = 0;
do {
- if (phba->sli4_hba.fcp_cq[qidx] &&
- phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+ if (phba->sli4_hba.nvme_cq[qidx] &&
+ phba->sli4_hba.nvme_cq[qidx]->queue_id ==
queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.fcp_cq[qidx],
+ phba->sli4_hba.nvme_cq[qidx],
index, count);
if (rc)
goto error_out;
idiag.ptr_private =
- phba->sli4_hba.fcp_cq[qidx];
+ phba->sli4_hba.nvme_cq[qidx];
goto pass_check;
}
- } while (++qidx < phba->cfg_fcp_io_channel);
+ } while (++qidx < phba->cfg_nvme_io_channel);
}
goto error_out;
break;
@@ -2684,22 +3727,77 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.els_wq;
goto pass_check;
}
+ /* NVME LS work queue */
+ if (phba->sli4_hba.nvmels_wq &&
+ phba->sli4_hba.nvmels_wq->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ phba->sli4_hba.nvmels_wq, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = phba->sli4_hba.nvmels_wq;
+ goto pass_check;
+ }
/* FCP work queue */
if (phba->sli4_hba.fcp_wq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
+ qidx++) {
+ qp = phba->sli4_hba.fcp_wq[qidx];
+ if (qp && qp->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ qp, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = qp;
+ goto pass_check;
+ }
+ }
+ }
+ /* NVME work queue */
+ if (phba->sli4_hba.nvme_wq) {
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
+ qidx++) {
+ qp = phba->sli4_hba.nvme_wq[qidx];
+ if (qp && qp->queue_id == queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
+ qp, index, count);
+ if (rc)
+ goto error_out;
+ idiag.ptr_private = qp;
+ goto pass_check;
+ }
+ }
+ }
+
+ /* NVME work queues */
+ if (phba->sli4_hba.nvme_wq) {
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel;
qidx++) {
- if (!phba->sli4_hba.fcp_wq[qidx])
+ if (!phba->sli4_hba.nvme_wq[qidx])
continue;
- if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+ if (phba->sli4_hba.nvme_wq[qidx]->queue_id ==
queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.fcp_wq[qidx],
+ phba->sli4_hba.nvme_wq[qidx],
index, count);
if (rc)
goto error_out;
idiag.ptr_private =
- phba->sli4_hba.fcp_wq[qidx];
+ phba->sli4_hba.nvme_wq[qidx];
goto pass_check;
}
}
@@ -3687,6 +4785,46 @@ static const struct file_operations lpfc_debugfs_op_dumpHostSlim = {
.release = lpfc_debugfs_release,
};
+#undef lpfc_debugfs_op_nvmestat
+static const struct file_operations lpfc_debugfs_op_nvmestat = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_nvmestat_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_nvmestat_write,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nvmektime
+static const struct file_operations lpfc_debugfs_op_nvmektime = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_nvmektime_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_nvmektime_write,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_nvmeio_trc
+static const struct file_operations lpfc_debugfs_op_nvmeio_trc = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_nvmeio_trc_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_nvmeio_trc_write,
+ .release = lpfc_debugfs_release,
+};
+
+#undef lpfc_debugfs_op_cpucheck
+static const struct file_operations lpfc_debugfs_op_cpucheck = {
+ .owner = THIS_MODULE,
+ .open = lpfc_debugfs_cpucheck_open,
+ .llseek = lpfc_debugfs_lseek,
+ .read = lpfc_debugfs_read,
+ .write = lpfc_debugfs_cpucheck_write,
+ .release = lpfc_debugfs_release,
+};
+
#undef lpfc_debugfs_op_dumpData
static const struct file_operations lpfc_debugfs_op_dumpData = {
.owner = THIS_MODULE,
@@ -3853,7 +4991,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
if ((mbox_tp == mbox_rd) && (dma_tp == dma_mbox)) {
if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_MBX) {
do_dump |= LPFC_BSG_DMP_MBX_RD_MBX;
- printk(KERN_ERR "\nRead mbox command (x%x), "
+ pr_err("\nRead mbox command (x%x), "
"nemb:0x%x, extbuf_cnt:%d:\n",
sta_tp, nemb_tp, ext_buf);
}
@@ -3861,7 +4999,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
if ((mbox_tp == mbox_rd) && (dma_tp == dma_ebuf)) {
if (*mbx_dump_map & LPFC_BSG_DMP_MBX_RD_BUF) {
do_dump |= LPFC_BSG_DMP_MBX_RD_BUF;
- printk(KERN_ERR "\nRead mbox buffer (x%x), "
+ pr_err("\nRead mbox buffer (x%x), "
"nemb:0x%x, extbuf_seq:%d:\n",
sta_tp, nemb_tp, ext_buf);
}
@@ -3869,7 +5007,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
if ((mbox_tp == mbox_wr) && (dma_tp == dma_mbox)) {
if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_MBX) {
do_dump |= LPFC_BSG_DMP_MBX_WR_MBX;
- printk(KERN_ERR "\nWrite mbox command (x%x), "
+ pr_err("\nWrite mbox command (x%x), "
"nemb:0x%x, extbuf_cnt:%d:\n",
sta_tp, nemb_tp, ext_buf);
}
@@ -3877,7 +5015,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
if ((mbox_tp == mbox_wr) && (dma_tp == dma_ebuf)) {
if (*mbx_dump_map & LPFC_BSG_DMP_MBX_WR_BUF) {
do_dump |= LPFC_BSG_DMP_MBX_WR_BUF;
- printk(KERN_ERR "\nWrite mbox buffer (x%x), "
+ pr_err("\nWrite mbox buffer (x%x), "
"nemb:0x%x, extbuf_seq:%d:\n",
sta_tp, nemb_tp, ext_buf);
}
@@ -3889,7 +5027,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
for (i = 0; i < *mbx_word_cnt; i++) {
if (!(i % 8)) {
if (i != 0)
- printk(KERN_ERR "%s\n", line_buf);
+ pr_err("%s\n", line_buf);
len = 0;
len += snprintf(line_buf+len,
LPFC_MBX_ACC_LBUF_SZ-len,
@@ -3900,7 +5038,7 @@ lpfc_idiag_mbxacc_dump_bsg_mbox(struct lpfc_hba *phba, enum nemb_type nemb_tp,
pword++;
}
if ((i - 1) % 8)
- printk(KERN_ERR "%s\n", line_buf);
+ pr_err("%s\n", line_buf);
(*mbx_dump_cnt)--;
}
@@ -3949,13 +5087,13 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
/* dump buffer content */
if (*mbx_dump_map & LPFC_MBX_DMP_MBX_WORD) {
- printk(KERN_ERR "Mailbox command:0x%x dump by word:\n",
+ pr_err("Mailbox command:0x%x dump by word:\n",
pmbox->mbxCommand);
pword = (uint32_t *)pmbox;
for (i = 0; i < *mbx_word_cnt; i++) {
if (!(i % 8)) {
if (i != 0)
- printk(KERN_ERR "%s\n", line_buf);
+ pr_err("%s\n", line_buf);
len = 0;
memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
len += snprintf(line_buf+len,
@@ -3968,17 +5106,17 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
pword++;
}
if ((i - 1) % 8)
- printk(KERN_ERR "%s\n", line_buf);
- printk(KERN_ERR "\n");
+ pr_err("%s\n", line_buf);
+ pr_err("\n");
}
if (*mbx_dump_map & LPFC_MBX_DMP_MBX_BYTE) {
- printk(KERN_ERR "Mailbox command:0x%x dump by byte:\n",
+ pr_err("Mailbox command:0x%x dump by byte:\n",
pmbox->mbxCommand);
pbyte = (uint8_t *)pmbox;
for (i = 0; i < *mbx_word_cnt; i++) {
if (!(i % 8)) {
if (i != 0)
- printk(KERN_ERR "%s\n", line_buf);
+ pr_err("%s\n", line_buf);
len = 0;
memset(line_buf, 0, LPFC_MBX_ACC_LBUF_SZ);
len += snprintf(line_buf+len,
@@ -3996,8 +5134,8 @@ lpfc_idiag_mbxacc_dump_issue_mbox(struct lpfc_hba *phba, MAILBOX_t *pmbox)
LPFC_MBX_ACC_LBUF_SZ-len, " ");
}
if ((i - 1) % 8)
- printk(KERN_ERR "%s\n", line_buf);
- printk(KERN_ERR "\n");
+ pr_err("%s\n", line_buf);
+ pr_err("\n");
}
(*mbx_dump_cnt)--;
@@ -4240,8 +5378,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
i++;
}
lpfc_debugfs_max_slow_ring_trc = (1 << i);
- printk(KERN_ERR
- "lpfc_debugfs_max_disc_trc changed to "
+ pr_err("lpfc_debugfs_max_disc_trc changed to "
"%d\n", lpfc_debugfs_max_disc_trc);
}
}
@@ -4273,6 +5410,61 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
(sizeof(struct lpfc_debugfs_trc) *
lpfc_debugfs_max_slow_ring_trc));
}
+
+ snprintf(name, sizeof(name), "nvmeio_trc");
+ phba->debug_nvmeio_trc =
+ debugfs_create_file(name, 0644,
+ phba->hba_debugfs_root,
+ phba, &lpfc_debugfs_op_nvmeio_trc);
+ if (!phba->debug_nvmeio_trc) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+				 "0574 Cannot create debugfs nvmeio_trc\n");
+ goto debug_failed;
+ }
+
+ atomic_set(&phba->nvmeio_trc_cnt, 0);
+ if (lpfc_debugfs_max_nvmeio_trc) {
+ num = lpfc_debugfs_max_nvmeio_trc - 1;
+		if (num & lpfc_debugfs_max_nvmeio_trc) {
+ /* Change to be a power of 2 */
+ num = lpfc_debugfs_max_nvmeio_trc;
+ i = 0;
+ while (num > 1) {
+ num = num >> 1;
+ i++;
+ }
+ lpfc_debugfs_max_nvmeio_trc = (1 << i);
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0575 lpfc_debugfs_max_nvmeio_trc "
+ "changed to %d\n",
+ lpfc_debugfs_max_nvmeio_trc);
+ }
+ phba->nvmeio_trc_size = lpfc_debugfs_max_nvmeio_trc;
+
+ /* Allocate trace buffer and initialize */
+ phba->nvmeio_trc = kmalloc(
+ (sizeof(struct lpfc_debugfs_nvmeio_trc) *
+ phba->nvmeio_trc_size), GFP_KERNEL);
+
+ if (!phba->nvmeio_trc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0576 Cannot create debugfs "
+ "nvmeio_trc buffer\n");
+ goto nvmeio_off;
+ }
+ memset(phba->nvmeio_trc, 0,
+ (sizeof(struct lpfc_debugfs_nvmeio_trc) *
+ phba->nvmeio_trc_size));
+ phba->nvmeio_trc_on = 1;
+ phba->nvmeio_trc_output_idx = 0;
+ } else {
+nvmeio_off:
+ phba->nvmeio_trc_size = 0;
+ phba->nvmeio_trc_on = 0;
+ phba->nvmeio_trc_output_idx = 0;
+ phba->nvmeio_trc = NULL;
+ }
}
snprintf(name, sizeof(name), "vport%d", vport->vpi);
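
The hunk above rounds lpfc_debugfs_max_nvmeio_trc down to a power of two so the trace index can wrap with a simple mask. A minimal standalone sketch of that rounding, assuming nothing beyond standard C (the helper name is illustrative, not part of the driver):

    /* Round a requested trace-entry count down to a power of two,
     * mirroring the shift loop used for lpfc_debugfs_max_nvmeio_trc.
     */
    static unsigned int lpfc_round_down_pow2(unsigned int req)
    {
            unsigned int num = req, i = 0;

            if (!req || !(req & (req - 1)))
                    return req;     /* zero or already a power of two */
            while (num > 1) {
                    num >>= 1;
                    i++;
            }
            return 1U << i;
    }

For example, a requested count of 1000 becomes 512, while 4096 is left unchanged.
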
@@ -4298,8 +5490,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
i++;
}
lpfc_debugfs_max_disc_trc = (1 << i);
- printk(KERN_ERR
- "lpfc_debugfs_max_disc_trc changed to %d\n",
+ pr_err("lpfc_debugfs_max_disc_trc changed to %d\n",
lpfc_debugfs_max_disc_trc);
}
}
@@ -4338,6 +5529,39 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
goto debug_failed;
}
+ snprintf(name, sizeof(name), "nvmestat");
+ vport->debug_nvmestat =
+ debugfs_create_file(name, 0644,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_nvmestat);
+ if (!vport->debug_nvmestat) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0811 Cannot create debugfs nvmestat\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "nvmektime");
+ vport->debug_nvmektime =
+ debugfs_create_file(name, 0644,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_nvmektime);
+ if (!vport->debug_nvmektime) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0815 Cannot create debugfs nvmektime\n");
+ goto debug_failed;
+ }
+
+ snprintf(name, sizeof(name), "cpucheck");
+ vport->debug_cpucheck =
+ debugfs_create_file(name, 0644,
+ vport->vport_debugfs_root,
+ vport, &lpfc_debugfs_op_cpucheck);
+ if (!vport->debug_cpucheck) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0819 Cannot create debugfs cpucheck\n");
+ goto debug_failed;
+ }
+
/*
* The following section is for additional directories/files for the
* physical port.
@@ -4502,140 +5726,126 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
kfree(vport->disc_trc);
vport->disc_trc = NULL;
}
- if (vport->debug_disc_trc) {
- debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
- vport->debug_disc_trc = NULL;
- }
- if (vport->debug_nodelist) {
- debugfs_remove(vport->debug_nodelist); /* nodelist */
- vport->debug_nodelist = NULL;
- }
+
+ debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
+ vport->debug_disc_trc = NULL;
+
+ debugfs_remove(vport->debug_nodelist); /* nodelist */
+ vport->debug_nodelist = NULL;
+
+ debugfs_remove(vport->debug_nvmestat); /* nvmestat */
+ vport->debug_nvmestat = NULL;
+
+ debugfs_remove(vport->debug_nvmektime); /* nvmektime */
+ vport->debug_nvmektime = NULL;
+
+ debugfs_remove(vport->debug_cpucheck); /* cpucheck */
+ vport->debug_cpucheck = NULL;
+
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
vport->vport_debugfs_root = NULL;
atomic_dec(&phba->debugfs_vport_count);
}
+
if (atomic_read(&phba->debugfs_vport_count) == 0) {
- if (phba->debug_hbqinfo) {
- debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
- phba->debug_hbqinfo = NULL;
- }
- if (phba->debug_dumpHBASlim) {
- debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
- phba->debug_dumpHBASlim = NULL;
- }
- if (phba->debug_dumpHostSlim) {
- debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
- phba->debug_dumpHostSlim = NULL;
- }
- if (phba->debug_dumpData) {
- debugfs_remove(phba->debug_dumpData); /* dumpData */
- phba->debug_dumpData = NULL;
- }
+ debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
+ phba->debug_hbqinfo = NULL;
- if (phba->debug_dumpDif) {
- debugfs_remove(phba->debug_dumpDif); /* dumpDif */
- phba->debug_dumpDif = NULL;
- }
- if (phba->debug_InjErrLBA) {
- debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
- phba->debug_InjErrLBA = NULL;
- }
- if (phba->debug_InjErrNPortID) { /* InjErrNPortID */
- debugfs_remove(phba->debug_InjErrNPortID);
- phba->debug_InjErrNPortID = NULL;
- }
- if (phba->debug_InjErrWWPN) {
- debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
- phba->debug_InjErrWWPN = NULL;
- }
- if (phba->debug_writeGuard) {
- debugfs_remove(phba->debug_writeGuard); /* writeGuard */
- phba->debug_writeGuard = NULL;
- }
- if (phba->debug_writeApp) {
- debugfs_remove(phba->debug_writeApp); /* writeApp */
- phba->debug_writeApp = NULL;
- }
- if (phba->debug_writeRef) {
- debugfs_remove(phba->debug_writeRef); /* writeRef */
- phba->debug_writeRef = NULL;
- }
- if (phba->debug_readGuard) {
- debugfs_remove(phba->debug_readGuard); /* readGuard */
- phba->debug_readGuard = NULL;
- }
- if (phba->debug_readApp) {
- debugfs_remove(phba->debug_readApp); /* readApp */
- phba->debug_readApp = NULL;
- }
- if (phba->debug_readRef) {
- debugfs_remove(phba->debug_readRef); /* readRef */
- phba->debug_readRef = NULL;
- }
+ debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
+ phba->debug_dumpHBASlim = NULL;
+
+ debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
+ phba->debug_dumpHostSlim = NULL;
+
+ debugfs_remove(phba->debug_dumpData); /* dumpData */
+ phba->debug_dumpData = NULL;
+
+ debugfs_remove(phba->debug_dumpDif); /* dumpDif */
+ phba->debug_dumpDif = NULL;
+
+ debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
+ phba->debug_InjErrLBA = NULL;
+
+ debugfs_remove(phba->debug_InjErrNPortID);
+ phba->debug_InjErrNPortID = NULL;
+
+ debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
+ phba->debug_InjErrWWPN = NULL;
+
+ debugfs_remove(phba->debug_writeGuard); /* writeGuard */
+ phba->debug_writeGuard = NULL;
+
+ debugfs_remove(phba->debug_writeApp); /* writeApp */
+ phba->debug_writeApp = NULL;
+
+ debugfs_remove(phba->debug_writeRef); /* writeRef */
+ phba->debug_writeRef = NULL;
+
+ debugfs_remove(phba->debug_readGuard); /* readGuard */
+ phba->debug_readGuard = NULL;
+
+ debugfs_remove(phba->debug_readApp); /* readApp */
+ phba->debug_readApp = NULL;
+
+ debugfs_remove(phba->debug_readRef); /* readRef */
+ phba->debug_readRef = NULL;
if (phba->slow_ring_trc) {
kfree(phba->slow_ring_trc);
phba->slow_ring_trc = NULL;
}
- if (phba->debug_slow_ring_trc) {
- /* slow_ring_trace */
- debugfs_remove(phba->debug_slow_ring_trc);
- phba->debug_slow_ring_trc = NULL;
- }
+
+ /* slow_ring_trace */
+ debugfs_remove(phba->debug_slow_ring_trc);
+ phba->debug_slow_ring_trc = NULL;
+
+ debugfs_remove(phba->debug_nvmeio_trc);
+ phba->debug_nvmeio_trc = NULL;
+
+ kfree(phba->nvmeio_trc);
+ phba->nvmeio_trc = NULL;
/*
* iDiag release
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
- if (phba->idiag_ext_acc) {
- /* iDiag extAcc */
- debugfs_remove(phba->idiag_ext_acc);
- phba->idiag_ext_acc = NULL;
- }
- if (phba->idiag_mbx_acc) {
- /* iDiag mbxAcc */
- debugfs_remove(phba->idiag_mbx_acc);
- phba->idiag_mbx_acc = NULL;
- }
- if (phba->idiag_ctl_acc) {
- /* iDiag ctlAcc */
- debugfs_remove(phba->idiag_ctl_acc);
- phba->idiag_ctl_acc = NULL;
- }
- if (phba->idiag_drb_acc) {
- /* iDiag drbAcc */
- debugfs_remove(phba->idiag_drb_acc);
- phba->idiag_drb_acc = NULL;
- }
- if (phba->idiag_que_acc) {
- /* iDiag queAcc */
- debugfs_remove(phba->idiag_que_acc);
- phba->idiag_que_acc = NULL;
- }
- if (phba->idiag_que_info) {
- /* iDiag queInfo */
- debugfs_remove(phba->idiag_que_info);
- phba->idiag_que_info = NULL;
- }
- if (phba->idiag_bar_acc) {
- /* iDiag barAcc */
- debugfs_remove(phba->idiag_bar_acc);
- phba->idiag_bar_acc = NULL;
- }
- if (phba->idiag_pci_cfg) {
- /* iDiag pciCfg */
- debugfs_remove(phba->idiag_pci_cfg);
- phba->idiag_pci_cfg = NULL;
- }
+ /* iDiag extAcc */
+ debugfs_remove(phba->idiag_ext_acc);
+ phba->idiag_ext_acc = NULL;
+
+ /* iDiag mbxAcc */
+ debugfs_remove(phba->idiag_mbx_acc);
+ phba->idiag_mbx_acc = NULL;
+
+ /* iDiag ctlAcc */
+ debugfs_remove(phba->idiag_ctl_acc);
+ phba->idiag_ctl_acc = NULL;
+
+ /* iDiag drbAcc */
+ debugfs_remove(phba->idiag_drb_acc);
+ phba->idiag_drb_acc = NULL;
+
+ /* iDiag queAcc */
+ debugfs_remove(phba->idiag_que_acc);
+ phba->idiag_que_acc = NULL;
+
+ /* iDiag queInfo */
+ debugfs_remove(phba->idiag_que_info);
+ phba->idiag_que_info = NULL;
+
+ /* iDiag barAcc */
+ debugfs_remove(phba->idiag_bar_acc);
+ phba->idiag_bar_acc = NULL;
+
+ /* iDiag pciCfg */
+ debugfs_remove(phba->idiag_pci_cfg);
+ phba->idiag_pci_cfg = NULL;
/* Finally remove the iDiag debugfs root */
- if (phba->idiag_root) {
- /* iDiag root */
- debugfs_remove(phba->idiag_root);
- phba->idiag_root = NULL;
- }
+ debugfs_remove(phba->idiag_root);
+ phba->idiag_root = NULL;
}
if (phba->hba_debugfs_root) {
@@ -4644,10 +5854,8 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
atomic_dec(&lpfc_debugfs_hba_count);
}
- if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
- debugfs_remove(lpfc_debugfs_root); /* lpfc */
- lpfc_debugfs_root = NULL;
- }
+ debugfs_remove(lpfc_debugfs_root); /* lpfc */
+ lpfc_debugfs_root = NULL;
}
#endif
return;
@@ -4668,31 +5876,39 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
void
lpfc_debug_dump_all_queues(struct lpfc_hba *phba)
{
- int fcp_wqidx;
+ int idx;
/*
* Dump Work Queues (WQs)
*/
- lpfc_debug_dump_mbx_wq(phba);
- lpfc_debug_dump_els_wq(phba);
+ lpfc_debug_dump_wq(phba, DUMP_MBX, 0);
+ lpfc_debug_dump_wq(phba, DUMP_ELS, 0);
+ lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0);
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
- lpfc_debug_dump_fcp_wq(phba, fcp_wqidx);
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+ lpfc_debug_dump_wq(phba, DUMP_FCP, idx);
+
+ for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+ lpfc_debug_dump_wq(phba, DUMP_NVME, idx);
lpfc_debug_dump_hdr_rq(phba);
lpfc_debug_dump_dat_rq(phba);
/*
* Dump Complete Queues (CQs)
*/
- lpfc_debug_dump_mbx_cq(phba);
- lpfc_debug_dump_els_cq(phba);
+ lpfc_debug_dump_cq(phba, DUMP_MBX, 0);
+ lpfc_debug_dump_cq(phba, DUMP_ELS, 0);
+ lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0);
+
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+ lpfc_debug_dump_cq(phba, DUMP_FCP, idx);
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
- lpfc_debug_dump_fcp_cq(phba, fcp_wqidx);
+ for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+ lpfc_debug_dump_cq(phba, DUMP_NVME, idx);
/*
* Dump Event Queues (EQs)
*/
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++)
- lpfc_debug_dump_hba_eq(phba, fcp_wqidx);
+ for (idx = 0; idx < phba->io_channel_irqs; idx++)
+ lpfc_debug_dump_hba_eq(phba, idx);
}
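
lpfc_debug_dump_all_queues() above now drives every queue type through the two generic helpers, passing a DUMP_* selector plus an index that is only meaningful for the per-io-channel FCP and NVME queues. A hedged usage sketch (phba is assumed to be a valid struct lpfc_hba * with its SLI4 queues set up):

    /* Illustrative calls; the index argument is ignored for the
     * single MBX, ELS and NVMELS queues.
     */
    lpfc_debug_dump_wq(phba, DUMP_NVME, 2);   /* NVME WQ on io channel 2 */
    lpfc_debug_dump_cq(phba, DUMP_ELS, 0);    /* the one ELS CQ */
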
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 8b2b6a3bfc25..c05f56c3023f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -42,6 +44,22 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
+enum {
+ DUMP_FCP,
+ DUMP_NVME,
+ DUMP_MBX,
+ DUMP_ELS,
+ DUMP_NVMELS,
+};
+
+/* nvmestat output buffer size */
+#define LPFC_NVMESTAT_SIZE 8192
+#define LPFC_NVMEKTIME_SIZE 8192
+#define LPFC_CPUCHECK_SIZE 8192
+#define LPFC_NVMEIO_TRC_SIZE 8192
+
+#define LPFC_DEBUG_OUT_LINE_SZ 80
+
/*
* For SLI4 iDiag debugfs diagnostics tool
*/
@@ -188,6 +206,12 @@
#define SIZE_U16 sizeof(uint16_t)
#define SIZE_U32 sizeof(uint32_t)
+#define lpfc_nvmeio_data(phba, fmt, arg...) \
+ { \
+ if (phba->nvmeio_trc_on) \
+ lpfc_debugfs_nvme_trc(phba, fmt, ##arg); \
+ }
+
struct lpfc_debug {
char *i_private;
char op;
@@ -206,6 +230,13 @@ struct lpfc_debugfs_trc {
unsigned long jif;
};
+struct lpfc_debugfs_nvmeio_trc {
+ char *fmt;
+ uint16_t data1;
+ uint16_t data2;
+ uint32_t data3;
+};
+
struct lpfc_idiag_offset {
uint32_t last_rd;
};
@@ -358,58 +389,111 @@ lpfc_debug_dump_q(struct lpfc_queue *q)
}
/**
- * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue
+ * lpfc_debug_dump_wq - dump all entries from the fcp or nvme work queue
* @phba: Pointer to HBA context object.
- * @fcp_wqidx: Index to a FCP work queue.
+ * @qtype: Queue type to dump (DUMP_FCP, DUMP_NVME, DUMP_MBX, DUMP_ELS, DUMP_NVMELS).
+ * @wqidx: Index to a FCP or NVME work queue.
*
- * This function dumps all entries from a FCP work queue specified by the
- * @fcp_wqidx.
+ * This function dumps all entries from a FCP or NVME work queue specified
+ * by the wqidx.
**/
static inline void
-lpfc_debug_dump_fcp_wq(struct lpfc_hba *phba, int fcp_wqidx)
+lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx)
{
- /* sanity check */
- if (fcp_wqidx >= phba->cfg_fcp_io_channel)
+ struct lpfc_queue *wq;
+ char *qtypestr;
+
+ if (qtype == DUMP_FCP) {
+ wq = phba->sli4_hba.fcp_wq[wqidx];
+ qtypestr = "FCP";
+ } else if (qtype == DUMP_NVME) {
+ wq = phba->sli4_hba.nvme_wq[wqidx];
+ qtypestr = "NVME";
+ } else if (qtype == DUMP_MBX) {
+ wq = phba->sli4_hba.mbx_wq;
+ qtypestr = "MBX";
+ } else if (qtype == DUMP_ELS) {
+ wq = phba->sli4_hba.els_wq;
+ qtypestr = "ELS";
+ } else if (qtype == DUMP_NVMELS) {
+ wq = phba->sli4_hba.nvmels_wq;
+ qtypestr = "NVMELS";
+ } else
return;
- printk(KERN_ERR "FCP WQ: WQ[Idx:%d|Qid:%d]\n",
- fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id);
- lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+ if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n",
+ qtypestr, wqidx, wq->queue_id);
+ else
+ pr_err("%s WQ: WQ[Qid:%d]\n",
+ qtypestr, wq->queue_id);
+
+ lpfc_debug_dump_q(wq);
}
/**
- * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue
+ * lpfc_debug_dump_cq - dump all entries from a fcp or nvme work queue's
+ * cmpl queue
* @phba: Pointer to HBA context object.
- * @fcp_wqidx: Index to a FCP work queue.
+ * @qtype: Queue type to dump (DUMP_FCP, DUMP_NVME, DUMP_MBX, DUMP_ELS, DUMP_NVMELS).
+ * @wqidx: Index to a FCP or NVME work queue.
*
- * This function dumps all entries from a FCP complete queue which is
- * associated to the FCP work queue specified by the @fcp_wqidx.
+ * This function dumps all entries from a FCP or NVME completion queue
+ * which is associated to the work queue specified by the @wqidx.
**/
static inline void
-lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
+lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx)
{
- int fcp_cqidx, fcp_cqid;
-
- /* sanity check */
- if (fcp_wqidx >= phba->cfg_fcp_io_channel)
+ struct lpfc_queue *wq, *cq, *eq;
+ char *qtypestr;
+ int eqidx;
+
+ /* fcp/nvme wq and cq are 1:1, thus same indexes */
+
+ if (qtype == DUMP_FCP) {
+ wq = phba->sli4_hba.fcp_wq[wqidx];
+ cq = phba->sli4_hba.fcp_cq[wqidx];
+ qtypestr = "FCP";
+ } else if (qtype == DUMP_NVME) {
+ wq = phba->sli4_hba.nvme_wq[wqidx];
+ cq = phba->sli4_hba.nvme_cq[wqidx];
+ qtypestr = "NVME";
+ } else if (qtype == DUMP_MBX) {
+ wq = phba->sli4_hba.mbx_wq;
+ cq = phba->sli4_hba.mbx_cq;
+ qtypestr = "MBX";
+ } else if (qtype == DUMP_ELS) {
+ wq = phba->sli4_hba.els_wq;
+ cq = phba->sli4_hba.els_cq;
+ qtypestr = "ELS";
+ } else if (qtype == DUMP_NVMELS) {
+ wq = phba->sli4_hba.nvmels_wq;
+ cq = phba->sli4_hba.nvmels_cq;
+ qtypestr = "NVMELS";
+ } else
return;
- fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
- if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
+ for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) {
+ eq = phba->sli4_hba.hba_eq[eqidx];
+ if (cq->assoc_qid == eq->queue_id)
break;
- if (phba->intr_type == MSIX) {
- if (fcp_cqidx >= phba->cfg_fcp_io_channel)
- return;
- } else {
- if (fcp_cqidx > 0)
- return;
+ }
+ if (eqidx == phba->io_channel_irqs) {
+ pr_err("Couldn't find EQ for CQ. Using EQ[0]\n");
+ eqidx = 0;
+ eq = phba->sli4_hba.hba_eq[0];
}
- printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
- fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
- fcp_cqidx, fcp_cqid);
- lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+ if (qtype == DUMP_FCP || qtype == DUMP_NVME)
+ pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]"
+ "->EQ[Idx:%d|Qid:%d]:\n",
+ qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id,
+ eqidx, eq->queue_id);
+ else
+ pr_err("%s CQ: WQ[Qid:%d]->CQ[Qid:%d]"
+ "->EQ[Idx:%d|Qid:%d]:\n",
+ qtypestr, wq->queue_id, cq->queue_id,
+ eqidx, eq->queue_id);
+
+ lpfc_debug_dump_q(cq);
}
/**
@@ -421,64 +505,15 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
* associated to the FCP work queue specified by the @fcp_wqidx.
**/
static inline void
-lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int fcp_wqidx)
+lpfc_debug_dump_hba_eq(struct lpfc_hba *phba, int qidx)
{
- struct lpfc_queue *qdesc;
- int fcp_eqidx, fcp_eqid;
- int fcp_cqidx, fcp_cqid;
+ struct lpfc_queue *qp;
- /* sanity check */
- if (fcp_wqidx >= phba->cfg_fcp_io_channel)
- return;
- fcp_cqid = phba->sli4_hba.fcp_wq[fcp_wqidx]->assoc_qid;
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++)
- if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
- break;
- if (phba->intr_type == MSIX) {
- if (fcp_cqidx >= phba->cfg_fcp_io_channel)
- return;
- } else {
- if (fcp_cqidx > 0)
- return;
- }
+ qp = phba->sli4_hba.hba_eq[qidx];
- fcp_eqidx = fcp_cqidx;
- fcp_eqid = phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id;
- qdesc = phba->sli4_hba.hba_eq[fcp_eqidx];
+ pr_err("EQ[Idx:%d|Qid:%d]\n", qidx, qp->queue_id);
- printk(KERN_ERR "FCP EQ: WQ[Idx:%d|Qid:%d]->CQ[Idx:%d|Qid:%d]->"
- "EQ[Idx:%d|Qid:%d]\n",
- fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
- fcp_cqidx, fcp_cqid, fcp_eqidx, fcp_eqid);
- lpfc_debug_dump_q(qdesc);
-}
-
-/**
- * lpfc_debug_dump_els_wq - dump all entries from the els work queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the ELS work queue.
- **/
-static inline void
-lpfc_debug_dump_els_wq(struct lpfc_hba *phba)
-{
- printk(KERN_ERR "ELS WQ: WQ[Qid:%d]:\n",
- phba->sli4_hba.els_wq->queue_id);
- lpfc_debug_dump_q(phba->sli4_hba.els_wq);
-}
-
-/**
- * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the MBOX work queue.
- **/
-static inline void
-lpfc_debug_dump_mbx_wq(struct lpfc_hba *phba)
-{
- printk(KERN_ERR "MBX WQ: WQ[Qid:%d]\n",
- phba->sli4_hba.mbx_wq->queue_id);
- lpfc_debug_dump_q(phba->sli4_hba.mbx_wq);
+ lpfc_debug_dump_q(qp);
}
/**
@@ -510,36 +545,6 @@ lpfc_debug_dump_hdr_rq(struct lpfc_hba *phba)
}
/**
- * lpfc_debug_dump_els_cq - dump all entries from the els complete queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the els complete queue.
- **/
-static inline void
-lpfc_debug_dump_els_cq(struct lpfc_hba *phba)
-{
- printk(KERN_ERR "ELS CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
- phba->sli4_hba.els_wq->queue_id,
- phba->sli4_hba.els_cq->queue_id);
- lpfc_debug_dump_q(phba->sli4_hba.els_cq);
-}
-
-/**
- * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
- * @phba: Pointer to HBA context object.
- *
- * This function dumps all entries from the mbox complete queue.
- **/
-static inline void
-lpfc_debug_dump_mbx_cq(struct lpfc_hba *phba)
-{
- printk(KERN_ERR "MBX CQ: WQ[Qid:%d]->CQ[Qid:%d]\n",
- phba->sli4_hba.mbx_wq->queue_id,
- phba->sli4_hba.mbx_cq->queue_id);
- lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
-}
-
-/**
* lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
* @phba: Pointer to HBA context object.
* @qid: Work queue identifier.
@@ -556,14 +561,29 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid)
if (phba->sli4_hba.fcp_wq[wq_idx]->queue_id == qid)
break;
if (wq_idx < phba->cfg_fcp_io_channel) {
- printk(KERN_ERR "FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+ pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_wq[wq_idx]);
return;
}
+ for (wq_idx = 0; wq_idx < phba->cfg_nvme_io_channel; wq_idx++)
+ if (phba->sli4_hba.nvme_wq[wq_idx]->queue_id == qid)
+ break;
+ if (wq_idx < phba->cfg_nvme_io_channel) {
+ pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.nvme_wq[wq_idx]);
+ return;
+ }
+
if (phba->sli4_hba.els_wq->queue_id == qid) {
- printk(KERN_ERR "ELS WQ[Qid:%d]\n", qid);
+ pr_err("ELS WQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.els_wq);
+ return;
+ }
+
+ if (phba->sli4_hba.nvmels_wq->queue_id == qid) {
+ pr_err("NVME LS WQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.nvmels_wq);
}
}
@@ -617,27 +637,42 @@ lpfc_debug_dump_rq_by_id(struct lpfc_hba *phba, int qid)
static inline void
lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid)
{
- int cq_idx = 0;
+ int cq_idx;
- do {
+ for (cq_idx = 0; cq_idx < phba->cfg_fcp_io_channel; cq_idx++)
if (phba->sli4_hba.fcp_cq[cq_idx]->queue_id == qid)
break;
- } while (++cq_idx < phba->cfg_fcp_io_channel);
if (cq_idx < phba->cfg_fcp_io_channel) {
- printk(KERN_ERR "FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+ pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.fcp_cq[cq_idx]);
return;
}
+ for (cq_idx = 0; cq_idx < phba->cfg_nvme_io_channel; cq_idx++)
+ if (phba->sli4_hba.nvme_cq[cq_idx]->queue_id == qid)
+ break;
+
+ if (cq_idx < phba->cfg_nvme_io_channel) {
+ pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
+ lpfc_debug_dump_q(phba->sli4_hba.nvme_cq[cq_idx]);
+ return;
+ }
+
if (phba->sli4_hba.els_cq->queue_id == qid) {
- printk(KERN_ERR "ELS CQ[Qid:%d]\n", qid);
+ pr_err("ELS CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.els_cq);
return;
}
+ if (phba->sli4_hba.nvmels_cq->queue_id == qid) {
+ pr_err("NVME LS CQ[Qid:%d]\n", qid);
+ lpfc_debug_dump_q(phba->sli4_hba.nvmels_cq);
+ return;
+ }
+
if (phba->sli4_hba.mbx_cq->queue_id == qid) {
- printk(KERN_ERR "MBX CQ[Qid:%d]\n", qid);
+ pr_err("MBX CQ[Qid:%d]\n", qid);
lpfc_debug_dump_q(phba->sli4_hba.mbx_cq);
}
}
@@ -655,17 +690,15 @@ lpfc_debug_dump_eq_by_id(struct lpfc_hba *phba, int qid)
{
int eq_idx;
- for (eq_idx = 0; eq_idx < phba->cfg_fcp_io_channel; eq_idx++) {
+ for (eq_idx = 0; eq_idx < phba->io_channel_irqs; eq_idx++)
if (phba->sli4_hba.hba_eq[eq_idx]->queue_id == qid)
break;
- }
- if (eq_idx < phba->cfg_fcp_io_channel) {
+ if (eq_idx < phba->io_channel_irqs) {
printk(KERN_ERR "FCP EQ[Idx:%d|Qid:%d]\n", eq_idx, qid);
lpfc_debug_dump_q(phba->sli4_hba.hba_eq[eq_idx]);
return;
}
-
}
void lpfc_debug_dump_all_queues(struct lpfc_hba *);
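
The lpfc_nvmeio_data() wrapper and struct lpfc_debugfs_nvmeio_trc above describe a lightweight trace ring: fixed-size records, a power-of-two entry count, and an atomic counter used as the write index. The body of lpfc_debugfs_nvme_trc() is not part of this hunk, so the following is only a hedged sketch of the pattern, not the driver's implementation:

    /* Illustrative only: log one entry into the nvmeio trace ring.
     * Assumes nvmeio_trc_size is a power of two, as enforced at init.
     */
    static void lpfc_nvmeio_trc_sketch(struct lpfc_hba *phba, char *fmt,
                                       uint16_t data1, uint16_t data2,
                                       uint32_t data3)
    {
            struct lpfc_debugfs_nvmeio_trc *tp;
            unsigned int idx;

            if (!phba->nvmeio_trc_on || !phba->nvmeio_trc)
                    return;
            idx = atomic_inc_return(&phba->nvmeio_trc_cnt) &
                  (phba->nvmeio_trc_size - 1);
            tp = &phba->nvmeio_trc[idx];
            tp->fmt = fmt;
            tp->data1 = data1;
            tp->data2 = data2;
            tp->data3 = data3;
    }
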
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 361f5b3d9d93..f4ff99d95db3 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -86,6 +88,17 @@ struct lpfc_nodelist {
#define NLP_FABRIC 0x4 /* entry rep a Fabric entity */
#define NLP_FCP_TARGET 0x8 /* entry is an FCP target */
#define NLP_FCP_INITIATOR 0x10 /* entry is an FCP Initiator */
+#define NLP_NVME_TARGET 0x20 /* entry is a NVME Target */
+#define NLP_NVME_INITIATOR 0x40 /* entry is a NVME Initiator */
+
+ uint16_t nlp_fc4_type; /* FC types node supports. */
+ /* Assigned from GID_FF, only
+ * FCP (0x8) and NVME (0x28)
+ * supported.
+ */
+#define NLP_FC4_NONE 0x0
+#define NLP_FC4_FCP 0x1 /* FC4 Type FCP (value x8) */
+#define NLP_FC4_NVME 0x2 /* FC4 TYPE NVME (value x28) */
uint16_t nlp_rpi;
uint16_t nlp_state; /* state transition indicator */
@@ -107,8 +120,8 @@ struct lpfc_nodelist {
struct timer_list nlp_delayfunc; /* Used for delayed ELS cmds */
struct lpfc_hba *phba;
- struct fc_rport *rport; /* Corresponding FC transport
- port structure */
+ struct fc_rport *rport; /* scsi_transport_fc port structure */
+ struct lpfc_nvme_rport *nrport; /* nvme transport rport struct. */
struct lpfc_vport *vport;
struct lpfc_work_evt els_retry_evt;
struct lpfc_work_evt dev_loss_evt;
@@ -118,6 +131,10 @@ struct lpfc_nodelist {
unsigned long last_change_time;
unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
+ uint32_t fc4_prli_sent;
+ uint32_t upcall_flags;
+ uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
+#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
};
struct lpfc_node_rrq {
struct list_head list;
@@ -133,6 +150,7 @@ struct lpfc_node_rrq {
/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
+#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */
#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */
#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */
#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */
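
The new nlp_fc4_type field carries driver-internal flags rather than the on-the-wire FC-4 TYPE codes noted in the comment above (0x08 for FCP, 0x28 for NVMe). A small hedged sketch of the translation a name-server response handler could apply; only the NLP_FC4_* values come from this header, the helper itself is illustrative:

    /* Illustrative translation from an FC-4 TYPE code to nlp_fc4_type flags. */
    static uint16_t lpfc_fc4_type_to_nlp(uint8_t fc4_type)
    {
            switch (fc4_type) {
            case 0x08:                      /* FCP */
                    return NLP_FC4_FCP;
            case 0x28:                      /* NVMe over FC */
                    return NLP_FC4_NVME;
            default:
                    return NLP_FC4_NONE;
            }
    }
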
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 3a1f1a2a2b55..2d26440e6f2f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -29,7 +31,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
-
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -1323,7 +1324,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
"0201 Abort outstanding I/O on NPort x%x\n",
Fabric_DID);
- pring = &phba->sli.ring[LPFC_ELS_RING];
+ pring = lpfc_phba_elsring(phba);
/*
* Check the txcmplq for an iocb that matches the nport the driver is
@@ -1513,7 +1514,7 @@ static struct lpfc_nodelist *
lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
struct lpfc_nodelist *ndlp)
{
- struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_vport *vport = ndlp->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_nodelist *new_ndlp;
struct lpfc_rport_data *rdata;
@@ -1868,10 +1869,12 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PLOGI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0102 PLOGI completes to NPort x%x "
+ "0102 PLOGI completes to NPort x%06x "
"Data: x%x x%x x%x x%x x%x\n",
- ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, disc, vport->num_disc_nodes);
+ ndlp->nlp_DID, ndlp->nlp_fc4_type,
+ irsp->ulpStatus, irsp->un.ulpWord[4],
+ disc, vport->num_disc_nodes);
+
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport)) {
spin_lock_irq(shost->host_lock);
@@ -2000,12 +2003,21 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
sp->cmn.fcphHigh = FC_PH3;
sp->cmn.valid_vendor_ver_level = 0;
- memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
+ memset(sp->un.vendorVersion, 0, sizeof(sp->un.vendorVersion));
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PLOGI: did:x%x",
did, 0, 0);
+ /* If our firmware supports this feature, convey that
+ * information to the target using the vendor specific field.
+ */
+ if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
+ sp->cmn.valid_vendor_ver_level = 1;
+ sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
+ sp->un.vv.flags = cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
+ }
+
phba->fc_stat.elsXmitPLOGI++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -2052,14 +2064,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"PRLI cmpl: status:x%x/x%x did:x%x",
irsp->ulpStatus, irsp->un.ulpWord[4],
ndlp->nlp_DID);
+
+	/* Driver supports multiple FC4 types. Counters matter. */
+ vport->fc_prli_sent--;
+
/* PRLI completes to NPort <nlp_DID> */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
- "0103 PRLI completes to NPort x%x "
+ "0103 PRLI completes to NPort x%06x "
"Data: x%x x%x x%x x%x\n",
ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
- irsp->ulpTimeout, vport->num_disc_nodes);
+ vport->num_disc_nodes, ndlp->fc4_prli_sent);
- vport->fc_prli_sent--;
/* Check to see if link went down during discovery */
if (lpfc_els_chk_latt(vport))
goto out;
@@ -2068,6 +2083,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
/* ELS command is being retried */
+ ndlp->fc4_prli_sent--;
goto out;
}
/* PRLI failed */
@@ -2082,9 +2098,14 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
} else
- /* Good status, call state machine */
+ /* Good status, call state machine. However, if another
+ * PRLI is outstanding, don't call the state machine
+ * because final disposition to Mapped or Unmapped is
+ * completed there.
+ */
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+
out:
lpfc_els_free_iocb(phba, cmdiocb);
return;
@@ -2118,42 +2139,100 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
+ struct lpfc_nvme_prli *npr_nvme;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
-
- cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+ u32 local_nlp_type, elscmd;
+
+ local_nlp_type = ndlp->nlp_fc4_type;
+
+ send_next_prli:
+ if (local_nlp_type & NLP_FC4_FCP) {
+		/* Payload is 4 + 16 = 20 (0x14) bytes. */
+ cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
+ elscmd = ELS_CMD_PRLI;
+ } else if (local_nlp_type & NLP_FC4_NVME) {
+		/* Payload is 4 + 20 = 24 (0x18) bytes. */
+ cmdsize = (sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli));
+ elscmd = ELS_CMD_NVMEPRLI;
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "3083 Unknown FC_TYPE x%x ndlp x%06x\n",
+ ndlp->nlp_fc4_type, ndlp->nlp_DID);
+ return 1;
+ }
elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
- ndlp->nlp_DID, ELS_CMD_PRLI);
+ ndlp->nlp_DID, elscmd);
if (!elsiocb)
return 1;
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
/* For PRLI request, remainder of payload is service parameters */
- memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
- *((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
- pcmd += sizeof(uint32_t);
+ memset(pcmd, 0, cmdsize);
- /* For PRLI, remainder of payload is PRLI parameter page */
- npr = (PRLI *) pcmd;
- /*
- * If our firmware version is 3.20 or later,
- * set the following bits for FC-TAPE support.
- */
- if (phba->vpd.rev.feaLevelHigh >= 0x02) {
- npr->ConfmComplAllowed = 1;
- npr->Retry = 1;
- npr->TaskRetryIdReq = 1;
- }
- npr->estabImagePair = 1;
- npr->readXferRdyDis = 1;
- if (vport->cfg_first_burst_size)
- npr->writeXferRdyDis = 1;
+ if (local_nlp_type & NLP_FC4_FCP) {
+ /* Remainder of payload is FCP PRLI parameter page.
+ * Note: this data structure is defined as
+ * BE/LE in the structure definition so no
+ * byte swap call is made.
+ */
+ *((uint32_t *)(pcmd)) = ELS_CMD_PRLI;
+ pcmd += sizeof(uint32_t);
+ npr = (PRLI *)pcmd;
- /* For FCP support */
- npr->prliType = PRLI_FCP_TYPE;
- npr->initiatorFunc = 1;
+ /*
+ * If our firmware version is 3.20 or later,
+ * set the following bits for FC-TAPE support.
+ */
+ if (phba->vpd.rev.feaLevelHigh >= 0x02) {
+ npr->ConfmComplAllowed = 1;
+ npr->Retry = 1;
+ npr->TaskRetryIdReq = 1;
+ }
+ npr->estabImagePair = 1;
+ npr->readXferRdyDis = 1;
+ if (vport->cfg_first_burst_size)
+ npr->writeXferRdyDis = 1;
+
+ /* For FCP support */
+ npr->prliType = PRLI_FCP_TYPE;
+ npr->initiatorFunc = 1;
+ elsiocb->iocb_flag |= LPFC_PRLI_FCP_REQ;
+
+ /* Remove FCP type - processed. */
+ local_nlp_type &= ~NLP_FC4_FCP;
+ } else if (local_nlp_type & NLP_FC4_NVME) {
+ /* Remainder of payload is NVME PRLI parameter page.
+ * This data structure is the newer definition that
+ * uses bf macros so a byte swap is required.
+ */
+ *((uint32_t *)(pcmd)) = ELS_CMD_NVMEPRLI;
+ pcmd += sizeof(uint32_t);
+ npr_nvme = (struct lpfc_nvme_prli *)pcmd;
+ bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
+ bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
+
+ /* Only initiators request first burst. */
+ if ((phba->cfg_nvme_enable_fb) &&
+ !phba->nvmet_support)
+ bf_set(prli_fba, npr_nvme, 1);
+
+ if (phba->nvmet_support) {
+ bf_set(prli_tgt, npr_nvme, 1);
+ bf_set(prli_disc, npr_nvme, 1);
+
+ } else {
+ bf_set(prli_init, npr_nvme, 1);
+ }
+ npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
+ npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
+ elsiocb->iocb_flag |= LPFC_PRLI_NVME_REQ;
+
+ /* Remove NVME type - processed. */
+ local_nlp_type &= ~NLP_FC4_NVME;
+ }
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue PRLI: did:x%x",
@@ -2172,7 +2251,20 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
+
+ /* The vport counters are used for lpfc_scan_finished, but
+ * the ndlp is used to track outstanding PRLIs for different
+ * FC4 types.
+ */
vport->fc_prli_sent++;
+ ndlp->fc4_prli_sent++;
+
+ /* The driver supports 2 FC4 types. Make sure
+ * a PRLI is issued for all types before exiting.
+ */
+ if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
+ goto send_next_prli;
+
return 0;
}
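
The send_next_prli flow above issues one PRLI per supported FC-4 type: ndlp->nlp_fc4_type is copied into local_nlp_type, each bit is cleared as its payload is built, and the function loops while any bit remains, so a port reporting both FCP and NVMe receives two back-to-back PRLIs. A compact hedged skeleton of that control flow (the helper name is illustrative and the iocb build/submit steps are elided):

    /* Illustrative control-flow skeleton of the dual-type PRLI loop. */
    static int lpfc_issue_prli_all_types(struct lpfc_vport *vport,
                                         struct lpfc_nodelist *ndlp)
    {
            u32 local_nlp_type = ndlp->nlp_fc4_type;

            while (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME)) {
                    if (local_nlp_type & NLP_FC4_FCP) {
                            /* build and send the FCP PRLI page here */
                            local_nlp_type &= ~NLP_FC4_FCP;
                    } else {
                            /* build and send the NVMe PRLI page here */
                            local_nlp_type &= ~NLP_FC4_NVME;
                    }
                    vport->fc_prli_sent++;  /* gates lpfc_scan_finished() */
                    ndlp->fc4_prli_sent++;  /* per-node outstanding PRLIs */
            }
            return 0;
    }
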
@@ -2543,6 +2635,15 @@ out:
if ((vport->fc_flag & FC_PT2PT) &&
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
phba->pport->fc_myDID = 0;
+
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if (phba->nvmet_support)
+ lpfc_nvmet_update_targetport(phba);
+ else
+ lpfc_nvme_update_localport(phba->pport);
+ }
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {
lpfc_config_link(phba, mbox);
@@ -3055,6 +3156,7 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
}
break;
case ELS_CMD_PRLI:
+ case ELS_CMD_NVMEPRLI:
if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
@@ -3245,7 +3347,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
if ((cmd == ELS_CMD_PLOGI) ||
- (cmd == ELS_CMD_PRLI)) {
+ (cmd == ELS_CMD_PRLI) ||
+ (cmd == ELS_CMD_NVMEPRLI)) {
delay = 1000;
maxretry = lpfc_max_els_tries + 1;
retry = 1;
@@ -3265,7 +3368,8 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case LSRJT_LOGICAL_BSY:
if ((cmd == ELS_CMD_PLOGI) ||
- (cmd == ELS_CMD_PRLI)) {
+ (cmd == ELS_CMD_PRLI) ||
+ (cmd == ELS_CMD_NVMEPRLI)) {
delay = 1000;
maxretry = 48;
} else if (cmd == ELS_CMD_FDISC) {
@@ -3399,7 +3503,8 @@ out_retry:
spin_unlock_irq(shost->host_lock);
ndlp->nlp_prev_state = ndlp->nlp_state;
- if (cmd == ELS_CMD_PRLI)
+ if ((cmd == ELS_CMD_PRLI) ||
+ (cmd == ELS_CMD_NVMEPRLI))
lpfc_nlp_set_state(vport, ndlp,
NLP_STE_PRLI_ISSUE);
else
@@ -3430,6 +3535,7 @@ out_retry:
lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
return 1;
case ELS_CMD_PRLI:
+ case ELS_CMD_NVMEPRLI:
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
@@ -3995,7 +4101,18 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
sizeof(struct serv_parm));
sp->cmn.valid_vendor_ver_level = 0;
- memset(sp->vendorVersion, 0, sizeof(sp->vendorVersion));
+ memset(sp->un.vendorVersion, 0,
+ sizeof(sp->un.vendorVersion));
+
+ /* If our firmware supports this feature, convey that
+ * info to the target using the vendor specific field.
+ */
+ if (phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) {
+ sp->cmn.valid_vendor_ver_level = 1;
+ sp->un.vv.vid = cpu_to_be32(LPFC_VV_EMLX_ID);
+ sp->un.vv.flags =
+ cpu_to_be32(LPFC_VV_SUPPRESS_RSP);
+ }
}
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
@@ -4231,17 +4348,43 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
{
struct lpfc_hba *phba = vport->phba;
PRLI *npr;
+ struct lpfc_nvme_prli *npr_nvme;
lpfc_vpd_t *vpd;
IOCB_t *icmd;
IOCB_t *oldcmd;
struct lpfc_iocbq *elsiocb;
uint8_t *pcmd;
uint16_t cmdsize;
+ uint32_t prli_fc4_req, *req_payload;
+ struct lpfc_dmabuf *req_buf;
int rc;
+ u32 elsrspcmd;
+
+ /* Need the incoming PRLI payload to determine if the ACC is for an
+	 * FCP or NVME PRLI type. The PRLI type is at word 1.
+ */
+ req_buf = (struct lpfc_dmabuf *)oldiocb->context2;
+ req_payload = (((uint32_t *)req_buf->virt) + 1);
+
+ /* PRLI type payload is at byte 3 for FCP or NVME. */
+ prli_fc4_req = be32_to_cpu(*req_payload);
+ prli_fc4_req = (prli_fc4_req >> 24) & 0xff;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6127 PRLI_ACC: Req Type x%x, Word1 x%08x\n",
+ prli_fc4_req, *((uint32_t *)req_payload));
+
+ if (prli_fc4_req == PRLI_FCP_TYPE) {
+ cmdsize = sizeof(uint32_t) + sizeof(PRLI);
+ elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
+ } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ cmdsize = sizeof(uint32_t) + sizeof(struct lpfc_nvme_prli);
+ elsrspcmd = (ELS_CMD_ACC | (ELS_CMD_NVMEPRLI & ~ELS_RSP_MASK));
+ } else {
+ return 1;
+ }
- cmdsize = sizeof(uint32_t) + sizeof(PRLI);
elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
- ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
+ ndlp->nlp_DID, elsrspcmd);
if (!elsiocb)
return 1;
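
lpfc_els_rsp_prli_acc() above sizes and types the ACC by peeking at word 1 of the received PRLI: after be32_to_cpu() the FC-4 TYPE code sits in the most significant byte, so the shift-and-mask yields PRLI_FCP_TYPE (0x08) or PRLI_NVME_TYPE (0x28, per the lpfc_disc.h comment). A minimal hedged sketch of just that extraction (the helper name is illustrative):

    /* Illustrative: pull the FC-4 TYPE code out of PRLI payload word 1. */
    static uint32_t lpfc_prli_fc4_type(const uint32_t *payload)
    {
            uint32_t word1 = be32_to_cpu(payload[1]);

            return (word1 >> 24) & 0xff;    /* 0x08 = FCP, 0x28 = NVMe */
    }
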
@@ -4258,33 +4401,71 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi);
pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+ memset(pcmd, 0, cmdsize);
*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
pcmd += sizeof(uint32_t);
/* For PRLI, remainder of payload is PRLI parameter page */
- memset(pcmd, 0, sizeof(PRLI));
-
- npr = (PRLI *) pcmd;
vpd = &phba->vpd;
- /*
- * If the remote port is a target and our firmware version is 3.20 or
- * later, set the following bits for FC-TAPE support.
- */
- if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
- (vpd->rev.feaLevelHigh >= 0x02)) {
- npr->ConfmComplAllowed = 1;
- npr->Retry = 1;
- npr->TaskRetryIdReq = 1;
- }
- npr->acceptRspCode = PRLI_REQ_EXECUTED;
- npr->estabImagePair = 1;
- npr->readXferRdyDis = 1;
- npr->ConfmComplAllowed = 1;
+ if (prli_fc4_req == PRLI_FCP_TYPE) {
+ /*
+ * If the remote port is a target and our firmware version
+ * is 3.20 or later, set the following bits for FC-TAPE
+ * support.
+ */
+ npr = (PRLI *) pcmd;
+ if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
+ (vpd->rev.feaLevelHigh >= 0x02)) {
+ npr->ConfmComplAllowed = 1;
+ npr->Retry = 1;
+ npr->TaskRetryIdReq = 1;
+ }
+ npr->acceptRspCode = PRLI_REQ_EXECUTED;
+ npr->estabImagePair = 1;
+ npr->readXferRdyDis = 1;
+ npr->ConfmComplAllowed = 1;
+ npr->prliType = PRLI_FCP_TYPE;
+ npr->initiatorFunc = 1;
+ } else if (prli_fc4_req & PRLI_NVME_TYPE) {
+ /* Respond with an NVME PRLI Type */
+ npr_nvme = (struct lpfc_nvme_prli *) pcmd;
+ bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE);
+ bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */
+ bf_set(prli_acc_rsp_code, npr_nvme, PRLI_REQ_EXECUTED);
+ if (phba->nvmet_support) {
+ bf_set(prli_tgt, npr_nvme, 1);
+ bf_set(prli_disc, npr_nvme, 1);
+ if (phba->cfg_nvme_enable_fb) {
+ bf_set(prli_fba, npr_nvme, 1);
+
+ /* TBD. Target mode needs to post buffers
+ * that support the configured first burst
+ * byte size.
+ */
+ bf_set(prli_fb_sz, npr_nvme,
+ phba->cfg_nvmet_fb_size);
+ }
+ } else {
+ bf_set(prli_init, npr_nvme, 1);
+ }
- npr->prliType = PRLI_FCP_TYPE;
- npr->initiatorFunc = 1;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6015 NVME issue PRLI ACC word1 x%08x "
+ "word4 x%08x word5 x%08x flag x%x, "
+ "fcp_info x%x nlp_type x%x\n",
+ npr_nvme->word1, npr_nvme->word4,
+ npr_nvme->word5, ndlp->nlp_flag,
+ ndlp->nlp_fcp_info, ndlp->nlp_type);
+ npr_nvme->word1 = cpu_to_be32(npr_nvme->word1);
+ npr_nvme->word4 = cpu_to_be32(npr_nvme->word4);
+ npr_nvme->word5 = cpu_to_be32(npr_nvme->word5);
+ } else
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "6128 Unknown FC_TYPE x%x x%x ndlp x%06x\n",
+ prli_fc4_req, ndlp->nlp_fc4_type,
+ ndlp->nlp_DID);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
"Issue ACC PRLI: did:x%x flg:x%x",
@@ -4411,7 +4592,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
**/
static void
lpfc_els_clear_rrq(struct lpfc_vport *vport,
- struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
+ struct lpfc_iocbq *iocb, struct lpfc_nodelist *ndlp)
{
struct lpfc_hba *phba = vport->phba;
uint8_t *pcmd;
@@ -4909,7 +5090,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
memcpy(desc->opd_info.vendor_name, &page_a0[SSF_VENDOR_NAME], 16);
memcpy(desc->opd_info.model_number, &page_a0[SSF_VENDOR_PN], 16);
memcpy(desc->opd_info.serial_number, &page_a0[SSF_VENDOR_SN], 16);
- memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 2);
+ memcpy(desc->opd_info.revision, &page_a0[SSF_VENDOR_REV], 4);
memcpy(desc->opd_info.date, &page_a0[SSF_DATE_CODE], 8);
desc->length = cpu_to_be32(sizeof(desc->opd_info));
return sizeof(struct fc_rdp_opd_sfp_desc);
@@ -5004,7 +5185,7 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
memcpy(desc->port_names.wwnn, phba->wwnn,
sizeof(desc->port_names.wwnn));
- memcpy(desc->port_names.wwpn, &phba->wwpn,
+ memcpy(desc->port_names.wwpn, phba->wwpn,
sizeof(desc->port_names.wwpn));
desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5233,9 +5414,8 @@ lpfc_els_rcv_rdp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
struct ls_rjt stat;
if (phba->sli_rev < LPFC_SLI_REV4 ||
- (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) !=
- LPFC_SLI_INTF_IF_TYPE_2)) {
+ bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) {
rjt_err = LSRJT_UNABLE_TPC;
rjt_expl = LSEXP_REQ_UNSUPPORTED;
goto error;
@@ -5687,6 +5867,8 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport)
(ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
!lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
continue;
+ if (vport->phba->nvmet_support)
+ continue;
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
lpfc_cancel_retry_delay_tmo(vport, ndlp);
@@ -5976,9 +6158,11 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport)
if (ndlp && NLP_CHK_NODE_ACT(ndlp)
&& ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
/* Good ndlp, issue CT Request to NameServer */
- if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
+ vport->gidft_inp = 0;
+ if (lpfc_issue_gidft(vport) == 0)
/* Wait for NameServer query cmpl before we can
- continue */
+ * continue
+ */
return 1;
} else {
/* If login to NameServer does not exist, issue one */
@@ -6082,7 +6266,6 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
(void) lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1);
-
/*
* If our portname is greater than the remote portname,
* then we initiate Nport login.
@@ -7155,7 +7338,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
timeout = (uint32_t)(phba->fc_ratov << 1);
- pring = &phba->sli.ring[LPFC_ELS_RING];
+ pring = lpfc_phba_elsring(phba);
+
if ((phba->pport->load_flag & FC_UNLOADING))
return;
spin_lock_irq(&phba->hbalock);
@@ -7224,7 +7408,7 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
}
- if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq))
+ if (!list_empty(&pring->txcmplq))
if (!(phba->pport->load_flag & FC_UNLOADING))
mod_timer(&vport->els_tmofunc,
jiffies + msecs_to_jiffies(1000 * timeout));
@@ -7255,7 +7439,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
LIST_HEAD(abort_list);
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
IOCB_t *cmd = NULL;
@@ -7267,6 +7451,7 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
* a working list and release the locks before calling the abort.
*/
spin_lock_irq(&phba->hbalock);
+ pring = lpfc_phba_elsring(phba);
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
@@ -7777,6 +7962,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_els_rcv_fan(vport, elsiocb, ndlp);
break;
case ELS_CMD_PRLI:
+ case ELS_CMD_NVMEPRLI:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV PRLI: did:x%x/ste:x%x flg:x%x",
did, vport->port_state, ndlp->nlp_flag);
@@ -8881,8 +9067,7 @@ lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
break;
}
- if (atomic_read(&phba->fabric_iocb_count) == 0)
- BUG();
+ BUG_ON(atomic_read(&phba->fabric_iocb_count) == 0);
cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
cmdiocb->fabric_iocb_cmpl = NULL;
@@ -8927,8 +9112,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
int ready;
int ret;
- if (atomic_read(&phba->fabric_iocb_count) > 1)
- BUG();
+ BUG_ON(atomic_read(&phba->fabric_iocb_count) > 1);
spin_lock_irqsave(&phba->hbalock, iflags);
ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
@@ -9013,7 +9197,9 @@ void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
LIST_HEAD(completions);
struct lpfc_hba *phba = ndlp->phba;
struct lpfc_iocbq *tmp_iocb, *piocb;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
+
+ pring = lpfc_phba_elsring(phba);
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
@@ -9069,13 +9255,13 @@ lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport)
unsigned long iflag = 0;
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport)
sglq_entry->ndlp = NULL;
}
- spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
@@ -9099,22 +9285,22 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
unsigned long iflag = 0;
struct lpfc_nodelist *ndlp;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
+
+ pring = lpfc_phba_elsring(phba);
spin_lock_irqsave(&phba->hbalock, iflag);
- spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry_safe(sglq_entry, sglq_next,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
if (sglq_entry->sli4_xritag == xri) {
list_del(&sglq_entry->list);
ndlp = sglq_entry->ndlp;
sglq_entry->ndlp = NULL;
- spin_lock(&pring->ring_lock);
list_add_tail(&sglq_entry->list,
- &phba->sli4_hba.lpfc_sgl_list);
+ &phba->sli4_hba.lpfc_els_sgl_list);
sglq_entry->state = SGL_FREED;
- spin_unlock(&pring->ring_lock);
- spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_set_rrq_active(phba, ndlp,
sglq_entry->sli4_lxritag,
@@ -9126,21 +9312,21 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
return;
}
}
- spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
lxri = lpfc_sli4_xri_inrange(phba, xri);
if (lxri == NO_XRI) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
- spin_lock(&pring->ring_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
sglq_entry = __lpfc_get_active_sglq(phba, lxri);
if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
- spin_unlock(&pring->ring_lock);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
sglq_entry->state = SGL_XRI_ABORTED;
- spin_unlock(&pring->ring_lock);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irqrestore(&phba->hbalock, iflag);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 82047070cdc9..194a14d5f8a9 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -31,6 +33,9 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -38,8 +43,9 @@
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -93,7 +99,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport)
if (ndlp->nlp_sid != NLP_NO_SID) {
lpfc_sli_abort_iocb(ndlp->vport,
- &phba->sli.ring[phba->sli.fcp_ring],
+ &phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
}
@@ -247,8 +253,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (ndlp->nlp_sid != NLP_NO_SID) {
/* flush the target */
lpfc_sli_abort_iocb(vport,
- &phba->sli.ring[phba->sli.fcp_ring],
- ndlp->nlp_sid, 0, LPFC_CTX_TGT);
+ &phba->sli.sli3_ring[LPFC_FCP_RING],
+ ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
put_node = rdata->pnode != NULL;
rdata->pnode = NULL;
@@ -283,7 +289,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
if (ndlp->nlp_sid != NLP_NO_SID) {
warn_on = 1;
- lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+ lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
}
@@ -495,11 +501,12 @@ lpfc_send_fastpath_evt(struct lpfc_hba *phba,
return;
}
- fc_host_post_vendor_event(shost,
- fc_get_event_number(),
- evt_data_size,
- evt_data,
- LPFC_NL_VENDOR_ID);
+ if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ fc_host_post_vendor_event(shost,
+ fc_get_event_number(),
+ evt_data_size,
+ evt_data,
+ LPFC_NL_VENDOR_ID);
lpfc_free_fast_evt(phba, fast_evt_data);
return;
@@ -682,7 +689,7 @@ lpfc_work_done(struct lpfc_hba *phba)
}
lpfc_destroy_vport_work_array(phba, vports);
- pring = &phba->sli.ring[LPFC_ELS_RING];
+ pring = lpfc_phba_elsring(phba);
status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
status >>= (4*LPFC_ELS_RING);
if ((status & HA_RXMASK) ||
@@ -852,9 +859,12 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
+ struct lpfc_hba *phba = vport->phba;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
- fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
+ if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ fc_host_post_event(shost, fc_get_event_number(),
+ FCH_EVT_LINKDOWN, 0);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Link Down: state:x%x rtry:x%x flg:x%x",
@@ -894,11 +904,22 @@ lpfc_linkdown(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
+ if (vports != NULL) {
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
/* Issue a LINK DOWN event to all nodes */
lpfc_linkdown_port(vports[i]);
+
+ vports[i]->fc_myDID = 0;
+
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if (phba->nvmet_support)
+ lpfc_nvmet_update_targetport(phba);
+ else
+ lpfc_nvme_update_localport(vports[i]);
+ }
}
+ }
lpfc_destroy_vport_work_array(phba, vports);
/* Clean up any firmware default rpi's */
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -914,7 +935,6 @@ lpfc_linkdown(struct lpfc_hba *phba)
/* Setup myDID for link up if we are in pt2pt mode */
if (phba->pport->fc_flag & FC_PT2PT) {
- phba->pport->fc_myDID = 0;
mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mb) {
lpfc_config_link(phba, mb);
@@ -929,7 +949,6 @@ lpfc_linkdown(struct lpfc_hba *phba)
phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
spin_unlock_irq(shost->host_lock);
}
-
return 0;
}
@@ -977,7 +996,9 @@ lpfc_linkup_port(struct lpfc_vport *vport)
(vport != phba->pport))
return;
- fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);
+ if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ fc_host_post_event(shost, fc_get_event_number(),
+ FCH_EVT_LINKUP, 0);
spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
@@ -1016,7 +1037,7 @@ lpfc_linkup(struct lpfc_hba *phba)
* This routine handles processing a CLEAR_LA mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
* as the completion routine when the command is
- * handed off to the SLI layer.
+ * handed off to the SLI layer. SLI3 only.
*/
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
@@ -1028,9 +1049,8 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
uint32_t control;
/* Since we don't do discovery right now, turn these off here */
- psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
/* Check for error */
if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
@@ -3277,7 +3297,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
* This routine handles processing a READ_TOPOLOGY mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
* as the completion routine when the command is
- * handed off to the SLI layer.
+ * handed off to the SLI layer. SLI4 only.
*/
void
lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
@@ -3285,11 +3305,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_mbx_read_top *la;
+ struct lpfc_sli_ring *pring;
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
/* Unblock ELS traffic */
- phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ pring = lpfc_phba_elsring(phba);
+ pring->flag &= ~LPFC_STOP_IOCB_EVENT;
+
/* Check for error */
if (mb->mbxStatus) {
lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
@@ -3458,6 +3481,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
spin_unlock_irq(shost->host_lock);
+
+ /*
+ * We cannot leave the RPI registered because
+ * if we go thru discovery again for this ndlp
+ * a subsequent REG_RPI will fail.
+ */
+ ndlp->nlp_flag |= NLP_RPI_REGISTERED;
+ lpfc_unreg_rpi(vport, ndlp);
}
/* Call state machine */
@@ -3556,6 +3587,14 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
spin_unlock_irq(shost->host_lock);
vport->fc_myDID = 0;
+
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if (phba->nvmet_support)
+ lpfc_nvmet_update_targetport(phba);
+ else
+ lpfc_nvme_update_localport(vport);
+ }
goto out;
}
@@ -3805,6 +3844,52 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
return;
}
+ /*
+ * This routine will issue a GID_FT for each FC4 Type supported
+ * by the driver. ALL GID_FTs must complete before discovery is started.
+ */
+int
+lpfc_issue_gidft(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+
+ /* Good status, issue CT Request to NameServer */
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
+ /* Cannot issue NameServer FCP Query, so finish up
+ * discovery
+ */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ "0604 %s FC TYPE %x %s\n",
+ "Failed to issue GID_FT to ",
+ FC_TYPE_FCP,
+ "Finishing discovery.");
+ return 0;
+ }
+ vport->gidft_inp++;
+ }
+
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
+ /* Cannot issue NameServer NVME Query, so finish up
+ * discovery
+ */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
+ "0605 %s FC_TYPE %x %s %d\n",
+ "Failed to issue GID_FT to ",
+ FC_TYPE_NVME,
+ "Finishing discovery: gidftinp ",
+ vport->gidft_inp);
+ if (vport->gidft_inp == 0)
+ return 0;
+ } else
+ vport->gidft_inp++;
+ }
+ return vport->gidft_inp;
+}
+
/*
* This routine handles processing a NameServer REG_LOGIN mailbox
* command upon completion. It is setup in the LPFC_MBOXQ
@@ -3821,12 +3906,14 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
pmb->context1 = NULL;
pmb->context2 = NULL;
+ vport->gidft_inp = 0;
if (mb->mbxStatus) {
-out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"0260 Register NameServer error: 0x%x\n",
mb->mbxStatus);
+
+out:
/* decrement the node reference count held for this
* callback function.
*/
@@ -3870,20 +3957,29 @@ out:
lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
- lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0);
+
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
+
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
+ lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
+ FC_TYPE_NVME);
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, SCR_DID, 0);
}
vport->fc_ns_retry = 0;
- /* Good status, issue CT Request to NameServer */
- if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
- /* Cannot issue NameServer Query, so finish up discovery */
+ if (lpfc_issue_gidft(vport) == 0)
goto out;
- }
- /* decrement the node reference count held for this
+ /*
+ * At this point in time we may need to wait for multiple
+ * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
+ *
+ * decrement the node reference count held for this
* callback function.
*/
lpfc_nlp_put(ndlp);
@@ -3903,6 +3999,9 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct fc_rport_identifiers rport_ids;
struct lpfc_hba *phba = vport->phba;
+ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+ return;
+
/* Remote port has reappeared. Re-register w/ FC transport */
rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
@@ -3972,12 +4071,17 @@ static void
lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
{
struct fc_rport *rport = ndlp->rport;
+ struct lpfc_vport *vport = ndlp->vport;
+ struct lpfc_hba *phba = vport->phba;
- lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
+ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
+ return;
+
+ lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
"rport delete: did:x%x flg:x%x type x%x",
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
- lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
"3184 rport unregister x%06x, rport %p\n",
ndlp->nlp_DID, rport);
@@ -4029,6 +4133,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
int old_state, int new_state)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
if (new_state == NLP_STE_UNMAPPED_NODE) {
ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
@@ -4039,23 +4144,56 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_NPR_NODE)
ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
- /* Transport interface */
- if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
- old_state == NLP_STE_UNMAPPED_NODE)) {
- vport->phba->nport_event_cnt++;
- lpfc_unregister_remote_port(ndlp);
+ /* FCP and NVME Transport interface */
+ if ((old_state == NLP_STE_MAPPED_NODE ||
+ old_state == NLP_STE_UNMAPPED_NODE)) {
+ if (ndlp->rport) {
+ vport->phba->nport_event_cnt++;
+ lpfc_unregister_remote_port(ndlp);
+ }
+
+ /* Notify the NVME transport of this rport's loss */
+ if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
+ (vport->phba->nvmet_support == 0) &&
+ ((ndlp->nlp_fc4_type & NLP_FC4_NVME) ||
+ (ndlp->nlp_DID == Fabric_DID))) {
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_unregister_port(vport, ndlp);
+ }
}
+ /* FCP and NVME Transport interfaces */
+
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- vport->phba->nport_event_cnt++;
- /*
- * Tell the fc transport about the port, if we haven't
- * already. If we have, and it's a scsi entity, be
- * sure to unblock any attached scsi devices
- */
- lpfc_register_remote_port(vport, ndlp);
+ if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
+ (ndlp->nlp_DID == Fabric_DID)) {
+ vport->phba->nport_event_cnt++;
+ /*
+ * Tell the fc transport about the port, if we haven't
+ * already. If we have, and it's a scsi entity, be
+ * sure to unblock any attached scsi devices
+ */
+ lpfc_register_remote_port(vport, ndlp);
+ }
+ /* Notify the NVME transport of this new rport. */
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ if (vport->phba->nvmet_support == 0) {
+ /* Register this rport with the transport.
+ * Initiators take the NDLP ref count in
+ * the register.
+ */
+ vport->phba->nport_event_cnt++;
+ lpfc_nvme_register_port(vport, ndlp);
+ } else {
+ /* Just take an NDLP ref count since the
+ * target does not register rports.
+ */
+ lpfc_nlp_get(ndlp);
+ }
+ }
}
+
if ((new_state == NLP_STE_MAPPED_NODE) &&
(vport->stat_data_enabled)) {
/*
@@ -4073,12 +4211,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"0x%x\n", ndlp->nlp_DID);
}
/*
- * if we added to Mapped list, but the remote port
- * registration failed or assigned a target id outside
- * our presentable range - move the node to the
- * Unmapped List
+ * If the node just added to Mapped list was an FCP target,
+ * but the remote port registration failed or assigned a target
+ * id outside the presentable range - move the node to the
+ * Unmapped List.
*/
- if (new_state == NLP_STE_MAPPED_NODE &&
+ if ((new_state == NLP_STE_MAPPED_NODE) &&
+ (ndlp->nlp_type & NLP_FCP_TARGET) &&
(!ndlp->rport ||
ndlp->rport->scsi_target_id == -1 ||
ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
@@ -4212,6 +4351,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->vport = vport;
ndlp->phba = vport->phba;
ndlp->nlp_sid = NLP_NO_SID;
+ ndlp->nlp_fc4_type = NLP_FC4_NONE;
kref_init(&ndlp->kref);
NLP_INT_NODE_ACT(ndlp);
atomic_set(&ndlp->cmd_pending, 0);
@@ -4394,7 +4534,6 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb,
struct lpfc_nodelist *ndlp)
{
- struct lpfc_sli *psli = &phba->sli;
IOCB_t *icmd = &iocb->iocb;
struct lpfc_vport *vport = ndlp->vport;
@@ -4413,9 +4552,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
if (iocb->context1 == (uint8_t *) ndlp)
return 1;
}
- } else if (pring->ringno == psli->extra_ring) {
-
- } else if (pring->ringno == psli->fcp_ring) {
+ } else if (pring->ringno == LPFC_FCP_RING) {
/* Skip match check if waiting to relogin to FCP target */
if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
(ndlp->nlp_flag & NLP_DELAY_TMO)) {
@@ -4424,12 +4561,58 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
return 1;
}
- } else if (pring->ringno == psli->next_ring) {
-
}
return 0;
}
+static void
+__lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
+ struct list_head *dequeue_list)
+{
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
+ /* Check to see if iocb matches the nport */
+ if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
+ /* match, dequeue */
+ list_move_tail(&iocb->list, dequeue_list);
+ }
+}
+
+static void
+lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
+{
+ struct lpfc_sli *psli = &phba->sli;
+ uint32_t i;
+
+ spin_lock_irq(&phba->hbalock);
+ for (i = 0; i < psli->num_rings; i++)
+ __lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
+ dequeue_list);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+static void
+lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
+{
+ struct lpfc_sli_ring *pring;
+ struct lpfc_queue *qp = NULL;
+
+ spin_lock_irq(&phba->hbalock);
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
+ continue;
+ spin_lock_irq(&pring->ring_lock);
+ __lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
+ spin_unlock_irq(&pring->ring_lock);
+ }
+ spin_unlock_irq(&phba->hbalock);
+}
+
/*
* Free resources / clean up outstanding I/Os
* associated with nlp_rpi in the LPFC_NODELIST entry.
@@ -4438,10 +4621,6 @@ static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
- struct lpfc_sli_ring *pring;
- struct lpfc_iocbq *iocb, *next_iocb;
- uint32_t i;
lpfc_fabric_abort_nport(ndlp);
@@ -4449,29 +4628,11 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
* Everything that matches on txcmplq will be returned
* by firmware with a no rpi error.
*/
- psli = &phba->sli;
if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
- /* Now process each ring */
- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
-
- spin_lock_irq(&phba->hbalock);
- list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
- list) {
- /*
- * Check to see if iocb matches the nport we are
- * looking for
- */
- if ((lpfc_check_sli_ndlp(phba, pring, iocb,
- ndlp))) {
- /* It matches, so deque and call compl
- with an error */
- list_move_tail(&iocb->list,
- &completions);
- }
- }
- spin_unlock_irq(&phba->hbalock);
- }
+ if (phba->sli_rev != LPFC_SLI_REV4)
+ lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
+ else
+ lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
}
/* Cancel all the IOCBs from the completions list */
@@ -4950,6 +5111,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
return NULL;
lpfc_nlp_init(vport, ndlp, did);
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ if (vport->phba->nvmet_support)
+ return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -4958,6 +5121,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
if (!ndlp)
return NULL;
+ if (vport->phba->nvmet_support)
+ return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -4977,6 +5142,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
* delay timeout is not needed.
*/
lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ if (vport->phba->nvmet_support)
+ return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -4992,6 +5159,8 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
ndlp->nlp_flag & NLP_RCV_PLOGI)
return NULL;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+ if (vport->phba->nvmet_support)
+ return ndlp;
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);
@@ -5040,14 +5209,14 @@ lpfc_disc_list_loopmap(struct lpfc_vport *vport)
return;
}
+/* SLI3 only */
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
LPFC_MBOXQ_t *mbox;
struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
- struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
- struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
+ struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
+ struct lpfc_sli_ring *fcp_ring = &psli->sli3_ring[LPFC_FCP_RING];
int rc;
/*
@@ -5071,7 +5240,6 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
lpfc_disc_flush_list(vport);
extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
- next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
phba->link_state = LPFC_HBA_ERROR;
}
}
@@ -5207,7 +5375,7 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
struct lpfc_sli_ring *pring;
psli = &phba->sli;
- pring = &psli->ring[LPFC_ELS_RING];
+ pring = lpfc_phba_elsring(phba);
/* Error matching iocb on txq or txcmplq
* First check the txq.
@@ -5331,12 +5499,13 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
switch (vport->port_state) {
case LPFC_LOCAL_CFG_LINK:
- /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
- * FAN
- */
- /* FAN timeout */
+ /*
+ * port_state is identically LPFC_LOCAL_CFG_LINK while
+ * waiting for FAN timeout
+ */
lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
"0221 FAN timeout\n");
+
/* Start discovery by sending FLOGI, clean up old rpis */
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
nlp_listp) {
@@ -5407,8 +5576,8 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport)
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
/* Try it one more time */
vport->fc_ns_retry++;
- rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
- vport->fc_ns_retry, 0);
+ vport->gidft_inp = 0;
+ rc = lpfc_issue_gidft(vport);
if (rc == 0)
break;
}
@@ -5523,12 +5692,14 @@ restart_disc:
if (clrlaerr) {
lpfc_disc_flush_list(vport);
- psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
- psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ psli->sli3_ring[LPFC_FCP_RING].flag &=
+ ~LPFC_STOP_IOCB_EVENT;
+ }
vport->port_state = LPFC_VPORT_READY;
}
-
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 3b970d370600..15ca21484150 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -44,8 +46,6 @@
#define LPFC_FCP_RING 0 /* ring 0 for FCP initiator commands */
#define LPFC_EXTRA_RING 1 /* ring 1 for other protocols */
#define LPFC_ELS_RING 2 /* ring 2 for ELS commands */
-#define LPFC_FCP_NEXT_RING 3
-#define LPFC_FCP_OAS_RING 3
#define SLI2_IOCB_CMD_R0_ENTRIES 172 /* SLI-2 FCP command ring entries */
#define SLI2_IOCB_RSP_R0_ENTRIES 134 /* SLI-2 FCP response ring entries */
@@ -92,8 +92,10 @@ union CtCommandResponse {
uint32_t word;
};
-#define FC4_FEATURE_INIT 0x2
-#define FC4_FEATURE_TARGET 0x1
+/* FC4 Feature bits for RFF_ID */
+#define FC4_FEATURE_TARGET 0x1
+#define FC4_FEATURE_INIT 0x2
+#define FC4_FEATURE_NVME_DISC 0x4
struct lpfc_sli_ct_request {
/* Structure is in Big Endian format */
@@ -117,6 +119,16 @@ struct lpfc_sli_ct_request {
uint8_t AreaScope;
uint8_t Fc4Type; /* for GID_FT requests */
} gid;
+ struct gid_ff {
+ uint8_t Flags;
+ uint8_t DomainScope;
+ uint8_t AreaScope;
+ uint8_t rsvd1;
+ uint8_t rsvd2;
+ uint8_t rsvd3;
+ uint8_t Fc4FBits;
+ uint8_t Fc4Type;
+ } gid_ff;
struct rft {
uint32_t PortId; /* For RFT_ID requests */
@@ -161,6 +173,12 @@ struct lpfc_sli_ct_request {
struct gff_acc {
uint8_t fbits[128];
} gff_acc;
+ struct gft {
+ uint32_t PortId;
+ } gft;
+ struct gft_acc {
+ uint32_t fc4_types[8];
+ } gft_acc;
#define FCP_TYPE_FEATURE_OFFSET 7
struct rff {
uint32_t PortId;
@@ -176,8 +194,12 @@ struct lpfc_sli_ct_request {
#define SLI_CT_REVISION 1
#define GID_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gid))
+#define GIDFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gid_ff))
#define GFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct gff))
+#define GFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
+ sizeof(struct gft))
#define RFT_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
sizeof(struct rft))
#define RFF_REQUEST_SZ (offsetof(struct lpfc_sli_ct_request, un) + \
@@ -273,6 +295,7 @@ struct lpfc_sli_ct_request {
#define SLI_CTNS_GNN_IP 0x0153
#define SLI_CTNS_GIPA_IP 0x0156
#define SLI_CTNS_GID_FT 0x0171
+#define SLI_CTNS_GID_FF 0x01F1
#define SLI_CTNS_GID_PT 0x01A1
#define SLI_CTNS_RPN_ID 0x0212
#define SLI_CTNS_RNN_ID 0x0213
@@ -290,15 +313,16 @@ struct lpfc_sli_ct_request {
* Port Types
*/
-#define SLI_CTPT_N_PORT 0x01
-#define SLI_CTPT_NL_PORT 0x02
-#define SLI_CTPT_FNL_PORT 0x03
-#define SLI_CTPT_IP 0x04
-#define SLI_CTPT_FCP 0x08
-#define SLI_CTPT_NX_PORT 0x7F
-#define SLI_CTPT_F_PORT 0x81
-#define SLI_CTPT_FL_PORT 0x82
-#define SLI_CTPT_E_PORT 0x84
+#define SLI_CTPT_N_PORT 0x01
+#define SLI_CTPT_NL_PORT 0x02
+#define SLI_CTPT_FNL_PORT 0x03
+#define SLI_CTPT_IP 0x04
+#define SLI_CTPT_FCP 0x08
+#define SLI_CTPT_NVME 0x28
+#define SLI_CTPT_NX_PORT 0x7F
+#define SLI_CTPT_F_PORT 0x81
+#define SLI_CTPT_FL_PORT 0x82
+#define SLI_CTPT_E_PORT 0x84
#define SLI_CT_LAST_ENTRY 0x80000000
@@ -339,6 +363,7 @@ struct lpfc_name {
uint8_t IEEE[6]; /* FC IEEE address */
} s;
uint8_t wwn[8];
+ uint64_t name;
} u;
};
@@ -492,7 +517,15 @@ struct serv_parm { /* Structure is in Big Endian format */
struct class_parms cls2;
struct class_parms cls3;
struct class_parms cls4;
- uint8_t vendorVersion[16];
+ union {
+ uint8_t vendorVersion[16];
+ struct {
+ uint32_t vid;
+#define LPFC_VV_EMLX_ID 0x454d4c58 /* EMLX */
+ uint32_t flags;
+#define LPFC_VV_SUPPRESS_RSP 1
+ } vv;
+ } un;
};
/*
@@ -551,6 +584,7 @@ struct fc_vft_header {
#define ELS_CMD_REC 0x13000000
#define ELS_CMD_RDP 0x18000000
#define ELS_CMD_PRLI 0x20100014
+#define ELS_CMD_NVMEPRLI 0x20140018
#define ELS_CMD_PRLO 0x21100014
#define ELS_CMD_PRLO_ACC 0x02100014
#define ELS_CMD_PDISC 0x50000000
@@ -590,6 +624,7 @@ struct fc_vft_header {
#define ELS_CMD_REC 0x13
#define ELS_CMD_RDP 0x18
#define ELS_CMD_PRLI 0x14001020
+#define ELS_CMD_NVMEPRLI 0x18001420
#define ELS_CMD_PRLO 0x14001021
#define ELS_CMD_PRLO_ACC 0x14001002
#define ELS_CMD_PDISC 0x50
@@ -686,6 +721,7 @@ typedef struct _PRLI { /* Structure is in Big Endian format */
uint8_t prliType; /* FC Parm Word 0, bit 24:31 */
#define PRLI_FCP_TYPE 0x08
+#define PRLI_NVME_TYPE 0x28
uint8_t word0Reserved1; /* FC Parm Word 0, bit 16:23 */
#ifdef __BIG_ENDIAN_BITFIELD
@@ -1245,8 +1281,7 @@ struct fc_rdp_opd_sfp_info {
uint8_t vendor_name[16];
uint8_t model_number[16];
uint8_t serial_number[16];
- uint8_t revision[2];
- uint8_t reserved[2];
+ uint8_t revision[4];
uint8_t date[8];
};
@@ -1265,14 +1300,14 @@ struct fc_rdp_req_frame {
struct fc_rdp_res_frame {
- uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */
- uint32_t length; /* FC Word 1 */
- struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */
- struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */
- struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10-12 */
- struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13-21 */
- struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22-27 */
- struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28-33 */
+ uint32_t reply_sequence; /* FC word0 LS_ACC or LS_RJT */
+ uint32_t length; /* FC Word 1 */
+ struct fc_rdp_link_service_desc link_service_desc; /* Word 2 -4 */
+ struct fc_rdp_sfp_desc sfp_desc; /* Word 5 -9 */
+ struct fc_rdp_port_speed_desc portspeed_desc; /* Word 10 -12 */
+ struct fc_rdp_link_error_status_desc link_error_desc; /* Word 13 -21 */
+ struct fc_rdp_port_name_desc diag_port_names_desc; /* Word 22 -27 */
+ struct fc_rdp_port_name_desc attached_port_names_desc;/* Word 28 -33 */
struct fc_fec_rdp_desc fec_desc; /* FC word 34-37*/
struct fc_rdp_bbc_desc bbc_desc; /* FC Word 38-42*/
struct fc_rdp_oed_sfp_desc oed_temp_desc; /* FC Word 43-47*/
@@ -1791,6 +1826,7 @@ typedef struct { /* FireFly BIU registers */
#define MBX_INIT_VFI 0xA3
#define MBX_INIT_VPI 0xA4
#define MBX_ACCESS_VDATA 0xA5
+#define MBX_REG_FCFI_MRQ 0xAF
#define MBX_AUTH_PORT 0xF8
#define MBX_SECURITY_MGMT 0xF9
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 5646699b0516..cfdb068a3bfc 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2009-2016 Emulex. All rights reserved. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -108,6 +110,7 @@ struct lpfc_sli_intf {
#define LPFC_MAX_MQ_PAGE 8
#define LPFC_MAX_WQ_PAGE_V0 4
#define LPFC_MAX_WQ_PAGE 8
+#define LPFC_MAX_RQ_PAGE 8
#define LPFC_MAX_CQ_PAGE 4
#define LPFC_MAX_EQ_PAGE 8
@@ -198,7 +201,7 @@ struct lpfc_sli_intf {
/* Configuration of Interrupts / sec for entire HBA port */
#define LPFC_MIN_IMAX 5000
#define LPFC_MAX_IMAX 5000000
-#define LPFC_DEF_IMAX 50000
+#define LPFC_DEF_IMAX 150000
#define LPFC_MIN_CPU_MAP 0
#define LPFC_MAX_CPU_MAP 2
@@ -348,6 +351,7 @@ struct lpfc_cqe {
#define CQE_CODE_RECEIVE 0x4
#define CQE_CODE_XRI_ABORTED 0x5
#define CQE_CODE_RECEIVE_V1 0x9
+#define CQE_CODE_NVME_ERSP 0xd
/*
* Define mask value for xri_aborted and wcqe completed CQE extended status.
@@ -367,6 +371,9 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_hw_status_SHIFT 0
#define lpfc_wcqe_c_hw_status_MASK 0x000000FF
#define lpfc_wcqe_c_hw_status_WORD word0
+#define lpfc_wcqe_c_ersp0_SHIFT 0
+#define lpfc_wcqe_c_ersp0_MASK 0x0000FFFF
+#define lpfc_wcqe_c_ersp0_WORD word0
uint32_t total_data_placed;
uint32_t parameter;
#define lpfc_wcqe_c_bg_edir_SHIFT 5
@@ -400,6 +407,9 @@ struct lpfc_wcqe_complete {
#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
+#define lpfc_wcqe_c_sqhead_SHIFT 0
+#define lpfc_wcqe_c_sqhead_MASK 0x0000FFFF
+#define lpfc_wcqe_c_sqhead_WORD word3
};
/* completion queue entry for wqe release */
@@ -954,6 +964,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
+#define LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET 0x1D
#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
@@ -1135,6 +1146,116 @@ struct lpfc_mbx_cq_create {
} u;
};
+struct lpfc_mbx_cq_create_set {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_create_set_page_size_SHIFT 16 /* Version 2 Only */
+#define lpfc_mbx_cq_create_set_page_size_MASK 0x000000FF
+#define lpfc_mbx_cq_create_set_page_size_WORD word0
+#define lpfc_mbx_cq_create_set_num_pages_SHIFT 0
+#define lpfc_mbx_cq_create_set_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_num_pages_WORD word0
+ uint32_t word1;
+#define lpfc_mbx_cq_create_set_evt_SHIFT 31
+#define lpfc_mbx_cq_create_set_evt_MASK 0x00000001
+#define lpfc_mbx_cq_create_set_evt_WORD word1
+#define lpfc_mbx_cq_create_set_valid_SHIFT 29
+#define lpfc_mbx_cq_create_set_valid_MASK 0x00000001
+#define lpfc_mbx_cq_create_set_valid_WORD word1
+#define lpfc_mbx_cq_create_set_cqe_cnt_SHIFT 27
+#define lpfc_mbx_cq_create_set_cqe_cnt_MASK 0x00000003
+#define lpfc_mbx_cq_create_set_cqe_cnt_WORD word1
+#define lpfc_mbx_cq_create_set_cqe_size_SHIFT 25
+#define lpfc_mbx_cq_create_set_cqe_size_MASK 0x00000003
+#define lpfc_mbx_cq_create_set_cqe_size_WORD word1
+#define lpfc_mbx_cq_create_set_auto_SHIFT 15
+#define lpfc_mbx_cq_create_set_auto_MASK 0x0000001
+#define lpfc_mbx_cq_create_set_auto_WORD word1
+#define lpfc_mbx_cq_create_set_nodelay_SHIFT 14
+#define lpfc_mbx_cq_create_set_nodelay_MASK 0x00000001
+#define lpfc_mbx_cq_create_set_nodelay_WORD word1
+#define lpfc_mbx_cq_create_set_clswm_SHIFT 12
+#define lpfc_mbx_cq_create_set_clswm_MASK 0x00000003
+#define lpfc_mbx_cq_create_set_clswm_WORD word1
+ uint32_t word2;
+#define lpfc_mbx_cq_create_set_arm_SHIFT 31
+#define lpfc_mbx_cq_create_set_arm_MASK 0x00000001
+#define lpfc_mbx_cq_create_set_arm_WORD word2
+#define lpfc_mbx_cq_create_set_num_cq_SHIFT 0
+#define lpfc_mbx_cq_create_set_num_cq_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_num_cq_WORD word2
+ uint32_t word3;
+#define lpfc_mbx_cq_create_set_eq_id1_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id1_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id1_WORD word3
+#define lpfc_mbx_cq_create_set_eq_id0_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id0_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id0_WORD word3
+ uint32_t word4;
+#define lpfc_mbx_cq_create_set_eq_id3_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id3_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id3_WORD word4
+#define lpfc_mbx_cq_create_set_eq_id2_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id2_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id2_WORD word4
+ uint32_t word5;
+#define lpfc_mbx_cq_create_set_eq_id5_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id5_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id5_WORD word5
+#define lpfc_mbx_cq_create_set_eq_id4_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id4_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id4_WORD word5
+ uint32_t word6;
+#define lpfc_mbx_cq_create_set_eq_id7_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id7_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id7_WORD word6
+#define lpfc_mbx_cq_create_set_eq_id6_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id6_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id6_WORD word6
+ uint32_t word7;
+#define lpfc_mbx_cq_create_set_eq_id9_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id9_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id9_WORD word7
+#define lpfc_mbx_cq_create_set_eq_id8_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id8_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id8_WORD word7
+ uint32_t word8;
+#define lpfc_mbx_cq_create_set_eq_id11_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id11_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id11_WORD word8
+#define lpfc_mbx_cq_create_set_eq_id10_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id10_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id10_WORD word8
+ uint32_t word9;
+#define lpfc_mbx_cq_create_set_eq_id13_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id13_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id13_WORD word9
+#define lpfc_mbx_cq_create_set_eq_id12_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id12_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id12_WORD word9
+ uint32_t word10;
+#define lpfc_mbx_cq_create_set_eq_id15_SHIFT 16
+#define lpfc_mbx_cq_create_set_eq_id15_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id15_WORD word10
+#define lpfc_mbx_cq_create_set_eq_id14_SHIFT 0
+#define lpfc_mbx_cq_create_set_eq_id14_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_eq_id14_WORD word10
+ struct dma_address page[1];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_cq_create_set_num_alloc_SHIFT 16
+#define lpfc_mbx_cq_create_set_num_alloc_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_num_alloc_WORD word0
+#define lpfc_mbx_cq_create_set_base_id_SHIFT 0
+#define lpfc_mbx_cq_create_set_base_id_MASK 0x0000FFFF
+#define lpfc_mbx_cq_create_set_base_id_WORD word0
+ } response;
+ } u;
+};
+
struct lpfc_mbx_cq_destroy {
struct mbox_header header;
union {
@@ -1186,6 +1307,7 @@ struct lpfc_mbx_wq_create {
#define lpfc_mbx_wq_create_page_size_SHIFT 0
#define lpfc_mbx_wq_create_page_size_MASK 0x000000FF
#define lpfc_mbx_wq_create_page_size_WORD word1
+#define LPFC_WQ_PAGE_SIZE_4096 0x1
#define lpfc_mbx_wq_create_wqe_size_SHIFT 8
#define lpfc_mbx_wq_create_wqe_size_MASK 0x0000000F
#define lpfc_mbx_wq_create_wqe_size_WORD word1
@@ -1243,10 +1365,10 @@ struct rq_context {
#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */
#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */
#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */
-#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1 Only */
+#define lpfc_rq_context_rqe_count_1_SHIFT 16 /* Version 1-2 Only */
#define lpfc_rq_context_rqe_count_1_MASK 0x0000FFFF
#define lpfc_rq_context_rqe_count_1_WORD word0
-#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1 Only */
+#define lpfc_rq_context_rqe_size_SHIFT 8 /* Version 1-2 Only */
#define lpfc_rq_context_rqe_size_MASK 0x0000000F
#define lpfc_rq_context_rqe_size_WORD word0
#define LPFC_RQE_SIZE_8 2
@@ -1257,7 +1379,14 @@ struct rq_context {
#define lpfc_rq_context_page_size_SHIFT 0 /* Version 1 Only */
#define lpfc_rq_context_page_size_MASK 0x000000FF
#define lpfc_rq_context_page_size_WORD word0
- uint32_t reserved1;
+#define LPFC_RQ_PAGE_SIZE_4096 0x1
+ uint32_t word1;
+#define lpfc_rq_context_data_size_SHIFT 16 /* Version 2 Only */
+#define lpfc_rq_context_data_size_MASK 0x0000FFFF
+#define lpfc_rq_context_data_size_WORD word1
+#define lpfc_rq_context_hdr_size_SHIFT 0 /* Version 2 Only */
+#define lpfc_rq_context_hdr_size_MASK 0x0000FFFF
+#define lpfc_rq_context_hdr_size_WORD word1
uint32_t word2;
#define lpfc_rq_context_cq_id_SHIFT 16
#define lpfc_rq_context_cq_id_MASK 0x000003FF
@@ -1265,6 +1394,9 @@ struct rq_context {
#define lpfc_rq_context_buf_size_SHIFT 0
#define lpfc_rq_context_buf_size_MASK 0x0000FFFF
#define lpfc_rq_context_buf_size_WORD word2
+#define lpfc_rq_context_base_cq_SHIFT 0 /* Version 2 Only */
+#define lpfc_rq_context_base_cq_MASK 0x0000FFFF
+#define lpfc_rq_context_base_cq_WORD word2
uint32_t buffer_size; /* Version 1 Only */
};
@@ -1286,10 +1418,65 @@ struct lpfc_mbx_rq_create {
#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
#define lpfc_mbx_rq_create_ulp_num_WORD word0
struct rq_context context;
- struct dma_address page[LPFC_MAX_WQ_PAGE];
+ struct dma_address page[LPFC_MAX_RQ_PAGE];
} request;
struct {
uint32_t word0;
+#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16
+#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0
+#define lpfc_mbx_rq_create_q_id_SHIFT 0
+#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT 0
+#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD word2
+#define lpfc_mbx_rq_create_db_format_SHIFT 16
+#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD word2
+ } response;
+ } u;
+};
+
+struct lpfc_mbx_rq_create_v2 {
+ union lpfc_sli4_cfg_shdr cfg_shdr;
+ union {
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT 0
+#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD word0
+#define lpfc_mbx_rq_create_rq_cnt_SHIFT 16
+#define lpfc_mbx_rq_create_rq_cnt_MASK 0x000000FF
+#define lpfc_mbx_rq_create_rq_cnt_WORD word0
+#define lpfc_mbx_rq_create_dua_SHIFT 16
+#define lpfc_mbx_rq_create_dua_MASK 0x00000001
+#define lpfc_mbx_rq_create_dua_WORD word0
+#define lpfc_mbx_rq_create_bqu_SHIFT 17
+#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
+#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD word0
+#define lpfc_mbx_rq_create_dim_SHIFT 29
+#define lpfc_mbx_rq_create_dim_MASK 0x00000001
+#define lpfc_mbx_rq_create_dim_WORD word0
+#define lpfc_mbx_rq_create_dfd_SHIFT 30
+#define lpfc_mbx_rq_create_dfd_MASK 0x00000001
+#define lpfc_mbx_rq_create_dfd_WORD word0
+#define lpfc_mbx_rq_create_dnb_SHIFT 31
+#define lpfc_mbx_rq_create_dnb_MASK 0x00000001
+#define lpfc_mbx_rq_create_dnb_WORD word0
+ struct rq_context context;
+ struct dma_address page[1];
+ } request;
+ struct {
+ uint32_t word0;
+#define lpfc_mbx_rq_create_q_cnt_v2_SHIFT 16
+#define lpfc_mbx_rq_create_q_cnt_v2_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_cnt_v2_WORD word0
#define lpfc_mbx_rq_create_q_id_SHIFT 0
#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_q_id_WORD word0
@@ -2203,6 +2390,160 @@ struct lpfc_mbx_reg_fcfi {
#define lpfc_reg_fcfi_vlan_tag_WORD word8
};
+struct lpfc_mbx_reg_fcfi_mrq {
+ uint32_t word1;
+#define lpfc_reg_fcfi_mrq_info_index_SHIFT 0
+#define lpfc_reg_fcfi_mrq_info_index_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_mrq_info_index_WORD word1
+#define lpfc_reg_fcfi_mrq_fcfi_SHIFT 16
+#define lpfc_reg_fcfi_mrq_fcfi_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_mrq_fcfi_WORD word1
+ uint32_t word2;
+#define lpfc_reg_fcfi_mrq_rq_id1_SHIFT 0
+#define lpfc_reg_fcfi_mrq_rq_id1_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id1_WORD word2
+#define lpfc_reg_fcfi_mrq_rq_id0_SHIFT 16
+#define lpfc_reg_fcfi_mrq_rq_id0_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id0_WORD word2
+ uint32_t word3;
+#define lpfc_reg_fcfi_mrq_rq_id3_SHIFT 0
+#define lpfc_reg_fcfi_mrq_rq_id3_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id3_WORD word3
+#define lpfc_reg_fcfi_mrq_rq_id2_SHIFT 16
+#define lpfc_reg_fcfi_mrq_rq_id2_MASK 0x0000FFFF
+#define lpfc_reg_fcfi_mrq_rq_id2_WORD word3
+ uint32_t word4;
+#define lpfc_reg_fcfi_mrq_type_match0_SHIFT 24
+#define lpfc_reg_fcfi_mrq_type_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match0_WORD word4
+#define lpfc_reg_fcfi_mrq_type_mask0_SHIFT 16
+#define lpfc_reg_fcfi_mrq_type_mask0_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask0_WORD word4
+#define lpfc_reg_fcfi_mrq_rctl_match0_SHIFT 8
+#define lpfc_reg_fcfi_mrq_rctl_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match0_WORD word4
+#define lpfc_reg_fcfi_mrq_rctl_mask0_SHIFT 0
+#define lpfc_reg_fcfi_mrq_rctl_mask0_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask0_WORD word4
+ uint32_t word5;
+#define lpfc_reg_fcfi_mrq_type_match1_SHIFT 24
+#define lpfc_reg_fcfi_mrq_type_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match1_WORD word5
+#define lpfc_reg_fcfi_mrq_type_mask1_SHIFT 16
+#define lpfc_reg_fcfi_mrq_type_mask1_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask1_WORD word5
+#define lpfc_reg_fcfi_mrq_rctl_match1_SHIFT 8
+#define lpfc_reg_fcfi_mrq_rctl_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match1_WORD word5
+#define lpfc_reg_fcfi_mrq_rctl_mask1_SHIFT 0
+#define lpfc_reg_fcfi_mrq_rctl_mask1_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask1_WORD word5
+ uint32_t word6;
+#define lpfc_reg_fcfi_mrq_type_match2_SHIFT 24
+#define lpfc_reg_fcfi_mrq_type_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match2_WORD word6
+#define lpfc_reg_fcfi_mrq_type_mask2_SHIFT 16
+#define lpfc_reg_fcfi_mrq_type_mask2_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask2_WORD word6
+#define lpfc_reg_fcfi_mrq_rctl_match2_SHIFT 8
+#define lpfc_reg_fcfi_mrq_rctl_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match2_WORD word6
+#define lpfc_reg_fcfi_mrq_rctl_mask2_SHIFT 0
+#define lpfc_reg_fcfi_mrq_rctl_mask2_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask2_WORD word6
+ uint32_t word7;
+#define lpfc_reg_fcfi_mrq_type_match3_SHIFT 24
+#define lpfc_reg_fcfi_mrq_type_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_match3_WORD word7
+#define lpfc_reg_fcfi_mrq_type_mask3_SHIFT 16
+#define lpfc_reg_fcfi_mrq_type_mask3_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_type_mask3_WORD word7
+#define lpfc_reg_fcfi_mrq_rctl_match3_SHIFT 8
+#define lpfc_reg_fcfi_mrq_rctl_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_match3_WORD word7
+#define lpfc_reg_fcfi_mrq_rctl_mask3_SHIFT 0
+#define lpfc_reg_fcfi_mrq_rctl_mask3_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_rctl_mask3_WORD word7
+ uint32_t word8;
+#define lpfc_reg_fcfi_mrq_ptc7_SHIFT 31
+#define lpfc_reg_fcfi_mrq_ptc7_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc7_WORD word8
+#define lpfc_reg_fcfi_mrq_ptc6_SHIFT 30
+#define lpfc_reg_fcfi_mrq_ptc6_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc6_WORD word8
+#define lpfc_reg_fcfi_mrq_ptc5_SHIFT 29
+#define lpfc_reg_fcfi_mrq_ptc5_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc5_WORD word8
+#define lpfc_reg_fcfi_mrq_ptc4_SHIFT 28
+#define lpfc_reg_fcfi_mrq_ptc4_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc4_WORD word8
+#define lpfc_reg_fcfi_mrq_ptc3_SHIFT 27
+#define lpfc_reg_fcfi_mrq_ptc3_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc3_WORD word8
+#define lpfc_reg_fcfi_mrq_ptc2_SHIFT 26
+#define lpfc_reg_fcfi_mrq_ptc2_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc2_WORD word8
+#define lpfc_reg_fcfi_mrq_ptc1_SHIFT 25
+#define lpfc_reg_fcfi_mrq_ptc1_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc1_WORD word8
+#define lpfc_reg_fcfi_mrq_ptc0_SHIFT 24
+#define lpfc_reg_fcfi_mrq_ptc0_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_ptc0_WORD word8
+#define lpfc_reg_fcfi_mrq_pt7_SHIFT 23
+#define lpfc_reg_fcfi_mrq_pt7_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt7_WORD word8
+#define lpfc_reg_fcfi_mrq_pt6_SHIFT 22
+#define lpfc_reg_fcfi_mrq_pt6_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt6_WORD word8
+#define lpfc_reg_fcfi_mrq_pt5_SHIFT 21
+#define lpfc_reg_fcfi_mrq_pt5_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt5_WORD word8
+#define lpfc_reg_fcfi_mrq_pt4_SHIFT 20
+#define lpfc_reg_fcfi_mrq_pt4_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt4_WORD word8
+#define lpfc_reg_fcfi_mrq_pt3_SHIFT 19
+#define lpfc_reg_fcfi_mrq_pt3_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt3_WORD word8
+#define lpfc_reg_fcfi_mrq_pt2_SHIFT 18
+#define lpfc_reg_fcfi_mrq_pt2_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt2_WORD word8
+#define lpfc_reg_fcfi_mrq_pt1_SHIFT 17
+#define lpfc_reg_fcfi_mrq_pt1_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt1_WORD word8
+#define lpfc_reg_fcfi_mrq_pt0_SHIFT 16
+#define lpfc_reg_fcfi_mrq_pt0_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_pt0_WORD word8
+#define lpfc_reg_fcfi_mrq_xmv_SHIFT 15
+#define lpfc_reg_fcfi_mrq_xmv_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_xmv_WORD word8
+#define lpfc_reg_fcfi_mrq_mode_SHIFT 13
+#define lpfc_reg_fcfi_mrq_mode_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_mode_WORD word8
+#define lpfc_reg_fcfi_mrq_vv_SHIFT 12
+#define lpfc_reg_fcfi_mrq_vv_MASK 0x00000001
+#define lpfc_reg_fcfi_mrq_vv_WORD word8
+#define lpfc_reg_fcfi_mrq_vlan_tag_SHIFT 0
+#define lpfc_reg_fcfi_mrq_vlan_tag_MASK 0x00000FFF
+#define lpfc_reg_fcfi_mrq_vlan_tag_WORD word8
+ uint32_t word9;
+#define lpfc_reg_fcfi_mrq_policy_SHIFT 12
+#define lpfc_reg_fcfi_mrq_policy_MASK 0x0000000F
+#define lpfc_reg_fcfi_mrq_policy_WORD word9
+#define lpfc_reg_fcfi_mrq_filter_SHIFT 8
+#define lpfc_reg_fcfi_mrq_filter_MASK 0x0000000F
+#define lpfc_reg_fcfi_mrq_filter_WORD word9
+#define lpfc_reg_fcfi_mrq_npairs_SHIFT 0
+#define lpfc_reg_fcfi_mrq_npairs_MASK 0x000000FF
+#define lpfc_reg_fcfi_mrq_npairs_WORD word9
+ uint32_t word10;
+ uint32_t word11;
+ uint32_t word12;
+ uint32_t word13;
+ uint32_t word14;
+ uint32_t word15;
+ uint32_t word16;
+};
+
struct lpfc_mbx_unreg_fcfi {
uint32_t word1_rsv;
uint32_t word2;
@@ -2382,6 +2723,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11
#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2
+#define lpfc_mbx_rq_ftr_rq_mrqp_SHIFT 16
+#define lpfc_mbx_rq_ftr_rq_mrqp_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rq_mrqp_WORD word2
uint32_t word3;
#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0
#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001
@@ -2410,6 +2754,9 @@ struct lpfc_mbx_request_features {
#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11
#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001
#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3
+#define lpfc_mbx_rq_ftr_rsp_mrqp_SHIFT 16
+#define lpfc_mbx_rq_ftr_rsp_mrqp_MASK 0x00000001
+#define lpfc_mbx_rq_ftr_rsp_mrqp_WORD word3
};
struct lpfc_mbx_supp_pages {
@@ -2839,12 +3186,18 @@ struct lpfc_sli4_parameters {
#define cfg_mqv_WORD word6
uint32_t word7;
uint32_t word8;
+#define cfg_wqpcnt_SHIFT 0
+#define cfg_wqpcnt_MASK 0x0000000f
+#define cfg_wqpcnt_WORD word8
#define cfg_wqsize_SHIFT 8
#define cfg_wqsize_MASK 0x0000000f
#define cfg_wqsize_WORD word8
#define cfg_wqv_SHIFT 14
#define cfg_wqv_MASK 0x00000003
#define cfg_wqv_WORD word8
+#define cfg_wqpsize_SHIFT 16
+#define cfg_wqpsize_MASK 0x000000ff
+#define cfg_wqpsize_WORD word8
uint32_t word9;
uint32_t word10;
#define cfg_rqv_SHIFT 14
@@ -2895,6 +3248,12 @@ struct lpfc_sli4_parameters {
#define cfg_mds_diags_SHIFT 1
#define cfg_mds_diags_MASK 0x00000001
#define cfg_mds_diags_WORD word19
+#define cfg_nvme_SHIFT 3
+#define cfg_nvme_MASK 0x00000001
+#define cfg_nvme_WORD word19
+#define cfg_xib_SHIFT 4
+#define cfg_xib_MASK 0x00000001
+#define cfg_xib_WORD word19
};
#define LPFC_SET_UE_RECOVERY 0x10
@@ -3290,14 +3649,17 @@ struct lpfc_mqe {
struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
struct lpfc_mbx_redisc_fcf_tbl redisc_fcf_tbl;
struct lpfc_mbx_reg_fcfi reg_fcfi;
+ struct lpfc_mbx_reg_fcfi_mrq reg_fcfi_mrq;
struct lpfc_mbx_unreg_fcfi unreg_fcfi;
struct lpfc_mbx_mq_create mq_create;
struct lpfc_mbx_mq_create_ext mq_create_ext;
struct lpfc_mbx_eq_create eq_create;
struct lpfc_mbx_modify_eq_delay eq_delay;
struct lpfc_mbx_cq_create cq_create;
+ struct lpfc_mbx_cq_create_set cq_create_set;
struct lpfc_mbx_wq_create wq_create;
struct lpfc_mbx_rq_create rq_create;
+ struct lpfc_mbx_rq_create_v2 rq_create_v2;
struct lpfc_mbx_mq_destroy mq_destroy;
struct lpfc_mbx_eq_destroy eq_destroy;
struct lpfc_mbx_cq_destroy cq_destroy;
@@ -3657,6 +4019,9 @@ struct wqe_common {
#define wqe_ebde_cnt_SHIFT 0
#define wqe_ebde_cnt_MASK 0x0000000f
#define wqe_ebde_cnt_WORD word10
+#define wqe_nvme_SHIFT 4
+#define wqe_nvme_MASK 0x00000001
+#define wqe_nvme_WORD word10
#define wqe_oas_SHIFT 6
#define wqe_oas_MASK 0x00000001
#define wqe_oas_WORD word10
@@ -3717,9 +4082,18 @@ struct wqe_common {
#define LPFC_ELS_ID_FDISC 2
#define LPFC_ELS_ID_LOGO 1
#define LPFC_ELS_ID_DEFAULT 0
+#define wqe_irsp_SHIFT 4
+#define wqe_irsp_MASK 0x00000001
+#define wqe_irsp_WORD word11
+#define wqe_sup_SHIFT 6
+#define wqe_sup_MASK 0x00000001
+#define wqe_sup_WORD word11
#define wqe_wqec_SHIFT 7
#define wqe_wqec_MASK 0x00000001
#define wqe_wqec_WORD word11
+#define wqe_irsplen_SHIFT 8
+#define wqe_irsplen_MASK 0x0000000f
+#define wqe_irsplen_WORD word11
#define wqe_cqid_SHIFT 16
#define wqe_cqid_MASK 0x0000ffff
#define wqe_cqid_WORD word11
@@ -3897,6 +4271,50 @@ struct gen_req64_wqe {
uint32_t max_response_payload_len;
};
+/* Define NVME PRLI request to fabric. NVME is a
+ * fabric-only protocol.
+ * Updated to red-lined v1.08 on Sept 16, 2016
+ */
+struct lpfc_nvme_prli {
+ uint32_t word1;
+ /* The Response Code is defined in the FCP PRLI lpfc_hw.h */
+#define prli_acc_rsp_code_SHIFT 8
+#define prli_acc_rsp_code_MASK 0x0000000f
+#define prli_acc_rsp_code_WORD word1
+#define prli_estabImagePair_SHIFT 13
+#define prli_estabImagePair_MASK 0x00000001
+#define prli_estabImagePair_WORD word1
+#define prli_type_code_ext_SHIFT 16
+#define prli_type_code_ext_MASK 0x000000ff
+#define prli_type_code_ext_WORD word1
+#define prli_type_code_SHIFT 24
+#define prli_type_code_MASK 0x000000ff
+#define prli_type_code_WORD word1
+ uint32_t word_rsvd2;
+ uint32_t word_rsvd3;
+ uint32_t word4;
+#define prli_fba_SHIFT 0
+#define prli_fba_MASK 0x00000001
+#define prli_fba_WORD word4
+#define prli_disc_SHIFT 3
+#define prli_disc_MASK 0x00000001
+#define prli_disc_WORD word4
+#define prli_tgt_SHIFT 4
+#define prli_tgt_MASK 0x00000001
+#define prli_tgt_WORD word4
+#define prli_init_SHIFT 5
+#define prli_init_MASK 0x00000001
+#define prli_init_WORD word4
+#define prli_recov_SHIFT 8
+#define prli_recov_MASK 0x00000001
+#define prli_recov_WORD word4
+ uint32_t word5;
+#define prli_fb_sz_SHIFT 0
+#define prli_fb_sz_MASK 0x0000ffff
+#define prli_fb_sz_WORD word5
+#define LPFC_NVMET_FB_SZ_MAX 65536 /* Driver target mode only. */
+};
+
struct create_xri_wqe {
uint32_t rsrvd[5]; /* words 0-4 */
struct wqe_did wqe_dest; /* word 5 */
@@ -3969,6 +4387,35 @@ struct fcp_icmnd64_wqe {
uint32_t rsvd_12_15[4]; /* word 12-15 */
};
+struct fcp_trsp64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t response_len;
+ uint32_t rsvd_4_5[2];
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t rsvd_12_15[4]; /* word 12-15 */
+};
+
+struct fcp_tsend64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t payload_offset_len;
+ uint32_t relative_offset;
+ uint32_t reserved;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t fcp_data_len; /* word 12 */
+ uint32_t rsvd_13_15[3]; /* word 13-15 */
+};
+
+struct fcp_treceive64_wqe {
+ struct ulp_bde64 bde;
+ uint32_t payload_offset_len;
+ uint32_t relative_offset;
+ uint32_t reserved;
+ struct wqe_common wqe_com; /* words 6-11 */
+ uint32_t fcp_data_len; /* word 12 */
+ uint32_t rsvd_13_15[3]; /* word 13-15 */
+};
+#define TXRDY_PAYLOAD_LEN 12
+
union lpfc_wqe {
uint32_t words[16];
@@ -3984,6 +4431,10 @@ union lpfc_wqe {
struct xmit_els_rsp64_wqe xmit_els_rsp;
struct els_request64_wqe els_req;
struct gen_req64_wqe gen_req;
+ struct fcp_trsp64_wqe fcp_trsp;
+ struct fcp_tsend64_wqe fcp_tsend;
+ struct fcp_treceive64_wqe fcp_treceive;
+
};
union lpfc_wqe128 {
@@ -3992,6 +4443,9 @@ union lpfc_wqe128 {
struct fcp_icmnd64_wqe fcp_icmd;
struct fcp_iread64_wqe fcp_iread;
struct fcp_iwrite64_wqe fcp_iwrite;
+ struct fcp_trsp64_wqe fcp_trsp;
+ struct fcp_tsend64_wqe fcp_tsend;
+ struct fcp_treceive64_wqe fcp_treceive;
struct xmit_seq64_wqe xmit_sequence;
struct gen_req64_wqe gen_req;
};
@@ -4015,11 +4469,39 @@ struct lpfc_grp_hdr {
uint8_t revision[32];
};
-#define FCP_COMMAND 0x0
-#define FCP_COMMAND_DATA_OUT 0x1
-#define ELS_COMMAND_NON_FIP 0xC
-#define ELS_COMMAND_FIP 0xD
-#define OTHER_COMMAND 0x8
+/* Defines for WQE command type */
+#define FCP_COMMAND 0x0
+#define NVME_READ_CMD 0x0
+#define FCP_COMMAND_DATA_OUT 0x1
+#define NVME_WRITE_CMD 0x1
+#define FCP_COMMAND_TRECEIVE 0x2
+#define FCP_COMMAND_TRSP 0x3
+#define FCP_COMMAND_TSEND 0x7
+#define OTHER_COMMAND 0x8
+#define ELS_COMMAND_NON_FIP 0xC
+#define ELS_COMMAND_FIP 0xD
+
+#define LPFC_NVME_EMBED_CMD 0x0
+#define LPFC_NVME_EMBED_WRITE 0x1
+#define LPFC_NVME_EMBED_READ 0x2
+
+/* WQE Commands */
+#define CMD_ABORT_XRI_WQE 0x0F
+#define CMD_XMIT_SEQUENCE64_WQE 0x82
+#define CMD_XMIT_BCAST64_WQE 0x84
+#define CMD_ELS_REQUEST64_WQE 0x8A
+#define CMD_XMIT_ELS_RSP64_WQE 0x95
+#define CMD_XMIT_BLS_RSP64_WQE 0x97
+#define CMD_FCP_IWRITE64_WQE 0x98
+#define CMD_FCP_IREAD64_WQE 0x9A
+#define CMD_FCP_ICMND64_WQE 0x9C
+#define CMD_FCP_TSEND64_WQE 0x9F
+#define CMD_FCP_TRECEIVE64_WQE 0xA1
+#define CMD_FCP_TRSP64_WQE 0xA3
+#define CMD_GEN_REQUEST64_WQE 0xC2
+
+#define CMD_WQE_MASK 0xff
+
#define LPFC_FW_DUMP 1
#define LPFC_FW_RESET 2
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h
index 5733feafe25f..0ba3733eb36d 100644
--- a/drivers/scsi/lpfc/lpfc_ids.h
+++ b/drivers/scsi/lpfc/lpfc_ids.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 64717c171b15..0ee429d773f3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -34,6 +36,7 @@
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
+#include <linux/msi.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -46,8 +49,9 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -71,6 +75,7 @@ static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
+static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
@@ -86,6 +91,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
+#define LPFC_NVMET_BUF_POST 254
/**
* lpfc_config_port_prep - Perform lpfc initialization prior to config port
@@ -499,12 +505,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->link_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ELS ring till hba_state is READY */
- if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
- psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
- if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
- psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
- if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
- psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
+ psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
+ psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;
/* Post receive buffers for desired rings */
if (phba->sli_rev != 3)
@@ -892,7 +896,7 @@ lpfc_hba_free_post_buf(struct lpfc_hba *phba)
lpfc_sli_hbqbuf_free_all(phba);
else {
/* Cleanup preposted buffers on the ELS ring */
- pring = &psli->ring[LPFC_ELS_RING];
+ pring = &psli->sli3_ring[LPFC_ELS_RING];
spin_lock_irq(&phba->hbalock);
list_splice_init(&pring->postbufq, &buflist);
spin_unlock_irq(&phba->hbalock);
@@ -925,32 +929,43 @@ static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
LIST_HEAD(completions);
int i;
- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
- if (phba->sli_rev >= LPFC_SLI_REV4)
- spin_lock_irq(&pring->ring_lock);
- else
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->sli3_ring[i];
spin_lock_irq(&phba->hbalock);
- /* At this point in time the HBA is either reset or DOA. Either
- * way, nothing should be on txcmplq as it will NEVER complete.
- */
- list_splice_init(&pring->txcmplq, &completions);
- pring->txcmplq_cnt = 0;
-
- if (phba->sli_rev >= LPFC_SLI_REV4)
- spin_unlock_irq(&pring->ring_lock);
- else
+ /* At this point in time the HBA is either reset or DOA
+ * Nothing should be on txcmplq as it will
+ * NEVER complete.
+ */
+ list_splice_init(&pring->txcmplq, &completions);
+ pring->txcmplq_cnt = 0;
spin_unlock_irq(&phba->hbalock);
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ return;
+ }
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
+ continue;
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txcmplq, &completions);
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
lpfc_sli_abort_iocb_ring(phba, pring);
}
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
/**
@@ -989,43 +1004,58 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *psb, *psb_next;
LIST_HEAD(aborts);
+ LIST_HEAD(nvme_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
- lpfc_hba_free_post_buf(phba);
+
+ lpfc_sli_hbqbuf_free_all(phba);
lpfc_hba_clean_txcmplq(phba);
- pring = &psli->ring[LPFC_ELS_RING];
/* At this point in time the HBA is either reset or DOA. Either
* way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
- * on the lpfc_sgl_list so that it can either be freed if the
+ * on the lpfc_els_sgl_list so that it can either be freed if the
* driver is unloading or reposted if the driver is restarting
* the port.
*/
- spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
+ spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
/* scsl_buf_list */
- /* abts_sgl_list_lock required because worker thread uses this
+ /* sgl_list_lock required because worker thread uses this
* list.
*/
- spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry(sglq_entry,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
sglq_entry->state = SGL_FREED;
+ list_for_each_entry(sglq_entry,
+ &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list)
+ sglq_entry->state = SGL_FREED;
- spin_lock(&pring->ring_lock);
list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
- &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&pring->ring_lock);
- spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ &phba->sli4_hba.lpfc_els_sgl_list);
+
+ if (phba->sli4_hba.nvme_wq)
+ list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
/* abts_scsi_buf_list_lock required because worker thread uses this
* list.
*/
- spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
- list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
- &aborts);
- spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+ &aborts);
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ }
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
+ &nvme_aborts);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ }
+
spin_unlock_irq(&phba->hbalock);
list_for_each_entry_safe(psb, psb_next, &aborts, list) {
@@ -1036,6 +1066,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
+ list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+ psb->pCmd = NULL;
+ psb->status = IOSTAT_SUCCESS;
+ }
+ spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
+ spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+
lpfc_sli4_free_sp_events(phba);
return 0;
}
@@ -1829,7 +1867,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked from the worker thread to handle a HBA host
- * attention link event.
+ * attention link event. SLI3 only.
**/
void
lpfc_handle_latt(struct lpfc_hba *phba)
@@ -1867,7 +1905,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
- phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = 4;
@@ -1883,7 +1921,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
return;
lpfc_handle_latt_free_mbuf:
- phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
kfree(mp);
@@ -2441,7 +2479,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
*
* This routine posts initial receive IOCB buffers to the ELS ring. The
* current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
- * set to 64 IOCBs.
+ * set to 64 IOCBs. SLI3 only.
*
* Return codes
* 0 - success (currently always success)
@@ -2452,7 +2490,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
/* Ring 0, ELS / CT buffers */
- lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+ lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */
return 0;
@@ -2640,6 +2678,13 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);
+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ /* Remove the NVME transport reference now and
+ * continue to remove the node.
+ */
+ lpfc_nlp_put(ndlp);
+ }
+
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
@@ -2894,11 +2939,6 @@ lpfc_online(struct lpfc_hba *phba)
lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
- if (!lpfc_sli_queue_setup(phba)) {
- lpfc_unblock_mgmt_io(phba);
- return 1;
- }
-
if (phba->sli_rev == LPFC_SLI_REV4) {
if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
lpfc_unblock_mgmt_io(phba);
@@ -2909,6 +2949,7 @@ lpfc_online(struct lpfc_hba *phba)
vpis_cleared = true;
spin_unlock_irq(&phba->hbalock);
} else {
+ lpfc_sli_queue_init(phba);
if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
lpfc_unblock_mgmt_io(phba);
return 1;
@@ -3098,7 +3139,9 @@ static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *sb, *sb_next;
- struct lpfc_iocbq *io, *io_next;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return;
spin_lock_irq(&phba->hbalock);
@@ -3108,7 +3151,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
list) {
list_del(&sb->list);
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
@@ -3119,25 +3162,58 @@ lpfc_scsi_free(struct lpfc_hba *phba)
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
list) {
list_del(&sb->list);
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
}
spin_unlock(&phba->scsi_buf_list_get_lock);
+ spin_unlock_irq(&phba->hbalock);
+}
+/**
+ * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine frees all the NVME buffers and IOCBs from the driver
+ * list back to the kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ **/
+static void
+lpfc_nvme_free(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
- /* Release all the lpfc_iocbq entries maintained by this host. */
- list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
- list_del(&io->list);
- kfree(io);
- phba->total_iocbq_bufs--;
- }
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return;
+
+ spin_lock_irq(&phba->hbalock);
+ /* Release all the lpfc_nvme_bufs maintained by this host. */
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_put, list) {
+ list_del(&lpfc_ncmd->list);
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ phba->total_nvme_bufs--;
+ }
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+
+ spin_lock(&phba->nvme_buf_list_get_lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ list_del(&lpfc_ncmd->list);
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ phba->total_nvme_bufs--;
+ }
+ spin_unlock(&phba->nvme_buf_list_get_lock);
spin_unlock_irq(&phba->hbalock);
}
-
/**
- * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
+ * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
* @phba: pointer to lpfc hba data structure.
*
* This routine first calculates the sizes of the current els and allocated
@@ -3149,20 +3225,18 @@ lpfc_scsi_free(struct lpfc_hba *phba)
* 0 - successful (for now, it always returns 0)
**/
int
-lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
+lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
- struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
- uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
+ uint16_t i, lxri, xri_cnt, els_xri_cnt;
LIST_HEAD(els_sgl_list);
- LIST_HEAD(scsi_sgl_list);
int rc;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/*
* update on pci function's els xri-sgl list
*/
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl expanded */
xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
@@ -3198,9 +3272,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
list_add_tail(&sglq_entry->list, &els_sgl_list);
}
spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&pring->ring_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&els_sgl_list,
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrinked */
@@ -3210,24 +3285,22 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
"%d to %d\n", phba->sli4_hba.els_xri_cnt,
els_xri_cnt);
spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
- spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
+ &els_sgl_list);
/* release extra els sgls from list */
for (i = 0; i < xri_cnt; i++) {
list_remove_head(&els_sgl_list,
sglq_entry, struct lpfc_sglq, list);
if (sglq_entry) {
- lpfc_mbuf_free(phba, sglq_entry->virt,
- sglq_entry->phys);
+ __lpfc_mbuf_free(phba, sglq_entry->virt,
+ sglq_entry->phys);
kfree(sglq_entry);
}
}
- spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&pring->ring_lock);
+ list_splice_init(&els_sgl_list,
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
} else
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3239,7 +3312,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
sglq_entry = NULL;
sglq_entry_next = NULL;
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
- &phba->sli4_hba.lpfc_sgl_list, list) {
+ &phba->sli4_hba.lpfc_els_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -3251,21 +3324,182 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
sglq_entry->sli4_lxritag = lxri;
sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
+ return 0;
+
+out_free_mem:
+ lpfc_free_els_sgl_list(phba);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_nvmet_sgl_update - update NVMET xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * nvmet sgl lists, and then goes through all sgls to update the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated nvmet sgl lists are 0s.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+ uint16_t i, lxri, xri_cnt, els_xri_cnt;
+ uint16_t nvmet_xri_cnt, tot_cnt;
+ LIST_HEAD(nvmet_sgl_list);
+ int rc;
/*
- * update on pci function's allocated scsi xri-sgl list
+ * update on pci function's nvmet xri-sgl list
+ */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
+ tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ if (nvmet_xri_cnt > tot_cnt) {
+ phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
+ nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6301 NVMET post-sgl count changed to %d\n",
+ phba->cfg_nvmet_mrq_post);
+ }
+
+ if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
+ /* nvmet xri-sgl expanded */
+ xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6302 NVMET xri-sgl cnt grew from %d to %d\n",
+ phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
+ /* allocate the additional nvmet sgls */
+ for (i = 0; i < xri_cnt; i++) {
+ sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+ GFP_KERNEL);
+ if (sglq_entry == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6303 Failure to allocate an "
+ "NVMET sgl entry:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->buff_type = NVMET_BUFF_TYPE;
+ sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
+ &sglq_entry->phys);
+ if (sglq_entry->virt == NULL) {
+ kfree(sglq_entry);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6304 Failure to allocate an "
+ "NVMET buf:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sgl = sglq_entry->virt;
+ memset(sglq_entry->sgl, 0,
+ phba->cfg_sg_dma_buf_size);
+ sglq_entry->state = SGL_FREED;
+ list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
+ }
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&nvmet_sgl_list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+ } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
+ /* nvmet xri-sgl shrunk */
+ xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6305 NVMET xri-sgl count decreased from "
+ "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
+ nvmet_xri_cnt);
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
+ &nvmet_sgl_list);
+ /* release extra nvmet sgls from list */
+ for (i = 0; i < xri_cnt; i++) {
+ list_remove_head(&nvmet_sgl_list,
+ sglq_entry, struct lpfc_sglq, list);
+ if (sglq_entry) {
+ lpfc_nvmet_buf_free(phba, sglq_entry->virt,
+ sglq_entry->phys);
+ kfree(sglq_entry);
+ }
+ }
+ list_splice_init(&nvmet_sgl_list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+ } else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6306 NVMET xri-sgl count unchanged: %d\n",
+ nvmet_xri_cnt);
+ phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
+
+ /* update xris to nvmet sgls on the list */
+ sglq_entry = NULL;
+ sglq_entry_next = NULL;
+ list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6307 Failed to allocate xri for "
+ "NVMET sgl\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+ return 0;
+
+out_free_mem:
+ lpfc_free_nvmet_sgl_list(phba);
+ return rc;
+}
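A worked example of the clamp at the top of lpfc_sli4_nvmet_sgl_update(), with illustrative numbers only: if the port reports max_xri = 2048 and lpfc_sli4_get_els_iocb_cnt() returns 256, then tot_cnt = 1792. A request of cfg_nvmet_mrq = 4 with cfg_nvmet_mrq_post = 512 would need 4 * 512 = 2048 XRIs, so cfg_nvmet_mrq_post is reduced to 1792 / 4 = 448 and nvmet_xri_cnt becomes 4 * 448 = 1792.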
+
+/**
+ * lpfc_sli4_scsi_sgl_update - update SCSI xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to update the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl lists are 0s.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *psb, *psb_next;
+ uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
+ LIST_HEAD(scsi_sgl_list);
+ int rc;
+
+ /*
+ * update on pci function's els xri-sgl list
*/
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
phba->total_scsi_bufs = 0;
+ /*
+ * update on pci function's allocated scsi xri-sgl list
+ */
/* maximum number of xris available for scsi buffers */
phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
els_xri_cnt;
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2401 Current allocated SCSI xri-sgl count:%d, "
- "maximum SCSI xri count:%d\n",
- phba->sli4_hba.scsi_xri_cnt,
- phba->sli4_hba.scsi_xri_max);
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return 0;
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ phba->sli4_hba.scsi_xri_max = /* Split them up */
+ (phba->sli4_hba.scsi_xri_max *
+ phba->cfg_xri_split) / 100;
spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock(&phba->scsi_buf_list_put_lock);
@@ -3283,7 +3517,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
list_remove_head(&scsi_sgl_list, psb,
struct lpfc_scsi_buf, list);
if (psb) {
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
}
@@ -3314,16 +3548,113 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
-
return 0;
out_free_mem:
- lpfc_free_els_sgl_list(phba);
lpfc_scsi_free(phba);
return rc;
}
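A worked example of the split above, with illustrative numbers only: with max_xri = 2048, 256 ELS XRIs and cfg_xri_split = 50, scsi_xri_max starts at 2048 - 256 = 1792; when NVME is enabled alongside FCP it is scaled to 1792 * 50 / 100 = 896, and lpfc_sli4_nvme_sgl_update() below leaves the remaining 896 XRIs to the NVME buffer pool.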
/**
+ * lpfc_sli4_nvme_sgl_update - update NVME xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * nvme buffer lists, and then goes through all buffers to update the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated nvme buffer lists are 0s.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
+ uint16_t i, lxri, els_xri_cnt;
+ uint16_t nvme_xri_cnt, nvme_xri_max;
+ LIST_HEAD(nvme_sgl_list);
+ int rc;
+
+ phba->total_nvme_bufs = 0;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return 0;
+ /*
+ * update on pci function's allocated nvme xri-sgl list
+ */
+
+ /* maximum number of xris available for nvme buffers */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ phba->sli4_hba.nvme_xri_max = nvme_xri_max;
+ phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6074 Current allocated NVME xri-sgl count:%d, "
+ "maximum NVME xri count:%d\n",
+ phba->sli4_hba.nvme_xri_cnt,
+ phba->sli4_hba.nvme_xri_max);
+
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
+ list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+
+ if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
+ /* max nvme xri shrunk below the allocated nvme buffers */
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
+ phba->sli4_hba.nvme_xri_max;
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+ /* release the extra allocated nvme buffers */
+ for (i = 0; i < nvme_xri_cnt; i++) {
+ list_remove_head(&nvme_sgl_list, lpfc_ncmd,
+ struct lpfc_nvme_buf, list);
+ if (lpfc_ncmd) {
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ }
+ }
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+ }
+
+ /* update xris associated to remaining allocated nvme buffers */
+ lpfc_ncmd = NULL;
+ lpfc_ncmd_next = NULL;
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &nvme_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6075 Failed to allocate xri for "
+ "nvme buffer\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
+ lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+ return 0;
+
+out_free_mem:
+ lpfc_nvme_free(phba);
+ return rc;
+}
+
+/**
* lpfc_create_port - Create an FC port
* @phba: pointer to lpfc hba data structure.
* @instance: a unique integer ID to this FC port.
@@ -3343,18 +3674,23 @@ struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
- struct Scsi_Host *shost;
+ struct Scsi_Host *shost = NULL;
int error = 0;
- if (dev != &phba->pcidev->dev) {
- shost = scsi_host_alloc(&lpfc_vport_template,
- sizeof(struct lpfc_vport));
- } else {
- if (phba->sli_rev == LPFC_SLI_REV4)
- shost = scsi_host_alloc(&lpfc_template,
- sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template_s3,
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ if (dev != &phba->pcidev->dev) {
+ shost = scsi_host_alloc(&lpfc_vport_template,
+ sizeof(struct lpfc_vport));
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ shost = scsi_host_alloc(&lpfc_template,
+ sizeof(struct lpfc_vport));
+ else
+ shost = scsi_host_alloc(&lpfc_template_s3,
+ sizeof(struct lpfc_vport));
+ }
+ } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ shost = scsi_host_alloc(&lpfc_template_nvme,
sizeof(struct lpfc_vport));
}
if (!shost)
@@ -3365,8 +3701,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
vport->fc_rscn_flush = 0;
-
lpfc_get_vport_cfgparam(vport);
+
shost->unique_id = instance;
shost->max_id = LPFC_MAX_TARGET;
shost->max_lun = vport->cfg_max_luns;
@@ -3944,7 +4280,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
lpfc_els_flush_all_cmd(phba);
/* Block ELS IOCBs until we have done process link event */
- phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
/* Update link event statistics */
phba->sli.slistat.link_event++;
@@ -4103,7 +4439,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
lpfc_els_flush_all_cmd(phba);
/* Block ELS IOCBs until we have done process link event */
- phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
/* Update link event statistics */
phba->sli.slistat.link_event++;
@@ -4272,13 +4608,13 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
sprintf(message, "Unqualified optics - Replace with "
"Avago optics for Warranty and Technical "
"Support - Link is%s operational",
- (operational) ? "" : " not");
+ (operational) ? " not" : "");
break;
case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
sprintf(message, "Uncertified optics - Replace with "
"Avago-certified optics to enable link "
"operation - Link is%s operational",
- (operational) ? "" : " not");
+ (operational) ? " not" : "");
break;
default:
/* firmware is reporting a status we don't know about */
@@ -5000,40 +5336,79 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
}
/**
- * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup of driver internal resources.
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to set up the driver internal resources specific to
- * support the SLI-3 HBA device it attached to.
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it attached to.
*
* Return codes
- * 0 - successful
- * other values - error
+ * 0 - successful
+ * other values - error
**/
static int
-lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli;
- int rc;
+ struct lpfc_sli *psli = &phba->sli;
/*
- * Initialize timers used by driver
+ * Driver resources common to all SLI revisions
*/
+ atomic_set(&phba->fast_event_count, 0);
+ spin_lock_init(&phba->hbalock);
- /* Heartbeat timer */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
+ /* Initialize ndlp management spinlock */
+ spin_lock_init(&phba->ndlp_lock);
+
+ INIT_LIST_HEAD(&phba->port_list);
+ INIT_LIST_HEAD(&phba->work_list);
+ init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+ /* Initialize the wait queue head for the kernel thread */
+ init_waitqueue_head(&phba->work_waitq);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1403 Protocols supported %s %s %s\n",
+ ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
+ "SCSI" : " "),
+ ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
+ "NVME" : " "),
+ (phba->nvmet_support ? "NVMET" : " "));
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Initialize the scsi buffer list used by driver for scsi IO */
+ spin_lock_init(&phba->scsi_buf_list_get_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+ spin_lock_init(&phba->scsi_buf_list_put_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+ }
+
+ if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+ (phba->nvmet_support == 0)) {
+ /* Initialize the NVME buffer list used by driver for NVME IO */
+ spin_lock_init(&phba->nvme_buf_list_get_lock);
+ INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ spin_lock_init(&phba->nvme_buf_list_put_lock);
+ INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ }
+
+ /* Initialize the fabric iocb list */
+ INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+ /* Initialize list to save ELS buffers */
+ INIT_LIST_HEAD(&phba->elsbuf);
+
+ /* Initialize FCF connection rec list */
+ INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+ /* Initialize OAS configuration list */
+ spin_lock_init(&phba->devicelock);
+ INIT_LIST_HEAD(&phba->luns);
- psli = &phba->sli;
/* MBOX heartbeat timer */
init_timer(&psli->mbox_tmo);
psli->mbox_tmo.function = lpfc_mbox_timeout;
psli->mbox_tmo.data = (unsigned long) phba;
- /* FCP polling mode timer */
- init_timer(&phba->fcp_poll_timer);
- phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long) phba;
/* Fabric block timer */
init_timer(&phba->fabric_block_timer);
phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
@@ -5042,6 +5417,38 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
init_timer(&phba->eratt_poll);
phba->eratt_poll.function = lpfc_poll_eratt;
phba->eratt_poll.data = (unsigned long) phba;
+ /* Heartbeat timer */
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+
+ return 0;
+}
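Since both branches above test cfg_enable_fc4_type with a bitwise AND, FCP and NVME resources can be initialized together when both bits are set. A standalone sketch of that convention follows, with assumed flag values (the real defines live in lpfc.h; the helper is illustrative only).

	#include <stdbool.h>

	#define LPFC_ENABLE_FCP		1	/* assumed value */
	#define LPFC_ENABLE_NVME	2	/* assumed value */
	#define LPFC_ENABLE_BOTH	(LPFC_ENABLE_FCP | LPFC_ENABLE_NVME)

	/* Report whether a given FC4 protocol bit is enabled in the config mask. */
	static bool lpfc_fc4_enabled(unsigned int cfg_enable_fc4_type,
				     unsigned int proto_bit)
	{
		return (cfg_enable_fc4_type & proto_bit) != 0;
	}

With cfg_enable_fc4_type set to LPFC_ENABLE_BOTH, the SCSI buffer lists and, on a non-NVMET port, the NVME buffer lists above are both initialized.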
+
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-3 HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+ int rc;
+
+ /*
+ * Initialize timers used by driver
+ */
+
+ /* FCP polling mode timer */
+ init_timer(&phba->fcp_poll_timer);
+ phba->fcp_poll_timer.function = lpfc_poll_timeout;
+ phba->fcp_poll_timer.data = (unsigned long) phba;
/* Host attention work mask setup */
phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5049,6 +5456,12 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
+ /* Set up phase-1 common device driver resources */
+
+ rc = lpfc_setup_driver_resource_phase1(phba);
+ if (rc)
+ return -ENODEV;
+
if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
phba->menlo_flag |= HBA_MENLO_SUPPORT;
/* check for menlo minimum sg count */
@@ -5056,10 +5469,10 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
}
- if (!phba->sli.ring)
- phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING *
+ if (!phba->sli.sli3_ring)
+ phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
sizeof(struct lpfc_sli_ring), GFP_KERNEL);
- if (!phba->sli.ring)
+ if (!phba->sli.sli3_ring)
return -ENOMEM;
/*
@@ -5118,7 +5531,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
* Initialize the SLI Layer to run with lpfc HBAs.
*/
lpfc_sli_setup(phba);
- lpfc_sli_queue_setup(phba);
+ lpfc_sli_queue_init(phba);
/* Allocate device driver memory */
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
@@ -5174,18 +5587,27 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
- struct lpfc_vector_map_info *cpup;
- struct lpfc_sli *psli;
LPFC_MBOXQ_t *mboxq;
- int rc, i, hbq_count, max_buf_size;
+ MAILBOX_t *mb;
+ int rc, i, max_buf_size;
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ uint64_t wwn;
+
+ phba->sli4_hba.num_online_cpu = num_online_cpus();
+ phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+ phba->sli4_hba.curr_disp_cpu = 0;
/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
+ /* Set up phase-1 common device driver resources */
+ rc = lpfc_setup_driver_resource_phase1(phba);
+ if (rc)
+ return -ENODEV;
+
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
if (rc)
@@ -5195,27 +5617,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* Initialize timers used by driver
*/
- /* Heartbeat timer */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
init_timer(&phba->rrq_tmr);
phba->rrq_tmr.function = lpfc_rrq_timeout;
phba->rrq_tmr.data = (unsigned long)phba;
- psli = &phba->sli;
- /* MBOX heartbeat timer */
- init_timer(&psli->mbox_tmo);
- psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long) phba;
- /* Fabric block timer */
- init_timer(&phba->fabric_block_timer);
- phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
- phba->fabric_block_timer.data = (unsigned long) phba;
- /* EA polling mode timer */
- init_timer(&phba->eratt_poll);
- phba->eratt_poll.function = lpfc_poll_eratt;
- phba->eratt_poll.data = (unsigned long) phba;
/* FCF rediscover timer */
init_timer(&phba->fcf.redisc_wait);
phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
@@ -5242,14 +5647,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/*
* For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
- * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
+ * we will associate a new ring, for each EQ/CQ/WQ tuple.
+ * The WQ create will allocate the ring.
*/
- if (!phba->sli.ring)
- phba->sli.ring = kzalloc(
- (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
- sizeof(struct lpfc_sli_ring), GFP_KERNEL);
- if (!phba->sli.ring)
- return -ENOMEM;
/*
* It doesn't matter what family our adapter is in, we are
@@ -5261,43 +5661,45 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
/*
- * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
- * used to create the sg_dma_buf_pool must be dynamically calculated.
+ * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be calculated.
*/
-
if (phba->cfg_enable_bg) {
/*
- * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
- * the FCP rsp, and a SGE for each. Sice we have no control
- * over how many protection data segments the SCSI Layer
+ * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
+ * the FCP rsp, and a SGE. Since we have no control
+ * over how many protection segments the SCSI Layer
* will hand us (ie: there could be one for every block
- * in the IO), we just allocate enough SGEs to accomidate
- * our max amount and we need to limit lpfc_sg_seg_cnt to
- * minimize the risk of running out.
+ * in the IO), just allocate enough SGEs to accommodate
+ * our max amount and we need to limit lpfc_sg_seg_cnt
+ * to minimize the risk of running out.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) + max_buf_size;
+ sizeof(struct fcp_rsp) + max_buf_size;
/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+ phba->cfg_sg_seg_cnt =
+ LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
} else {
/*
- * The scsi_buf for a regular I/O will hold the FCP cmnd,
+ * The scsi_buf for a regular I/O holds the FCP cmnd,
* the FCP rsp, a SGE for each, and a SGE for up to
* cfg_sg_seg_cnt data segments.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + 2) *
+ sizeof(struct sli4_sge));
/* Total SGEs for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
- * to post 1 page for the SGL.
+ * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * need to post 1 page for the SGL.
*/
}
@@ -5317,21 +5719,28 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_total_seg_cnt);
/* Initialize buffer queue management fields */
- hbq_count = lpfc_sli_hbq_count();
- for (i = 0; i < hbq_count; ++i)
- INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
- INIT_LIST_HEAD(&phba->rb_pend_list);
+ INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
- /* Initialize the Abort scsi buffer list used by driver */
- spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Initialize the Abort scsi buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ }
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Initialize the Abort nvme buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ }
+
/* This abort list used by worker thread */
- spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_lock_init(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
/*
* Initialize driver internal slow-path work queues
@@ -5359,10 +5768,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* initialize optic_state to 0xFF */
phba->sli4_hba.lnk_info.optic_state = 0xff;
- /* Initialize the driver internal SLI layer lists. */
- lpfc_sli_setup(phba);
- lpfc_sli_queue_setup(phba);
-
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
@@ -5372,8 +5777,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_2) {
rc = lpfc_pci_function_reset(phba);
- if (unlikely(rc))
- return -ENODEV;
+ if (unlikely(rc)) {
+ rc = -ENODEV;
+ goto out_free_mem;
+ }
phba->temp_sensor_support = 1;
}
@@ -5410,6 +5817,46 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_bsmbx;
}
+ /* Check for NVMET being configured */
+ phba->nvmet_support = 0;
+ if (lpfc_enable_nvmet_cnt) {
+
+ /* First get WWN of HBA instance */
+ lpfc_read_nv(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6016 Mailbox failed, mbxCmd x%x "
+ "READ_NV, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ rc = -EIO;
+ goto out_free_bsmbx;
+ }
+ mb = &mboxq->u.mb;
+ memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
+ sizeof(uint64_t));
+ wwn = cpu_to_be64(wwn);
+ phba->sli4_hba.wwnn.u.name = wwn;
+ memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
+ sizeof(uint64_t));
+ /* wwn is WWPN of HBA instance */
+ wwn = cpu_to_be64(wwn);
+ phba->sli4_hba.wwpn.u.name = wwn;
+
+ /* Check to see if it matches any module parameter */
+ for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
+ if (wwn == lpfc_enable_nvmet[i]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6017 NVME Target %016llx\n",
+ wwn);
+ phba->nvmet_support = 1; /* a match */
+ }
+ }
+ }
+
+ lpfc_nvme_mod_param_dep(phba);
+
/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -5448,9 +5895,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2999 Unsupported SLI4 Parameters "
"Extents and RPI headers enabled.\n");
- goto out_free_bsmbx;
}
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ goto out_free_bsmbx;
}
+
mempool_free(mboxq, phba->mbox_mem_pool);
/* Verify OAS is supported */
@@ -5497,11 +5946,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_remove_rpi_hdrs;
}
- phba->sli4_hba.fcp_eq_hdl =
- kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
- (fof_vectors + phba->cfg_fcp_io_channel)),
- GFP_KERNEL);
- if (!phba->sli4_hba.fcp_eq_hdl) {
+ phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
+ sizeof(struct lpfc_hba_eq_hdl),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.hba_eq_hdl) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for "
"fast-path per-EQ handle array\n");
@@ -5509,52 +5957,31 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcf_rr_bmask;
}
- phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
- (fof_vectors +
- phba->cfg_fcp_io_channel)), GFP_KERNEL);
- if (!phba->sli4_hba.msix_entries) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2573 Failed allocate memory for msi-x "
- "interrupt vector entries\n");
- rc = -ENOMEM;
- goto out_free_fcp_eq_hdl;
- }
-
- phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
- phba->sli4_hba.num_present_cpu),
- GFP_KERNEL);
+ phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+ sizeof(struct lpfc_vector_map_info),
+ GFP_KERNEL);
if (!phba->sli4_hba.cpu_map) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3327 Failed allocate memory for msi-x "
"interrupt vector mapping\n");
rc = -ENOMEM;
- goto out_free_msix;
+ goto out_free_hba_eq_hdl;
}
if (lpfc_used_cpu == NULL) {
- lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
- GFP_KERNEL);
+ lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
+ GFP_KERNEL);
if (!lpfc_used_cpu) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Failed allocate memory for msi-x "
"interrupt vector mapping\n");
kfree(phba->sli4_hba.cpu_map);
rc = -ENOMEM;
- goto out_free_msix;
+ goto out_free_hba_eq_hdl;
}
for (i = 0; i < lpfc_present_cpu; i++)
lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
}
- /* Initialize io channels for round robin */
- cpup = phba->sli4_hba.cpu_map;
- rc = 0;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- cpup->channel_id = rc;
- rc++;
- if (rc >= phba->cfg_fcp_io_channel)
- rc = 0;
- }
-
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -5574,10 +6001,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
-out_free_msix:
- kfree(phba->sli4_hba.msix_entries);
-out_free_fcp_eq_hdl:
- kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_hba_eq_hdl:
+ kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
@@ -5611,11 +6036,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_online_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;
- /* Free memory allocated for msi-x interrupt vector entries */
- kfree(phba->sli4_hba.msix_entries);
-
/* Free memory allocated for fast-path work queue handles */
- kfree(phba->sli4_hba.fcp_eq_hdl);
+ kfree(phba->sli4_hba.hba_eq_hdl);
/* Free the allocated rpi headers. */
lpfc_sli4_remove_rpi_hdrs(phba);
@@ -5627,6 +6049,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
/* Free the ELS sgl list */
lpfc_free_active_sgl(phba);
lpfc_free_els_sgl_list(phba);
+ lpfc_free_nvmet_sgl_list(phba);
/* Free the completion queue EQ event pool */
lpfc_sli4_cq_event_release_all(phba);
@@ -5689,58 +6112,6 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
}
/**
- * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up the driver internal resources before the
- * device specific resource setup to support the HBA device it attached to.
- *
- * Return codes
- * 0 - successful
- * other values - error
- **/
-static int
-lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
-{
- /*
- * Driver resources common to all SLI revisions
- */
- atomic_set(&phba->fast_event_count, 0);
- spin_lock_init(&phba->hbalock);
-
- /* Initialize ndlp management spinlock */
- spin_lock_init(&phba->ndlp_lock);
-
- INIT_LIST_HEAD(&phba->port_list);
- INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
-
- /* Initialize the wait queue head for the kernel thread */
- init_waitqueue_head(&phba->work_waitq);
-
- /* Initialize the scsi buffer list used by driver for scsi IO */
- spin_lock_init(&phba->scsi_buf_list_get_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
- spin_lock_init(&phba->scsi_buf_list_put_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-
- /* Initialize the fabric iocb list */
- INIT_LIST_HEAD(&phba->fabric_iocb_list);
-
- /* Initialize list to save ELS buffers */
- INIT_LIST_HEAD(&phba->elsbuf);
-
- /* Initialize FCF connection rec list */
- INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
-
- /* Initialize OAS configuration list */
- spin_lock_init(&phba->devicelock);
- INIT_LIST_HEAD(&phba->luns);
-
- return 0;
-}
-
-/**
* lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
* @phba: pointer to lpfc hba data structure.
*
@@ -5887,13 +6258,12 @@ static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
LIST_HEAD(sglq_list);
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
/* Retrieve all els sgls from driver list */
spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
- spin_unlock(&pring->ring_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
/* Now free the sgl list */
@@ -5901,6 +6271,33 @@ lpfc_free_els_sgl_list(struct lpfc_hba *phba)
}
/**
+ * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's nvmet sgl list and memory.
+ **/
+static void
+lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+ LIST_HEAD(sglq_list);
+
+ /* Retrieve all nvmet sgls from driver list */
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Now free the sgl list */
+ list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
+ list_del(&sglq_entry->list);
+ lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
+ kfree(sglq_entry);
+ }
+}
+
+/**
* lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
* @phba: pointer to lpfc hba data structure.
*
@@ -5947,14 +6344,19 @@ static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
/* Initialize and populate the sglq list per host/VF. */
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
/* els xri-sgl book keeping */
phba->sli4_hba.els_xri_cnt = 0;
/* scsi xri-buffer book keeping */
phba->sli4_hba.scsi_xri_cnt = 0;
+
+ /* nvme xri-buffer book keeping */
+ phba->sli4_hba.nvme_xri_cnt = 0;
}
/**
@@ -6185,9 +6587,9 @@ lpfc_hba_free(struct lpfc_hba *phba)
/* Release the driver assigned board number */
idr_remove(&lpfc_hba_index, phba->brd_no);
- /* Free memory allocated with sli rings */
- kfree(phba->sli.ring);
- phba->sli.ring = NULL;
+ /* Free memory allocated with sli3 rings */
+ kfree(phba->sli.sli3_ring);
+ phba->sli.sli3_ring = NULL;
kfree(phba);
return;
@@ -6223,6 +6625,23 @@ lpfc_create_shost(struct lpfc_hba *phba)
shost = lpfc_shost_from_vport(vport);
phba->pport = vport;
+
+ if (phba->nvmet_support) {
+ /* Only 1 vport (pport) will support NVME target */
+ if (phba->txrdy_payload_pool == NULL) {
+ phba->txrdy_payload_pool = pci_pool_create(
+ "txrdy_pool", phba->pcidev,
+ TXRDY_PAYLOAD_LEN, 16, 0);
+ if (phba->txrdy_payload_pool) {
+ phba->targetport = NULL;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_INIT | LOG_NVME_DISC,
+ "6076 NVME Target Found\n");
+ }
+ }
+ }
+
lpfc_debugfs_initialize(vport);
/* Put reference to SCSI host to driver's device private data */
pci_set_drvdata(phba->pcidev, shost);
@@ -6504,8 +6923,6 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
- INIT_LIST_HEAD(&phba->rb_pend_list);
-
phba->MBslimaddr = phba->slim_memmap_p;
phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
@@ -7009,7 +7426,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
"VPI(B:%d M:%d) "
"VFI(B:%d M:%d) "
"RPI(B:%d M:%d) "
- "FCFI(Count:%d)\n",
+ "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
phba->sli4_hba.extents_in_use,
phba->sli4_hba.max_cfg_param.xri_base,
phba->sli4_hba.max_cfg_param.max_xri,
@@ -7019,7 +7436,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
phba->sli4_hba.max_cfg_param.max_vfi,
phba->sli4_hba.max_cfg_param.rpi_base,
phba->sli4_hba.max_cfg_param.max_rpi,
- phba->sli4_hba.max_cfg_param.max_fcfi);
+ phba->sli4_hba.max_cfg_param.max_fcfi,
+ phba->sli4_hba.max_cfg_param.max_eq,
+ phba->sli4_hba.max_cfg_param.max_cq,
+ phba->sli4_hba.max_cfg_param.max_wq,
+ phba->sli4_hba.max_cfg_param.max_rq);
+
}
if (rc)
@@ -7210,11 +7632,11 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
+ * lpfc_sli4_queue_verify - Verify and update EQ counts
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to check the user settable queue counts for EQs and
- * CQs. after this routine is called the counts will be set to valid values that
+ * This routine is invoked to check the user settable queue counts for EQs.
+ * After this routine is called the counts will be set to valid values that
* adhere to the constraints of the system's interrupt vectors and the port's
* queue resources.
*
@@ -7225,9 +7647,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
- int cfg_fcp_io_channel;
- uint32_t cpu;
- uint32_t i = 0;
+ int io_channel;
int fof_vectors = phba->cfg_fof ? 1 : 0;
/*
@@ -7236,49 +7656,40 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
*/
/* Sanity check on HBA EQ parameters */
- cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
-
- /* It doesn't make sense to have more io channels then online CPUs */
- for_each_present_cpu(cpu) {
- if (cpu_online(cpu))
- i++;
- }
- phba->sli4_hba.num_online_cpu = i;
- phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
- phba->sli4_hba.curr_disp_cpu = 0;
+ io_channel = phba->io_channel_irqs;
- if (i < cfg_fcp_io_channel) {
+ if (phba->sli4_hba.num_online_cpu < io_channel) {
lpfc_printf_log(phba,
KERN_ERR, LOG_INIT,
"3188 Reducing IO channels to match number of "
"online CPUs: from %d to %d\n",
- cfg_fcp_io_channel, i);
- cfg_fcp_io_channel = i;
+ io_channel, phba->sli4_hba.num_online_cpu);
+ io_channel = phba->sli4_hba.num_online_cpu;
}
- if (cfg_fcp_io_channel + fof_vectors >
- phba->sli4_hba.max_cfg_param.max_eq) {
- if (phba->sli4_hba.max_cfg_param.max_eq <
- LPFC_FCP_IO_CHAN_MIN) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2574 Not enough EQs (%d) from the "
- "pci function for supporting FCP "
- "EQs (%d)\n",
- phba->sli4_hba.max_cfg_param.max_eq,
- phba->cfg_fcp_io_channel);
- goto out_error;
- }
+ if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2575 Reducing IO channels to match number of "
"available EQs: from %d to %d\n",
- cfg_fcp_io_channel,
+ io_channel,
phba->sli4_hba.max_cfg_param.max_eq);
- cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
- fof_vectors;
+ io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
}
- /* The actual number of FCP event queues adopted */
- phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
+ /* The actual number of FCP / NVME event queues adopted */
+ if (io_channel != phba->io_channel_irqs)
+ phba->io_channel_irqs = io_channel;
+ if (phba->cfg_fcp_io_channel > io_channel)
+ phba->cfg_fcp_io_channel = io_channel;
+ if (phba->cfg_nvme_io_channel > io_channel)
+ phba->cfg_nvme_io_channel = io_channel;
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
+ phba->io_channel_irqs, phba->cfg_fcp_io_channel,
+ phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
/* Get EQ depth from module parameter, fake the default for now */
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
@@ -7287,10 +7698,67 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
/* Get CQ depth from module parameter, fake the default for now */
phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+ return 0;
+}
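A worked example of the clamping above, with illustrative numbers only: suppose io_channel_irqs was requested as 16 on a system with 8 online CPUs, a port reporting max_eq = 6, and one OAS (fof) vector. The count first drops to 8 to match the online CPUs, then to 6 - 1 = 5 to fit the available EQs; cfg_fcp_io_channel and cfg_nvme_io_channel are trimmed to 5, and cfg_nvmet_mrq is capped at cfg_nvme_io_channel.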
+static int
+lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
+{
+ struct lpfc_queue *qdesc;
+ int cnt;
+
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0508 Failed allocate fast-path NVME CQ (%d)\n",
+ wqidx);
+ return 1;
+ }
+ phba->sli4_hba.nvme_cq[wqidx] = qdesc;
+
+ cnt = LPFC_NVME_WQSIZE;
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0509 Failed allocate fast-path NVME WQ (%d)\n",
+ wqidx);
+ return 1;
+ }
+ phba->sli4_hba.nvme_wq[wqidx] = qdesc;
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+ return 0;
+}
+
+static int
+lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
+{
+ struct lpfc_queue *qdesc;
+ uint32_t wqesize;
+
+ /* Create Fast Path FCP CQs */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
+ return 1;
+ }
+ phba->sli4_hba.fcp_cq[wqidx] = qdesc;
+
+ /* Create Fast Path FCP WQs */
+ wqesize = (phba->fcp_embed_io) ?
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0503 Failed allocate fast-path FCP WQ (%d)\n",
+ wqidx);
+ return 1;
+ }
+ phba->sli4_hba.fcp_wq[wqidx] = qdesc;
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
return 0;
-out_error:
- return -ENOMEM;
}
/**
@@ -7311,13 +7779,14 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- uint32_t wqesize;
- int idx;
+ int idx, io_channel, max;
/*
* Create HBA Record arrays.
+ * Both NVME and FCP will share that same vectors / EQs
*/
- if (!phba->cfg_fcp_io_channel)
+ io_channel = phba->io_channel_irqs;
+ if (!io_channel)
return -ERANGE;
phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
@@ -7326,9 +7795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+ phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+ phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
- phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
+ phba->sli4_hba.hba_eq = kcalloc(io_channel,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
if (!phba->sli4_hba.hba_eq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2576 Failed allocate memory for "
@@ -7336,44 +7810,115 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
- phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2577 Failed allocate memory for fast-path "
- "CQ record array\n");
- goto out_error;
+ if (phba->cfg_fcp_io_channel) {
+ phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2577 Failed allocate memory for "
+ "fast-path CQ record array\n");
+ goto out_error;
+ }
+ phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2578 Failed allocate memory for "
+ "fast-path FCP WQ record array\n");
+ goto out_error;
+ }
+ /*
+ * Since the first EQ can have multiple CQs associated with it,
+ * this array is used to quickly see if we have a FCP fast-path
+ * CQ match.
+ */
+ phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.fcp_cq_map) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2545 Failed allocate memory for "
+ "fast-path CQ map\n");
+ goto out_error;
+ }
}
- phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2578 Failed allocate memory for fast-path "
- "WQ record array\n");
- goto out_error;
- }
+ if (phba->cfg_nvme_io_channel) {
+ phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvme_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6077 Failed allocate memory for "
+ "fast-path CQ record array\n");
+ goto out_error;
+ }
- /*
- * Since the first EQ can have multiple CQs associated with it,
- * this array is used to quickly see if we have a FCP fast-path
- * CQ match.
- */
- phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
- phba->cfg_fcp_io_channel), GFP_KERNEL);
- if (!phba->sli4_hba.fcp_cq_map) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2545 Failed allocate memory for fast-path "
- "CQ map\n");
- goto out_error;
+ phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvme_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2581 Failed allocate memory for "
+ "fast-path NVME WQ record array\n");
+ goto out_error;
+ }
+
+ /*
+ * Since the first EQ can have multiple CQs associated with it,
+ * this array is used to quickly see if we have a NVME fast-path
+ * CQ match.
+ */
+ phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
+ sizeof(uint16_t),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvme_cq_map) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6078 Failed allocate memory for "
+ "fast-path CQ map\n");
+ goto out_error;
+ }
+
+ if (phba->nvmet_support) {
+ phba->sli4_hba.nvmet_cqset = kcalloc(
+ phba->cfg_nvmet_mrq,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvmet_cqset) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3121 Fail allocate memory for "
+ "fast-path CQ set array\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
+ phba->cfg_nvmet_mrq,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvmet_mrq_hdr) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3122 Fail allocate memory for "
+ "fast-path RQ set hdr array\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_data = kcalloc(
+ phba->cfg_nvmet_mrq,
+ sizeof(struct lpfc_queue *),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.nvmet_mrq_data) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3124 Fail allocate memory for "
+ "fast-path RQ set data array\n");
+ goto out_error;
+ }
+ }
}
- /*
- * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
- * how many EQs to create.
- */
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
+ /* Create HBA Event Queues (EQs) */
+ for (idx = 0; idx < io_channel; idx++) {
/* Create EQs */
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount);
@@ -7383,33 +7928,42 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
phba->sli4_hba.hba_eq[idx] = qdesc;
+ }
- /* Create Fast Path FCP CQs */
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
- phba->sli4_hba.cq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0499 Failed allocate fast-path FCP "
- "CQ (%d)\n", idx);
+ /* FCP and NVME io channels are not required to be balanced */
+
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
+ if (lpfc_alloc_fcp_wq_cq(phba, idx))
goto out_error;
- }
- phba->sli4_hba.fcp_cq[idx] = qdesc;
- /* Create Fast Path FCP WQs */
- wqesize = (phba->fcp_embed_io) ?
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
- phba->sli4_hba.wq_ecount);
- if (!qdesc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0503 Failed allocate fast-path FCP "
- "WQ (%d)\n", idx);
+ for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
+ if (lpfc_alloc_nvme_wq_cq(phba, idx))
+ goto out_error;
+
+ /* allocate MRQ CQs */
+ max = phba->cfg_nvme_io_channel;
+ if (max < phba->cfg_nvmet_mrq)
+ max = phba->cfg_nvmet_mrq;
+
+ for (idx = 0; idx < max; idx++)
+ if (lpfc_alloc_nvme_wq_cq(phba, idx))
goto out_error;
+
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3142 Failed allocate NVME "
+ "CQ Set (%d)\n", idx);
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_cqset[idx] = qdesc;
}
- phba->sli4_hba.fcp_wq[idx] = qdesc;
}
-
/*
* Create Slow Path Completion Queues (CQs)
*/
@@ -7463,6 +8017,30 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
goto out_error;
}
phba->sli4_hba.els_wq = qdesc;
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Create NVME LS Complete Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+ phba->sli4_hba.cq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6079 Failed allocate NVME LS CQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmels_cq = qdesc;
+
+ /* Create NVME LS Work Queue */
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+ phba->sli4_hba.wq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6080 Failed allocate NVME LS WQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmels_wq = qdesc;
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
+ }
/*
* Create Receive Queue (RQ)
@@ -7488,6 +8066,44 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
}
phba->sli4_hba.dat_rq = qdesc;
+ if (phba->nvmet_support) {
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
+ /* Create NVMET Receive Queue for header */
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3146 Failed allocate "
+ "receive HRQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
+
+ /* Only needed for header of RQ pair */
+ qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
+ GFP_KERNEL);
+ if (qdesc->rqbp == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6131 Failed allocate "
+ "Header RQBP\n");
+ goto out_error;
+ }
+
+ /* Create NVMET Receive Queue for data */
+ qdesc = lpfc_sli4_queue_alloc(phba,
+ phba->sli4_hba.rq_esize,
+ phba->sli4_hba.rq_ecount);
+ if (!qdesc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3156 Failed allocate "
+ "receive DRQ\n");
+ goto out_error;
+ }
+ phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
+ }
+ }
+
/* Create the Queues needed for Flash Optimized Fabric operations */
if (phba->cfg_fof)
lpfc_fof_queue_create(phba);
@@ -7498,6 +8114,39 @@ out_error:
return -ENOMEM;
}
+static inline void
+__lpfc_sli4_release_queue(struct lpfc_queue **qp)
+{
+ if (*qp != NULL) {
+ lpfc_sli4_queue_free(*qp);
+ *qp = NULL;
+ }
+}
+
+static inline void
+lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
+{
+ int idx;
+
+ if (*qs == NULL)
+ return;
+
+ for (idx = 0; idx < max; idx++)
+ __lpfc_sli4_release_queue(&(*qs)[idx]);
+
+ kfree(*qs);
+ *qs = NULL;
+}
+
+static inline void
+lpfc_sli4_release_queue_map(uint16_t **qmap)
+{
+ if (*qmap != NULL) {
+ kfree(*qmap);
+ *qmap = NULL;
+ }
+}
+
/**
* lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
* @phba: pointer to lpfc hba data structure.
@@ -7513,91 +8162,196 @@ out_error:
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
- int idx;
-
if (phba->cfg_fof)
lpfc_fof_queue_destroy(phba);
- if (phba->sli4_hba.hba_eq != NULL) {
- /* Release HBA event queue */
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
- if (phba->sli4_hba.hba_eq[idx] != NULL) {
- lpfc_sli4_queue_free(
- phba->sli4_hba.hba_eq[idx]);
- phba->sli4_hba.hba_eq[idx] = NULL;
- }
- }
- kfree(phba->sli4_hba.hba_eq);
- phba->sli4_hba.hba_eq = NULL;
- }
+ /* Release HBA eqs */
+ lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
- if (phba->sli4_hba.fcp_cq != NULL) {
- /* Release FCP completion queue */
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
- if (phba->sli4_hba.fcp_cq[idx] != NULL) {
- lpfc_sli4_queue_free(
- phba->sli4_hba.fcp_cq[idx]);
- phba->sli4_hba.fcp_cq[idx] = NULL;
- }
- }
- kfree(phba->sli4_hba.fcp_cq);
- phba->sli4_hba.fcp_cq = NULL;
- }
+ /* Release FCP cqs */
+ lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
+ phba->cfg_fcp_io_channel);
- if (phba->sli4_hba.fcp_wq != NULL) {
- /* Release FCP work queue */
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
- if (phba->sli4_hba.fcp_wq[idx] != NULL) {
- lpfc_sli4_queue_free(
- phba->sli4_hba.fcp_wq[idx]);
- phba->sli4_hba.fcp_wq[idx] = NULL;
- }
- }
- kfree(phba->sli4_hba.fcp_wq);
- phba->sli4_hba.fcp_wq = NULL;
- }
+ /* Release FCP wqs */
+ lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
+ phba->cfg_fcp_io_channel);
/* Release FCP CQ mapping array */
- if (phba->sli4_hba.fcp_cq_map != NULL) {
- kfree(phba->sli4_hba.fcp_cq_map);
- phba->sli4_hba.fcp_cq_map = NULL;
- }
+ lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
+
+ /* Release NVME cqs */
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
+ phba->cfg_nvme_io_channel);
+
+ /* Release NVME wqs */
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
+ phba->cfg_nvme_io_channel);
+
+ /* Release NVME CQ mapping array */
+ lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
+
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+ phba->cfg_nvmet_mrq);
+
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+ phba->cfg_nvmet_mrq);
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+ phba->cfg_nvmet_mrq);
/* Release mailbox command work queue */
- if (phba->sli4_hba.mbx_wq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
- phba->sli4_hba.mbx_wq = NULL;
- }
+ __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
/* Release ELS work queue */
- if (phba->sli4_hba.els_wq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
- phba->sli4_hba.els_wq = NULL;
- }
+ __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
+
+ /* Release ELS work queue */
+ __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
/* Release unsolicited receive queue */
- if (phba->sli4_hba.hdr_rq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
- phba->sli4_hba.hdr_rq = NULL;
+ __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
+ __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
+
+ /* Release ELS complete queue */
+ __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
+
+ /* Release NVME LS complete queue */
+ __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
+
+ /* Release mailbox command complete queue */
+ __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
+
+ /* Everything on this list has been freed */
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
+}
+
+int
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ struct lpfc_queue *drq, int count)
+{
+ int rc, i;
+ struct lpfc_rqe hrqe;
+ struct lpfc_rqe drqe;
+ struct lpfc_rqb *rqbp;
+ struct rqb_dmabuf *rqb_buffer;
+ LIST_HEAD(rqb_buf_list);
+
+ rqbp = hrq->rqbp;
+ for (i = 0; i < count; i++) {
+ rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
+ if (!rqb_buffer)
+ break;
+ rqb_buffer->hrq = hrq;
+ rqb_buffer->drq = drq;
+ list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
+ }
+ while (!list_empty(&rqb_buf_list)) {
+ list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
+ hbuf.list);
+
+ hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
+ hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
+ drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
+ drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
+ rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
+ if (rc < 0) {
+ (rqbp->rqb_free_buffer)(phba, rqb_buffer);
+ } else {
+ list_add_tail(&rqb_buffer->hbuf.list,
+ &rqbp->rqb_buffer_list);
+ rqbp->buffer_count++;
+ }
}
- if (phba->sli4_hba.dat_rq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
- phba->sli4_hba.dat_rq = NULL;
+ return 1;
+}
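/*
 * A minimal standalone sketch of how lpfc_post_rq_buffer() above fills a
 * receive-queue entry pair: each RQE carries the DMA address of its
 * buffer split into 32-bit low/high words, which is what the driver's
 * putPaddrLow()/putPaddrHigh() helpers compute.  The struct below is an
 * invented stand-in, not the hardware RQE layout.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_rqe {
	uint32_t address_lo;
	uint32_t address_hi;
};

static void fill_rqe(struct demo_rqe *rqe, uint64_t dma_addr)
{
	rqe->address_lo = (uint32_t)(dma_addr & 0xffffffffu);
	rqe->address_hi = (uint32_t)(dma_addr >> 32);
}

int main(void)
{
	struct demo_rqe hrqe;

	fill_rqe(&hrqe, 0x0000004512345678ULL);
	printf("lo=0x%08x hi=0x%08x\n", hrqe.address_lo, hrqe.address_hi);
	return 0;
}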
+
+int
+lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
+{
+ struct lpfc_rqb *rqbp;
+ struct lpfc_dmabuf *h_buf;
+ struct rqb_dmabuf *rqb_buffer;
+
+ rqbp = rq->rqbp;
+ while (!list_empty(&rqbp->rqb_buffer_list)) {
+ list_remove_head(&rqbp->rqb_buffer_list, h_buf,
+ struct lpfc_dmabuf, list);
+
+ rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
+ (rqbp->rqb_free_buffer)(phba, rqb_buffer);
+ rqbp->buffer_count--;
}
+ return 1;
+}
- /* Release ELS complete queue */
- if (phba->sli4_hba.els_cq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
- phba->sli4_hba.els_cq = NULL;
+static int
+lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
+ struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
+ int qidx, uint32_t qtype)
+{
+ struct lpfc_sli_ring *pring;
+ int rc;
+
+ if (!eq || !cq || !wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6085 Fast-path %s (%d) not allocated\n",
+ ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
+ return -ENOMEM;
}
- /* Release mailbox command complete queue */
- if (phba->sli4_hba.mbx_cq != NULL) {
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
- phba->sli4_hba.mbx_cq = NULL;
+ /* create the Cq first */
+ rc = lpfc_cq_create(phba, cq, eq,
+ (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6086 Failed setup of CQ (%d), rc = 0x%x\n",
+ qidx, (uint32_t)rc);
+ return rc;
}
- return;
+ if (qtype != LPFC_MBOX) {
+ /* Setup nvme_cq_map for fast lookup */
+ if (cq_map)
+ *cq_map = cq->queue_id;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
+ qidx, cq->queue_id, qidx, eq->queue_id);
+
+ /* create the wq */
+ rc = lpfc_wq_create(phba, wq, cq, qtype);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
+ qidx, (uint32_t)rc);
+ /* no need to tear down cq - caller will do so */
+ return rc;
+ }
+
+ /* Bind this CQ/WQ to the NVME ring */
+ pring = wq->pring;
+ pring->sli.sli4.wqp = (void *)wq;
+ cq->pring = pring;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
+ qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
+ } else {
+ rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0539 Failed setup of slow-path MQ: "
+ "rc = 0x%x\n", rc);
+ /* no need to tear down cq - caller will do so */
+ return rc;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
+ phba->sli4_hba.mbx_wq->queue_id,
+ phba->sli4_hba.mbx_cq->queue_id);
+ }
+
+ return 0;
}
/**
@@ -7615,15 +8369,12 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;
- int rc = -ENOMEM;
- int fcp_eqidx, fcp_cqidx, fcp_wqidx;
- int fcp_cq_index = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
LPFC_MBOXQ_t *mboxq;
- uint32_t length;
+ int qidx;
+ uint32_t length, io_channel;
+ int rc = -ENOMEM;
/* Check for dual-ULP support */
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -7673,220 +8424,263 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
/*
* Set up HBA Event Queues (EQs)
*/
+ io_channel = phba->io_channel_irqs;
/* Set up HBA event queue */
- if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
+ if (io_channel && !phba->sli4_hba.hba_eq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3147 Fast-path EQs not allocated\n");
rc = -ENOMEM;
goto out_error;
}
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
- if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
+ for (qidx = 0; qidx < io_channel; qidx++) {
+ if (!phba->sli4_hba.hba_eq[qidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0522 Fast-path EQ (%d) not "
- "allocated\n", fcp_eqidx);
+ "allocated\n", qidx);
rc = -ENOMEM;
- goto out_destroy_hba_eq;
+ goto out_destroy;
}
- rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
- (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
+ rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
+ phba->cfg_fcp_imax);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0523 Failed setup of fast-path EQ "
- "(%d), rc = 0x%x\n", fcp_eqidx,
+ "(%d), rc = 0x%x\n", qidx,
(uint32_t)rc);
- goto out_destroy_hba_eq;
+ goto out_destroy;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2584 HBA EQ setup: "
- "queue[%d]-id=%d\n", fcp_eqidx,
- phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
- }
-
- /* Set up fast-path FCP Response Complete Queue */
- if (!phba->sli4_hba.fcp_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3148 Fast-path FCP CQ array not "
- "allocated\n");
- rc = -ENOMEM;
- goto out_destroy_hba_eq;
+ "2584 HBA EQ setup: queue[%d]-id=%d\n",
+ qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
}
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
- if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+ if (phba->cfg_nvme_io_channel) {
+ if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0526 Fast-path FCP CQ (%d) not "
- "allocated\n", fcp_cqidx);
+ "6084 Fast-path NVME %s array not allocated\n",
+				(phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
rc = -ENOMEM;
- goto out_destroy_fcp_cq;
- }
- rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
- phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0527 Failed setup of fast-path FCP "
- "CQ (%d), rc = 0x%x\n", fcp_cqidx,
- (uint32_t)rc);
- goto out_destroy_fcp_cq;
+ goto out_destroy;
}
- /* Setup fcp_cq_map for fast lookup */
- phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
- phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2588 FCP CQ setup: cq[%d]-id=%d, "
- "parent seq[%d]-id=%d\n",
- fcp_cqidx,
- phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
- fcp_cqidx,
- phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
- }
-
- /* Set up fast-path FCP Work Queue */
- if (!phba->sli4_hba.fcp_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3149 Fast-path FCP WQ array not "
- "allocated\n");
- rc = -ENOMEM;
- goto out_destroy_fcp_cq;
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
+ rc = lpfc_create_wq_cq(phba,
+ phba->sli4_hba.hba_eq[
+ qidx % io_channel],
+ phba->sli4_hba.nvme_cq[qidx],
+ phba->sli4_hba.nvme_wq[qidx],
+ &phba->sli4_hba.nvme_cq_map[qidx],
+ qidx, LPFC_NVME);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6123 Failed to setup fastpath "
+ "NVME WQ/CQ (%d), rc = 0x%x\n",
+ qidx, (uint32_t)rc);
+ goto out_destroy;
+ }
+ }
}
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
- if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+ if (phba->cfg_fcp_io_channel) {
+ /* Set up fast-path FCP Response Complete Queue */
+ if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0534 Fast-path FCP WQ (%d) not "
- "allocated\n", fcp_wqidx);
+ "3148 Fast-path FCP %s array not allocated\n",
+ phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
rc = -ENOMEM;
- goto out_destroy_fcp_wq;
- }
- rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
- phba->sli4_hba.fcp_cq[fcp_wqidx],
- LPFC_FCP);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0535 Failed setup of fast-path FCP "
- "WQ (%d), rc = 0x%x\n", fcp_wqidx,
- (uint32_t)rc);
- goto out_destroy_fcp_wq;
+ goto out_destroy;
}
- /* Bind this WQ to the next FCP ring */
- pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
- pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
- phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2591 FCP WQ setup: wq[%d]-id=%d, "
- "parent cq[%d]-id=%d\n",
- fcp_wqidx,
- phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
- fcp_cq_index,
- phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
+ rc = lpfc_create_wq_cq(phba,
+ phba->sli4_hba.hba_eq[
+ qidx % io_channel],
+ phba->sli4_hba.fcp_cq[qidx],
+ phba->sli4_hba.fcp_wq[qidx],
+ &phba->sli4_hba.fcp_cq_map[qidx],
+ qidx, LPFC_FCP);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0535 Failed to setup fastpath "
+ "FCP WQ/CQ (%d), rc = 0x%x\n",
+ qidx, (uint32_t)rc);
+ goto out_destroy;
+ }
+ }
}
+
/*
- * Set up Complete Queues (CQs)
+ * Set up Slow Path Complete Queues (CQs)
*/
- /* Set up slow-path MBOX Complete Queue as the first CQ */
- if (!phba->sli4_hba.mbx_cq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0528 Mailbox CQ not allocated\n");
- rc = -ENOMEM;
- goto out_destroy_fcp_wq;
- }
- rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
- phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0529 Failed setup of slow-path mailbox CQ: "
- "rc = 0x%x\n", (uint32_t)rc);
- goto out_destroy_fcp_wq;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
- phba->sli4_hba.mbx_cq->queue_id,
- phba->sli4_hba.hba_eq[0]->queue_id);
+ /* Set up slow-path MBOX CQ/MQ */
- /* Set up slow-path ELS Complete Queue */
- if (!phba->sli4_hba.els_cq) {
+ if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0530 ELS CQ not allocated\n");
+ "0528 %s not allocated\n",
+ phba->sli4_hba.mbx_cq ?
+ "Mailbox WQ" : "Mailbox CQ");
rc = -ENOMEM;
- goto out_destroy_mbx_cq;
- }
- rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
- phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0531 Failed setup of slow-path ELS CQ: "
- "rc = 0x%x\n", (uint32_t)rc);
- goto out_destroy_mbx_cq;
+ goto out_destroy;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
- phba->sli4_hba.els_cq->queue_id,
- phba->sli4_hba.hba_eq[0]->queue_id);
-
- /*
- * Set up all the Work Queues (WQs)
- */
- /* Set up Mailbox Command Queue */
- if (!phba->sli4_hba.mbx_wq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0538 Slow-path MQ not allocated\n");
- rc = -ENOMEM;
- goto out_destroy_els_cq;
- }
- rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
- phba->sli4_hba.mbx_cq, LPFC_MBOX);
+ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+ phba->sli4_hba.mbx_cq,
+ phba->sli4_hba.mbx_wq,
+ NULL, 0, LPFC_MBOX);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0539 Failed setup of slow-path MQ: "
- "rc = 0x%x\n", rc);
- goto out_destroy_els_cq;
+ "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
+ }
+ if (phba->nvmet_support) {
+ if (!phba->sli4_hba.nvmet_cqset) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3165 Fast-path NVME CQ Set "
+ "array not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy;
+ }
+ if (phba->cfg_nvmet_mrq > 1) {
+ rc = lpfc_cq_create_set(phba,
+ phba->sli4_hba.nvmet_cqset,
+ phba->sli4_hba.hba_eq,
+ LPFC_WCQ, LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3164 Failed setup of NVME CQ "
+ "Set, rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
+ }
+ } else {
+ /* Set up NVMET Receive Complete Queue */
+ rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
+ phba->sli4_hba.hba_eq[0],
+ LPFC_WCQ, LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6089 Failed setup NVMET CQ: "
+ "rc = 0x%x\n", (uint32_t)rc);
+ goto out_destroy;
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6090 NVMET CQ setup: cq-id=%d, "
+ "parent eq-id=%d\n",
+ phba->sli4_hba.nvmet_cqset[0]->queue_id,
+ phba->sli4_hba.hba_eq[0]->queue_id);
+ }
}
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
- phba->sli4_hba.mbx_wq->queue_id,
- phba->sli4_hba.mbx_cq->queue_id);
- /* Set up slow-path ELS Work Queue */
- if (!phba->sli4_hba.els_wq) {
+ /* Set up slow-path ELS WQ/CQ */
+ if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0536 Slow-path ELS WQ not allocated\n");
+ "0530 ELS %s not allocated\n",
+ phba->sli4_hba.els_cq ? "WQ" : "CQ");
rc = -ENOMEM;
- goto out_destroy_mbx_wq;
+ goto out_destroy;
}
- rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
- phba->sli4_hba.els_cq, LPFC_ELS);
+ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+ phba->sli4_hba.els_cq,
+ phba->sli4_hba.els_wq,
+ NULL, 0, LPFC_ELS);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0537 Failed setup of slow-path ELS WQ: "
- "rc = 0x%x\n", (uint32_t)rc);
- goto out_destroy_mbx_wq;
+ "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
}
-
- /* Bind this WQ to the ELS ring */
- pring = &psli->ring[LPFC_ELS_RING];
- pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
- phba->sli4_hba.els_cq->pring = pring;
-
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
phba->sli4_hba.els_wq->queue_id,
phba->sli4_hba.els_cq->queue_id);
+ if (phba->cfg_nvme_io_channel) {
+ /* Set up NVME LS Complete Queue */
+ if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6091 LS %s not allocated\n",
+ phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
+ rc = -ENOMEM;
+ goto out_destroy;
+ }
+ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
+ phba->sli4_hba.nvmels_cq,
+ phba->sli4_hba.nvmels_wq,
+ NULL, 0, LPFC_NVME_LS);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0529 Failed setup of NVME LS WQ/CQ: "
+ "rc = 0x%x\n", (uint32_t)rc);
+ goto out_destroy;
+ }
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"6096 NVME LS WQ setup: wq-id=%d, "
+ "parent cq-id=%d\n",
+ phba->sli4_hba.nvmels_wq->queue_id,
+ phba->sli4_hba.nvmels_cq->queue_id);
+ }
+
/*
- * Create Receive Queue (RQ)
+ * Create NVMET Receive Queue (RQ)
*/
+ if (phba->nvmet_support) {
+ if ((!phba->sli4_hba.nvmet_cqset) ||
+ (!phba->sli4_hba.nvmet_mrq_hdr) ||
+ (!phba->sli4_hba.nvmet_mrq_data)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6130 MRQ CQ Queues not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy;
+ }
+ if (phba->cfg_nvmet_mrq > 1) {
+ rc = lpfc_mrq_create(phba,
+ phba->sli4_hba.nvmet_mrq_hdr,
+ phba->sli4_hba.nvmet_mrq_data,
+ phba->sli4_hba.nvmet_cqset,
+ LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6098 Failed setup of NVMET "
+ "MRQ: rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
+ }
+
+ } else {
+ rc = lpfc_rq_create(phba,
+ phba->sli4_hba.nvmet_mrq_hdr[0],
+ phba->sli4_hba.nvmet_mrq_data[0],
+ phba->sli4_hba.nvmet_cqset[0],
+ LPFC_NVMET);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6057 Failed setup of NVMET "
+ "Receive Queue: rc = 0x%x\n",
+ (uint32_t)rc);
+ goto out_destroy;
+ }
+
+ lpfc_printf_log(
+ phba, KERN_INFO, LOG_INIT,
+ "6099 NVMET RQ setup: hdr-rq-id=%d, "
+ "dat-rq-id=%d parent cq-id=%d\n",
+ phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
+ phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
+ phba->sli4_hba.nvmet_cqset[0]->queue_id);
+
+ }
+ }
+
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0540 Receive Queue not allocated\n");
rc = -ENOMEM;
- goto out_destroy_els_wq;
+ goto out_destroy;
}
lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
@@ -7898,7 +8692,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0541 Failed setup of Receive Queue: "
"rc = 0x%x\n", (uint32_t)rc);
- goto out_destroy_fcp_wq;
+ goto out_destroy;
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7914,7 +8708,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0549 Failed setup of FOF Queues: "
"rc = 0x%x\n", rc);
- goto out_destroy_els_rq;
+ goto out_destroy;
}
}
@@ -7922,30 +8716,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 * Configure EQ delay multiplier for interrupt coalescing using
* MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
*/
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
- fcp_eqidx += LPFC_MAX_EQ_DELAY)
- lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
+ for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
+ lpfc_modify_hba_eq_delay(phba, qidx);
return 0;
-out_destroy_els_rq:
- lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
-out_destroy_els_wq:
- lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
-out_destroy_mbx_wq:
- lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
-out_destroy_els_cq:
- lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-out_destroy_mbx_cq:
- lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
-out_destroy_fcp_wq:
- for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
-out_destroy_fcp_cq:
- for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
-out_destroy_hba_eq:
- for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
- lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
+out_destroy:
+ lpfc_sli4_queue_unset(phba);
out_error:
return rc;
}
@@ -7965,39 +8741,81 @@ out_error:
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
- int fcp_qidx;
+ int qidx;
/* Unset the queues created for Flash Optimized Fabric operations */
if (phba->cfg_fof)
lpfc_fof_queue_destroy(phba);
+
/* Unset mailbox command work queue */
- lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+ if (phba->sli4_hba.mbx_wq)
+ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+
+ /* Unset NVME LS work queue */
+ if (phba->sli4_hba.nvmels_wq)
+ lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
+
/* Unset ELS work queue */
- lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+	if (phba->sli4_hba.els_wq)
+ lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+
/* Unset unsolicited receive queue */
- lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+ if (phba->sli4_hba.hdr_rq)
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
+ phba->sli4_hba.dat_rq);
+
/* Unset FCP work queue */
- if (phba->sli4_hba.fcp_wq) {
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
- fcp_qidx++)
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+ if (phba->sli4_hba.fcp_wq)
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
+
+ /* Unset NVME work queue */
+ if (phba->sli4_hba.nvme_wq) {
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
+ lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
}
+
/* Unset mailbox command complete queue */
- lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+ if (phba->sli4_hba.mbx_cq)
+ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+
/* Unset ELS complete queue */
- lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
- /* Unset FCP response complete queue */
- if (phba->sli4_hba.fcp_cq) {
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
- fcp_qidx++)
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+ if (phba->sli4_hba.els_cq)
+ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+
+ /* Unset NVME LS complete queue */
+ if (phba->sli4_hba.nvmels_cq)
+ lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
+
+ /* Unset NVME response complete queue */
+ if (phba->sli4_hba.nvme_cq)
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
+ lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
+
+ /* Unset NVMET MRQ queue */
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_rq_destroy(phba,
+ phba->sli4_hba.nvmet_mrq_hdr[qidx],
+ phba->sli4_hba.nvmet_mrq_data[qidx]);
}
- /* Unset fast-path event queue */
- if (phba->sli4_hba.hba_eq) {
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
- fcp_qidx++)
- lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
+
+ /* Unset NVMET CQ Set complete queue */
+ if (phba->sli4_hba.nvmet_cqset) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+ lpfc_cq_destroy(phba,
+ phba->sli4_hba.nvmet_cqset[qidx]);
}
+
+ /* Unset FCP response complete queue */
+ if (phba->sli4_hba.fcp_cq)
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
+
+ /* Unset fast-path event queue */
+ if (phba->sli4_hba.hba_eq)
+ for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
}
/**
@@ -8484,16 +9302,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
- * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
- * once invoked, enables either all or nothing, depending on the current
- * availability of PCI vector resources. The device driver is responsible
- * for calling the individual request_irq() to register each MSI-X vector
- * with a interrupt handler, which is done in this function. Note that
- * later when device is unloading, the driver should always call free_irq()
- * on all MSI-X vectors it has done request_irq() on before calling
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
- * will be left with MSI-X enabled and leaks its vectors.
+ * with SLI-3 interface specs.
*
* Return codes
* 0 - successful
@@ -8502,33 +9311,24 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
- int rc, i;
+ int rc;
LPFC_MBOXQ_t *pmb;
/* Set up MSI-X multi-message vectors */
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- phba->msix_entries[i].entry = i;
-
- /* Configure MSI-X capability structure */
- rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
- LPFC_MSIX_VECTORS);
- if (rc) {
+ rc = pci_alloc_irq_vectors(phba->pcidev,
+ LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
+ if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0420 PCI enable MSI-X failed (%d)\n", rc);
goto vec_fail_out;
}
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0477 MSI-X entry[%d]: vector=x%x "
- "message=%d\n", i,
- phba->msix_entries[i].vector,
- phba->msix_entries[i].entry);
+
/*
* Assign MSI-X vectors to interrupt handlers
*/
/* vector-0 is associated to slow-path handler */
- rc = request_irq(phba->msix_entries[0].vector,
+ rc = request_irq(pci_irq_vector(phba->pcidev, 0),
&lpfc_sli_sp_intr_handler, 0,
LPFC_SP_DRIVER_HANDLER_NAME, phba);
if (rc) {
@@ -8539,7 +9339,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
}
/* vector-1 is associated to fast-path handler */
- rc = request_irq(phba->msix_entries[1].vector,
+ rc = request_irq(pci_irq_vector(phba->pcidev, 1),
&lpfc_sli_fp_intr_handler, 0,
LPFC_FP_DRIVER_HANDLER_NAME, phba);
@@ -8584,42 +9384,21 @@ mbx_fail_out:
mem_fail_out:
/* free the irq already requested */
- free_irq(phba->msix_entries[1].vector, phba);
+ free_irq(pci_irq_vector(phba->pcidev, 1), phba);
irq_fail_out:
/* free the irq already requested */
- free_irq(phba->msix_entries[0].vector, phba);
+ free_irq(pci_irq_vector(phba->pcidev, 0), phba);
msi_fail_out:
/* Unconfigure MSI-X capability structure */
- pci_disable_msix(phba->pcidev);
+ pci_free_irq_vectors(phba->pcidev);
vec_fail_out:
return rc;
}
/**
- * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode to device with SLI-3 interface spec.
- **/
-static void
-lpfc_sli_disable_msix(struct lpfc_hba *phba)
-{
- int i;
-
- /* Free up MSI-X multi-message vectors */
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
- free_irq(phba->msix_entries[i].vector, phba);
- /* Disable MSI-X */
- pci_disable_msix(phba->pcidev);
-
- return;
-}
-
-/**
* lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
* @phba: pointer to lpfc hba data structure.
*
@@ -8659,24 +9438,6 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to disable the MSI interrupt mode to device with
- * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
- * done request_irq() on before calling pci_disable_msi(). Failure to do so
- * results in a BUG_ON() and a device will be left with MSI enabled and leaks
- * its vector.
- */
-static void
-lpfc_sli_disable_msi(struct lpfc_hba *phba)
-{
- free_irq(phba->pcidev->irq, phba);
- pci_disable_msi(phba->pcidev);
- return;
-}
-
-/**
* lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
* @phba: pointer to lpfc hba data structure.
*
@@ -8747,107 +9508,50 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
- /* Disable the currently initialized interrupt mode */
+ int nr_irqs, i;
+
if (phba->intr_type == MSIX)
- lpfc_sli_disable_msix(phba);
- else if (phba->intr_type == MSI)
- lpfc_sli_disable_msi(phba);
- else if (phba->intr_type == INTx)
- free_irq(phba->pcidev->irq, phba);
+ nr_irqs = LPFC_MSIX_VECTORS;
+ else
+ nr_irqs = 1;
+
+ for (i = 0; i < nr_irqs; i++)
+ free_irq(pci_irq_vector(phba->pcidev, i), phba);
+ pci_free_irq_vectors(phba->pcidev);
/* Reset interrupt management states */
phba->intr_type = NONE;
phba->sli.slistat.sli_intr = 0;
-
- return;
}
/**
- * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
+ * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
* @phba: pointer to lpfc hba data structure.
+ * @vectors: number of msix vectors allocated.
*
- * Find next available CPU to use for IRQ to CPU affinity.
+ * The routine will figure out the CPU affinity assignment for every
+ * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
+ * with a pointer to the CPU mask that defines ALL the CPUs this vector
+ * can be associated with. If the vector can be uniquely associated with
+ * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
+ * In addition, the CPU to IO channel mapping will be calculated
+ * and the phba->sli4_hba.cpu_map array will reflect this.
*/
-static int
-lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
+static void
+lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
struct lpfc_vector_map_info *cpup;
+ int index = 0;
+ int vec = 0;
int cpu;
-
- cpup = phba->sli4_hba.cpu_map;
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
- /* CPU must be online */
- if (cpu_online(cpu)) {
- if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
- (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
- (cpup->phys_id == phys_id)) {
- return cpu;
- }
- }
- cpup++;
- }
-
- /*
- * If we get here, we have used ALL CPUs for the specific
- * phys_id. Now we need to clear out lpfc_used_cpu and start
- * reusing CPUs.
- */
-
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
- if (lpfc_used_cpu[cpu] == phys_id)
- lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
- }
-
- cpup = phba->sli4_hba.cpu_map;
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
- /* CPU must be online */
- if (cpu_online(cpu)) {
- if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
- (cpup->phys_id == phys_id)) {
- return cpu;
- }
- }
- cpup++;
- }
- return LPFC_VECTOR_MAP_EMPTY;
-}
-
-/**
- * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
- * @phba: pointer to lpfc hba data structure.
- * @vectors: number of HBA vectors
- *
- * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
- * affinization across multple physical CPUs (numa nodes).
- * In addition, this routine will assign an IO channel for each CPU
- * to use when issuing I/Os.
- */
-static int
-lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
-{
- int i, idx, saved_chann, used_chann, cpu, phys_id;
- int max_phys_id, min_phys_id;
- int num_io_channel, first_cpu, chan;
- struct lpfc_vector_map_info *cpup;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
- uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
-
- /* If there is no mapping, just return */
- if (!phba->cfg_fcp_cpu_map)
- return 1;
/* Init cpu_map array */
memset(phba->sli4_hba.cpu_map, 0xff,
(sizeof(struct lpfc_vector_map_info) *
- phba->sli4_hba.num_present_cpu));
-
- max_phys_id = 0;
- min_phys_id = 0xff;
- phys_id = 0;
- num_io_channel = 0;
- first_cpu = LPFC_VECTOR_MAP_EMPTY;
+ phba->sli4_hba.num_present_cpu));
/* Update CPU map with physical id and core id of each CPU */
cpup = phba->sli4_hba.cpu_map;
@@ -8861,184 +9565,16 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
cpup->phys_id = 0;
cpup->core_id = 0;
#endif
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3328 CPU physid %d coreid %d\n",
- cpup->phys_id, cpup->core_id);
-
- if (cpup->phys_id > max_phys_id)
- max_phys_id = cpup->phys_id;
- if (cpup->phys_id < min_phys_id)
- min_phys_id = cpup->phys_id;
+ cpup->channel_id = index; /* For now round robin */
+ cpup->irq = pci_irq_vector(phba->pcidev, vec);
+ vec++;
+ if (vec >= vectors)
+ vec = 0;
+ index++;
+ if (index >= phba->cfg_fcp_io_channel)
+ index = 0;
cpup++;
}
-
- phys_id = min_phys_id;
- /* Now associate the HBA vectors with specific CPUs */
- for (idx = 0; idx < vectors; idx++) {
- cpup = phba->sli4_hba.cpu_map;
- cpu = lpfc_find_next_cpu(phba, phys_id);
- if (cpu == LPFC_VECTOR_MAP_EMPTY) {
-
- /* Try for all phys_id's */
- for (i = 1; i < max_phys_id; i++) {
- phys_id++;
- if (phys_id > max_phys_id)
- phys_id = min_phys_id;
- cpu = lpfc_find_next_cpu(phba, phys_id);
- if (cpu == LPFC_VECTOR_MAP_EMPTY)
- continue;
- goto found;
- }
-
- /* Use round robin for scheduling */
- phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
- chan = 0;
- cpup = phba->sli4_hba.cpu_map;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- cpup->channel_id = chan;
- cpup++;
- chan++;
- if (chan >= phba->cfg_fcp_io_channel)
- chan = 0;
- }
-
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3329 Cannot set affinity:"
- "Error mapping vector %d (%d)\n",
- idx, vectors);
- return 0;
- }
-found:
- cpup += cpu;
- if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
- lpfc_used_cpu[cpu] = phys_id;
-
- /* Associate vector with selected CPU */
- cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
-
- /* Associate IO channel with selected CPU */
- cpup->channel_id = idx;
- num_io_channel++;
-
- if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
- first_cpu = cpu;
-
- /* Now affinitize to the selected CPU */
- i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
- vector, get_cpu_mask(cpu));
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3330 Set Affinity: CPU %d channel %d "
- "irq %d (%x)\n",
- cpu, cpup->channel_id,
- phba->sli4_hba.msix_entries[idx].vector, i);
-
- /* Spread vector mapping across multple physical CPU nodes */
- phys_id++;
- if (phys_id > max_phys_id)
- phys_id = min_phys_id;
- }
-
- /*
- * Finally fill in the IO channel for any remaining CPUs.
- * At this point, all IO channels have been assigned to a specific
- * MSIx vector, mapped to a specific CPU.
- * Base the remaining IO channel assigned, to IO channels already
- * assigned to other CPUs on the same phys_id.
- */
- for (i = min_phys_id; i <= max_phys_id; i++) {
- /*
- * If there are no io channels already mapped to
- * this phys_id, just round robin thru the io_channels.
- * Setup chann[] for round robin.
- */
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
- chann[idx] = idx;
-
- saved_chann = 0;
- used_chann = 0;
-
- /*
- * First build a list of IO channels already assigned
- * to this phys_id before reassigning the same IO
- * channels to the remaining CPUs.
- */
- cpup = phba->sli4_hba.cpu_map;
- cpu = first_cpu;
- cpup += cpu;
- for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
- idx++) {
- if (cpup->phys_id == i) {
- /*
- * Save any IO channels that are
- * already mapped to this phys_id.
- */
- if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
- if (saved_chann <=
- LPFC_FCP_IO_CHAN_MAX) {
- chann[saved_chann] =
- cpup->channel_id;
- saved_chann++;
- }
- goto out;
- }
-
- /* See if we are using round-robin */
- if (saved_chann == 0)
- saved_chann =
- phba->cfg_fcp_io_channel;
-
- /* Associate next IO channel with CPU */
- cpup->channel_id = chann[used_chann];
- num_io_channel++;
- used_chann++;
- if (used_chann == saved_chann)
- used_chann = 0;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3331 Set IO_CHANN "
- "CPU %d channel %d\n",
- idx, cpup->channel_id);
- }
-out:
- cpu++;
- if (cpu >= phba->sli4_hba.num_present_cpu) {
- cpup = phba->sli4_hba.cpu_map;
- cpu = 0;
- } else {
- cpup++;
- }
- }
- }
-
- if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
- cpup = phba->sli4_hba.cpu_map;
- for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
- if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
- cpup->channel_id = 0;
- num_io_channel++;
-
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "3332 Assign IO_CHANN "
- "CPU %d channel %d\n",
- idx, cpup->channel_id);
- }
- cpup++;
- }
- }
-
- /* Sanity check */
- if (num_io_channel != phba->sli4_hba.num_present_cpu)
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3333 Set affinity mismatch:"
- "%d chann != %d cpus: %d vectors\n",
- num_io_channel, phba->sli4_hba.num_present_cpu,
- vectors);
-
- /* Enable using cpu affinity for scheduling */
- phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
- return 1;
}
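/*
 * A minimal standalone sketch of the round-robin assignment that the new
 * lpfc_cpu_affinity_check() above performs: every present CPU is handed
 * the next MSI-X vector and the next IO channel, with both indexes
 * wrapping when their pools run out.  The counts here are invented; the
 * driver derives them from the vectors granted and cfg_fcp_io_channel.
 */
#include <stdio.h>

int main(void)
{
	int num_cpu = 8, vectors = 4, io_channels = 3;
	int vec = 0, chan = 0, cpu;

	for (cpu = 0; cpu < num_cpu; cpu++) {
		printf("cpu %d -> vector %d, channel %d\n", cpu, vec, chan);
		if (++vec >= vectors)
			vec = 0;
		if (++chan >= io_channels)
			chan = 0;
	}
	return 0;
}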
@@ -9047,14 +9583,7 @@ out:
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
- * is called to enable the MSI-X vectors. The device driver is responsible
- * for calling the individual request_irq() to register each MSI-X vector
- * with a interrupt handler, which is done in this function. Note that
- * later when device is unloading, the driver should always call free_irq()
- * on all MSI-X vectors it has done request_irq() on before calling
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
- * will be left with MSI-X enabled and leaks its vectors.
+ * with SLI-4 interface spec.
*
* Return codes
* 0 - successful
@@ -9066,17 +9595,13 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
int vectors, rc, index;
/* Set up MSI-X multi-message vectors */
- for (index = 0; index < phba->cfg_fcp_io_channel; index++)
- phba->sli4_hba.msix_entries[index].entry = index;
-
- /* Configure MSI-X capability structure */
- vectors = phba->cfg_fcp_io_channel;
- if (phba->cfg_fof) {
- phba->sli4_hba.msix_entries[index].entry = index;
+ vectors = phba->io_channel_irqs;
+ if (phba->cfg_fof)
vectors++;
- }
- rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
- 2, vectors);
+
+ rc = pci_alloc_irq_vectors(phba->pcidev,
+ (phba->nvmet_support) ? 1 : 2,
+ vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (rc < 0) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0484 PCI enable MSI-X failed (%d)\n", rc);
@@ -9084,14 +9609,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
}
vectors = rc;
- /* Log MSI-X vector assignment */
- for (index = 0; index < vectors; index++)
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0489 MSI-X entry[%d]: vector=x%x "
- "message=%d\n", index,
- phba->sli4_hba.msix_entries[index].vector,
- phba->sli4_hba.msix_entries[index].entry);
-
/* Assign MSI-X vectors to interrupt handlers */
for (index = 0; index < vectors; index++) {
memset(&phba->sli4_hba.handler_name[index], 0, 16);
@@ -9099,21 +9616,19 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
LPFC_SLI4_HANDLER_NAME_SZ,
LPFC_DRIVER_HANDLER_NAME"%d", index);
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
- atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
+ phba->sli4_hba.hba_eq_hdl[index].idx = index;
+ phba->sli4_hba.hba_eq_hdl[index].phba = phba;
+ atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
if (phba->cfg_fof && (index == (vectors - 1)))
- rc = request_irq(
- phba->sli4_hba.msix_entries[index].vector,
+ rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_fof_intr_handler, 0,
(char *)&phba->sli4_hba.handler_name[index],
- &phba->sli4_hba.fcp_eq_hdl[index]);
+ &phba->sli4_hba.hba_eq_hdl[index]);
else
- rc = request_irq(
- phba->sli4_hba.msix_entries[index].vector,
+ rc = request_irq(pci_irq_vector(phba->pcidev, index),
&lpfc_sli4_hba_intr_handler, 0,
(char *)&phba->sli4_hba.handler_name[index],
- &phba->sli4_hba.fcp_eq_hdl[index]);
+ &phba->sli4_hba.hba_eq_hdl[index]);
if (rc) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0486 MSI-X fast-path (%d) "
@@ -9125,64 +9640,38 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
if (phba->cfg_fof)
vectors--;
- if (vectors != phba->cfg_fcp_io_channel) {
+ if (vectors != phba->io_channel_irqs) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3238 Reducing IO channels to match number of "
"MSI-X vectors, requested %d got %d\n",
- phba->cfg_fcp_io_channel, vectors);
- phba->cfg_fcp_io_channel = vectors;
+ phba->io_channel_irqs, vectors);
+ if (phba->cfg_fcp_io_channel > vectors)
+ phba->cfg_fcp_io_channel = vectors;
+ if (phba->cfg_nvme_io_channel > vectors)
+ phba->cfg_nvme_io_channel = vectors;
+ if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
+ phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+ else
+ phba->io_channel_irqs = phba->cfg_nvme_io_channel;
}
+ lpfc_cpu_affinity_check(phba, vectors);
- if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
- lpfc_sli4_set_affinity(phba, vectors);
return rc;
cfg_fail_out:
/* free the irq already requested */
- for (--index; index >= 0; index--) {
- irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
- vector, NULL);
- free_irq(phba->sli4_hba.msix_entries[index].vector,
- &phba->sli4_hba.fcp_eq_hdl[index]);
- }
+ for (--index; index >= 0; index--)
+ free_irq(pci_irq_vector(phba->pcidev, index),
+ &phba->sli4_hba.hba_eq_hdl[index]);
/* Unconfigure MSI-X capability structure */
- pci_disable_msix(phba->pcidev);
+ pci_free_irq_vectors(phba->pcidev);
vec_fail_out:
return rc;
}
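/*
 * The hunk above moves from the driver-managed msix_entries[] table to
 * the managed pci_alloc_irq_vectors()/pci_irq_vector() API.  The sketch
 * below shows only the bare pattern for a hypothetical "demo" driver; it
 * is not the lpfc code and omits its per-vector handler names, FOF
 * handling and CPU-affinity bookkeeping.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static int demo_enable_msix(struct pci_dev *pdev, irq_handler_t handler,
			    int want, void *drvdata)
{
	int nvec, i, rc;

	/* Ask for up to 'want' MSI-X vectors, spread across CPUs */
	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	/* Register one handler per vector actually granted */
	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "demo", drvdata);
		if (rc)
			goto out_free;
	}
	return nvec;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), drvdata);
	pci_free_irq_vectors(pdev);
	return rc;
}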
/**
- * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode to device with SLI-4 interface spec.
- **/
-static void
-lpfc_sli4_disable_msix(struct lpfc_hba *phba)
-{
- int index;
-
- /* Free up MSI-X multi-message vectors */
- for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
- irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
- vector, NULL);
- free_irq(phba->sli4_hba.msix_entries[index].vector,
- &phba->sli4_hba.fcp_eq_hdl[index]);
- }
- if (phba->cfg_fof) {
- free_irq(phba->sli4_hba.msix_entries[index].vector,
- &phba->sli4_hba.fcp_eq_hdl[index]);
- }
- /* Disable MSI-X */
- pci_disable_msix(phba->pcidev);
-
- return;
-}
-
-/**
* lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
@@ -9220,37 +9709,19 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
return rc;
}
- for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ for (index = 0; index < phba->io_channel_irqs; index++) {
+ phba->sli4_hba.hba_eq_hdl[index].idx = index;
+ phba->sli4_hba.hba_eq_hdl[index].phba = phba;
}
if (phba->cfg_fof) {
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+ phba->sli4_hba.hba_eq_hdl[index].idx = index;
+ phba->sli4_hba.hba_eq_hdl[index].phba = phba;
}
return 0;
}
/**
- * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to disable the MSI interrupt mode to device with
- * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
- * done request_irq() on before calling pci_disable_msi(). Failure to do so
- * results in a BUG_ON() and a device will be left with MSI enabled and leaks
- * its vector.
- **/
-static void
-lpfc_sli4_disable_msi(struct lpfc_hba *phba)
-{
- free_irq(phba->pcidev->irq, phba);
- pci_disable_msi(phba->pcidev);
- return;
-}
-
-/**
* lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
* @phba: pointer to lpfc hba data structure.
*
@@ -9270,7 +9741,7 @@ static uint32_t
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
uint32_t intr_mode = LPFC_INTR_ERROR;
- int retval, index;
+ int retval, idx;
if (cfg_mode == 2) {
/* Preparation before conf_msi mbox cmd */
@@ -9301,21 +9772,23 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
if (!retval) {
+ struct lpfc_hba_eq_hdl *eqhdl;
+
/* Indicate initialization to INTx mode */
phba->intr_type = INTx;
intr_mode = 0;
- for (index = 0; index < phba->cfg_fcp_io_channel;
- index++) {
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
- atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
- fcp_eq_in_use, 1);
+
+ for (idx = 0; idx < phba->io_channel_irqs; idx++) {
+ eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl->idx = idx;
+ eqhdl->phba = phba;
+ atomic_set(&eqhdl->hba_eq_in_use, 1);
}
if (phba->cfg_fof) {
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
- atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
- fcp_eq_in_use, 1);
+ eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
+ eqhdl->idx = idx;
+ eqhdl->phba = phba;
+ atomic_set(&eqhdl->hba_eq_in_use, 1);
}
}
}
@@ -9335,18 +9808,26 @@ static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
/* Disable the currently initialized interrupt mode */
- if (phba->intr_type == MSIX)
- lpfc_sli4_disable_msix(phba);
- else if (phba->intr_type == MSI)
- lpfc_sli4_disable_msi(phba);
- else if (phba->intr_type == INTx)
+ if (phba->intr_type == MSIX) {
+ int index;
+
+ /* Free up MSI-X multi-message vectors */
+ for (index = 0; index < phba->io_channel_irqs; index++)
+ free_irq(pci_irq_vector(phba->pcidev, index),
+ &phba->sli4_hba.hba_eq_hdl[index]);
+
+ if (phba->cfg_fof)
+ free_irq(pci_irq_vector(phba->pcidev, index),
+ &phba->sli4_hba.hba_eq_hdl[index]);
+ } else {
free_irq(phba->pcidev->irq, phba);
+ }
+
+ pci_free_irq_vectors(phba->pcidev);
/* Reset interrupt management states */
phba->intr_type = NONE;
phba->sli.slistat.sli_intr = 0;
-
- return;
}
/**
@@ -9399,11 +9880,27 @@ static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
int wait_time = 0;
- int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ int nvme_xri_cmpl = 1;
+ int fcp_xri_cmpl = 1;
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+ int nvmet_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
+ fcp_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ nvme_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
- while (!fcp_xri_cmpl || !els_xri_cmpl) {
+ while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
+ !nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+ if (!nvme_xri_cmpl)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6100 NVME XRI exchange busy "
+ "wait time: %d seconds.\n",
+ wait_time/1000);
if (!fcp_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2877 FCP XRI exchange busy "
@@ -9420,10 +9917,19 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
}
- fcp_xri_cmpl =
- list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ nvme_xri_cmpl = list_empty(
+ &phba->sli4_hba.lpfc_abts_nvme_buf_list);
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
+ fcp_xri_cmpl = list_empty(
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list);
+
els_xri_cmpl =
list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+ nvmet_xri_cmpl =
+ list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
}
}
@@ -9635,10 +10141,35 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
+ sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
mbx_sli4_parameters);
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
+ phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
+ bf_get(cfg_xib, mbx_sli4_parameters));
+
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
+ !phba->nvme_support) {
+ phba->nvme_support = 0;
+ phba->nvmet_support = 0;
+ phba->cfg_nvmet_mrq = 0;
+ phba->cfg_nvme_io_channel = 0;
+ phba->io_channel_irqs = phba->cfg_fcp_io_channel;
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
+ "6101 Disabling NVME support: "
+ "Not supported by firmware: %d %d\n",
+ bf_get(cfg_nvme, mbx_sli4_parameters),
+ bf_get(cfg_xib, mbx_sli4_parameters));
+
+ /* If firmware doesn't support NVME, just use SCSI support */
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return -ENODEV;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+ }
+
+ if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
+ phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
/* Make sure that sge_supp_len can be handled by the driver */
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
@@ -9713,14 +10244,6 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_disable_pci_dev;
}
- /* Set up phase-1 common device driver resources */
- error = lpfc_setup_driver_resource_phase1(phba);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1403 Failed to set up driver resource.\n");
- goto out_unset_pci_mem_s3;
- }
-
/* Set up SLI-3 specific device driver resources */
error = lpfc_sli_driver_resource_setup(phba);
if (error) {
@@ -9876,7 +10399,13 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
/* Remove FC host and then SCSI host with the physical port */
fc_remove_host(shost);
scsi_remove_host(shost);
+
+ /* Perform ndlp cleanup on the physical port. The nvme and nvmet
+	 * localports are destroyed afterwards to clean up all transport memory.
+ */
lpfc_cleanup(vport);
+ lpfc_nvmet_destroy_targetport(phba);
+ lpfc_nvme_destroy_localport(vport);
/*
 * Bring down the SLI Layer. This step disables all interrupts,
@@ -10296,6 +10825,23 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT + NVMET IOCBs to reserve
+ **/
+int
+lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
+{
+ int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
+
+ if (phba->nvmet_support)
+ max_xri += LPFC_NVMET_BUF_POST;
+ return max_xri;
+}
+
+
+/**
* lpfc_write_firmware - attempt to write a firmware image to the port
* @fw: pointer to firmware image returned from request_firmware.
* @phba: pointer to lpfc hba data structure.
@@ -10459,7 +11005,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
struct Scsi_Host *shost = NULL;
int error;
uint32_t cfg_mode, intr_mode;
- int adjusted_fcp_io_channel;
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -10484,14 +11029,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_disable_pci_dev;
}
- /* Set up phase-1 common device driver resources */
- error = lpfc_setup_driver_resource_phase1(phba);
- if (error) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1411 Failed to set up driver resource.\n");
- goto out_unset_pci_mem_s4;
- }
-
/* Set up SLI-4 Specific device driver resources */
error = lpfc_sli4_driver_resource_setup(phba);
if (error) {
@@ -10550,6 +11087,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Put device to a known state before enabling interrupt */
lpfc_stop_port(phba);
+
/* Configure and enable interrupt */
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
if (intr_mode == LPFC_INTR_ERROR) {
@@ -10559,11 +11097,17 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
goto out_free_sysfs_attr;
}
/* Default to single EQ for non-MSI-X */
- if (phba->intr_type != MSIX)
- adjusted_fcp_io_channel = 1;
- else
- adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
- phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
+ if (phba->intr_type != MSIX) {
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
+ phba->cfg_fcp_io_channel = 1;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ phba->cfg_nvme_io_channel = 1;
+ if (phba->nvmet_support)
+ phba->cfg_nvmet_mrq = 1;
+ }
+ phba->io_channel_irqs = 1;
+ }
+
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -10579,6 +11123,24 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
+	/* The FC4 type was already corrected earlier in the driver load
+	 * based on firmware NVME support, so a check for nvme_support
+	 * here is unnecessary.
+ */
+ if ((phba->nvmet_support == 0) &&
+ (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
+ /* Create NVME binding with nvme_fc_transport. This
+ * ensures the vport is initialized.
+ */
+ error = lpfc_nvme_create_localport(vport);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6004 NVME registration failed, "
+ "error x%x\n",
+ error);
+ goto out_disable_intr;
+ }
+ }
+
/* check for firmware upgrade or downgrade */
if (phba->cfg_request_firmware_upgrade)
lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
@@ -10650,8 +11212,12 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
fc_remove_host(shost);
scsi_remove_host(shost);
- /* Perform cleanup on the physical port */
+ /* Perform ndlp cleanup on the physical port. The nvme and nvmet
+	 * localports are destroyed afterwards to clean up all transport memory.
+ */
lpfc_cleanup(vport);
+ lpfc_nvmet_destroy_targetport(phba);
+ lpfc_nvme_destroy_localport(vport);
/*
* Bring down the SLI Layer. This step disables all interrupts,
@@ -10669,6 +11235,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
* buffers are released to their corresponding pools here.
*/
lpfc_scsi_free(phba);
+ lpfc_nvme_free(phba);
+ lpfc_free_iocb_list(phba);
lpfc_sli4_driver_resource_unset(phba);
@@ -11314,7 +11882,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_sli_ring *pring;
int rc;
rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
@@ -11333,8 +11901,11 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
if (rc)
goto out_oas_wq;
- phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
- phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
+ /* Bind this CQ/WQ to the NVME ring */
+ pring = phba->sli4_hba.oas_wq->pring;
+ pring->sli.sli4.wqp =
+ (void *)phba->sli4_hba.oas_wq;
+ phba->sli4_hba.oas_cq->pring = pring;
}
return 0;
@@ -11391,6 +11962,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
goto out_error;
phba->sli4_hba.oas_wq = qdesc;
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
}
return 0;
@@ -11476,7 +12048,6 @@ static struct miscdevice lpfc_mgmt_dev = {
static int __init
lpfc_init(void)
{
- int cpu;
int error = 0;
printk(LPFC_MODULE_DESC "\n");
@@ -11502,9 +12073,7 @@ lpfc_init(void)
/* Initialize in case vector mapping is needed */
lpfc_used_cpu = NULL;
- lpfc_present_cpu = 0;
- for_each_present_cpu(cpu)
- lpfc_present_cpu++;
+ lpfc_present_cpu = num_present_cpus();
error = pci_register_driver(&lpfc_driver);
if (error) {
@@ -11550,5 +12119,5 @@ module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
-MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
+MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
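For reference, the lpfc_sli4_disable_intr() hunk above drops the driver-private MSI disable helper and instead frees each vector obtained from pci_irq_vector() before one call to pci_free_irq_vectors(). Below is a minimal sketch of that generic allocate/request/free pairing; it is not part of this patch, and my_handler, my_dev and "my_drv" are hypothetical names used only for illustration.

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t my_handler(int irq, void *data)
{
	/* Acknowledge and handle the interrupt for this vector */
	return IRQ_HANDLED;
}

static int my_setup_irqs(struct pci_dev *pdev, void *my_dev, int want)
{
	int nvec, i, rc;

	/* Ask for up to 'want' vectors, falling back MSI-X -> MSI -> INTx */
	nvec = pci_alloc_irq_vectors(pdev, 1, want,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* Request one handler per allocated vector */
	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), my_handler, 0,
				 "my_drv", my_dev);
		if (rc)
			goto fail;
	}
	return 0;

fail:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), my_dev);
	pci_free_irq_vectors(pdev);	/* safe for MSI-X, MSI and INTx alike */
	return rc;
}

Because pci_free_irq_vectors() handles MSI-X, MSI and legacy INTx alike, the new teardown path only needs to distinguish how the individual IRQs were requested, which is exactly the shape of the rewritten lpfc_sli4_disable_intr() above.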
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 2a4e5d21eab2..3b654ad08d1f 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -38,6 +40,10 @@
#define LOG_FIP 0x00020000 /* FIP events */
#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */
#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */
+#define LOG_NVME 0x00100000 /* NVME general events. */
+#define LOG_NVME_DISC 0x00200000 /* NVME Discovery/Connect events. */
+#define LOG_NVME_ABTS 0x00400000 /* NVME ABTS events. */
+#define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */
#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b234c50c255f..a928f5187fa4 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -954,7 +956,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
pcbp->maxRing = (psli->num_rings - 1);
for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
+ pring = &psli->sli3_ring[i];
pring->sli.sli3.sizeCiocb =
phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
@@ -1217,7 +1219,7 @@ lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
mb->un.varCfgRing.recvNotify = 1;
psli = &phba->sli;
- pring = &psli->ring[ring];
+ pring = &psli->sli3_ring[ring];
mb->un.varCfgRing.numMask = pring->num_mask;
mb->mbxCommand = MBX_CONFIG_RING;
mb->mbxOwner = OWN_HOST;
@@ -2081,6 +2083,9 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
if (phba->max_vpi && phba->cfg_enable_npiv)
bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+ if (phba->nvmet_support)
+ bf_set(lpfc_mbx_rq_ftr_rq_mrqp, &mboxq->u.mqe.un.req_ftrs, 1);
+
return;
}
@@ -2434,14 +2439,45 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
memset(mbox, 0, sizeof(*mbox));
reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
- bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
- bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+ if (phba->nvmet_support == 0) {
+ bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
+ phba->sli4_hba.hdr_rq->queue_id);
+ /* Match everything - rq_id0 */
+ bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_rctl_mask0, reg_fcfi, 0);
+
+ bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+
+ /* addr mode is bit wise inverted value of fcf addr_mode */
+ bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+ (~phba->fcf.addr_mode) & 0x3);
+ } else {
+ /* This is ONLY for NVMET MRQ == 1 */
+ if (phba->cfg_nvmet_mrq != 1)
+ return;
+
+ bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi,
+ phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
+ /* Match type FCP - rq_id0 */
+ bf_set(lpfc_reg_fcfi_type_match0, reg_fcfi, FC_TYPE_FCP);
+ bf_set(lpfc_reg_fcfi_type_mask0, reg_fcfi, 0xff);
+ bf_set(lpfc_reg_fcfi_rctl_match0, reg_fcfi,
+ FC_RCTL_DD_UNSOL_CMD);
+
+ bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi,
+ phba->sli4_hba.hdr_rq->queue_id);
+ /* Match everything else - rq_id1 */
+ bf_set(lpfc_reg_fcfi_type_match1, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_type_mask1, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_rctl_match1, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_rctl_mask1, reg_fcfi, 0);
+ }
bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
phba->fcf.current_rec.fcf_indx);
- /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
- bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
@@ -2450,6 +2486,70 @@ lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
}
/**
+ * lpfc_reg_fcfi_mrq - Initialize the REG_FCFI_MRQ mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @mode: 0 to register FCFI, 1 to register MRQs
+ *
+ * The REG_FCFI_MRQ mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command is also
+ * used to indicate where received unsolicited frames from this FCF will be
+ * sent. By default this routine will set up the FCF to forward all
+ * unsolicited frames to the RQ ID passed in the @phba. This can be
+ * overridden by the caller for more complicated setups.
+ **/
+void
+lpfc_reg_fcfi_mrq(struct lpfc_hba *phba, struct lpfcMboxq *mbox, int mode)
+{
+ struct lpfc_mbx_reg_fcfi_mrq *reg_fcfi;
+
+ /* This is ONLY for MRQ */
+ if (phba->cfg_nvmet_mrq <= 1)
+ return;
+
+ memset(mbox, 0, sizeof(*mbox));
+ reg_fcfi = &mbox->u.mqe.un.reg_fcfi_mrq;
+ bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI_MRQ);
+ if (mode == 0) {
+ bf_set(lpfc_reg_fcfi_mrq_info_index, reg_fcfi,
+ phba->fcf.current_rec.fcf_indx);
+ if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
+ bf_set(lpfc_reg_fcfi_mrq_vv, reg_fcfi, 1);
+ bf_set(lpfc_reg_fcfi_mrq_vlan_tag, reg_fcfi,
+ phba->fcf.current_rec.vlan_id);
+ }
+ return;
+ }
+
+ bf_set(lpfc_reg_fcfi_mrq_rq_id0, reg_fcfi,
+ phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id);
+ /* Match NVME frames of type FCP (protocol NVME) - rq_id0 */
+ bf_set(lpfc_reg_fcfi_mrq_type_match0, reg_fcfi, FC_TYPE_FCP);
+ bf_set(lpfc_reg_fcfi_mrq_type_mask0, reg_fcfi, 0xff);
+ bf_set(lpfc_reg_fcfi_mrq_rctl_match0, reg_fcfi, FC_RCTL_DD_UNSOL_CMD);
+ bf_set(lpfc_reg_fcfi_mrq_rctl_mask0, reg_fcfi, 0xff);
+ bf_set(lpfc_reg_fcfi_mrq_ptc0, reg_fcfi, 1);
+ bf_set(lpfc_reg_fcfi_mrq_pt0, reg_fcfi, 1);
+
+ bf_set(lpfc_reg_fcfi_mrq_policy, reg_fcfi, 3); /* NVME connection id */
+ bf_set(lpfc_reg_fcfi_mrq_mode, reg_fcfi, 1);
+ bf_set(lpfc_reg_fcfi_mrq_filter, reg_fcfi, 1); /* rq_id0 */
+ bf_set(lpfc_reg_fcfi_mrq_npairs, reg_fcfi, phba->cfg_nvmet_mrq);
+
+ bf_set(lpfc_reg_fcfi_mrq_rq_id1, reg_fcfi,
+ phba->sli4_hba.hdr_rq->queue_id);
+ /* Match everything - rq_id1 */
+ bf_set(lpfc_reg_fcfi_mrq_type_match1, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_mrq_type_mask1, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_mrq_rctl_match1, reg_fcfi, 0);
+ bf_set(lpfc_reg_fcfi_mrq_rctl_mask1, reg_fcfi, 0);
+
+ bf_set(lpfc_reg_fcfi_mrq_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+ bf_set(lpfc_reg_fcfi_mrq_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+}
+
+/**
* lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
* @mbox: pointer to lpfc mbox command to initialize.
* @fcfi: FCFI to be unregistered.
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 3fa65338d3f5..c61d8d692ede 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -24,10 +26,12 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
-#include <scsi/scsi.h>
+#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -35,8 +39,10 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
@@ -66,7 +72,7 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) {
* lpfc_mem_alloc - create and allocate all PCI and memory pools
* @phba: HBA to allocate pools for
*
- * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
+ * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool,
* lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools
* for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask.
*
@@ -90,21 +96,23 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
else
i = SLI4_PAGE_SIZE;
- phba->lpfc_scsi_dma_buf_pool =
- pci_pool_create("lpfc_scsi_dma_buf_pool",
- phba->pcidev,
- phba->cfg_sg_dma_buf_size,
- i,
- 0);
+ phba->lpfc_sg_dma_buf_pool =
+ pci_pool_create("lpfc_sg_dma_buf_pool",
+ phba->pcidev,
+ phba->cfg_sg_dma_buf_size,
+ i, 0);
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto fail;
+
} else {
- phba->lpfc_scsi_dma_buf_pool =
- pci_pool_create("lpfc_scsi_dma_buf_pool",
- phba->pcidev, phba->cfg_sg_dma_buf_size,
- align, 0);
- }
+ phba->lpfc_sg_dma_buf_pool =
+ pci_pool_create("lpfc_sg_dma_buf_pool",
+ phba->pcidev, phba->cfg_sg_dma_buf_size,
+ align, 0);
- if (!phba->lpfc_scsi_dma_buf_pool)
- goto fail;
+ if (!phba->lpfc_sg_dma_buf_pool)
+ goto fail;
+ }
phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
LPFC_BPL_SIZE,
@@ -170,12 +178,15 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
LPFC_DEVICE_DATA_POOL_SIZE,
sizeof(struct lpfc_device_data));
if (!phba->device_data_mem_pool)
- goto fail_free_hrb_pool;
+ goto fail_free_drb_pool;
} else {
phba->device_data_mem_pool = NULL;
}
return 0;
+fail_free_drb_pool:
+ pci_pool_destroy(phba->lpfc_drb_pool);
+ phba->lpfc_drb_pool = NULL;
fail_free_hrb_pool:
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
@@ -197,8 +208,8 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)
pci_pool_destroy(phba->lpfc_mbuf_pool);
phba->lpfc_mbuf_pool = NULL;
fail_free_dma_buf_pool:
- pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
- phba->lpfc_scsi_dma_buf_pool = NULL;
+ pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
fail:
return -ENOMEM;
}
@@ -227,6 +238,9 @@ lpfc_mem_free(struct lpfc_hba *phba)
if (phba->lpfc_hrb_pool)
pci_pool_destroy(phba->lpfc_hrb_pool);
phba->lpfc_hrb_pool = NULL;
+ if (phba->txrdy_payload_pool)
+ pci_pool_destroy(phba->txrdy_payload_pool);
+ phba->txrdy_payload_pool = NULL;
if (phba->lpfc_hbq_pool)
pci_pool_destroy(phba->lpfc_hbq_pool);
@@ -258,8 +272,8 @@ lpfc_mem_free(struct lpfc_hba *phba)
phba->lpfc_mbuf_pool = NULL;
/* Free DMA buffer memory pool */
- pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
- phba->lpfc_scsi_dma_buf_pool = NULL;
+ pci_pool_destroy(phba->lpfc_sg_dma_buf_pool);
+ phba->lpfc_sg_dma_buf_pool = NULL;
/* Free Device Data memory pool */
if (phba->device_data_mem_pool) {
@@ -282,7 +296,7 @@ lpfc_mem_free(struct lpfc_hba *phba)
* @phba: HBA to free memory for
*
* Description: Free memory from PCI and driver memory pools and also those
- * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
+ * used : lpfc_sg_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees
* kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees
* the VPI bitmask.
*
@@ -431,6 +445,44 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
}
/**
+ * lpfc_nvmet_buf_alloc - Allocate an nvmet_buf from the
+ * lpfc_sg_dma_buf_pool PCI pool
+ * @phba: HBA which owns the pool to allocate from
+ * @mem_flags: indicates if this is a priority (MEM_PRI) allocation
+ * @handle: used to return the DMA-mapped address of the nvmet_buf
+ *
+ * Description: Allocates a DMA-mapped buffer from the lpfc_sg_dma_buf_pool
+ * PCI pool using the generic pci_pool_alloc() function.
+ *
+ * Returns:
+ * pointer to the allocated nvmet_buf on success
+ * NULL on failure
+ **/
+void *
+lpfc_nvmet_buf_alloc(struct lpfc_hba *phba, int mem_flags, dma_addr_t *handle)
+{
+ void *ret;
+
+ ret = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool, GFP_KERNEL, handle);
+ return ret;
+}
+
+/**
+ * lpfc_nvmet_buf_free - Free an nvmet_buf from the lpfc_sg_dma_buf_pool
+ * PCI pool
+ * @phba: HBA which owns the pool to return to
+ * @virt: nvmet_buf to free
+ * @dma: the DMA-mapped address of the lpfc_sg_dma_buf_pool to be freed
+ *
+ * Returns: None
+ **/
+void
+lpfc_nvmet_buf_free(struct lpfc_hba *phba, void *virt, dma_addr_t dma)
+{
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, virt, dma);
+}
+
+/**
* lpfc_els_hbq_alloc - Allocate an HBQ buffer
* @phba: HBA to allocate HBQ buffer for
*
@@ -458,7 +510,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
kfree(hbqbp);
return NULL;
}
- hbqbp->size = LPFC_BPL_SIZE;
+ hbqbp->total_size = LPFC_BPL_SIZE;
return hbqbp;
}
@@ -518,7 +570,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
kfree(dma_buf);
return NULL;
}
- dma_buf->size = LPFC_BPL_SIZE;
+ dma_buf->total_size = LPFC_DATA_BUF_SIZE;
return dma_buf;
}
@@ -540,7 +592,134 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
kfree(dmab);
- return;
+}
+
+/**
+ * lpfc_sli4_nvmet_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe. Must be called with no locks held.
+ *
+ * Returns:
+ * pointer to HBQ on success
+ * NULL on failure
+ **/
+struct rqb_dmabuf *
+lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
+{
+ struct rqb_dmabuf *dma_buf;
+ struct lpfc_iocbq *nvmewqe;
+ union lpfc_wqe128 *wqe;
+
+ dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
+ if (!dma_buf)
+ return NULL;
+
+ dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+ &dma_buf->hbuf.phys);
+ if (!dma_buf->hbuf.virt) {
+ kfree(dma_buf);
+ return NULL;
+ }
+ dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+ &dma_buf->dbuf.phys);
+ if (!dma_buf->dbuf.virt) {
+ pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_buf->hbuf.phys);
+ kfree(dma_buf);
+ return NULL;
+ }
+ dma_buf->total_size = LPFC_DATA_BUF_SIZE;
+
+ dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
+ GFP_KERNEL);
+ if (!dma_buf->context) {
+ pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
+ dma_buf->dbuf.phys);
+ pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_buf->hbuf.phys);
+ kfree(dma_buf);
+ return NULL;
+ }
+
+	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
+	if (!dma_buf->iocbq) {
+ kfree(dma_buf->context);
+ pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
+ dma_buf->dbuf.phys);
+ pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_buf->hbuf.phys);
+ kfree(dma_buf);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "2621 Ran out of nvmet iocb/WQEs\n");
+ return NULL;
+ }
+	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
+	nvmewqe = dma_buf->iocbq;
+ wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+ /* Initialize WQE */
+ memset(wqe, 0, sizeof(union lpfc_wqe));
+ /* Word 7 */
+ bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
+ bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
+ bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
+ /* Word 10 */
+ bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+ bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
+
+ dma_buf->iocbq->context1 = NULL;
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ if (!dma_buf->sglq) {
+ lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
+ kfree(dma_buf->context);
+ pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
+ dma_buf->dbuf.phys);
+ pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+ dma_buf->hbuf.phys);
+ kfree(dma_buf);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6132 Ran out of nvmet XRIs\n");
+ return NULL;
+ }
+ return dma_buf;
+}
+
+/**
+ * lpfc_sli4_nvmet_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA Buffer container returned by lpfc_sli4_nvmet_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_nvmet_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
+{
+ unsigned long flags;
+
+ __lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
+ dmab->sglq->state = SGL_FREED;
+ dmab->sglq->ndlp = NULL;
+
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
+ list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
+
+ lpfc_sli_release_iocbq(phba, dmab->iocbq);
+ kfree(dmab->context);
+ pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+ pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+ kfree(dmab);
}
/**
@@ -565,13 +744,13 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
return;
if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+ hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
/* Check whether HBQ is still in use */
spin_lock_irqsave(&phba->hbalock, flags);
if (!phba->hbq_in_use) {
spin_unlock_irqrestore(&phba->hbalock, flags);
return;
}
- hbq_entry = container_of(mp, struct hbq_dmabuf, dbuf);
list_del(&hbq_entry->dbuf.list);
if (hbq_entry->tag == -1) {
(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
@@ -586,3 +765,48 @@ lpfc_in_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
}
return;
}
+
+/**
+ * lpfc_rq_buf_free - Free a RQ DMA buffer
+ * @phba: HBA buffer is associated with
+ * @mp: Buffer to free
+ *
+ * Description: Recycles the given DMA buffer by reposting it to its
+ * associated RQ so it can be reused; the buffer is freed if the repost fails.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
+{
+ struct lpfc_rqb *rqbp;
+ struct lpfc_rqe hrqe;
+ struct lpfc_rqe drqe;
+ struct rqb_dmabuf *rqb_entry;
+ unsigned long flags;
+ int rc;
+
+ if (!mp)
+ return;
+
+ rqb_entry = container_of(mp, struct rqb_dmabuf, hbuf);
+ rqbp = rqb_entry->hrq->rqbp;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_del(&rqb_entry->hbuf.list);
+ hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
+ hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
+ drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
+ drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
+ rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
+ if (rc < 0) {
+ (rqbp->rqb_free_buffer)(phba, rqb_entry);
+ } else {
+ list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
+ rqbp->buffer_count++;
+ }
+
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+}
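The lpfc_rq_buf_free() routine above is handed the embedded header lpfc_dmabuf (hbuf) and recovers the enclosing rqb_dmabuf with container_of() before reposting it. A minimal stand-alone sketch of that recovery pattern follows; it uses simplified demo_* structures rather than the real lpfc types, purely for illustration.

#include <linux/kernel.h>
#include <linux/list.h>

struct demo_dmabuf {
	struct list_head list;
	void *virt;
};

struct demo_rqb_dmabuf {
	struct demo_dmabuf hbuf;	/* header buffer, embedded */
	struct demo_dmabuf dbuf;	/* data buffer, embedded */
};

static struct demo_rqb_dmabuf *demo_from_hbuf(struct demo_dmabuf *mp)
{
	/* Pointer arithmetic back from the embedded member to its container */
	return container_of(mp, struct demo_rqb_dmabuf, hbuf);
}

container_of() is only offset arithmetic, so it is valid only when the pointer really is the named member of that structure, as it is for the header buffer handed back by the RQ completion path here.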
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index f2b1bbcb196f..b93e78f671fb 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 56a3df4fddb0..061626bdf701 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -28,6 +30,9 @@
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme-fc-driver.h>
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
@@ -35,8 +40,9 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -204,10 +210,11 @@ int
lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(abort_list);
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
+ pring = lpfc_phba_elsring(phba);
+
/* Abort outstanding I/O on NPort <nlp_DID> */
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY,
"2819 Abort outstanding I/O on NPort x%x "
@@ -283,6 +290,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t ed_tov;
LPFC_MBOXQ_t *mbox;
struct ls_rjt stat;
+ uint32_t vid, flag;
int rc;
memset(&stat, 0, sizeof (struct ls_rjt));
@@ -418,6 +426,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_can_disctmo(vport);
}
+ ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
+ if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+ sp->cmn.valid_vendor_ver_level) {
+ vid = be32_to_cpu(sp->un.vv.vid);
+ flag = be32_to_cpu(sp->un.vv.flags);
+ if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP))
+ ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
+ }
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
goto out;
@@ -707,6 +724,7 @@ static void
lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
uint32_t *lp;
PRLI *npr;
@@ -720,16 +738,32 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
- if (npr->prliType == PRLI_FCP_TYPE) {
- if (npr->initiatorFunc)
- ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if ((npr->prliType == PRLI_FCP_TYPE) ||
+ (npr->prliType == PRLI_NVME_TYPE)) {
+ if (npr->initiatorFunc) {
+ if (npr->prliType == PRLI_FCP_TYPE)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->prliType == PRLI_NVME_TYPE)
+ ndlp->nlp_type |= NLP_NVME_INITIATOR;
+ }
if (npr->targetFunc) {
- ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->prliType == PRLI_FCP_TYPE)
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->prliType == PRLI_NVME_TYPE)
+ ndlp->nlp_type |= NLP_NVME_TARGET;
if (npr->writeXferRdyDis)
ndlp->nlp_flag |= NLP_FIRSTBURST;
}
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+
+ /* If this driver is in nvme target mode, set the ndlp's fc4
+ * type to NVME provided the PRLI response claims NVME FC4
+ * type. Target mode does not issue gft_id so doesn't get
+ * the fc4 type set until now.
+ */
+ if ((phba->nvmet_support) && (npr->prliType == PRLI_NVME_TYPE))
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
}
if (rport) {
/* We need to update the rport role values */
@@ -743,7 +777,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"rport rolechg: role:x%x did:x%x flg:x%x",
roles, ndlp->nlp_DID, ndlp->nlp_flag);
- fc_remote_port_rolechg(rport, roles);
+ if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
+ fc_remote_port_rolechg(rport, roles);
}
}
@@ -1026,6 +1061,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb, *rspiocb;
struct lpfc_dmabuf *pcmd, *prsp, *mp;
uint32_t *lp;
+ uint32_t vid, flag;
IOCB_t *irsp;
struct serv_parm *sp;
uint32_t ed_tov;
@@ -1094,6 +1130,16 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
ed_tov = (phba->fc_edtov + 999999) / 1000000;
}
+ ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP;
+ if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) &&
+ sp->cmn.valid_vendor_ver_level) {
+ vid = be32_to_cpu(sp->un.vv.vid);
+ flag = be32_to_cpu(sp->un.vv.flags);
+ if ((vid == LPFC_VV_EMLX_ID) &&
+ (flag & LPFC_VV_SUPPRESS_RSP))
+ ndlp->nlp_flag |= NLP_SUPPRESS_RSP;
+ }
+
/*
* Use the larger EDTOV
* RATOV = 2 * EDTOV for pt-to-pt
@@ -1489,8 +1535,38 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
+ struct ls_rjt stat;
+
+ if (vport->phba->nvmet_support) {
+ /* NVME Target mode. Handle and respond to the PRLI and
+ * transition to UNMAPPED provided the RPI has completed
+ * registration.
+ */
+ if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
+ lpfc_rcv_prli(vport, ndlp, cmdiocb);
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ } else {
+ /* RPI registration has not completed. Reject the PRLI
+ * to prevent an illegal state transition when the
+ * rpi registration does complete.
+ */
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_DISC,
+ "6115 NVMET ndlp rpi %d state "
+ "unknown, state x%x flags x%08x\n",
+ ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ memset(&stat, 0, sizeof(struct ls_rjt));
+ stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ stat.un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+ lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb,
+ ndlp, NULL);
+ }
+ } else {
+ /* Initiator mode. */
+ lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
+ }
- lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
@@ -1573,9 +1649,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
uint32_t evt)
{
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_hba *phba = vport->phba;
LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
MAILBOX_t *mb = &pmb->u.mb;
uint32_t did = mb->un.varWords[1];
+ int rc = 0;
if (mb->mbxStatus) {
/* RegLogin failed */
@@ -1610,19 +1688,55 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
}
/* SLI4 ports have preallocated logical rpis. */
- if (vport->phba->sli_rev < LPFC_SLI_REV4)
+ if (phba->sli_rev < LPFC_SLI_REV4)
ndlp->nlp_rpi = mb->un.varWords[0];
ndlp->nlp_flag |= NLP_RPI_REGISTERED;
/* Only if we are not a fabric nport do we issue PRLI */
- if (!(ndlp->nlp_type & NLP_FABRIC)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ "3066 RegLogin Complete on x%x x%x x%x\n",
+ did, ndlp->nlp_type, ndlp->nlp_fc4_type);
+ if (!(ndlp->nlp_type & NLP_FABRIC) &&
+ (phba->nvmet_support == 0)) {
+ /* The driver supports FCP and NVME concurrently. If the
+ * ndlp's nlp_fc4_type is still zero, the driver doesn't
+ * know what PRLI to send yet. Figure that out now and
+ * call PRLI depending on the outcome.
+ */
+ if (vport->fc_flag & FC_PT2PT) {
+ /* If we are pt2pt, there is no Fabric to determine
+ * the FC4 type of the remote nport. So if NVME
+ * is configured try it.
+ */
+ ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ ndlp->nlp_fc4_type |= NLP_FC4_NVME;
+ /* We need to update the localport also */
+ lpfc_nvme_update_localport(vport);
+ }
+
+ } else if (ndlp->nlp_fc4_type == 0) {
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_GFT_ID,
+ 0, ndlp->nlp_DID);
+ return ndlp->nlp_state;
+ }
+
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
} else {
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support)
+ phba->targetport->port_id = vport->fc_myDID;
+
+ /* Only Fabric ports should transition. NVME target
+ * must complete PRLI.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC) {
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ }
}
return ndlp->nlp_state;
}
@@ -1663,7 +1777,14 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport,
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
+
+ /* If we are a target we won't immediately transition into PRLI,
+ * so if REG_LOGIN already completed we don't need to ignore it.
+ */
+ if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) ||
+ !vport->phba->nvmet_support)
+ ndlp->nlp_flag |= NLP_IGNR_REG_CMPL;
+
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
@@ -1739,10 +1860,23 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_hba *phba = vport->phba;
IOCB_t *irsp;
PRLI *npr;
+ struct lpfc_nvme_prli *nvpr;
+ void *temp_ptr;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
- npr = (PRLI *)lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+
+ /* A solicited PRLI is either FCP or NVME. The PRLI cmd/rsp
+ * format is different so NULL the two PRLI types so that the
+	 * driver gets the correct context.
+ */
+ npr = NULL;
+ nvpr = NULL;
+ temp_ptr = lpfc_check_elscmpl_iocb(phba, cmdiocb, rspiocb);
+ if (cmdiocb->iocb_flag & LPFC_PRLI_FCP_REQ)
+ npr = (PRLI *) temp_ptr;
+ else if (cmdiocb->iocb_flag & LPFC_PRLI_NVME_REQ)
+ nvpr = (struct lpfc_nvme_prli *) temp_ptr;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
@@ -1750,7 +1884,21 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
vport->cfg_restrict_login) {
goto out;
}
+
+ /* The LS Req had some error. Don't let this be a
+ * target.
+ */
+ if ((ndlp->fc4_prli_sent == 1) &&
+ (ndlp->nlp_state == NLP_STE_PRLI_ISSUE) &&
+ (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_FCP_INITIATOR)))
+ /* The FCP PRLI completed successfully but
+ * the NVME PRLI failed. Since they are sent in
+ * succession, allow the FCP to complete.
+ */
+ goto out_err;
+
ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
return ndlp->nlp_state;
}
@@ -1758,9 +1906,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check out PRLI rsp */
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+
+ /* NVME or FCP first burst must be negotiated for each PRLI. */
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
- if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
+ ndlp->nvme_fb_size = 0;
+ if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
+ npr->initiatorFunc,
+ npr->targetFunc);
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
if (npr->targetFunc) {
@@ -1770,6 +1925,49 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+
+ /* PRLI completed. Decrement count. */
+ ndlp->fc4_prli_sent--;
+ } else if (nvpr &&
+ (bf_get_be32(prli_acc_rsp_code, nvpr) ==
+ PRLI_REQ_EXECUTED) &&
+ (bf_get_be32(prli_type_code, nvpr) ==
+ PRLI_NVME_TYPE)) {
+
+ /* Complete setting up the remote ndlp personality. */
+ if (bf_get_be32(prli_init, nvpr))
+ ndlp->nlp_type |= NLP_NVME_INITIATOR;
+
+ /* Target driver cannot solicit NVME FB. */
+ if (bf_get_be32(prli_tgt, nvpr)) {
+ ndlp->nlp_type |= NLP_NVME_TARGET;
+ if ((bf_get_be32(prli_fba, nvpr) == 1) &&
+ (bf_get_be32(prli_fb_sz, nvpr) > 0) &&
+ (phba->cfg_nvme_enable_fb) &&
+ (!phba->nvmet_support)) {
+ /* Both sides support FB. The target's first
+ * burst size is a 512 byte encoded value.
+ */
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz,
+ nvpr);
+ }
+ }
+
+ if (bf_get_be32(prli_recov, nvpr))
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6029 NVME PRLI Cmpl w1 x%08x "
+ "w4 x%08x w5 x%08x flag x%x, "
+ "fcp_info x%x nlp_type x%x\n",
+ be32_to_cpu(nvpr->word1),
+ be32_to_cpu(nvpr->word4),
+ be32_to_cpu(nvpr->word5),
+ ndlp->nlp_flag, ndlp->nlp_fcp_info,
+ ndlp->nlp_type);
+ /* PRLI completed. Decrement count. */
+ ndlp->fc4_prli_sent--;
}
if (!(ndlp->nlp_type & NLP_FCP_TARGET) &&
(vport->port_type == LPFC_NPIV_PORT) &&
@@ -1785,11 +1983,24 @@ out:
return ndlp->nlp_state;
}
- ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
- if (ndlp->nlp_type & NLP_FCP_TARGET)
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
- else
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+out_err:
+ /* The ndlp state cannot move to MAPPED or UNMAPPED before all PRLIs
+ * are complete.
+ */
+ if (ndlp->fc4_prli_sent == 0) {
+ ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE;
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
+ else
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+ } else
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_ELS,
+ "3067 PRLI's still outstanding "
+ "on x%06x - count %d, Pend Node Mode "
+ "transition...\n",
+ ndlp->nlp_DID, ndlp->fc4_prli_sent);
+
return ndlp->nlp_state;
}
@@ -2104,7 +2315,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
/* flush the target */
- lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
+ lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
ndlp->nlp_sid, 0, LPFC_CTX_TGT);
/* Treat like rcv logo */
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
new file mode 100644
index 000000000000..609a908ea9db
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -0,0 +1,2464 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ ********************************************************************/
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <linux/nvme.h>
+#include <linux/nvme-fc-driver.h>
+#include <linux/nvme-fc.h>
+#include "lpfc_version.h"
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_nvme.h"
+#include "lpfc_scsi.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+/* NVME initiator-based functions */
+
+static struct lpfc_nvme_buf *
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);
+
+static void
+lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);
+
+
+/**
+ * lpfc_nvme_create_queue -
+ * @lpfc_pnvme: Pointer to the driver's nvme instance data
+ * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
+ * @handle: An opaque driver handle used in follow-up calls.
+ *
+ * Driver registers this routine to preallocate and initialize any
+ * internal data structures to bind the @qidx to its internal IO queues.
+ * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
+ *
+ * Return value :
+ * 0 - Success
+ * -EINVAL - Unsupported input value.
+ * -ENOMEM - Could not alloc necessary memory
+ **/
+static int
+lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
+ unsigned int qidx, u16 qsize,
+ void **handle)
+{
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+ struct lpfc_nvme_qhandle *qhandle;
+ char *str;
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ vport = lport->vport;
+ qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
+ if (qhandle == NULL)
+ return -ENOMEM;
+
+ qhandle->cpu_id = smp_processor_id();
+ qhandle->qidx = qidx;
+ /*
+ * NVME qidx == 0 is the admin queue, so both admin queue
+ * and first IO queue will use MSI-X vector and associated
+ * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
+ */
+ if (qidx) {
+ str = "IO "; /* IO queue */
+ qhandle->index = ((qidx - 1) %
+ vport->phba->cfg_nvme_io_channel);
+ } else {
+ str = "ADM"; /* Admin queue */
+ qhandle->index = qidx;
+ }
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6073 Binding %s HdwQueue %d (cpu %d) to "
+ "io_channel %d qhandle %p\n", str,
+ qidx, qhandle->cpu_id, qhandle->index, qhandle);
+ *handle = (void *)qhandle;
+ return 0;
+}
+
+/**
+ * lpfc_nvme_delete_queue -
+ * @lpfc_pnvme: Pointer to the driver's nvme instance data
+ * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
+ * @handle: An opaque driver handle from lpfc_nvme_create_queue
+ *
+ * Driver registers this routine to free any internal data structures
+ * used to bind the @qidx to its internal IO queues.
+ *
+ * Return value :
+ * 0 - Success
+ * TODO: What are the failure codes.
+ **/
+static void
+lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
+ unsigned int qidx,
+ void *handle)
+{
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ vport = lport->vport;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+ "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n",
+ lport, qidx, handle);
+ kfree(handle);
+}
+
+static void
+lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
+{
+ struct lpfc_nvme_lport *lport = localport->private;
+
+ /* release any threads waiting for the unreg to complete */
+ complete(&lport->lport_unreg_done);
+}
+
+/* lpfc_nvme_remoteport_delete
+ *
+ * @remoteport: Pointer to an nvme transport remoteport instance.
+ *
+ * This is a template downcall. NVME transport calls this function
+ * when it has completed the unregistration of a previously
+ * registered remoteport.
+ *
+ * Return value :
+ * None
+ */
+void
+lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
+{
+ struct lpfc_nvme_rport *rport = remoteport->private;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+
+ ndlp = rport->ndlp;
+ if (!ndlp)
+ goto rport_err;
+
+ vport = ndlp->vport;
+ if (!vport)
+ goto rport_err;
+
+ /* Remove this rport from the lport's list - memory is owned by the
+ * transport. Remove the ndlp reference for the NVME transport before
+ * calling state machine to remove the node, this is devloss = 0
+ * semantics.
+ */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6146 remoteport delete complete %p\n",
+ remoteport);
+ list_del(&rport->list);
+ lpfc_nlp_put(ndlp);
+
+ rport_err:
+ /* This call has to execute as long as the rport is valid.
+ * Release any threads waiting for the unreg to complete.
+ */
+ complete(&rport->rport_unreg_done);
+}
+
+static void
+lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_vport *vport = cmdwqe->vport;
+ uint32_t status;
+ struct nvmefc_ls_req *pnvme_lsreq;
+ struct lpfc_dmabuf *buf_ptr;
+ struct lpfc_nodelist *ndlp;
+
+ vport->phba->fc4NvmeLsCmpls++;
+
+ pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
+ status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
+ ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6047 nvme cmpl Enter "
+ "Data %p DID %x Xri: %x status %x cmd:%p lsreg:%p "
+ "bmp:%p ndlp:%p\n",
+ pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+ cmdwqe->sli4_xritag, status,
+ cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
+
+ lpfc_nvmeio_data(phba, "NVME LS CMPL: xri x%x stat x%x parm x%x\n",
+ cmdwqe->sli4_xritag, status, wcqe->parameter);
+
+ if (cmdwqe->context3) {
+ buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
+ lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
+ kfree(buf_ptr);
+ cmdwqe->context3 = NULL;
+ }
+ if (pnvme_lsreq->done)
+ pnvme_lsreq->done(pnvme_lsreq, status);
+ else
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6046 nvme cmpl without done call back? "
+ "Data %p DID %x Xri: %x status %x\n",
+ pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
+ cmdwqe->sli4_xritag, status);
+ if (ndlp) {
+ lpfc_nlp_put(ndlp);
+ cmdwqe->context1 = NULL;
+ }
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+}
+
+static int
+lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
+ struct lpfc_dmabuf *inp,
+ struct nvmefc_ls_req *pnvme_lsreq,
+ void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_wcqe_complete *),
+ struct lpfc_nodelist *ndlp, uint32_t num_entry,
+ uint32_t tmo, uint8_t retry)
+{
+ struct lpfc_hba *phba = vport->phba;
+ union lpfc_wqe *wqe;
+ struct lpfc_iocbq *genwqe;
+ struct ulp_bde64 *bpl;
+ struct ulp_bde64 bde;
+ int i, rc, xmit_len, first_len;
+
+ /* Allocate buffer for command WQE */
+ genwqe = lpfc_sli_get_iocbq(phba);
+ if (genwqe == NULL)
+ return 1;
+
+ wqe = &genwqe->wqe;
+ memset(wqe, 0, sizeof(union lpfc_wqe));
+
+ genwqe->context3 = (uint8_t *)bmp;
+ genwqe->iocb_flag |= LPFC_IO_NVME_LS;
+
+ /* Save for completion so we can release these resources */
+ genwqe->context1 = lpfc_nlp_get(ndlp);
+ genwqe->context2 = (uint8_t *)pnvme_lsreq;
+ /* Fill in payload, bp points to frame payload */
+
+ if (!tmo)
+ /* FC spec states we need 3 * ratov for CT requests */
+ tmo = (3 * phba->fc_ratov);
+
+ /* For this command calculate the xmit length of the request bde. */
+ xmit_len = 0;
+ first_len = 0;
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ for (i = 0; i < num_entry; i++) {
+ bde.tus.w = bpl[i].tus.w;
+ if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
+ break;
+ xmit_len += bde.tus.f.bdeSize;
+ if (i == 0)
+ first_len = xmit_len;
+ }
+
+ genwqe->rsvd2 = num_entry;
+ genwqe->hba_wqidx = 0;
+
+ /* Words 0 - 2 */
+ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->generic.bde.tus.f.bdeSize = first_len;
+ wqe->generic.bde.addrLow = bpl[0].addrLow;
+ wqe->generic.bde.addrHigh = bpl[0].addrHigh;
+
+ /* Word 3 */
+ wqe->gen_req.request_payload_len = first_len;
+
+ /* Word 4 */
+
+ /* Word 5 */
+ bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
+ bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
+ bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
+ bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
+
+ /* Word 7 */
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
+ bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
+ bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
+ bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
+
+ /* Word 8 */
+ wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);
+
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+ bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+ bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);
+
+
+ /* Issue GEN REQ WQE for NPORT <did> */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "6050 Issue GEN REQ WQE to NPORT x%x "
+ "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
+ ndlp->nlp_DID, genwqe->iotag,
+ vport->port_state,
+ genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
+ genwqe->wqe_cmpl = cmpl;
+ genwqe->iocb_cmpl = NULL;
+ genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
+ genwqe->vport = vport;
+ genwqe->retry = retry;
+
+ lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
+ genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
+
+ rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
+ if (rc == WQE_ERROR) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "6045 Issue GEN REQ WQE to NPORT x%x "
+ "Data: x%x x%x\n",
+ ndlp->nlp_DID, genwqe->iotag,
+ vport->port_state);
+ lpfc_sli_release_iocbq(phba, genwqe);
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * lpfc_nvme_ls_req - Issue a Link Service request
+ * @lpfc_pnvme: Pointer to the driver's nvme instance data
+ * @lpfc_nvme_lport: Pointer to the driver's local port data
+ * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
+ *
+ * Driver registers this routine to handle any link service request
+ * from the nvme_fc transport to a remote nvme-aware port.
+ *
+ * Return value :
+ * 0 - Success
+ * TODO: What are the failure codes.
+ **/
+static int
+lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ int ret = 0;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+ struct lpfc_nodelist *ndlp;
+ struct ulp_bde64 *bpl;
+ struct lpfc_dmabuf *bmp;
+
+	/* There are two DMA buffers in the request; in reality there is one
+	 * and the second is just the start address + cmd size.
+	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
+	 * in a lpfc_dmabuf struct. When freeing, only the wrapper is freed
+	 * because the nvme layer owns the data buffers.
+	 * The driver does not need to inspect the request or response
+	 * payloads; it only cares that a response arrived. All payload
+	 * handling happens in the nvme-fc layer.
+	 */
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ vport = lport->vport;
+
+ ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6043 Could not find node for DID %x\n",
+ pnvme_rport->port_id);
+ return 1;
+ }
+ bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+				 "6044 Could not allocate LS cmd buffer "
+				 "for DID %x\n",
+				 pnvme_rport->port_id);
+		return 2;
+	}
+ INIT_LIST_HEAD(&bmp->list);
+ bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
+ if (!bmp->virt) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+				 "6042 Could not allocate mbuf for DID %x\n",
+ pnvme_rport->port_id);
+ kfree(bmp);
+ return 3;
+ }
+ bpl = (struct ulp_bde64 *)bmp->virt;
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
+ bpl->tus.f.bdeFlags = 0;
+ bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ bpl++;
+
+ bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
+ bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
+ bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+ bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
+ bpl->tus.w = le32_to_cpu(bpl->tus.w);
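+	/* bpl[0] maps the LS request payload and bpl[1] the response buffer;
+	 * both DMA regions remain owned by the nvme-fc transport.
+	 */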
+
+ /* Expand print to include key fields. */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+			 "6051 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
+ "rsplen:%d %pad %pad\n",
+ pnvme_lport, pnvme_rport,
+ pnvme_lsreq, pnvme_lsreq->rqstlen,
+ pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+ &pnvme_lsreq->rspdma);
+
+ vport->phba->fc4NvmeLsRequests++;
+
+	/* Hardcode the LS request timeout to 30 seconds; connections fail
+	 * with a shorter wait.
+	 */
+ ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
+ pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
+ ndlp, 2, 30, 0);
+ if (ret != WQE_SUCCESS) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6052 EXIT. issue ls wqe failed lport %p, "
+				 "rport %p lsreq %p Status %x DID %x\n",
+ pnvme_lport, pnvme_rport, pnvme_lsreq,
+ ret, ndlp->nlp_DID);
+ lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
+ kfree(bmp);
+ return ret;
+ }
+
+	/* Request queued successfully; completion is handled in
+	 * lpfc_nvme_cmpl_gen_req.
+	 */
+ return ret;
+}
+
+/**
+ * lpfc_nvme_ls_abort - Abort a prior Link Service request
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport the LS request was sent to
+ * @pnvme_lsreq: Pointer to the LS request being aborted
+ *
+ * Driver registers this routine to abort a previously issued link
+ * service request to a remote nvme-aware port.
+ *
+ * Return value :
+ *   None
+ **/
+static void
+lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ struct nvmefc_ls_req *pnvme_lsreq)
+{
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ LIST_HEAD(abort_list);
+ struct lpfc_sli_ring *pring;
+ struct lpfc_iocbq *wqe, *next_wqe;
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ vport = lport->vport;
+ phba = vport->phba;
+
+ ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ "6049 Could not find node for DID %x\n",
+ pnvme_rport->port_id);
+ return;
+ }
+
+ /* Expand print to include key fields. */
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
+ "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d "
+ "rsplen:%d %pad %pad\n",
+ pnvme_lport, pnvme_rport,
+ pnvme_lsreq, pnvme_lsreq->rqstlen,
+ pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
+ &pnvme_lsreq->rspdma);
+
+ /*
+ * Lock the ELS ring txcmplq and build a local list of all ELS IOs
+ * that need an ABTS. The IOs need to stay on the txcmplq so that
+ * the abort operation completes them successfully.
+ */
+ pring = phba->sli4_hba.nvmels_wq->pring;
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&pring->ring_lock);
+ list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
+		/* Add to abort_list on NDLP match. */
+ if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
+ wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+ list_add_tail(&wqe->dlist, &abort_list);
+ }
+ }
+ spin_unlock(&pring->ring_lock);
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Abort the targeted IOs and remove them from the abort list. */
+ list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
+ spin_lock_irq(&phba->hbalock);
+ list_del_init(&wqe->dlist);
+ lpfc_sli_issue_abort_iotag(phba, pring, wqe);
+ spin_unlock_irq(&phba->hbalock);
+ }
+}
+
+/* Fix up the existing sgls for NVME IO. */
+static void
+lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
+ struct lpfc_nvme_buf *lpfc_ncmd,
+ struct nvmefc_fcp_req *nCmd)
+{
+ struct sli4_sge *sgl;
+ union lpfc_wqe128 *wqe;
+ uint32_t *wptr, *dptr;
+
+	/*
+	 * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
+	 * match NVME.  NVME sends 96 bytes.  Also, use the NVME
+	 * command's command and response DMA addresses rather than
+	 * the virtual memory to ease the restore operation.
+	 */
+ sgl = lpfc_ncmd->nvme_sgl;
+ sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
+
+ sgl++;
+
+ /* Setup the physical region for the FCP RSP */
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ if (nCmd->sg_cnt)
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(nCmd->rsplen);
+
+ /*
+ * Get a local pointer to the built-in wqe and correct
+ * the cmd size to match NVME's 96 bytes and fix
+ * the dma address.
+ */
+
+ /* 128 byte wqe support here */
+ wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
+
+ /* Word 0-2 - NVME CMND IU (embedded payload) */
+ wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
+ wqe->generic.bde.tus.f.bdeSize = 60;
+ wqe->generic.bde.addrHigh = 0;
+ wqe->generic.bde.addrLow = 64; /* Word 16 */
+
+ /* Word 3 */
+ bf_set(payload_offset_len, &wqe->fcp_icmd,
+ (nCmd->rsplen + nCmd->cmdlen));
+
+ /* Word 10 */
+ bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
+
+ /*
+ * Embed the payload in the last half of the WQE
+ * WQE words 16-30 get the NVME CMD IU payload
+ *
+ * WQE Word 16 is already setup with flags
+ * WQE words 17-19 get payload Words 2-4
+ * WQE words 20-21 get payload Words 6-7
+ * WQE words 22-29 get payload Words 16-23
+ */
+ wptr = &wqe->words[17]; /* WQE ptr */
+ dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
+ dptr += 2; /* Skip Words 0-1 in payload */
+
+ *wptr++ = *dptr++; /* Word 2 */
+ *wptr++ = *dptr++; /* Word 3 */
+ *wptr++ = *dptr++; /* Word 4 */
+ dptr++; /* Skip Word 5 in payload */
+ *wptr++ = *dptr++; /* Word 6 */
+ *wptr++ = *dptr++; /* Word 7 */
+ dptr += 8; /* Skip Words 8-15 in payload */
+ *wptr++ = *dptr++; /* Word 16 */
+ *wptr++ = *dptr++; /* Word 17 */
+ *wptr++ = *dptr++; /* Word 18 */
+ *wptr++ = *dptr++; /* Word 19 */
+ *wptr++ = *dptr++; /* Word 20 */
+ *wptr++ = *dptr++; /* Word 21 */
+ *wptr++ = *dptr++; /* Word 22 */
+ *wptr = *dptr; /* Word 23 */
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+static void
+lpfc_nvme_ktime(struct lpfc_hba *phba,
+ struct lpfc_nvme_buf *lpfc_ncmd)
+{
+ uint64_t seg1, seg2, seg3, seg4;
+
+ if (!phba->ktime_on)
+ return;
+ if (!lpfc_ncmd->ts_last_cmd ||
+ !lpfc_ncmd->ts_cmd_start ||
+ !lpfc_ncmd->ts_cmd_wqput ||
+ !lpfc_ncmd->ts_isr_cmpl ||
+ !lpfc_ncmd->ts_data_nvme)
+ return;
+ if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
+ return;
+ if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
+ return;
+ if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
+ return;
+ if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
+ return;
+	/*
+	 * Segment 1 - Time from when the last FCP command cmpl was handed
+	 *              off to the NVME layer to the start of the next command.
+	 * Segment 2 - Time from when the driver receives an IO cmd start
+	 *              from the NVME layer to when the WQ put is done for it.
+	 * Segment 3 - Time from when the driver WQ put is done for the IO cmd
+	 *              to the MSI-X ISR for the IO cmpl.
+	 * Segment 4 - Time from the MSI-X ISR for the IO cmpl to when the
+	 *              cmpl is handed off to the NVME layer.
+	 */
+ seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
+ if (seg1 > 5000000) /* 5 ms - for sequential IOs */
+ return;
+
+ /* Calculate times relative to start of IO */
+ seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
+ seg3 = (lpfc_ncmd->ts_isr_cmpl -
+ lpfc_ncmd->ts_cmd_start) - seg2;
+ seg4 = (lpfc_ncmd->ts_data_nvme -
+ lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
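+	/* seg2-seg4 above are successive deltas within this IO; each
+	 * excludes the time already counted in the earlier segments.
+	 */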
+ phba->ktime_data_samples++;
+ phba->ktime_seg1_total += seg1;
+ if (seg1 < phba->ktime_seg1_min)
+ phba->ktime_seg1_min = seg1;
+ else if (seg1 > phba->ktime_seg1_max)
+ phba->ktime_seg1_max = seg1;
+ phba->ktime_seg2_total += seg2;
+ if (seg2 < phba->ktime_seg2_min)
+ phba->ktime_seg2_min = seg2;
+ else if (seg2 > phba->ktime_seg2_max)
+ phba->ktime_seg2_max = seg2;
+ phba->ktime_seg3_total += seg3;
+ if (seg3 < phba->ktime_seg3_min)
+ phba->ktime_seg3_min = seg3;
+ else if (seg3 > phba->ktime_seg3_max)
+ phba->ktime_seg3_max = seg3;
+ phba->ktime_seg4_total += seg4;
+ if (seg4 < phba->ktime_seg4_min)
+ phba->ktime_seg4_min = seg4;
+ else if (seg4 > phba->ktime_seg4_max)
+ phba->ktime_seg4_max = seg4;
+
+ lpfc_ncmd->ts_last_cmd = 0;
+ lpfc_ncmd->ts_cmd_start = 0;
+ lpfc_ncmd->ts_cmd_wqput = 0;
+ lpfc_ncmd->ts_isr_cmpl = 0;
+ lpfc_ncmd->ts_data_nvme = 0;
+}
+#endif
+
+/**
+ * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
+ * @phba: Pointer to HBA context object
+ * @pwqeIn: Pointer to the command WQE that completed
+ * @wcqe: Pointer to the work-queue completion entry
+ *
+ * Driver registers this routine as the WQE completion handler for an
+ * NVME FCP IO.  It translates the WCQE status into the nvmefc_fcp_req
+ * result and hands the completed IO back to the nvme_fc transport.
+ *
+ * Return value :
+ *   None
+ **/
+static void
+lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd =
+ (struct lpfc_nvme_buf *)pwqeIn->context1;
+ struct lpfc_vport *vport = pwqeIn->vport;
+ struct nvmefc_fcp_req *nCmd;
+ struct nvme_fc_ersp_iu *ep;
+ struct nvme_fc_cmd_iu *cp;
+ struct lpfc_nvme_rport *rport;
+ struct lpfc_nodelist *ndlp;
+ unsigned long flags;
+ uint32_t code;
+ uint16_t cid, sqhd, data;
+ uint32_t *ptr;
+
+ /* Sanity check on return of outstanding command */
+ if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+ "6071 Completion pointers bad on wqe %p.\n",
+ wcqe);
+ return;
+ }
+ phba->fc4NvmeIoCmpls++;
+
+ nCmd = lpfc_ncmd->nvmeCmd;
+ rport = lpfc_ncmd->nrport;
+
+ lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
+ /*
+ * Catch race where our node has transitioned, but the
+ * transport is still transitioning.
+ */
+ ndlp = rport->ndlp;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+ "6061 rport %p, ndlp %p, DID x%06x ndlp "
+ "not ready.\n",
+ rport, ndlp, rport->remoteport->port_id);
+
+ ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6062 Ignoring NVME cmpl. No ndlp\n");
+ goto out_err;
+ }
+ }
+
+ code = bf_get(lpfc_wcqe_c_code, wcqe);
+ if (code == CQE_CODE_NVME_ERSP) {
+ /* For this type of CQE, we need to rebuild the rsp */
+ ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
+
+ /*
+ * Get Command Id from cmd to plug into response. This
+ * code is not needed in the next NVME Transport drop.
+ */
+ cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
+ cid = cp->sqe.common.command_id;
+
+ /*
+ * RSN is in CQE word 2
+ * SQHD is in CQE Word 3 bits 15:0
+ * Cmd Specific info is in CQE Word 1
+ * and in CQE Word 0 bits 15:0
+ */
+ sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
+
+ /* Now lets build the NVME ERSP IU */
+ ep->iu_len = cpu_to_be16(8);
+ ep->rsn = wcqe->parameter;
+ ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
+ ep->rsvd12 = 0;
+ ptr = (uint32_t *)&ep->cqe.result.u64;
+ *ptr++ = wcqe->total_data_placed;
+ data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
+ *ptr = (uint32_t)data;
+ ep->cqe.sq_head = sqhd;
+ ep->cqe.sq_id = nCmd->sqid;
+ ep->cqe.command_id = cid;
+ ep->cqe.status = 0;
+
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ lpfc_ncmd->result = 0;
+ nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
+ nCmd->transferred_length = nCmd->payload_length;
+ } else {
+ lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
+ LPFC_IOCB_STATUS_MASK);
+ lpfc_ncmd->result = wcqe->parameter;
+
+ /* For NVME, the only failure path that results in an
+ * IO error is when the adapter rejects it. All other
+ * conditions are a success case and resolved by the
+ * transport.
+ * IOSTAT_FCP_RSP_ERROR means:
+ * 1. Length of data received doesn't match total
+ * transfer length in WQE
+ * 2. If the RSP payload does NOT match these cases:
+ * a. RSP length 12/24 bytes and all zeros
+ * b. NVME ERSP
+ */
+ switch (lpfc_ncmd->status) {
+ case IOSTAT_SUCCESS:
+ nCmd->transferred_length = wcqe->total_data_placed;
+ nCmd->rcv_rsplen = 0;
+ nCmd->status = 0;
+ break;
+ case IOSTAT_FCP_RSP_ERROR:
+ nCmd->transferred_length = wcqe->total_data_placed;
+ nCmd->rcv_rsplen = wcqe->parameter;
+ nCmd->status = 0;
+ /* Sanity check */
+ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
+ break;
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6081 NVME Completion Protocol Error: "
+ "status x%x result x%x placed x%x\n",
+ lpfc_ncmd->status, lpfc_ncmd->result,
+ wcqe->total_data_placed);
+ break;
+ default:
+out_err:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6072 NVME Completion Error: "
+ "status x%x result x%x placed x%x\n",
+ lpfc_ncmd->status, lpfc_ncmd->result,
+ wcqe->total_data_placed);
+ nCmd->transferred_length = 0;
+ nCmd->rcv_rsplen = 0;
+ nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
+ }
+ }
+
+	/* pick up SLI4 exchange busy condition */
+ if (bf_get(lpfc_wcqe_c_xb, wcqe))
+ lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
+ else
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+
+ if (ndlp && NLP_CHK_NODE_ACT(ndlp))
+ atomic_dec(&ndlp->cmd_pending);
+
+ /* Update stats and complete the IO. There is
+ * no need for dma unprep because the nvme_transport
+ * owns the dma address.
+ */
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on) {
+ lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
+ lpfc_ncmd->ts_data_nvme = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
+ lpfc_nvme_ktime(phba, lpfc_ncmd);
+ }
+ if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ if (lpfc_ncmd->cpu != smp_processor_id())
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6701 CPU Check cmpl: "
+ "cpu %d expect %d\n",
+ smp_processor_id(), lpfc_ncmd->cpu);
+ if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
+ phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
+ }
+#endif
+ nCmd->done(nCmd);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ lpfc_ncmd->nrport = NULL;
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+}
+
+
+/**
+ * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
+ * @vport: The virtual port issuing the IO
+ * @lpfc_ncmd: Pointer to the driver's NVME IO buffer
+ * @pnode: Pointer to the nodelist entry for the target rport
+ *
+ * This routine initializes the WQE for the NVME FCP IO described by
+ * the nvmefc_fcp_req attached to @lpfc_ncmd, setting the command type
+ * (read, write, or command-only) and the node-specific WQE fields.
+ *
+ * Return value :
+ *   0 - Success
+ *   -EINVAL - the node is not usable
+ **/
+static int
+lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
+ struct lpfc_nvme_buf *lpfc_ncmd,
+ struct lpfc_nodelist *pnode)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
+ struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
+ union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
+ uint32_t req_len;
+
+ if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+ return -EINVAL;
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither.
+ */
+ wqe->fcp_iwrite.initial_xfer_len = 0;
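+	/* initial_xfer_len stays 0 unless first burst applies to a write
+	 * (set below).
+	 */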
+ if (nCmd->sg_cnt) {
+ if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
+ /* Word 5 */
+ if ((phba->cfg_nvme_enable_fb) &&
+ (pnode->nlp_flag & NLP_FIRSTBURST)) {
+ req_len = lpfc_ncmd->nvmeCmd->payload_length;
+ if (req_len < pnode->nvme_fb_size)
+ wqe->fcp_iwrite.initial_xfer_len =
+ req_len;
+ else
+ wqe->fcp_iwrite.initial_xfer_len =
+ pnode->nvme_fb_size;
+ }
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com,
+ CMD_FCP_IWRITE64_WQE);
+ bf_set(wqe_pu, &wqe->generic.wqe_com,
+ PARM_READ_CHECK);
+
+ /* Word 10 */
+ bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com,
+ LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ if (phba->cfg_nvme_oas)
+ bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
+ NVME_WRITE_CMD);
+
+ /* Word 16 */
+ wqe->words[16] = LPFC_NVME_EMBED_WRITE;
+
+ phba->fc4NvmeOutputRequests++;
+ } else {
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com,
+ CMD_FCP_IREAD64_WQE);
+ bf_set(wqe_pu, &wqe->generic.wqe_com,
+ PARM_READ_CHECK);
+
+ /* Word 10 */
+ bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+ LPFC_WQE_LENLOC_WORD4);
+ if (phba->cfg_nvme_oas)
+ bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
+ NVME_READ_CMD);
+
+ /* Word 16 */
+ wqe->words[16] = LPFC_NVME_EMBED_READ;
+
+ phba->fc4NvmeInputRequests++;
+ }
+ } else {
+ /* Word 4 */
+ wqe->fcp_icmd.rsrvd4 = 0;
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE);
+ bf_set(wqe_pu, &wqe->generic.wqe_com, 0);
+
+ /* Word 10 */
+ bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+ LPFC_WQE_LENLOC_NONE);
+ if (phba->cfg_nvme_oas)
+ bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
+
+ /* Word 16 */
+ wqe->words[16] = LPFC_NVME_EMBED_CMD;
+
+ phba->fc4NvmeControlRequests++;
+ }
+ /*
+ * Finish initializing those WQE fields that are independent
+ * of the nvme_cmnd request_buffer
+ */
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
+ phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
+
+ /* Word 7 */
+ /* Preserve Class data in the ndlp. */
+ bf_set(wqe_class, &wqe->generic.wqe_com,
+ (pnode->nlp_fcp_info & 0x0f));
+
+ /* Word 8 */
+ wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+ pwqeq->vport = vport;
+ return 0;
+}
+
+
+/**
+ * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
+ * @vport: The virtual port issuing the IO
+ * @lpfc_ncmd: Pointer to the driver's NVME IO buffer
+ *
+ * This routine fixes up the command and response SGEs and then walks
+ * the scatter-gather list supplied by the nvme_fc transport, building
+ * the data SGEs and setting the total transfer length in the WQE.
+ *
+ * Return value :
+ *   0 - Success
+ *   1 - Failure (bad sg list or too many segments)
+ **/
+static int
+lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
+ struct lpfc_nvme_buf *lpfc_ncmd)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
+ union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
+ struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
+ struct scatterlist *data_sg;
+ struct sli4_sge *first_data_sgl;
+ dma_addr_t physaddr;
+ uint32_t num_bde = 0;
+ uint32_t dma_len;
+ uint32_t dma_offset = 0;
+ int nseg, i;
+
+ /* Fix up the command and response DMA stuff. */
+ lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
+
+ /*
+ * There are three possibilities here - use scatter-gather segment, use
+ * the single mapping, or neither.
+ */
+ if (nCmd->sg_cnt) {
+ /*
+ * Jump over the cmd and rsp SGEs. The fix routine
+ * has already adjusted for this.
+ */
+ sgl += 2;
+
+ first_data_sgl = sgl;
+ lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
+ if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6058 Too many sg segments from "
+ "NVME Transport. Max %d, "
+ "nvmeIO sg_cnt %d\n",
+ phba->cfg_sg_seg_cnt,
+ lpfc_ncmd->seg_cnt);
+ lpfc_ncmd->seg_cnt = 0;
+ return 1;
+ }
+
+ /*
+ * The driver established a maximum scatter-gather segment count
+ * during probe that limits the number of sg elements in any
+ * single nvme command. Just run through the seg_cnt and format
+ * the sge's.
+ */
+ nseg = nCmd->sg_cnt;
+ data_sg = nCmd->first_sgl;
+ for (i = 0; i < nseg; i++) {
+ if (data_sg == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6059 dptr err %d, nseg %d\n",
+ i, nseg);
+ lpfc_ncmd->seg_cnt = 0;
+ return 1;
+ }
+ physaddr = data_sg->dma_address;
+ dma_len = data_sg->length;
+ sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+ sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ if ((num_bde + 1) == nseg)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(dma_len);
+
+ dma_offset += dma_len;
+ data_sg = sg_next(data_sg);
+ sgl++;
+ }
+ } else {
+		/* For this clause to be valid, the payload_length
+		 * and sg_cnt must be zero.
+		 */
+ if (nCmd->payload_length != 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6063 NVME DMA Prep Err: sg_cnt %d "
+ "payload_length x%x\n",
+ nCmd->sg_cnt, nCmd->payload_length);
+ return 1;
+ }
+ }
+
+	/*
+	 * Set WQE word 4, the total transfer length, from the nvme
+	 * request's payload length.
+	 */
+ wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
+ return 0;
+}
+
+/**
+ * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport receiving the IO
+ * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver
+ *
+ * Driver registers this routine as its IO request handler.  This
+ * routine issues an fcp WQE with data from the @pnvme_fcreq data
+ * structure to the rport indicated in @pnvme_rport.
+ *
+ * Return value :
+ *   0 - Success
+ *   negative errno - Failure
+ **/
+static int
+lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *pnvme_fcreq)
+{
+ int ret = 0;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_nvme_buf *lpfc_ncmd;
+ struct lpfc_nvme_rport *rport;
+ struct lpfc_nvme_qhandle *lpfc_queue_info;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint64_t start = 0;
+#endif
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ vport = lport->vport;
+ phba = vport->phba;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ start = ktime_get_ns();
+#endif
+ rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
+
+ /*
+ * Catch race where our node has transitioned, but the
+ * transport is still transitioning.
+ */
+ ndlp = rport->ndlp;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+ "6053 rport %p, ndlp %p, DID x%06x "
+ "ndlp not ready.\n",
+ rport, ndlp, pnvme_rport->port_id);
+
+ ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6066 Missing node for DID %x\n",
+ pnvme_rport->port_id);
+ ret = -ENODEV;
+ goto out_fail;
+ }
+ }
+
+ /* The remote node has to be a mapped target or it's an error. */
+ if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
+ "6036 rport %p, DID x%06x not ready for "
+ "IO. State x%x, Type x%x\n",
+ rport, pnvme_rport->port_id,
+ ndlp->nlp_state, ndlp->nlp_type);
+ ret = -ENODEV;
+ goto out_fail;
+
+ }
+
+ /* The node is shared with FCP IO, make sure the IO pending count does
+ * not exceed the programmed depth.
+ */
+ if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
+ ret = -EAGAIN;
+ goto out_fail;
+ }
+
+ lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
+ if (lpfc_ncmd == NULL) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
+ "6065 driver's buffer pool is empty, "
+ "IO failed\n");
+ ret = -ENOMEM;
+ goto out_fail;
+ }
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on) {
+ lpfc_ncmd->ts_cmd_start = start;
+ lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
+ }
+#endif
+
+ /*
+ * Store the data needed by the driver to issue, abort, and complete
+ * an IO.
+ * Do not let the IO hang out forever. There is no midlayer issuing
+ * an abort so inform the FW of the maximum IO pending time.
+ */
+ pnvme_fcreq->private = (void *)lpfc_ncmd;
+ lpfc_ncmd->nvmeCmd = pnvme_fcreq;
+ lpfc_ncmd->nrport = rport;
+ lpfc_ncmd->start_time = jiffies;
+
+ lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
+ ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_free_nvme_buf;
+ }
+
+ atomic_inc(&ndlp->cmd_pending);
+
+	/*
+	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
+	 * This identifier was created in our hardware queue create callback
+	 * routine. The driver is now dependent on the IO queue steering from
+	 * the transport. We are trusting that the upper NVME layers know
+	 * which index to use and that they have affinitized a CPU to this
+	 * hardware queue. A hardware queue maps to a driver MSI-X
+	 * vector/EQ/CQ/WQ.
+	 */
+ lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;
+
+ lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
+ lpfc_ncmd->cur_iocbq.sli4_xritag,
+ lpfc_queue_info->index, ndlp->nlp_DID);
+
+ ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
+ if (ret) {
+ atomic_dec(&ndlp->cmd_pending);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
+ "6113 FCP could not issue WQE err %x "
+ "sid: x%x did: x%x oxid: x%x\n",
+ ret, vport->fc_myDID, ndlp->nlp_DID,
+ lpfc_ncmd->cur_iocbq.sli4_xritag);
+ ret = -EINVAL;
+ goto out_free_nvme_buf;
+ }
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
+
+ if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ lpfc_ncmd->cpu = smp_processor_id();
+ if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
+ /* Check for admin queue */
+ if (lpfc_queue_info->qidx) {
+ lpfc_printf_vlog(vport,
+ KERN_ERR, LOG_NVME_IOERR,
+ "6702 CPU Check cmd: "
+ "cpu %d wq %d\n",
+ lpfc_ncmd->cpu,
+ lpfc_queue_info->index);
+ }
+ lpfc_ncmd->cpu = lpfc_queue_info->index;
+ }
+ if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
+ phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
+ }
+#endif
+ return 0;
+
+ out_free_nvme_buf:
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ out_fail:
+ return ret;
+}
+
+/**
+ * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
+ * @phba: Pointer to HBA context object
+ * @cmdiocb: Pointer to command iocb object.
+ * @abts_cmpl: Pointer to the wcqe complete object.
+ *
+ * This is the callback function for any NVME FCP IO that was aborted.
+ *
+ * Return value:
+ * None
+ **/
+void
+lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
+ struct lpfc_wcqe_complete *abts_cmpl)
+{
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6145 ABORT_XRI_CN completing on rpi x%x "
+ "original iotag x%x, abort cmd iotag x%x "
+ "req_tag x%x, status x%x, hwstatus x%x\n",
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ cmdiocb->iocb.un.acxri.abortIoTag,
+ cmdiocb->iotag,
+ bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
+ bf_get(lpfc_wcqe_c_status, abts_cmpl),
+ bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
+ lpfc_sli_release_iocbq(phba, cmdiocb);
+}
+
+/**
+ * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
+ * @pnvme_lport: Pointer to the driver's local port data
+ * @pnvme_rport: Pointer to the rport the IO was sent to
+ * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
+ * @pnvme_fcreq: IO request from nvme fc to driver
+ *
+ * Driver registers this routine as its nvme request io abort handler.  This
+ * routine issues an fcp Abort WQE with data from the @pnvme_fcreq data
+ * structure to the rport indicated in @pnvme_rport.  This routine is
+ * executed asynchronously - once the target is validated as "MAPPED" and
+ * ready for IO, the driver issues the abort request and returns.
+ *
+ * Return value:
+ *   None
+ **/
+static void
+lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
+ struct nvme_fc_remote_port *pnvme_rport,
+ void *hw_queue_handle,
+ struct nvmefc_fcp_req *pnvme_fcreq)
+{
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_vport *vport;
+ struct lpfc_hba *phba;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_nvme_rport *rport;
+ struct lpfc_nvme_buf *lpfc_nbuf;
+ struct lpfc_iocbq *abts_buf;
+ struct lpfc_iocbq *nvmereq_wqe;
+ union lpfc_wqe *abts_wqe;
+ unsigned long flags;
+ int ret_val;
+
+ lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
+ rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
+ vport = lport->vport;
+ phba = vport->phba;
+
+ /* Announce entry to new IO submit field. */
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ "6002 Abort Request to rport DID x%06x "
+ "for nvme_fc_req %p\n",
+ pnvme_rport->port_id,
+ pnvme_fcreq);
+
+ /*
+ * Catch race where our node has transitioned, but the
+ * transport is still transitioning.
+ */
+ ndlp = rport->ndlp;
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
+ "6054 rport %p, ndlp %p, DID x%06x ndlp "
+				 "not ready.\n",
+ rport, ndlp, pnvme_rport->port_id);
+
+ ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
+ "6055 Could not find node for "
+ "DID %x\n",
+ pnvme_rport->port_id);
+ return;
+ }
+ }
+
+ /* The remote node has to be ready to send an abort. */
+ if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
+ !(ndlp->nlp_type & NLP_NVME_TARGET)) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
+ "6048 rport %p, DID x%06x not ready for "
+ "IO. State x%x, Type x%x\n",
+ rport, pnvme_rport->port_id,
+ ndlp->nlp_state, ndlp->nlp_type);
+ return;
+ }
+
+ /* If the hba is getting reset, this flag is set. It is
+ * cleared when the reset is complete and rings reestablished.
+ */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* driver queued commands are in process of being flushed */
+ if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6139 Driver in reset cleanup - flushing "
+ "NVME Req now. hba_flag x%x\n",
+ phba->hba_flag);
+ return;
+ }
+
+ lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private;
+ if (!lpfc_nbuf) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6140 NVME IO req has no matching lpfc nvme "
+ "io buffer. Skipping abort req.\n");
+ return;
+ } else if (!lpfc_nbuf->nvmeCmd) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6141 lpfc NVME IO req has no nvme_fcreq "
+ "io buffer. Skipping abort req.\n");
+ return;
+ }
+
+ /*
+ * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
+ * state must match the nvme_fcreq passed by the nvme
+ * transport. If they don't match, it is likely the driver
+ * has already completed the NVME IO and the nvme transport
+ * has not seen it yet.
+ */
+ if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6143 NVME req mismatch: "
+ "lpfc_nbuf %p nvmeCmd %p, "
+ "pnvme_fcreq %p. Skipping Abort\n",
+ lpfc_nbuf, lpfc_nbuf->nvmeCmd,
+ pnvme_fcreq);
+ return;
+ }
+
+ /* Don't abort IOs no longer on the pending queue. */
+ nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
+ if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6142 NVME IO req %p not queued - skipping "
+ "abort req\n",
+ pnvme_fcreq);
+ return;
+ }
+
+ lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
+ nvmereq_wqe->sli4_xritag,
+ nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);
+
+ /* Outstanding abort is in progress */
+ if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6144 Outstanding NVME I/O Abort Request "
+ "still pending on nvme_fcreq %p, "
+ "lpfc_ncmd %p\n",
+ pnvme_fcreq, lpfc_nbuf);
+ return;
+ }
+
+ abts_buf = __lpfc_sli_get_iocbq(phba);
+ if (!abts_buf) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6136 No available abort wqes. Skipping "
+ "Abts req for nvme_fcreq %p.\n",
+ pnvme_fcreq);
+ return;
+ }
+
+ /* Ready - mark outstanding as aborted by driver. */
+ nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ /* Complete prepping the abort wqe and issue to the FW. */
+ abts_wqe = &abts_buf->wqe;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+ bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
+
+ /* word 7 */
+ bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
+ bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
+ nvmereq_wqe->iocb.ulpClass);
+
+ /* word 8 - tell the FW to abort the IO associated with this
+ * outstanding exchange ID.
+ */
+ abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
+
+ /* word 9 - this is the iotag for the abts_wqe completion. */
+ bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
+ abts_buf->iotag);
+
+ /* word 10 */
+ bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
+ bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ /* word 11 */
+ bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abts_buf->iocb_flag |= LPFC_IO_NVME;
+ abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
+ abts_buf->vport = vport;
+ abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
+ ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (ret_val == IOCB_ERROR) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6137 Failed abts issue_wqe with status x%x "
+ "for nvme_fcreq %p.\n",
+ ret_val, pnvme_fcreq);
+ lpfc_sli_release_iocbq(phba, abts_buf);
+ return;
+ }
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+			 "6138 Transport Abort NVME Request Issued for "
+			 "ox_id x%x on reqtag x%x\n",
+ nvmereq_wqe->sli4_xritag,
+ abts_buf->iotag);
+}
+
+/* Declare and initialize an instance of the FC NVME template. */
+static struct nvme_fc_port_template lpfc_nvme_template = {
+ /* initiator-based functions */
+ .localport_delete = lpfc_nvme_localport_delete,
+ .remoteport_delete = lpfc_nvme_remoteport_delete,
+ .create_queue = lpfc_nvme_create_queue,
+ .delete_queue = lpfc_nvme_delete_queue,
+ .ls_req = lpfc_nvme_ls_req,
+ .fcp_io = lpfc_nvme_fcp_io_submit,
+ .ls_abort = lpfc_nvme_ls_abort,
+ .fcp_abort = lpfc_nvme_fcp_abort,
+
+ .max_hw_queues = 1,
+ .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
+ .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
+ .dma_boundary = 0xFFFFFFFF,
+
+ /* Sizes of additional private data for data structures.
+ * No use for the last two sizes at this time.
+ */
+ .local_priv_sz = sizeof(struct lpfc_nvme_lport),
+ .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
+ .lsrqst_priv_sz = 0,
+ .fcprqst_priv_sz = 0,
+};
+
+/**
+ * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @nblist: pointer to nvme buffer list.
+ * @count: number of nvme buffers on the list.
+ *
+ * This routine is invoked to post a block of @count nvme sgl pages from an
+ * NVME buffer list @nblist to the HBA using non-embedded mailbox command.
+ * No Lock is held.
+ *
+ **/
+static int
+lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
+ struct list_head *nblist,
+ int count)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd;
+ struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+ struct sgl_page_pairs *sgl_pg_pairs;
+ void *viraddr;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t reqlen, alloclen, pg_pairs;
+ uint32_t mbox_tmo;
+ uint16_t xritag_start = 0;
+ int rc = 0;
+ uint32_t shdr_status, shdr_add_status;
+ dma_addr_t pdma_phys_bpl1;
+ union lpfc_sli4_cfg_shdr *shdr;
+
+ /* Calculate the requested length of the dma memory */
+ reqlen = count * sizeof(struct sgl_page_pairs) +
+ sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+ if (reqlen > SLI4_PAGE_SIZE) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6118 Block sgl registration required DMA "
+				"size (%d) greater than a page\n", reqlen);
+ return -ENOMEM;
+ }
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6119 Failed to allocate mbox cmd memory\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate DMA memory and set up the non-embedded mailbox command */
+ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6120 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory "
+ "size (%d)\n", alloclen, reqlen);
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return -ENOMEM;
+ }
+
+ /* Get the first SGE entry from the non-embedded DMA memory */
+ viraddr = mbox->sge_array->addr[0];
+
+ /* Set up the SGL pages in the non-embedded DMA pages */
+ sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+ sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+ pg_pairs = 0;
+ list_for_each_entry(lpfc_ncmd, nblist, list) {
+ /* Set up the sge entry */
+ sgl_pg_pairs->sgl_pg0_addr_lo =
+ cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
+ sgl_pg_pairs->sgl_pg0_addr_hi =
+ cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
+ if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+ pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_bpl1 = 0;
+ sgl_pg_pairs->sgl_pg1_addr_lo =
+ cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+ sgl_pg_pairs->sgl_pg1_addr_hi =
+ cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+ /* Keep the first xritag on the list */
+ if (pg_pairs == 0)
+ xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
+ sgl_pg_pairs++;
+ pg_pairs++;
+ }
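+	/* xritag_start and pg_pairs describe the contiguous XRI range
+	 * covered by the page pairs built above.
+	 */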
+ bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+ /* Perform endian conversion if necessary */
+ sgl->word0 = cpu_to_le32(sgl->word0);
+
+ if (!phba->sli4_hba.intr_enable)
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ else {
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
+ rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ }
+ shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (rc != MBX_TIMEOUT)
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6125 POST_SGL_BLOCK mailbox command failed "
+ "status x%x add_status x%x mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ rc = -ENXIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
+ * @phba: pointer to lpfc hba data structure.
+ * @post_nblist: pointer to the nvme buffer list.
+ *
+ * This routine walks a list of nvme buffers that was passed in. It attempts
+ * to construct blocks of nvme buffer sgls which contain contiguous xris and
+ * uses the non-embedded SGL block post mailbox commands to post to the port.
+ * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses
+ * the embedded SGL post mailbox command for posting. The @post_nblist passed
+ * in must be a local list, thus no lock is needed when manipulating the list.
+ *
+ * Returns: 0 = failure, non-zero number of successfully posted buffers.
+ **/
+static int
+lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
+ struct list_head *post_nblist, int sb_count)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ int status, sgl_size;
+ int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
+ dma_addr_t pdma_phys_sgl1;
+ int last_xritag = NO_XRI;
+ int cur_xritag;
+ LIST_HEAD(prep_nblist);
+ LIST_HEAD(blck_nblist);
+ LIST_HEAD(nvme_nblist);
+
+ /* sanity check */
+ if (sb_count <= 0)
+ return -EINVAL;
+
+ sgl_size = phba->cfg_sg_dma_buf_size;
+
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
+ list_del_init(&lpfc_ncmd->list);
+ block_cnt++;
+ if ((last_xritag != NO_XRI) &&
+ (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
+ /* a hole in xri block, form a sgl posting block */
+ list_splice_init(&prep_nblist, &blck_nblist);
+ post_cnt = block_cnt - 1;
+ /* prepare list for next posting block */
+ list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+ block_cnt = 1;
+ } else {
+ /* prepare list for next posting block */
+ list_add_tail(&lpfc_ncmd->list, &prep_nblist);
+ /* enough sgls for non-embed sgl mbox command */
+ if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+ list_splice_init(&prep_nblist, &blck_nblist);
+ post_cnt = block_cnt;
+ block_cnt = 0;
+ }
+ }
+ num_posting++;
+ last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+
+ /* end of repost sgl list condition for NVME buffers */
+ if (num_posting == sb_count) {
+ if (post_cnt == 0) {
+ /* last sgl posting block */
+ list_splice_init(&prep_nblist, &blck_nblist);
+ post_cnt = block_cnt;
+ } else if (block_cnt == 1) {
+ /* last single sgl with non-contiguous xri */
+ if (sgl_size > SGL_PAGE_SIZE)
+ pdma_phys_sgl1 =
+ lpfc_ncmd->dma_phys_sgl +
+ SGL_PAGE_SIZE;
+ else
+ pdma_phys_sgl1 = 0;
+ cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
+ status = lpfc_sli4_post_sgl(phba,
+ lpfc_ncmd->dma_phys_sgl,
+ pdma_phys_sgl1, cur_xritag);
+ if (status) {
+ /* failure, put on abort nvme list */
+ lpfc_ncmd->exch_busy = 1;
+ } else {
+ /* success, put on NVME buffer list */
+ lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ num_posted++;
+ }
+ /* success, put on NVME buffer sgl list */
+ list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+ }
+ }
+
+ /* continue until a nembed page worth of sgls */
+ if (post_cnt == 0)
+ continue;
+
+ /* post block of NVME buffer list sgls */
+ status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
+ post_cnt);
+
+		/* don't reset xritag due to hole in xri block */
+ if (block_cnt == 0)
+ last_xritag = NO_XRI;
+
+ /* reset NVME buffer post count for next round of posting */
+ post_cnt = 0;
+
+ /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
+ while (!list_empty(&blck_nblist)) {
+ list_remove_head(&blck_nblist, lpfc_ncmd,
+ struct lpfc_nvme_buf, list);
+ if (status) {
+ /* failure, put on abort nvme list */
+ lpfc_ncmd->exch_busy = 1;
+ } else {
+ /* success, put on NVME buffer list */
+ lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ num_posted++;
+ }
+ list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
+ }
+ }
+ /* Push NVME buffers with sgl posted to the available list */
+ while (!list_empty(&nvme_nblist)) {
+ list_remove_head(&nvme_nblist, lpfc_ncmd,
+ struct lpfc_nvme_buf, list);
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ }
+ return num_posted;
+}
+
+/**
+ * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of nvme buffers that have been allocated and
+ * reposts them to the port by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
+ * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
+{
+ LIST_HEAD(post_nblist);
+ int num_posted, rc = 0;
+
+	/* get all NVME buffers that need to be reposted onto a local list */
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
+ list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+
+ /* post the list of nvme buffer sgls to port if available */
+ if (!list_empty(&post_nblist)) {
+ num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
+ phba->sli4_hba.nvme_xri_cnt);
+ /* failed to post any nvme buffer, return error */
+ if (num_posted == 0)
+ rc = -EIO;
+ }
+ return rc;
+}
+
+/**
+ * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates nvme buffers for a device with the SLI-4 interface
+ * spec; the nvme buffer contains all the necessary information needed to
+ * initiate an NVME I/O. After allocating up to @num_to_alloc NVME buffers
+ * and putting them on a list, it posts them to the port using SGL block post.
+ *
+ * Return codes:
+ * int - number of nvme buffers that were allocated and posted.
+ * 0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_nvme_buf *lpfc_ncmd;
+ struct lpfc_iocbq *pwqeq;
+ union lpfc_wqe128 *wqe;
+ struct sli4_sge *sgl;
+ dma_addr_t pdma_phys_sgl;
+ uint16_t iotag, lxri = 0;
+ int bcnt, num_posted, sgl_size;
+ LIST_HEAD(prep_nblist);
+ LIST_HEAD(post_nblist);
+ LIST_HEAD(nvme_nblist);
+
+ sgl_size = phba->cfg_sg_dma_buf_size;
+
+ for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+ lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
+ if (!lpfc_ncmd)
+ break;
+ /*
+ * Get memory from the pci pool to map the virt space to
+ * pci bus space for an I/O. The DMA buffer includes the
+ * number of SGE's necessary to support the sg_tablesize.
+ */
+ lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_KERNEL,
+ &lpfc_ncmd->dma_handle);
+ if (!lpfc_ncmd->data) {
+ kfree(lpfc_ncmd);
+ break;
+ }
+ memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
+
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ break;
+ }
+ pwqeq = &(lpfc_ncmd->cur_iocbq);
+ wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
+
+ /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
+ iotag = lpfc_sli_next_iotag(phba, pwqeq);
+ if (iotag == 0) {
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6121 Failed to allocate IOTAG for"
+ " XRI:0x%x\n", lxri);
+ lpfc_sli4_free_xri(phba, lxri);
+ break;
+ }
+ pwqeq->sli4_lxritag = lxri;
+ pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ pwqeq->iocb_flag |= LPFC_IO_NVME;
+ pwqeq->context1 = lpfc_ncmd;
+ pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
+
+ /* Initialize local short-hand pointers. */
+ lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
+ sgl = lpfc_ncmd->nvme_sgl;
+ pdma_phys_sgl = lpfc_ncmd->dma_handle;
+ lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
+
+ /* Rsp SGE will be filled in when we rcv an IO
+ * from the NVME Layer to be sent.
+ * The cmd is going to be embedded so we need a SKIP SGE.
+ */
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ /* Fill in word 3 / sgl_len during cmd submission */
+
+ lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+
+ /* Word 7 */
+ bf_set(wqe_erp, &wqe->generic.wqe_com, 0);
+ /* NVME upper layers will time things out, if needed */
+ bf_set(wqe_tmo, &wqe->generic.wqe_com, 0);
+
+ /* Word 10 */
+ bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
+ bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
+
+ /* add the nvme buffer to a post list */
+ list_add_tail(&lpfc_ncmd->list, &post_nblist);
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ phba->sli4_hba.nvme_xri_cnt++;
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+ }
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+			"6114 Allocated %d out of %d requested new NVME "
+ "buffers\n", bcnt, num_to_alloc);
+
+ /* post the list of nvme buffer sgls to port if available */
+ if (!list_empty(&post_nblist))
+ num_posted = lpfc_post_nvme_sgl_list(phba,
+ &post_nblist, bcnt);
+ else
+ num_posted = 0;
+
+ return num_posted;
+}
+
+/**
+ * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
+ * @ndlp: The nodelist entry the IO is destined for.
+ *
+ * This routine removes a nvme buffer from the head of the @phba
+ * lpfc_nvme_buf_list, skipping buffers whose XRI is still RRQ-active
+ * for @ndlp, and returns it to the caller.
+ *
+ * Return codes:
+ * NULL - Error
+ * Pointer to lpfc_nvme_buf - Success
+ **/
+static struct lpfc_nvme_buf *
+lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
+ unsigned long iflag = 0;
+ int found = 0;
+
+ spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ if (lpfc_test_rrq_active(phba, ndlp,
+ lpfc_ncmd->cur_iocbq.sli4_lxritag))
+ continue;
+ list_del(&lpfc_ncmd->list);
+ found = 1;
+ break;
+ }
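+	/* Nothing usable on the get list (empty, or all XRIs still RRQ
+	 * active); refill it from the put list and search again.
+	 */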
+ if (!found) {
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_splice(&phba->lpfc_nvme_buf_list_put,
+ &phba->lpfc_nvme_buf_list_get);
+ INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ if (lpfc_test_rrq_active(
+ phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
+ continue;
+ list_del(&lpfc_ncmd->list);
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
+ if (!found)
+ return NULL;
+ return lpfc_ncmd;
+}
+
+/**
+ * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list.
+ * @phba: The HBA for which this call is being executed.
+ * @lpfc_ncmd: The nvme buffer which is being released.
+ *
+ * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
+ * of the @phba lpfc_nvme_buf_list. For SLI4, the XRI is tied to the nvme
+ * buffer and cannot be reused for at least R_A_TOV if the exchange was
+ * aborted.
+ **/
+static void
+lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
+{
+ unsigned long iflag = 0;
+
+ lpfc_ncmd->nonsg_phys = 0;
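+	/* XB set means the exchange is still busy in the port; park the
+	 * buffer on the aborted list instead of the free list.
+	 */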
+ if (lpfc_ncmd->exch_busy) {
+ spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
+ iflag);
+ lpfc_ncmd->nvmeCmd = NULL;
+ list_add_tail(&lpfc_ncmd->list,
+ &phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
+ iflag);
+ } else {
+ lpfc_ncmd->nvmeCmd = NULL;
+ lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
+ spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
+ spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+ }
+}
+
+/**
+ * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
+ * @pvport - the lpfc_vport instance requesting a localport.
+ *
+ * This routine is invoked to create an nvme localport instance to bind
+ * to the nvme_fc_transport. It is called once during driver load
+ * like lpfc_create_shost after all other services are initialized.
+ * It requires a vport, vpi, and wwns at call time. Other localport
+ * parameters are modified as the driver's FCID and the Fabric WWN
+ * are established.
+ *
+ * Return codes
+ * 0 - successful
+ * -ENOMEM - no heap memory available
+ * other values - from nvme registration upcall
+ **/
+int
+lpfc_nvme_create_localport(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct nvme_fc_port_info nfcp_info;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+ int len, ret = 0;
+
+ /* Initialize this localport instance. The vport wwn usage ensures
+ * that NPIV is accounted for.
+ */
+ memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
+ nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+ nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
+ nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
+
+ /* For now need + 1 to get around NVME transport logic */
+ lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
+ lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
+
+ /* localport is allocated from the stack, but the registration
+ * call allocates heap memory as well as the private area.
+ */
+ ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
+ &vport->phba->pcidev->dev, &localport);
+ if (!ret) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
+ "6005 Successfully registered local "
+ "NVME port num %d, localP %p, private %p, "
+ "sg_seg %d\n",
+ localport->port_num, localport,
+ localport->private,
+ lpfc_nvme_template.max_sgl_segments);
+
+ /* Private is our lport size declared in the template. */
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ vport->localport = localport;
+ lport->vport = vport;
+ INIT_LIST_HEAD(&lport->rport_list);
+ vport->nvmei_support = 1;
+ }
+
+ len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
+ vport->phba->total_nvme_bufs += len;
+ return ret;
+}
+
+/**
+ * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
+ * @pnvme: pointer to lpfc nvme data structure.
+ *
+ * This routine is invoked to destroy all lports bound to the phba.
+ * The lport memory was allocated by the nvme fc transport and is
+ * released there. This routine ensures all rports bound to the
+ * lport have been disconnected.
+ *
+ **/
+void
+lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
+{
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
+ int ret;
+
+ if (vport->nvmei_support == 0)
+ return;
+
+ localport = vport->localport;
+ vport->localport = NULL;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+ "6011 Destroying NVME localport %p\n",
+ localport);
+
+ list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
+ /* The last node ref has to get released now before the rport
+ * private memory area is released by the transport.
+ */
+ list_del(&rport->list);
+
+ init_completion(&rport->rport_unreg_done);
+ ret = nvme_fc_unregister_remoteport(rport->remoteport);
+ if (ret)
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6008 rport fail destroy %x\n", ret);
+ wait_for_completion_timeout(&rport->rport_unreg_done, 5);
+ }
+ /* lport's rport list is clear. Unregister
+ * lport and release resources.
+ */
+ init_completion(&lport->lport_unreg_done);
+ ret = nvme_fc_unregister_localport(localport);
+ wait_for_completion_timeout(&lport->lport_unreg_done, 5);
+
+ /* Regardless of the unregister upcall response, clear
+ * nvmei_support. All rports are unregistered and the
+ * driver will clean up.
+ */
+ vport->nvmei_support = 0;
+ if (ret == 0) {
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_DISC,
+ "6009 Unregistered lport Success\n");
+ } else {
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_DISC,
+ "6010 Unregistered lport "
+ "Failed, status x%x\n",
+ ret);
+ }
+}
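The unregister paths above use the standard kernel completion handshake: the driver primes a struct completion, invokes the transport's unregister routine, and the transport's delete callback signals the completion once its teardown is done. Note that wait_for_completion_timeout() takes its timeout in jiffies, so the literal 5 used in these call sites is a very short bound. The following is a minimal, self-contained sketch of the same pattern under hypothetical names (demo_port, demo_port_delete, demo_port_unregister); it is not the lpfc implementation itself.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct demo_port {
	struct completion unreg_done;	/* signalled by the delete callback */
};

/* What the transport's delete callback would do once teardown finishes. */
static void demo_port_delete(struct demo_port *dp)
{
	complete(&dp->unreg_done);
}

/* Caller side: arm the completion, start the unregister, bound the wait. */
static int demo_port_unregister(struct demo_port *dp)
{
	init_completion(&dp->unreg_done);
	/* ... invoke the asynchronous unregister upcall here ... */
	if (!wait_for_completion_timeout(&dp->unreg_done,
					 msecs_to_jiffies(5000)))
		return -ETIMEDOUT;	/* delete callback did not run in time */
	return 0;
}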
+
+void
+lpfc_nvme_update_localport(struct lpfc_vport *vport)
+{
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+
+ localport = vport->localport;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+ "6012 Update NVME lport %p did x%x\n",
+ localport, vport->fc_myDID);
+
+ localport->port_id = vport->fc_myDID;
+ if (localport->port_id == 0)
+ localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
+ else
+ localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6030 bound lport %p to DID x%06x\n",
+ lport, localport->port_id);
+
+}
+
+int
+lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ int ret = 0;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport;
+ struct nvme_fc_remote_port *remote_port;
+ struct nvme_fc_port_info rpinfo;
+
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
+ "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_type);
+
+ localport = vport->localport;
+ lport = (struct lpfc_nvme_lport *)localport->private;
+
+ if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
+
+ /* The driver isn't expecting the rport wwn to change
+ * but it might get a different DID on a different
+ * fabric.
+ */
+ list_for_each_entry(rport, &lport->rport_list, list) {
+ if (rport->remoteport->port_name !=
+ wwn_to_u64(ndlp->nlp_portname.u.wwn))
+ continue;
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
+ "6035 lport %p, found matching rport "
+ "at wwpn 0x%llx, Data: x%x x%x x%x "
+ "x%06x\n",
+ lport,
+ rport->remoteport->port_name,
+ rport->remoteport->port_id,
+ rport->remoteport->port_role,
+ ndlp->nlp_type,
+ ndlp->nlp_DID);
+ remote_port = rport->remoteport;
+ if ((remote_port->port_id == 0) &&
+ (remote_port->port_role ==
+ FC_PORT_ROLE_NVME_DISCOVERY)) {
+ remote_port->port_id = ndlp->nlp_DID;
+ remote_port->port_role &=
+ ~FC_PORT_ROLE_NVME_DISCOVERY;
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ remote_port->port_role |=
+ FC_PORT_ROLE_NVME_TARGET;
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ remote_port->port_role |=
+ FC_PORT_ROLE_NVME_INITIATOR;
+
+ lpfc_printf_vlog(ndlp->vport, KERN_INFO,
+ LOG_NVME_DISC,
+ "6014 Rebinding lport to "
+ "rport wwpn 0x%llx, "
+ "Data: x%x x%x x%x x%06x\n",
+ remote_port->port_name,
+ remote_port->port_id,
+ remote_port->port_role,
+ ndlp->nlp_type,
+ ndlp->nlp_DID);
+ }
+ return 0;
+ }
+
+ /* NVME rports are not preserved across devloss.
+ * Just register this instance.
+ */
+ rpinfo.port_id = ndlp->nlp_DID;
+ rpinfo.port_role = 0;
+ if (ndlp->nlp_type & NLP_NVME_TARGET)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
+ if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+ rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
+ rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
+ rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+
+ ret = nvme_fc_register_remoteport(localport, &rpinfo,
+ &remote_port);
+ if (!ret) {
+ rport = remote_port->private;
+ rport->remoteport = remote_port;
+ rport->lport = lport;
+ rport->ndlp = lpfc_nlp_get(ndlp);
+ if (!rport->ndlp)
+ return -1;
+ ndlp->nrport = rport;
+ INIT_LIST_HEAD(&rport->list);
+ list_add_tail(&rport->list, &lport->rport_list);
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NVME_DISC | LOG_NODE,
+ "6022 Binding new rport to lport %p "
+ "Rport WWNN 0x%llx, Rport WWPN 0x%llx "
+ "DID x%06x Role x%x\n",
+ lport,
+ rpinfo.node_name, rpinfo.port_name,
+ rpinfo.port_id, rpinfo.port_role);
+ } else {
+ lpfc_printf_vlog(vport, KERN_ERR,
+ LOG_NVME_DISC | LOG_NODE,
+ "6031 RemotePort Registration failed "
+ "err: %d, DID x%06x\n",
+ ret, ndlp->nlp_DID);
+ }
+ } else {
+ ret = -EINVAL;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6027 Unknown nlp_type x%x on DID x%06x "
+ "ndlp %p. Not Registering nvme rport\n",
+ ndlp->nlp_type, ndlp->nlp_DID, ndlp);
+ }
+ return ret;
+}
+
+/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
+ *
+ * There is no notion of Devloss or rport recovery from the current
+ * nvme_transport perspective. Loss of an rport just means IO cannot
+ * be sent and recovery is completely up to the initiator.
+ * For now, the driver just unbinds the DID and port_role so that
+ * no further IO can be issued. Changes are planned for later.
+ *
+ * Notes - the ndlp reference count is not decremented here since
+ * there is no nvme_transport api for devloss. Node ref count
+ * is only adjusted in driver unload.
+ */
+void
+lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ int ret;
+ struct nvme_fc_local_port *localport;
+ struct lpfc_nvme_lport *lport;
+ struct lpfc_nvme_rport *rport;
+ struct nvme_fc_remote_port *remoteport;
+
+ localport = vport->localport;
+
+ /* This is a fundamental error. The localport is always
+ * available until driver unload. Just exit.
+ */
+ if (!localport)
+ return;
+
+ lport = (struct lpfc_nvme_lport *)localport->private;
+ if (!lport)
+ goto input_err;
+
+ rport = ndlp->nrport;
+ if (!rport)
+ goto input_err;
+
+ remoteport = rport->remoteport;
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
+ "6033 Unreg nvme remoteport %p, portname x%llx, "
+ "port_id x%06x, portstate x%x port type x%x\n",
+ remoteport, remoteport->port_name,
+ remoteport->port_id, remoteport->port_state,
+ ndlp->nlp_type);
+
+ /* Sanity check ndlp type. Only call for NVME ports. Don't
+ * clear any rport state until the transport calls back.
+ */
+ if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
+ init_completion(&rport->rport_unreg_done);
+ ret = nvme_fc_unregister_remoteport(remoteport);
+ if (ret != 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6167 NVME unregister failed %d "
+ "port_state x%x\n",
+ ret, remoteport->port_state);
+ }
+
+ /* Wait for the driver's delete completion routine to finish
+ * before proceeding. This guarantees the transport and driver
+ * have completed the unreg process.
+ */
+ ret = wait_for_completion_timeout(&rport->rport_unreg_done, 5);
+ if (ret == 0) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6169 Unreg nvme wait failed %d\n",
+ ret);
+ }
+ }
+ return;
+
+ input_err:
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
+ "6168: State error: lport %p, rport%p FCID x%06x\n",
+ vport->localport, ndlp->rport, ndlp->nlp_DID);
+}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
new file mode 100644
index 000000000000..b2fae5e813f8
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -0,0 +1,103 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ ********************************************************************/
+
+#define LPFC_NVME_MIN_SEGS 16
+#define LPFC_NVME_DEFAULT_SEGS 66 /* 256K IOs: 64 x 4K data segs + 2 for cmd/rsp */
+#define LPFC_NVME_MAX_SEGS 510
+#define LPFC_NVMET_MIN_POSTBUF 16
+#define LPFC_NVMET_DEFAULT_POSTBUF 1024
+#define LPFC_NVMET_MAX_POSTBUF 4096
+#define LPFC_NVME_WQSIZE 256
+
+#define LPFC_NVME_ERSP_LEN 0x20
+
+struct lpfc_nvme_qhandle {
+ uint32_t index; /* WQ index to use */
+ uint32_t qidx; /* queue index passed to create */
+ uint32_t cpu_id; /* current cpu id at time of create */
+};
+
+/* Declare nvme-based local and remote port definitions. */
+struct lpfc_nvme_lport {
+ struct lpfc_vport *vport;
+ struct list_head rport_list;
+ struct completion lport_unreg_done;
+ /* Add stats counters here */
+};
+
+struct lpfc_nvme_rport {
+ struct list_head list;
+ struct lpfc_nvme_lport *lport;
+ struct nvme_fc_remote_port *remoteport;
+ struct lpfc_nodelist *ndlp;
+ struct completion rport_unreg_done;
+};
+
+struct lpfc_nvme_buf {
+ struct list_head list;
+ struct nvmefc_fcp_req *nvmeCmd;
+ struct lpfc_nvme_rport *nrport;
+
+ uint32_t timeout;
+
+ uint16_t flags; /* TBD convert exch_busy to flags */
+#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
+ uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
+ uint16_t status; /* From IOCB Word 7- ulpStatus */
+ uint16_t cpu;
+ uint16_t qidx;
+ uint16_t sqid;
+ uint32_t result; /* From IOCB Word 4. */
+
+ uint32_t seg_cnt; /* Number of scatter-gather segments returned by
+ * dma_map_sg. The driver needs this for calls
+ * to dma_unmap_sg.
+ */
+ dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */
+
+ /*
+ * data and dma_handle are the kernel virtual and bus address of the
+ * dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
+ * gather bde list that supports the sg_tablesize value.
+ */
+ void *data;
+ dma_addr_t dma_handle;
+
+ struct sli4_sge *nvme_sgl;
+ dma_addr_t dma_phys_sgl;
+
+ /* cur_iocbq has phys of the dma-able buffer.
+ * Iotag is in here
+ */
+ struct lpfc_iocbq cur_iocbq;
+
+ wait_queue_head_t *waitq;
+ unsigned long start_time;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint64_t ts_cmd_start;
+ uint64_t ts_last_cmd;
+ uint64_t ts_cmd_wqput;
+ uint64_t ts_isr_cmpl;
+ uint64_t ts_data_nvme;
+#endif
+};
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
new file mode 100644
index 000000000000..c421e1738ee9
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -0,0 +1,1986 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ ********************************************************************/
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <asm/unaligned.h>
+#include <linux/crc-t10dif.h>
+#include <net/checksum.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
+
+#include <../drivers/nvme/host/nvme.h>
+#include <linux/nvme-fc-driver.h>
+
+#include "lpfc_version.h"
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_debugfs.h"
+
+static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
+ struct lpfc_nvmet_rcv_ctx *,
+ dma_addr_t rspbuf,
+ uint16_t rspsize);
+static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
+ struct lpfc_nvmet_rcv_ctx *);
+static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
+ struct lpfc_nvmet_rcv_ctx *,
+ uint32_t, uint16_t);
+static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
+ struct lpfc_nvmet_rcv_ctx *,
+ uint32_t, uint16_t);
+static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
+ struct lpfc_nvmet_rcv_ctx *,
+ uint32_t, uint16_t);
+
+/**
+ * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from the SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME LS commands.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct nvmefc_tgt_ls_req *rsp;
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ uint32_t status, result;
+
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ result = wcqe->parameter;
+ if (!phba->targetport)
+ goto out;
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
+ if (status)
+ atomic_inc(&tgtp->xmt_ls_rsp_error);
+ else
+ atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
+
+out:
+ ctxp = cmdwqe->context2;
+ rsp = &ctxp->ctx.ls_req;
+
+ lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
+ ctxp->oxid, status, result);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
+ ctxp, status, result);
+
+ lpfc_nlp_put(cmdwqe->context1);
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+ rsp->done(rsp);
+ kfree(ctxp);
+}
+
+/**
+ * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
+ * @phba: HBA buffer is associated with
+ * @ctxp: context to clean up
+ * @mp: Buffer to free
+ *
+ * Description: Frees the given DMA buffer by reposting it to its
+ * associated RQ so it can be reused.
+ *
+ * Notes: Takes phba->hbalock. Can be called with or without other locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
+ struct lpfc_dmabuf *mp)
+{
+ if (ctxp) {
+ if (ctxp->txrdy) {
+ pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
+ ctxp->txrdy_phys);
+ ctxp->txrdy = NULL;
+ ctxp->txrdy_phys = 0;
+ }
+ ctxp->state = LPFC_NVMET_STE_FREE;
+ }
+ lpfc_rq_buf_free(phba, mp);
+}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+static void
+lpfc_nvmet_ktime(struct lpfc_hba *phba,
+ struct lpfc_nvmet_rcv_ctx *ctxp)
+{
+ uint64_t seg1, seg2, seg3, seg4, seg5;
+ uint64_t seg6, seg7, seg8, seg9, seg10;
+
+ if (!phba->ktime_on)
+ return;
+
+ if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
+ !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
+ !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
+ !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
+ !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
+ return;
+
+ if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
+ return;
+ if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
+ return;
+ if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
+ return;
+ if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
+ return;
+ if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
+ return;
+ if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
+ return;
+ if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
+ return;
+ if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
+ return;
+ if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
+ return;
+ /*
+ * Segment 1 - Time from FCP command received by MSI-X ISR
+ * to FCP command is passed to NVME Layer.
+ * Segment 2 - Time from FCP command payload handed
+ * off to NVME Layer to Driver receives a Command op
+ * from NVME Layer.
+ * Segment 3 - Time from Driver receives a Command op
+ * from NVME Layer to Command is put on WQ.
+ * Segment 4 - Time from Driver WQ put is done
+ * to MSI-X ISR for Command cmpl.
+ * Segment 5 - Time from MSI-X ISR for Command cmpl to
+ * Command cmpl is passed to NVME Layer.
+ * Segment 6 - Time from Command cmpl is passed to NVME
+ * Layer to Driver receives a RSP op from NVME Layer.
+ * Segment 7 - Time from Driver receives a RSP op from
+ * NVME Layer to WQ put is done on TRSP FCP Status.
+ * Segment 8 - Time from Driver WQ put is done on TRSP
+ * FCP Status to MSI-X ISR for TRSP cmpl.
+ * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
+ * TRSP cmpl is passed to NVME Layer.
+ * Segment 10 - Time from FCP command received by
+ * MSI-X ISR to command is completed on wire.
+ * (Segments 1 thru 8) for READDATA / WRITEDATA
+ * (Segments 1 thru 4) for READDATA_RSP
+ */
+ seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
+ seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
+ seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
+ seg1 - seg2;
+ seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
+ seg1 - seg2 - seg3;
+ seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
+ seg1 - seg2 - seg3 - seg4;
+
+ /* For auto rsp commands seg6 thru seg10 will be 0 */
+ if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
+ seg6 = (ctxp->ts_nvme_status -
+ ctxp->ts_isr_cmd) -
+ seg1 - seg2 - seg3 - seg4 - seg5;
+ seg7 = (ctxp->ts_status_wqput -
+ ctxp->ts_isr_cmd) -
+ seg1 - seg2 - seg3 -
+ seg4 - seg5 - seg6;
+ seg8 = (ctxp->ts_isr_status -
+ ctxp->ts_isr_cmd) -
+ seg1 - seg2 - seg3 - seg4 -
+ seg5 - seg6 - seg7;
+ seg9 = (ctxp->ts_status_nvme -
+ ctxp->ts_isr_cmd) -
+ seg1 - seg2 - seg3 - seg4 -
+ seg5 - seg6 - seg7 - seg8;
+ seg10 = (ctxp->ts_isr_status -
+ ctxp->ts_isr_cmd);
+ } else {
+ seg6 = 0;
+ seg7 = 0;
+ seg8 = 0;
+ seg9 = 0;
+ seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
+ }
+
+ phba->ktime_seg1_total += seg1;
+ if (seg1 < phba->ktime_seg1_min)
+ phba->ktime_seg1_min = seg1;
+ else if (seg1 > phba->ktime_seg1_max)
+ phba->ktime_seg1_max = seg1;
+
+ phba->ktime_seg2_total += seg2;
+ if (seg2 < phba->ktime_seg2_min)
+ phba->ktime_seg2_min = seg2;
+ else if (seg2 > phba->ktime_seg2_max)
+ phba->ktime_seg2_max = seg2;
+
+ phba->ktime_seg3_total += seg3;
+ if (seg3 < phba->ktime_seg3_min)
+ phba->ktime_seg3_min = seg3;
+ else if (seg3 > phba->ktime_seg3_max)
+ phba->ktime_seg3_max = seg3;
+
+ phba->ktime_seg4_total += seg4;
+ if (seg4 < phba->ktime_seg4_min)
+ phba->ktime_seg4_min = seg4;
+ else if (seg4 > phba->ktime_seg4_max)
+ phba->ktime_seg4_max = seg4;
+
+ phba->ktime_seg5_total += seg5;
+ if (seg5 < phba->ktime_seg5_min)
+ phba->ktime_seg5_min = seg5;
+ else if (seg5 > phba->ktime_seg5_max)
+ phba->ktime_seg5_max = seg5;
+
+ phba->ktime_data_samples++;
+ if (!seg6)
+ goto out;
+
+ phba->ktime_seg6_total += seg6;
+ if (seg6 < phba->ktime_seg6_min)
+ phba->ktime_seg6_min = seg6;
+ else if (seg6 > phba->ktime_seg6_max)
+ phba->ktime_seg6_max = seg6;
+
+ phba->ktime_seg7_total += seg7;
+ if (seg7 < phba->ktime_seg7_min)
+ phba->ktime_seg7_min = seg7;
+ else if (seg7 > phba->ktime_seg7_max)
+ phba->ktime_seg7_max = seg7;
+
+ phba->ktime_seg8_total += seg8;
+ if (seg8 < phba->ktime_seg8_min)
+ phba->ktime_seg8_min = seg8;
+ else if (seg8 > phba->ktime_seg8_max)
+ phba->ktime_seg8_max = seg8;
+
+ phba->ktime_seg9_total += seg9;
+ if (seg9 < phba->ktime_seg9_min)
+ phba->ktime_seg9_min = seg9;
+ else if (seg9 > phba->ktime_seg9_max)
+ phba->ktime_seg9_max = seg9;
+out:
+ phba->ktime_seg10_total += seg10;
+ if (seg10 < phba->ktime_seg10_min)
+ phba->ktime_seg10_min = seg10;
+ else if (seg10 > phba->ktime_seg10_max)
+ phba->ktime_seg10_max = seg10;
+ phba->ktime_status_samples++;
+}
+#endif
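The segment arithmetic in lpfc_nvmet_ktime() telescopes: each segN subtracts the previously accumulated segments from a (timestamp - ts_isr_cmd) delta, so every segment collapses to the gap between two consecutive timestamps. A small worked example with made-up nanosecond values (not taken from the driver) may help:

/* Assume ts_isr_cmd = 1000, ts_cmd_nvme = 1400, ts_nvme_data = 2100,
 * ts_data_wqput = 2600 (all hypothetical, in ns):
 *
 *   seg1 = ts_cmd_nvme - ts_isr_cmd                   = 400
 *   seg2 = (ts_nvme_data - ts_isr_cmd) - seg1         = 1100 - 400 = 700
 *        = ts_nvme_data - ts_cmd_nvme                   (2100 - 1400)
 *   seg3 = (ts_data_wqput - ts_isr_cmd) - seg1 - seg2 = 1600 - 1100 = 500
 *        = ts_data_wqput - ts_nvme_data                 (2600 - 2100)
 *
 * The same telescoping applies to seg4 through seg9; seg10 is simply the
 * end-to-end time from the command ISR to the final completion ISR.
 */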
+
+/**
+ * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from the SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME FCP commands.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct nvmefc_tgt_fcp_req *rsp;
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ uint32_t status, result, op, start_clean;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t id;
+#endif
+
+ ctxp = cmdwqe->context2;
+ rsp = &ctxp->ctx.fcp_req;
+ op = rsp->op;
+ ctxp->flag &= ~LPFC_NVMET_IO_INP;
+
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ result = wcqe->parameter;
+
+ if (!phba->targetport)
+ goto out;
+
+ lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
+ ctxp->oxid, op, status);
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (status) {
+ rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
+ rsp->transferred_length = 0;
+ atomic_inc(&tgtp->xmt_fcp_rsp_error);
+ } else {
+ rsp->fcp_error = NVME_SC_SUCCESS;
+ if (op == NVMET_FCOP_RSP)
+ rsp->transferred_length = rsp->rsplen;
+ else
+ rsp->transferred_length = rsp->transfer_length;
+ atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
+ }
+
+out:
+ if ((op == NVMET_FCOP_READDATA_RSP) ||
+ (op == NVMET_FCOP_RSP)) {
+ /* Sanity check */
+ ctxp->state = LPFC_NVMET_STE_DONE;
+ ctxp->entry_cnt++;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on) {
+ if (rsp->op == NVMET_FCOP_READDATA_RSP) {
+ ctxp->ts_isr_data =
+ cmdwqe->isr_timestamp;
+ ctxp->ts_data_nvme =
+ ktime_get_ns();
+ ctxp->ts_nvme_status =
+ ctxp->ts_data_nvme;
+ ctxp->ts_status_wqput =
+ ctxp->ts_data_nvme;
+ ctxp->ts_isr_status =
+ ctxp->ts_data_nvme;
+ ctxp->ts_status_nvme =
+ ctxp->ts_data_nvme;
+ } else {
+ ctxp->ts_isr_status =
+ cmdwqe->isr_timestamp;
+ ctxp->ts_status_nvme =
+ ktime_get_ns();
+ }
+ }
+ if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+ id = smp_processor_id();
+ if (ctxp->cpu != id)
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6703 CPU Check cmpl: "
+ "cpu %d expect %d\n",
+ id, ctxp->cpu);
+ if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
+ phba->cpucheck_cmpl_io[id]++;
+ }
+#endif
+ rsp->done(rsp);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ lpfc_nvmet_ktime(phba, ctxp);
+#endif
+ /* Let Abort cmpl repost the context */
+ if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
+ lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ } else {
+ ctxp->entry_cnt++;
+ start_clean = offsetof(struct lpfc_iocbq, wqe);
+ memset(((char *)cmdwqe) + start_clean, 0,
+ (sizeof(struct lpfc_iocbq) - start_clean));
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on) {
+ ctxp->ts_isr_data = cmdwqe->isr_timestamp;
+ ctxp->ts_data_nvme = ktime_get_ns();
+ }
+ if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+ id = smp_processor_id();
+ if (ctxp->cpu != id)
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6704 CPU Check cmdcmpl: "
+ "cpu %d expect %d\n",
+ id, ctxp->cpu);
+ if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
+ phba->cpucheck_ccmpl_io[id]++;
+ }
+#endif
+ rsp->done(rsp);
+ }
+}
+
+static int
+lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_ls_req *rsp)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
+ struct lpfc_hba *phba = ctxp->phba;
+ struct hbq_dmabuf *nvmebuf =
+ (struct hbq_dmabuf *)ctxp->rqb_buffer;
+ struct lpfc_iocbq *nvmewqeq;
+ struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
+ struct lpfc_dmabuf dmabuf;
+ struct ulp_bde64 bpl;
+ int rc;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6023 %s: Entrypoint ctx %p %p\n", __func__,
+ ctxp, tgtport);
+
+ nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
+ rsp->rsplen);
+ if (nvmewqeq == NULL) {
+ atomic_inc(&nvmep->xmt_ls_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6150 LS Drop IO x%x: Prep\n",
+ ctxp->oxid);
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
+ ctxp->sid, ctxp->oxid);
+ return -ENOMEM;
+ }
+
+ /* Save numBdes for bpl2sgl */
+ nvmewqeq->rsvd2 = 1;
+ nvmewqeq->hba_wqidx = 0;
+ nvmewqeq->context3 = &dmabuf;
+ dmabuf.virt = &bpl;
+ bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
+ bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
+ bpl.tus.f.bdeSize = rsp->rsplen;
+ bpl.tus.f.bdeFlags = 0;
+ bpl.tus.w = le32_to_cpu(bpl.tus.w);
+
+ nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
+ nvmewqeq->iocb_cmpl = NULL;
+ nvmewqeq->context2 = ctxp;
+
+ lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
+ ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
+
+ rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
+ if (rc == WQE_SUCCESS) {
+ /*
+ * Okay to repost buffer here, but wait till cmpl
+ * before freeing ctxp and iocbq.
+ */
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ ctxp->rqb_buffer = 0;
+ atomic_inc(&nvmep->xmt_ls_rsp);
+ return 0;
+ }
+ /* Give back resources */
+ atomic_inc(&nvmep->xmt_ls_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6151 LS Drop IO x%x: Issue %d\n",
+ ctxp->oxid, rc);
+
+ lpfc_nlp_put(nvmewqeq->context1);
+
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
+ return -ENXIO;
+}
+
+static int
+lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *rsp)
+{
+ struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
+ struct lpfc_nvmet_rcv_ctx *ctxp =
+ container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
+ struct lpfc_hba *phba = ctxp->phba;
+ struct lpfc_iocbq *nvmewqeq;
+ unsigned long iflags;
+ int rc, id;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on) {
+ if (rsp->op == NVMET_FCOP_RSP)
+ ctxp->ts_nvme_status = ktime_get_ns();
+ else
+ ctxp->ts_nvme_data = ktime_get_ns();
+ }
+ if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+ id = smp_processor_id();
+ ctxp->cpu = id;
+ if (id < LPFC_CHECK_CPU_CNT)
+ phba->cpucheck_xmt_io[id]++;
+ if (rsp->hwqid != id) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6705 CPU Check OP: "
+ "cpu %d expect %d\n",
+ id, rsp->hwqid);
+ ctxp->cpu = rsp->hwqid;
+ }
+ }
+#endif
+
+ if (rsp->op == NVMET_FCOP_ABORT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6103 Abort op: oxri x%x %d cnt %d\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+
+ lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
+ "xri x%x state x%x cnt x%x\n",
+ ctxp->oxid, ctxp->state, ctxp->entry_cnt);
+
+ atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
+ ctxp->entry_cnt++;
+ ctxp->flag |= LPFC_NVMET_ABORT_OP;
+ if (ctxp->flag & LPFC_NVMET_IO_INP)
+ lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
+ else
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
+ ctxp->oxid);
+ return 0;
+ }
+
+ /* Sanity check */
+ if (ctxp->state == LPFC_NVMET_STE_ABORT) {
+ atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6102 Bad state IO x%x aborted\n",
+ ctxp->oxid);
+ goto aerr;
+ }
+
+ nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
+ if (nvmewqeq == NULL) {
+ atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6152 FCP Drop IO x%x: Prep\n",
+ ctxp->oxid);
+ goto aerr;
+ }
+
+ nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
+ nvmewqeq->iocb_cmpl = NULL;
+ nvmewqeq->context2 = ctxp;
+ nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
+ ctxp->wqeq->hba_wqidx = rsp->hwqid;
+
+ lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
+ ctxp->oxid, rsp->op, rsp->rsplen);
+
+ /* For now we take hbalock */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (rc == WQE_SUCCESS) {
+ ctxp->flag |= LPFC_NVMET_IO_INP;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (!phba->ktime_on)
+ return 0;
+ if (rsp->op == NVMET_FCOP_RSP)
+ ctxp->ts_status_wqput = ktime_get_ns();
+ else
+ ctxp->ts_data_wqput = ktime_get_ns();
+#endif
+ return 0;
+ }
+
+ /* Give back resources */
+ atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6153 FCP Drop IO x%x: Issue: %d\n",
+ ctxp->oxid, rc);
+
+ ctxp->wqeq->hba_wqidx = 0;
+ nvmewqeq->context2 = NULL;
+ nvmewqeq->context3 = NULL;
+aerr:
+ return -ENXIO;
+}
+
+static void
+lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+ struct lpfc_nvmet_tgtport *tport = targetport->private;
+
+ /* release any threads waiting for the unreg to complete */
+ complete(&tport->tport_unreg_done);
+}
+
+static struct nvmet_fc_target_template lpfc_tgttemplate = {
+ .targetport_delete = lpfc_nvmet_targetport_delete,
+ .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
+ .fcp_op = lpfc_nvmet_xmt_fcp_op,
+
+ .max_hw_queues = 1,
+ .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
+ .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
+ .dma_boundary = 0xFFFFFFFF,
+
+ /* optional features */
+ .target_features = 0,
+ /* sizes of additional private data for data structures */
+ .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
+};
+
+int
+lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct nvmet_fc_port_info pinfo;
+ int error = 0;
+
+ if (phba->targetport)
+ return 0;
+
+ memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
+ pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
+ pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
+ pinfo.port_id = vport->fc_myDID;
+
+ lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
+ lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt;
+ lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
+ NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
+
+ error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
+ &phba->pcidev->dev,
+ &phba->targetport);
+ if (error) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ "6025 Cannot register NVME targetport "
+ "x%x\n", error);
+ phba->targetport = NULL;
+ } else {
+ tgtp = (struct lpfc_nvmet_tgtport *)
+ phba->targetport->private;
+ tgtp->phba = phba;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6026 Registered NVME "
+ "targetport: %p, private %p "
+ "portnm %llx nodenm %llx\n",
+ phba->targetport, tgtp,
+ pinfo.port_name, pinfo.node_name);
+
+ atomic_set(&tgtp->rcv_ls_req_in, 0);
+ atomic_set(&tgtp->rcv_ls_req_out, 0);
+ atomic_set(&tgtp->rcv_ls_req_drop, 0);
+ atomic_set(&tgtp->xmt_ls_abort, 0);
+ atomic_set(&tgtp->xmt_ls_rsp, 0);
+ atomic_set(&tgtp->xmt_ls_drop, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_error, 0);
+ atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
+ atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
+ atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
+ atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_abort, 0);
+ atomic_set(&tgtp->xmt_fcp_drop, 0);
+ atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
+ atomic_set(&tgtp->xmt_fcp_read, 0);
+ atomic_set(&tgtp->xmt_fcp_write, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
+ atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
+ atomic_set(&tgtp->xmt_abort_rsp, 0);
+ atomic_set(&tgtp->xmt_abort_rsp_error, 0);
+ atomic_set(&tgtp->xmt_abort_cmpl, 0);
+ }
+ return error;
+}
+
+int
+lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
+{
+ struct lpfc_vport *vport = phba->pport;
+
+ if (!phba->targetport)
+ return 0;
+
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
+ "6007 Update NVMET port %p did x%x\n",
+ phba->targetport, vport->fc_myDID);
+
+ phba->targetport->port_id = vport->fc_myDID;
+ return 0;
+}
+
+void
+lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+
+ if (phba->nvmet_support == 0)
+ return;
+ if (phba->targetport) {
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ init_completion(&tgtp->tport_unreg_done);
+ nvmet_fc_unregister_targetport(phba->targetport);
+ wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+ }
+ phba->targetport = NULL;
+}
+
+/**
+ * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ *
+ * This routine processes an unsolicited NVME LS request received on an
+ * SLI RQ. It allocates a receive context for the exchange, saves the
+ * OX_ID and S_ID from the frame header, and hands the LS payload to the
+ * nvmet transport via nvmet_fc_rcv_ls_req(). If the context cannot be
+ * allocated or the transport rejects the request, the buffer is freed
+ * and an abort is issued for the exchange.
+ **/
+static void
+lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct hbq_dmabuf *nvmebuf)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct fc_frame_header *fc_hdr;
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ uint32_t *payload;
+ uint32_t size, oxid, sid, rc;
+
+ if (!nvmebuf || !phba->targetport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6154 LS Drop IO\n");
+ oxid = 0;
+ size = 0;
+ sid = 0;
+ goto dropit;
+ }
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ payload = (uint32_t *)(nvmebuf->dbuf.virt);
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+ ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
+ if (ctxp == NULL) {
+ atomic_inc(&tgtp->rcv_ls_req_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6155 LS Drop IO x%x: Alloc\n",
+ oxid);
+dropit:
+ lpfc_nvmeio_data(phba, "NVMET LS DROP: "
+ "xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+ if (nvmebuf)
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ return;
+ }
+ ctxp->phba = phba;
+ ctxp->size = size;
+ ctxp->oxid = oxid;
+ ctxp->sid = sid;
+ ctxp->wqeq = NULL;
+ ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->rqb_buffer = (void *)nvmebuf;
+
+ lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+ /*
+ * The calling sequence should be:
+ * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
+ * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
+ */
+ atomic_inc(&tgtp->rcv_ls_req_in);
+ rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
+ payload, size);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
+ "%08x %08x %08x\n", __func__, ctxp, size, rc,
+ *payload, *(payload+1), *(payload+2),
+ *(payload+3), *(payload+4), *(payload+5));
+
+ if (rc == 0) {
+ atomic_inc(&tgtp->rcv_ls_req_out);
+ return;
+ }
+
+ lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+
+ atomic_inc(&tgtp->rcv_ls_req_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
+ ctxp->oxid, rc);
+
+ /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
+ if (nvmebuf)
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+
+ atomic_inc(&tgtp->xmt_ls_abort);
+ lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+}
+
+/**
+ * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
+ *
+ * This routine processes an unsolicited NVME FCP command received on an
+ * SLI RQ. It reuses the receive context attached to the RQ buffer,
+ * saves the OX_ID and S_ID from the frame header, and hands the command
+ * payload to the nvmet transport via nvmet_fc_rcv_fcp_req(). If no
+ * context is available or the transport rejects the command, the buffer
+ * is reposted and an abort is issued for the exchange.
+ **/
+static void
+lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct rqb_dmabuf *nvmebuf,
+ uint64_t isr_timestamp)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct fc_frame_header *fc_hdr;
+ uint32_t *payload;
+ uint32_t size, oxid, sid, rc;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ uint32_t id;
+#endif
+
+ if (!nvmebuf || !phba->targetport) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6157 FCP Drop IO\n");
+ oxid = 0;
+ size = 0;
+ sid = 0;
+ goto dropit;
+ }
+
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ payload = (uint32_t *)(nvmebuf->dbuf.virt);
+ fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
+ size = nvmebuf->bytes_recv;
+ oxid = be16_to_cpu(fc_hdr->fh_ox_id);
+ sid = sli4_sid_from_fc_hdr(fc_hdr);
+
+ ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
+ if (ctxp == NULL) {
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6158 FCP Drop IO x%x: Alloc\n",
+ oxid);
+ lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+ /* Cannot send ABTS without context */
+ return;
+ }
+ memset(ctxp, 0, sizeof(ctxp->ctx));
+ ctxp->wqeq = NULL;
+ ctxp->txrdy = NULL;
+ ctxp->offset = 0;
+ ctxp->phba = phba;
+ ctxp->size = size;
+ ctxp->oxid = oxid;
+ ctxp->sid = sid;
+ ctxp->state = LPFC_NVMET_STE_RCV;
+ ctxp->rqb_buffer = nvmebuf;
+ ctxp->entry_cnt = 1;
+ ctxp->flag = 0;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on) {
+ ctxp->ts_isr_cmd = isr_timestamp;
+ ctxp->ts_cmd_nvme = ktime_get_ns();
+ ctxp->ts_nvme_data = 0;
+ ctxp->ts_data_wqput = 0;
+ ctxp->ts_isr_data = 0;
+ ctxp->ts_data_nvme = 0;
+ ctxp->ts_nvme_status = 0;
+ ctxp->ts_status_wqput = 0;
+ ctxp->ts_isr_status = 0;
+ ctxp->ts_status_nvme = 0;
+ }
+
+ if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
+ id = smp_processor_id();
+ if (id < LPFC_CHECK_CPU_CNT)
+ phba->cpucheck_rcv_io[id]++;
+ }
+#endif
+
+ lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+
+ atomic_inc(&tgtp->rcv_fcp_cmd_in);
+ /*
+ * The calling sequence should be:
+ * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
+ * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
+ */
+ rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
+ payload, size);
+
+ /* Process FCP command */
+ if (rc == 0) {
+ atomic_inc(&tgtp->rcv_fcp_cmd_out);
+ return;
+ }
+
+ atomic_inc(&tgtp->rcv_fcp_cmd_drop);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
+ ctxp->oxid, rc);
+dropit:
+ lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
+ oxid, size, sid);
+ if (oxid) {
+ lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
+ return;
+ }
+
+ if (nvmebuf) {
+ nvmebuf->iocbq->hba_wqidx = 0;
+ /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
+ lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+ }
+}
+
+/**
+ * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @nvmebuf: pointer to received nvme data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
+ * SLI RQ on which the unsolicited event was received.
+ **/
+void
+lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *piocb)
+{
+ struct lpfc_dmabuf *d_buf;
+ struct hbq_dmabuf *nvmebuf;
+
+ d_buf = piocb->context2;
+ nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+
+ if (phba->nvmet_support == 0) {
+ lpfc_in_buf_free(phba, &nvmebuf->dbuf);
+ return;
+ }
+ lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
+}
+
+/**
+ * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
+ * @phba: pointer to lpfc hba data structure.
+ * @pring: pointer to a SLI ring.
+ * @nvmebuf: pointer to received nvme data structure.
+ *
+ * This routine is used to process an unsolicited event received from a SLI
+ * (Service Level Interface) ring. The actual processing of the data buffer
+ * associated with the unsolicited event is done by invoking the routine
+ * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
+ * SLI RQ on which the unsolicited event was received.
+ **/
+void
+lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct rqb_dmabuf *nvmebuf,
+ uint64_t isr_timestamp)
+{
+ if (phba->nvmet_support == 0) {
+ lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
+ return;
+ }
+ lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
+ isr_timestamp);
+}
+
+/**
+ * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
+ * @phba: pointer to a host N_Port data structure.
+ * @ctxp: Context info for NVME LS Request
+ * @rspbuf: DMA buffer of NVME command.
+ * @rspsize: size of the NVME command.
+ *
+ * This routine allocates an lpfc WQE data structure from the driver's
+ * iocbq free-list and prepares it as an XMIT_SEQUENCE WQE carrying the
+ * NVME LS response described by @rspbuf and @rspsize. It fills in the
+ * Buffer Descriptor Entry (BDE) for the response payload and sets up
+ * the common WQE words (RPI, XRI, OX_ID, class, CQ routing). The
+ * reference count on the ndlp is incremented by 1 and the ndlp
+ * reference is placed in context1 of the WQE so the command's
+ * completion handler can access it later.
+ *
+ * Return code
+ * Pointer to the newly allocated/prepared nvme wqe data structure
+ * NULL - when nvme wqe data structure allocation/preparation failed
+ **/
+static struct lpfc_iocbq *
+lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
+ struct lpfc_nvmet_rcv_ctx *ctxp,
+ dma_addr_t rspbuf, uint16_t rspsize)
+{
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *nvmewqe;
+ union lpfc_wqe *wqe;
+
+ if (!lpfc_is_link_up(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ "6104 lpfc_nvmet_prep_ls_wqe: link err: "
+ "NPORT x%x oxid:x%x\n",
+ ctxp->sid, ctxp->oxid);
+ return NULL;
+ }
+
+ /* Allocate buffer for command wqe */
+ nvmewqe = lpfc_sli_get_iocbq(phba);
+ if (nvmewqe == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ "6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
+ "NPORT x%x oxid:x%x\n",
+ ctxp->sid, ctxp->oxid);
+ return NULL;
+ }
+
+ ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
+ "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
+ "NPORT x%x oxid:x%x\n",
+ ctxp->sid, ctxp->oxid);
+ goto nvme_wqe_free_wqeq_exit;
+ }
+ ctxp->wqeq = nvmewqe;
+
+ /* prevent preparing wqe with NULL ndlp reference */
+ nvmewqe->context1 = lpfc_nlp_get(ndlp);
+ if (nvmewqe->context1 == NULL)
+ goto nvme_wqe_free_wqeq_exit;
+ nvmewqe->context2 = ctxp;
+
+ wqe = &nvmewqe->wqe;
+ memset(wqe, 0, sizeof(union lpfc_wqe));
+
+ /* Words 0 - 2 */
+ wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
+ wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
+ wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
+
+ /* Word 3 */
+
+ /* Word 4 */
+
+ /* Word 5 */
+ bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
+ bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
+ bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
+ CMD_XMIT_SEQUENCE64_WQE);
+ bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
+ bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
+ bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+
+ /* Word 8 */
+ wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
+ /* Needs to be set by caller */
+ bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
+
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
+ LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
+ OTHER_COMMAND);
+
+ /* Word 12 */
+ wqe->xmit_sequence.xmit_len = rspsize;
+
+ nvmewqe->retry = 1;
+ nvmewqe->vport = phba->pport;
+ nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
+ nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
+
+ /* Xmit NVME response to remote NPORT <did> */
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
+ "6039 Xmit NVME LS response to remote "
+ "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
+ ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
+ rspsize);
+ return nvmewqe;
+
+nvme_wqe_free_wqeq_exit:
+ nvmewqe->context2 = NULL;
+ nvmewqe->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, nvmewqe);
+ return NULL;
+}
+
+
+static struct lpfc_iocbq *
+lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
+ struct lpfc_nvmet_rcv_ctx *ctxp)
+{
+ struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct sli4_sge *sgl;
+ struct lpfc_nodelist *ndlp;
+ struct lpfc_iocbq *nvmewqe;
+ struct scatterlist *sgel;
+ union lpfc_wqe128 *wqe;
+ uint32_t *txrdy;
+ dma_addr_t physaddr;
+ int i, cnt;
+ int xc = 1;
+
+ if (!lpfc_is_link_up(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6107 lpfc_nvmet_prep_fcp_wqe: link err:"
+ "NPORT x%x oxid:x%x\n", ctxp->sid,
+ ctxp->oxid);
+ return NULL;
+ }
+
+ ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
+ "NPORT x%x oxid:x%x\n",
+ ctxp->sid, ctxp->oxid);
+ return NULL;
+ }
+
+ if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
+ "NPORT x%x oxid:x%x\n",
+ ctxp->sid, ctxp->oxid);
+ return NULL;
+ }
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ nvmewqe = ctxp->wqeq;
+ if (nvmewqe == NULL) {
+ /* Allocate buffer for command wqe */
+ nvmewqe = ctxp->rqb_buffer->iocbq;
+ if (nvmewqe == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6110 lpfc_nvmet_prep_fcp_wqe: No "
+ "WQE: NPORT x%x oxid:x%x\n",
+ ctxp->sid, ctxp->oxid);
+ return NULL;
+ }
+ ctxp->wqeq = nvmewqe;
+ xc = 0; /* create new XRI */
+ nvmewqe->sli4_lxritag = NO_XRI;
+ nvmewqe->sli4_xritag = NO_XRI;
+ }
+
+ /* Sanity check */
+ if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
+ (ctxp->entry_cnt == 1)) ||
+ ((ctxp->state == LPFC_NVMET_STE_DATA) &&
+ (ctxp->entry_cnt > 1))) {
+ wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6111 Wrong state %s: %d cnt %d\n",
+ __func__, ctxp->state, ctxp->entry_cnt);
+ return NULL;
+ }
+
+ sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
+ switch (rsp->op) {
+ case NVMET_FCOP_READDATA:
+ case NVMET_FCOP_READDATA_RSP:
+ /* Words 0 - 2 : The first sg segment */
+ sgel = &rsp->sg[0];
+ physaddr = sg_dma_address(sgel);
+ wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
+ wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
+ wqe->fcp_tsend.bde.addrHigh =
+ cpu_to_le32(putPaddrHigh(physaddr));
+
+ /* Word 3 */
+ wqe->fcp_tsend.payload_offset_len = 0;
+
+ /* Word 4 */
+ wqe->fcp_tsend.relative_offset = ctxp->offset;
+
+ /* Word 5 */
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
+ nvmewqe->sli4_xritag);
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
+
+ /* Word 8 */
+ wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
+ bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
+
+ /* Word 10 */
+ bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
+ bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
+ if (phba->cfg_nvme_oas)
+ bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
+ LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
+ FCP_COMMAND_TSEND);
+
+ /* Word 12 */
+ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
+
+ /* Setup 2 SKIP SGEs */
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
+ sgl->word2 = 0;
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = 0;
+ sgl++;
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
+ sgl->word2 = 0;
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = 0;
+ sgl++;
+ if (rsp->op == NVMET_FCOP_READDATA_RSP) {
+ atomic_inc(&tgtp->xmt_fcp_read_rsp);
+ bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
+ if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
+ (rsp->rsplen == 12)) {
+ bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
+ } else {
+ bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
+ bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
+ ((rsp->rsplen >> 2) - 1));
+ memcpy(&wqe->words[16], rsp->rspaddr,
+ rsp->rsplen);
+ }
+ } else {
+ atomic_inc(&tgtp->xmt_fcp_read);
+
+ bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
+ bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
+ }
+ ctxp->state = LPFC_NVMET_STE_DATA;
+ break;
+
+ case NVMET_FCOP_WRITEDATA:
+ /* Words 0 - 2 : The first sg segment */
+ txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
+ GFP_KERNEL, &physaddr);
+ if (!txrdy) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+ "6041 Bad txrdy buffer: oxid x%x\n",
+ ctxp->oxid);
+ return NULL;
+ }
+ ctxp->txrdy = txrdy;
+ ctxp->txrdy_phys = physaddr;
+ wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
+ wqe->fcp_treceive.bde.addrLow =
+ cpu_to_le32(putPaddrLow(physaddr));
+ wqe->fcp_treceive.bde.addrHigh =
+ cpu_to_le32(putPaddrHigh(physaddr));
+
+ /* Word 3 */
+ wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
+
+ /* Word 4 */
+ wqe->fcp_treceive.relative_offset = ctxp->offset;
+
+ /* Word 5 */
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
+ nvmewqe->sli4_xritag);
+
+ /* Word 7 */
+ bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
+ bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
+ CMD_FCP_TRECEIVE64_WQE);
+
+ /* Word 8 */
+ wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
+ bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
+
+ /* Word 10 */
+ bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
+ bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
+ bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
+ bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
+ bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
+ bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
+ bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
+ if (phba->cfg_nvme_oas)
+ bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
+ LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
+ FCP_COMMAND_TRECEIVE);
+ bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+
+ /* Word 12 */
+ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
+
+ /* Setup 1 TXRDY and 1 SKIP SGE */
+ txrdy[0] = 0;
+ txrdy[1] = cpu_to_be32(rsp->transfer_length);
+ txrdy[2] = 0;
+
+ sgl->addr_hi = putPaddrHigh(physaddr);
+ sgl->addr_lo = putPaddrLow(physaddr);
+ sgl->word2 = 0;
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
+ sgl++;
+ sgl->addr_hi = 0;
+ sgl->addr_lo = 0;
+ sgl->word2 = 0;
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = 0;
+ sgl++;
+ ctxp->state = LPFC_NVMET_STE_DATA;
+ atomic_inc(&tgtp->xmt_fcp_write);
+ break;
+
+ case NVMET_FCOP_RSP:
+ /* Words 0 - 2 */
+ sgel = &rsp->sg[0];
+ physaddr = rsp->rspdma;
+ wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
+ wqe->fcp_trsp.bde.addrLow =
+ cpu_to_le32(putPaddrLow(physaddr));
+ wqe->fcp_trsp.bde.addrHigh =
+ cpu_to_le32(putPaddrHigh(physaddr));
+
+ /* Word 3 */
+ wqe->fcp_trsp.response_len = rsp->rsplen;
+
+ /* Word 4 */
+ wqe->fcp_trsp.rsvd_4_5[0] = 0;
+
+
+ /* Word 5 */
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
+ nvmewqe->sli4_xritag);
+
+ /* Word 7 */
+ bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
+ bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
+
+ /* Word 8 */
+ wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
+ bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
+
+ /* Word 10 */
+ bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
+ bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
+ bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
+ LPFC_WQE_LENLOC_WORD3);
+ bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
+ bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
+ if (phba->cfg_nvme_oas)
+ bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
+ LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
+ FCP_COMMAND_TRSP);
+ bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
+ ctxp->state = LPFC_NVMET_STE_RSP;
+
+ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
+ /* Good response - all zero's on wire */
+ bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
+ bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
+ bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
+ } else {
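+ /* Non-success response: embed the FCP_RSP payload in the WQE as an
+ * immediate response (words 16+); irsplen is in 32-bit words minus one. */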
+ bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
+ bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
+ bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
+ ((rsp->rsplen >> 2) - 1));
+ memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
+ }
+
+ /* Use rspbuf, NOT sg list */
+ rsp->sg_cnt = 0;
+ sgl->word2 = 0;
+ atomic_inc(&tgtp->xmt_fcp_rsp);
+ break;
+
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6064 Unknown Rsp Op %d\n",
+ rsp->op);
+ return NULL;
+ }
+
+ nvmewqe->retry = 1;
+ nvmewqe->vport = phba->pport;
+ nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
+ nvmewqe->context1 = ndlp;
+
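+ /* Build one data SGE per scatter/gather entry (skipped for FCOP_RSP,
+ * which sets sg_cnt to 0), advancing the exchange relative offset. */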
+ for (i = 0; i < rsp->sg_cnt; i++) {
+ sgel = &rsp->sg[i];
+ physaddr = sg_dma_address(sgel);
+ cnt = sg_dma_len(sgel);
+ sgl->addr_hi = putPaddrHigh(physaddr);
+ sgl->addr_lo = putPaddrLow(physaddr);
+ sgl->word2 = 0;
+ bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+ bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
+ if ((i+1) == rsp->sg_cnt)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(cnt);
+ sgl++;
+ ctxp->offset += cnt;
+ }
+ return nvmewqe;
+}
+
+/**
+ * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
+ uint32_t status, result;
+
+ ctxp = cmdwqe->context2;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ result = wcqe->parameter;
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_abort_cmpl);
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n",
+ ctxp->oxid, wcqe->word0, wcqe->total_data_placed,
+ result, wcqe->word3);
+
+ ctxp->state = LPFC_NVMET_STE_DONE;
+ lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+}
+
+/**
+ * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME ABTS for FCP cmds.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
+ uint32_t status, result;
+
+ ctxp = cmdwqe->context2;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ result = wcqe->parameter;
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_abort_cmpl);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
+ ctxp, wcqe->word0, wcqe->total_data_placed,
+ result, wcqe->word3);
+
+ if (ctxp) {
+ /* Sanity check */
+ if (ctxp->state != LPFC_NVMET_STE_ABORT) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6112 ABORT Wrong state:%d oxid x%x\n",
+ ctxp->state, ctxp->oxid);
+ }
+ ctxp->state = LPFC_NVMET_STE_DONE;
+ lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+ }
+}
+
+/**
+ * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
+ * @phba: Pointer to HBA context object.
+ * @cmdwqe: Pointer to driver command WQE object.
+ * @wcqe: Pointer to driver response CQE object.
+ *
+ * The function is called from SLI ring event handler with no
+ * lock held. This function is the completion handler for NVME ABTS for LS cmds.
+ * The function frees memory resources used for the NVME commands.
+ **/
+static void
+lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
+ struct lpfc_wcqe_complete *wcqe)
+{
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_nvmet_tgtport *tgtp;
+ uint32_t status, result;
+
+ ctxp = cmdwqe->context2;
+ status = bf_get(lpfc_wcqe_c_status, wcqe);
+ result = wcqe->parameter;
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ atomic_inc(&tgtp->xmt_abort_cmpl);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
+ ctxp, wcqe->word0, wcqe->total_data_placed,
+ result, wcqe->word3);
+
+ if (ctxp) {
+ cmdwqe->context2 = NULL;
+ cmdwqe->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+ kfree(ctxp);
+ } else
+ lpfc_sli_release_iocbq(phba, cmdwqe);
+}
+
+static int
+lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_nvmet_rcv_ctx *ctxp,
+ uint32_t sid, uint16_t xri)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_iocbq *abts_wqeq;
+ union lpfc_wqe *wqe_abts;
+ struct lpfc_nodelist *ndlp;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6067 %s: Entrypoint: sid %x xri %x\n", __func__,
+ sid, xri);
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+
+ ndlp = lpfc_findnode_did(phba->pport, sid);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ "6134 Drop ABTS - wrong NDLP state x%x.\n",
+ ndlp->nlp_state);
+
+ /* No failure to an ABTS request. */
+ return 0;
+ }
+
+ abts_wqeq = ctxp->wqeq;
+ wqe_abts = &abts_wqeq->wqe;
+ ctxp->state = LPFC_NVMET_STE_ABORT;
+
+ /*
+ * Since we zero the whole WQE, we need to ensure we set the WQE fields
+ * that were initialized in lpfc_sli4_nvmet_alloc.
+ */
+ memset(wqe_abts, 0, sizeof(union lpfc_wqe));
+
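+ /* The ABTS is sent as an XMIT_SEQUENCE WQE carrying a BLS BA_ABTS frame;
+ * word 5 supplies the R_CTL and TYPE for that frame. */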
+ /* Word 5 */
+ bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
+ bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
+ bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
+ bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
+
+ /* Word 6 */
+ bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
+ abts_wqeq->sli4_xritag);
+
+ /* Word 7 */
+ bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
+ CMD_XMIT_SEQUENCE64_WQE);
+ bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
+ bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
+ bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
+
+ /* Word 8 */
+ wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
+
+ /* Word 9 */
+ bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
+ /* Needs to be set by caller */
+ bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
+
+ /* Word 10 */
+ bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
+ bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
+ bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
+ LPFC_WQE_LENLOC_WORD12);
+ bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
+ bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
+
+ /* Word 11 */
+ bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
+ LPFC_WQE_CQ_ID_DEFAULT);
+ bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
+ OTHER_COMMAND);
+
+ abts_wqeq->vport = phba->pport;
+ abts_wqeq->context1 = ndlp;
+ abts_wqeq->context2 = ctxp;
+ abts_wqeq->context3 = NULL;
+ abts_wqeq->rsvd2 = 0;
+ /* hba_wqidx should already be setup from command we are aborting */
+ abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
+ abts_wqeq->iocb.ulpLe = 1;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
+ "6069 Issue ABTS to xri x%x reqtag x%x\n",
+ xri, abts_wqeq->iotag);
+ return 1;
+}
+
+static int
+lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_nvmet_rcv_ctx *ctxp,
+ uint32_t sid, uint16_t xri)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_iocbq *abts_wqeq;
+ union lpfc_wqe *abts_wqe;
+ struct lpfc_nodelist *ndlp;
+ unsigned long flags;
+ int rc;
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (!ctxp->wqeq) {
+ ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+ ctxp->wqeq->hba_wqidx = 0;
+ }
+
+ ndlp = lpfc_findnode_did(phba->pport, sid);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
+ ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+ (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ "6160 Drop ABTS - wrong NDLP state x%x.\n",
+ ndlp->nlp_state);
+
+ /* No failure to an ABTS request. */
+ return 0;
+ }
+
+ /* Issue ABTS for this WQE based on iotag */
+ ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
+ if (!ctxp->abort_wqeq) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ "6161 Abort failed: No wqeqs: "
+ "xri: x%x\n", ctxp->oxid);
+ /* No failure to an ABTS request. */
+ return 0;
+ }
+ abts_wqeq = ctxp->abort_wqeq;
+ abts_wqe = &abts_wqeq->wqe;
+ ctxp->state = LPFC_NVMET_STE_ABORT;
+
+ /* Announce entry to new IO submit field. */
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
+ "6162 Abort Request to rport DID x%06x "
+ "for xri x%x x%x\n",
+ ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
+
+ /* If the hba is getting reset, this flag is set. It is
+ * cleared when the reset is complete and rings reestablished.
+ */
+ spin_lock_irqsave(&phba->hbalock, flags);
+ /* driver queued commands are in process of being flushed */
+ if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6163 Driver in reset cleanup - flushing "
+ "NVME Req now. hba_flag x%x oxid x%x\n",
+ phba->hba_flag, ctxp->oxid);
+ lpfc_sli_release_iocbq(phba, abts_wqeq);
+ return 0;
+ }
+
+ /* Outstanding abort is in progress */
+ if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6164 Outstanding NVME I/O Abort Request "
+ "still pending on oxid x%x\n",
+ ctxp->oxid);
+ lpfc_sli_release_iocbq(phba, abts_wqeq);
+ return 0;
+ }
+
+ /* Ready - mark outstanding as aborted by driver. */
+ abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ /* WQEs are reused. Clear stale data and set key fields to
+ * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
+ */
+ memset(abts_wqe, 0, sizeof(union lpfc_wqe));
+
+ /* word 3 */
+ bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
+
+ /* word 7 */
+ bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
+ bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+
+ /* word 8 - tell the FW to abort the IO associated with this
+ * outstanding exchange ID.
+ */
+ abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
+
+ /* word 9 - this is the iotag for the abts_wqe completion. */
+ bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
+ abts_wqeq->iotag);
+
+ /* word 10 */
+ bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ /* word 11 */
+ bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
+ abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
+ abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_flag |= LPFC_IO_NVME;
+ abts_wqeq->context2 = ctxp;
+ rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (rc == WQE_SUCCESS)
+ return 0;
+
+ lpfc_sli_release_iocbq(phba, abts_wqeq);
+ lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
+ "6166 Failed abts issue_wqe with status x%x "
+ "for oxid x%x.\n",
+ rc, ctxp->oxid);
+ return 1;
+}
+
+
+static int
+lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_nvmet_rcv_ctx *ctxp,
+ uint32_t sid, uint16_t xri)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_iocbq *abts_wqeq;
+ unsigned long flags;
+ int rc;
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (!ctxp->wqeq) {
+ ctxp->wqeq = ctxp->rqb_buffer->iocbq;
+ ctxp->wqeq->hba_wqidx = 0;
+ }
+
+ rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
+ if (rc == 0)
+ goto aerr;
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ abts_wqeq = ctxp->wqeq;
+ abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp;
+ abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
+ rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (rc == WQE_SUCCESS) {
+ atomic_inc(&tgtp->xmt_abort_rsp);
+ return 0;
+ }
+
+aerr:
+ lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
+ ctxp->oxid, rc);
+ return 1;
+}
+
+static int
+lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
+ struct lpfc_nvmet_rcv_ctx *ctxp,
+ uint32_t sid, uint16_t xri)
+{
+ struct lpfc_nvmet_tgtport *tgtp;
+ struct lpfc_iocbq *abts_wqeq;
+ union lpfc_wqe *wqe_abts;
+ unsigned long flags;
+ int rc;
+
+ tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ if (!ctxp->wqeq) {
+ /* Issue ABTS for this WQE based on iotag */
+ ctxp->wqeq = lpfc_sli_get_iocbq(phba);
+ if (!ctxp->wqeq) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ "6068 Abort failed: No wqeqs: "
+ "xri: x%x\n", xri);
+ /* No failure to an ABTS request. */
+ kfree(ctxp);
+ return 0;
+ }
+ }
+ abts_wqeq = ctxp->wqeq;
+ wqe_abts = &abts_wqeq->wqe;
+ lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
+ abts_wqeq->iocb_cmpl = 0;
+ abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
+ rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ if (rc == WQE_SUCCESS) {
+ atomic_inc(&tgtp->xmt_abort_rsp);
+ return 0;
+ }
+
+ atomic_inc(&tgtp->xmt_abort_rsp_error);
+ abts_wqeq->context2 = NULL;
+ abts_wqeq->context3 = NULL;
+ lpfc_sli_release_iocbq(phba, abts_wqeq);
+ kfree(ctxp);
+ lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
+ "6056 Failed to Issue ABTS. Status x%x\n", rc);
+ return 0;
+}
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
new file mode 100644
index 000000000000..ca96f05c1604
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -0,0 +1,116 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for *
+ * Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
+ * Copyright (C) 2004-2016 Emulex. All rights reserved. *
+ * EMULEX and SLI are trademarks of Emulex. *
+ * www.broadcom.com *
+ * Portions Copyright (C) 2004-2005 Christoph Hellwig *
+ * *
+ * This program is free software; you can redistribute it and/or *
+ * modify it under the terms of version 2 of the GNU General *
+ * Public License as published by the Free Software Foundation. *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID. See the GNU General Public License for *
+ * more details, a copy of which can be found in the file COPYING *
+ * included with this package. *
+ ********************************************************************/
+
+#define LPFC_NVMET_MIN_SEGS 16
+#define LPFC_NVMET_DEFAULT_SEGS 64 /* 256K IOs */
+#define LPFC_NVMET_MAX_SEGS 510
+#define LPFC_NVMET_SUCCESS_LEN 12
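+/* An FCP_RSP of LPFC_NVMET_SUCCESS_LEN (12 bytes) that is all zeros indicates
+ * good status and needs no immediate response payload in the WQE. */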
+
+/* Used for NVME Target */
+struct lpfc_nvmet_tgtport {
+ struct lpfc_hba *phba;
+ struct completion tport_unreg_done;
+
+ /* Stats counters - lpfc_nvmet_unsol_ls_buffer */
+ atomic_t rcv_ls_req_in;
+ atomic_t rcv_ls_req_out;
+ atomic_t rcv_ls_req_drop;
+ atomic_t xmt_ls_abort;
+
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp */
+ atomic_t xmt_ls_rsp;
+ atomic_t xmt_ls_drop;
+
+ /* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
+ atomic_t xmt_ls_rsp_error;
+ atomic_t xmt_ls_rsp_cmpl;
+
+ /* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
+ atomic_t rcv_fcp_cmd_in;
+ atomic_t rcv_fcp_cmd_out;
+ atomic_t rcv_fcp_cmd_drop;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op */
+ atomic_t xmt_fcp_abort;
+ atomic_t xmt_fcp_drop;
+ atomic_t xmt_fcp_read_rsp;
+ atomic_t xmt_fcp_read;
+ atomic_t xmt_fcp_write;
+ atomic_t xmt_fcp_rsp;
+
+ /* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
+ atomic_t xmt_fcp_rsp_cmpl;
+ atomic_t xmt_fcp_rsp_error;
+ atomic_t xmt_fcp_rsp_drop;
+
+
+ /* Stats counters - lpfc_nvmet_unsol_issue_abort */
+ atomic_t xmt_abort_rsp;
+ atomic_t xmt_abort_rsp_error;
+
+ /* Stats counters - lpfc_nvmet_xmt_abort_cmp */
+ atomic_t xmt_abort_cmpl;
+};
+
+struct lpfc_nvmet_rcv_ctx {
+ union {
+ struct nvmefc_tgt_ls_req ls_req;
+ struct nvmefc_tgt_fcp_req fcp_req;
+ } ctx;
+ struct lpfc_hba *phba;
+ struct lpfc_iocbq *wqeq;
+ struct lpfc_iocbq *abort_wqeq;
+ dma_addr_t txrdy_phys;
+ uint32_t *txrdy;
+ uint32_t sid;
+ uint32_t offset;
+ uint16_t oxid;
+ uint16_t size;
+ uint16_t entry_cnt;
+ uint16_t cpu;
+ uint16_t state;
+ /* States */
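+ /* Typical lifecycle: FREE -> RCV -> DATA -> RSP -> DONE, with ABORT entered
+ * when an exchange is terminated early (inferred from the state transitions
+ * in lpfc_nvmet.c). */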
+#define LPFC_NVMET_STE_FREE 0
+#define LPFC_NVMET_STE_RCV 1
+#define LPFC_NVMET_STE_DATA 2
+#define LPFC_NVMET_STE_ABORT 3
+#define LPFC_NVMET_STE_RSP 4
+#define LPFC_NVMET_STE_DONE 5
+ uint16_t flag;
+#define LPFC_NVMET_IO_INP 1
+#define LPFC_NVMET_ABORT_OP 2
+ struct rqb_dmabuf *rqb_buffer;
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
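+ /* Per-IO timestamps captured at each handling stage (ISR, NVME layer,
+ * WQ put) for debugfs latency statistics. */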
+ uint64_t ts_isr_cmd;
+ uint64_t ts_cmd_nvme;
+ uint64_t ts_nvme_data;
+ uint64_t ts_data_wqput;
+ uint64_t ts_isr_data;
+ uint64_t ts_data_nvme;
+ uint64_t ts_nvme_status;
+ uint64_t ts_status_wqput;
+ uint64_t ts_isr_status;
+ uint64_t ts_status_nvme;
+#endif
+};
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 1180a22beb43..9d6384af9fce 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -413,7 +415,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
* struct fcp_cmnd, struct fcp_rsp and the number of bde's
* necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
@@ -424,8 +426,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ psb->data, psb->dma_handle);
kfree(psb);
break;
}
@@ -522,6 +524,8 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
struct lpfc_scsi_buf *psb, *next_psb;
unsigned long iflag = 0;
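+ /* Nothing to do if this SLI4 port is not configured for FCP
+ * (e.g. NVME-only). */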
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return;
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
@@ -554,8 +558,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
int i;
struct lpfc_nodelist *ndlp;
int rrq_empty = 0;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return;
spin_lock_irqsave(&phba->hbalock, iflag);
spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
list_for_each_entry_safe(psb, next_psb,
@@ -819,7 +825,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
- psb->data = pci_pool_zalloc(phba->lpfc_scsi_dma_buf_pool,
+ psb->data = pci_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
@@ -832,7 +838,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
*/
if (phba->cfg_enable_bg && (((unsigned long)(psb->data) &
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
@@ -841,8 +847,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ psb->data, psb->dma_handle);
kfree(psb);
break;
}
@@ -850,8 +856,8 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
- psb->data, psb->dma_handle);
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"3368 Failed to allocate IOTAG for"
@@ -920,7 +926,7 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
phba->sli4_hba.scsi_xri_cnt++;
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
}
- lpfc_printf_log(phba, KERN_INFO, LOG_BG,
+ lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
"3021 Allocate %d out of %d requested new SCSI "
"buffers\n", bcnt, num_to_alloc);
@@ -3894,7 +3900,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
}
}
chann = atomic_add_return(1, &phba->fcp_qidx);
- chann = (chann % phba->cfg_fcp_io_channel);
+ chann = chann % phba->cfg_fcp_io_channel;
return chann;
}
@@ -3925,6 +3931,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
uint32_t logit = LOG_FCP;
+ phba->fc4ScsiIoCmpls++;
+
/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
if (!cmd)
@@ -3967,6 +3975,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->prot_data_segment = NULL;
}
#endif
+
if (pnode && NLP_CHK_NODE_ACT(pnode))
atomic_dec(&pnode->cmd_pending);
@@ -4241,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
- phba->fc4OutputRequests++;
+ phba->fc4ScsiOutputRequests++;
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
- phba->fc4InputRequests++;
+ phba->fc4ScsiInputRequests++;
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
- phba->fc4ControlRequests++;
+ phba->fc4ScsiControlRequests++;
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4467,7 +4476,7 @@ static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
unsigned long poll_tmo_expires =
(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
- if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
+ if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
mod_timer(&phba->fcp_poll_timer,
poll_tmo_expires);
}
@@ -4497,7 +4506,7 @@ void lpfc_poll_timeout(unsigned long ptr)
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
- &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+ &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
@@ -4561,7 +4570,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
if (lpfc_cmd == NULL) {
lpfc_rampdown_queue_depth(phba);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_MISC,
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
"0707 driver's buffer pool is empty, "
"IO busied\n");
goto out_host_busy;
@@ -4636,7 +4645,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
- &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+ &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
@@ -4681,7 +4690,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
IOCB_t *cmd, *icmd;
int ret = SUCCESS, status = 0;
struct lpfc_sli_ring *pring_s4;
- int ring_number, ret_val;
+ int ret_val;
unsigned long flags, iflags;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
@@ -4769,7 +4778,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
icmd->ulpClass = cmd->ulpClass;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
+ abtsiocb->hba_wqidx = iocb->hba_wqidx;
abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (iocb->iocb_flag & LPFC_IO_FOF)
abtsiocb->iocb_flag |= LPFC_IO_FOF;
@@ -4782,8 +4791,11 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
abtsiocb->vport = vport;
if (phba->sli_rev == LPFC_SLI_REV4) {
- ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx;
- pring_s4 = &phba->sli.ring[ring_number];
+ pring_s4 = lpfc_sli4_calc_ring(phba, iocb);
+ if (pring_s4 == NULL) {
+ ret = FAILED;
+ goto out_unlock;
+ }
/* Note: both hbalock and ring_lock must be set here */
spin_lock_irqsave(&pring_s4->ring_lock, iflags);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
@@ -4805,7 +4817,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_sli_handle_fast_ring_event(phba,
- &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+ &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
wait_for_cmpl:
lpfc_cmd->waitq = &waitq;
@@ -5105,7 +5117,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
if (cnt)
lpfc_sli_abort_taskmgmt(vport,
- &phba->sli.ring[phba->sli.fcp_ring],
+ &phba->sli.sli3_ring[LPFC_FCP_RING],
tgt_id, lun_id, context);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
@@ -5323,7 +5335,8 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
ndlp->nlp_sid == i &&
- ndlp->rport) {
+ ndlp->rport &&
+ ndlp->nlp_type & NLP_FCP_TARGET) {
match = 1;
break;
}
@@ -5534,7 +5547,7 @@ lpfc_slave_configure(struct scsi_device *sdev)
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
lpfc_sli_handle_fast_ring_event(phba,
- &phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ);
+ &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
if (phba->cfg_poll & DISABLE_FCP_RING_INT)
lpfc_poll_rearm_timer(phba);
}
@@ -5898,6 +5911,48 @@ lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
return false;
}
+static int
+lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
+{
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static int
+lpfc_no_handler(struct scsi_cmnd *cmnd)
+{
+ return FAILED;
+}
+
+static int
+lpfc_no_slave(struct scsi_device *sdev)
+{
+ return -ENODEV;
+}
+
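+/* SCSI host template used when the adapter runs NVME only; the lpfc_no_*
+ * stubs above keep the SCSI midlayer from issuing FCP I/O (assumed intent
+ * of this template). */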
+struct scsi_host_template lpfc_template_nvme = {
+ .module = THIS_MODULE,
+ .name = LPFC_DRIVER_NAME,
+ .proc_name = LPFC_DRIVER_NAME,
+ .info = lpfc_info,
+ .queuecommand = lpfc_no_command,
+ .eh_abort_handler = lpfc_no_handler,
+ .eh_device_reset_handler = lpfc_no_handler,
+ .eh_target_reset_handler = lpfc_no_handler,
+ .eh_bus_reset_handler = lpfc_no_handler,
+ .eh_host_reset_handler = lpfc_no_handler,
+ .slave_alloc = lpfc_no_slave,
+ .slave_configure = lpfc_no_slave,
+ .scan_finished = lpfc_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = 1,
+ .cmd_per_lun = 1,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = lpfc_hba_attrs,
+ .max_sectors = 0xFFFF,
+ .vendor_id = LPFC_NL_VENDOR_ID,
+ .track_queue_depth = 0,
+};
+
struct scsi_host_template lpfc_template_s3 = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 8cb80dabada8..5da7e15400cb 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -135,6 +137,8 @@ struct lpfc_scsi_buf {
uint32_t timeout;
+ uint16_t flags; /* TBD convert exch_busy to flags */
+#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */
@@ -164,6 +168,8 @@ struct lpfc_scsi_buf {
* Iotag is in here
*/
struct lpfc_iocbq cur_iocbq;
+ uint16_t cpu;
+
wait_queue_head_t *waitq;
unsigned long start_time;
@@ -178,13 +184,15 @@ struct lpfc_scsi_buf {
#endif
};
-#define LPFC_SCSI_DMA_EXT_SIZE 264
-#define LPFC_BPL_SIZE 1024
-#define MDAC_DIRECT_CMD 0x22
+#define LPFC_SCSI_DMA_EXT_SIZE 264
+#define LPFC_BPL_SIZE 1024
+#define MDAC_DIRECT_CMD 0x22
+
+#define FIND_FIRST_OAS_LUN 0
+#define NO_MORE_OAS_LUN -1
+#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
-#define FIND_FIRST_OAS_LUN 0
-#define NO_MORE_OAS_LUN -1
-#define NOT_OAS_ENABLED_LUN NO_MORE_OAS_LUN
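+/* Size in bytes of the FCP XFER_RDY IU built for NVMET TRECEIVE
+ * (target write) setup */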
+#define TXRDY_PAYLOAD_LEN 12
int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8e886caf2454..e43e5e23c24b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -34,14 +36,18 @@
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
+#include <linux/nvme-fc-driver.h>
+
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
+#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
@@ -67,14 +73,17 @@ static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
struct hbq_dmabuf *);
-static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
+static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_cqe *);
-static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
+static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
+static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
+ struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
@@ -271,10 +280,11 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
/*
* insert barrier for instruction interlock : data from the hardware
* must have the valid bit checked before it can be copied and acted
- * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
- * instructions allowing action on content before valid bit checked,
- * add barrier here as well. May not be needed as "content" is a
- * single 32-bit entity here (vs multi word structure for cq's).
+ * upon. Speculative instructions were allowing a bcopy at the start
+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+ * after our return, to copy data before the valid bit check above
+ * was done. As such, some of the copied data was stale. The barrier
+ * ensures the check is before any data is copied.
*/
mb();
return eqe;
@@ -386,11 +396,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
/*
* insert barrier for instruction interlock : data from the hardware
* must have the valid bit checked before it can be copied and acted
- * upon. Speculative instructions were allowing a bcopy at the start
- * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
- * after our return, to copy data before the valid bit check above
- * was done. As such, some of the copied data was stale. The barrier
- * ensures the check is before any data is copied.
+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+ * instructions allowing action on content before valid bit checked,
+ * add barrier here as well. May not be needed as "content" is a
+ * single 32-bit entity here (vs multi word structure for cq's).
*/
mb();
return cqe;
@@ -456,7 +465,7 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
* on @q then this function will return -ENOMEM.
* The caller is expected to hold the hbalock when calling this routine.
**/
-static int
+int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
@@ -602,7 +611,7 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
*
* Returns sglq ponter = success, NULL = Failure.
**/
-static struct lpfc_sglq *
+struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
struct lpfc_sglq *sglq;
@@ -902,7 +911,7 @@ out:
}
/**
- * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
+ * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
* @phba: Pointer to HBA context object.
* @piocb: Pointer to the iocbq.
*
@@ -912,9 +921,9 @@ out:
* allocated sglq object else it returns NULL.
**/
static struct lpfc_sglq *
-__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
+__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
- struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
+ struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
struct lpfc_sglq *sglq = NULL;
struct lpfc_sglq *start_sglq = NULL;
struct lpfc_scsi_buf *lpfc_cmd;
@@ -938,18 +947,21 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
ndlp = piocbq->context1;
}
- list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
start_sglq = sglq;
while (!found) {
if (!sglq)
return NULL;
- if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
+ if (ndlp && ndlp->active_rrqs_xri_bitmap &&
+ test_bit(sglq->sli4_lxritag,
+ ndlp->active_rrqs_xri_bitmap)) {
/* This xri has an rrq outstanding for this DID.
* put it back in the list and get another xri.
*/
- list_add_tail(&sglq->list, lpfc_sgl_list);
+ list_add_tail(&sglq->list, lpfc_els_sgl_list);
sglq = NULL;
- list_remove_head(lpfc_sgl_list, sglq,
+ list_remove_head(lpfc_els_sgl_list, sglq,
struct lpfc_sglq, list);
if (sglq == start_sglq) {
sglq = NULL;
@@ -962,6 +974,35 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
sglq->state = SGL_ALLOCATED;
}
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ return sglq;
+}
+
+/**
+ * __lpfc_sli_get_nvmet_sglq - Allocates a sglq object from the NVMET sgl pool
+ * @phba: Pointer to HBA context object.
+ * @piocb: Pointer to the iocbq.
+ *
+ * This function is called with the sgl_list lock held. It takes a new driver
+ * sglq object from the NVMET sglq list. If the list is not empty, it returns
+ * a pointer to the newly allocated sglq object; otherwise it returns NULL.
+ **/
+struct lpfc_sglq *
+__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
+{
+ struct list_head *lpfc_nvmet_sgl_list;
+ struct lpfc_sglq *sglq = NULL;
+
+ lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
+
+ lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
+
+ list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
+ if (!sglq)
+ return NULL;
+ phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
+ sglq->state = SGL_ALLOCATED;
return sglq;
}
@@ -1002,7 +1043,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
* this IO was aborted then the sglq entry it put on the
* lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
* IO has good status or fails for any other reason then the sglq
- * entry is added to the free list (lpfc_sgl_list).
+ * entry is added to the free list (lpfc_els_sgl_list).
**/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
@@ -1010,7 +1051,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
struct lpfc_sglq *sglq;
size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
unsigned long iflag = 0;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
lockdep_assert_held(&phba->hbalock);
@@ -1021,21 +1062,36 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
if (sglq) {
+ if (iocbq->iocb_flag & LPFC_IO_NVMET) {
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
+ iflag);
+ sglq->state = SGL_FREED;
+ sglq->ndlp = NULL;
+ list_add_tail(&sglq->list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.sgl_list_lock, iflag);
+ goto out;
+ }
+
+ pring = phba->sli4_hba.els_wq->pring;
if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
(sglq->state != SGL_XRI_ABORTED)) {
- spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
- iflag);
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
+ iflag);
list_add(&sglq->list,
- &phba->sli4_hba.lpfc_abts_els_sgl_list);
+ &phba->sli4_hba.lpfc_abts_els_sgl_list);
spin_unlock_irqrestore(
- &phba->sli4_hba.abts_sgl_list_lock, iflag);
+ &phba->sli4_hba.sgl_list_lock, iflag);
} else {
- spin_lock_irqsave(&pring->ring_lock, iflag);
+ spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
+ iflag);
sglq->state = SGL_FREED;
sglq->ndlp = NULL;
list_add_tail(&sglq->list,
- &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock_irqrestore(&pring->ring_lock, iflag);
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ spin_unlock_irqrestore(
+ &phba->sli4_hba.sgl_list_lock, iflag);
/* Check if TXQ queue needs to be serviced */
if (!list_empty(&pring->txq))
@@ -1043,13 +1099,15 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
}
}
-
+out:
/*
* Clean all volatile data fields, preserve iotag and node struct.
*/
memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
iocbq->sli4_lxritag = NO_XRI;
iocbq->sli4_xritag = NO_XRI;
+ iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
+ LPFC_IO_NVME_LS);
list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
@@ -1639,7 +1697,7 @@ lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
if (lpfc_is_link_up(phba) &&
(!list_empty(&pring->txq)) &&
- (pring->ringno != phba->sli.fcp_ring ||
+ (pring->ringno != LPFC_FCP_RING ||
phba->sli.sli_flag & LPFC_PROCESS_LA)) {
while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
@@ -1718,7 +1776,6 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
struct hbq_dmabuf *hbq_buf;
unsigned long flags;
int i, hbq_count;
- uint32_t hbqno;
hbq_count = lpfc_sli_hbq_count();
/* Return all memory used by all HBQs */
@@ -1732,24 +1789,6 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
}
phba->hbqs[i].buffer_count = 0;
}
- /* Return all HBQ buffer that are in-fly */
- list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
- list) {
- hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
- list_del(&hbq_buf->dbuf.list);
- if (hbq_buf->tag == -1) {
- (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
- (phba, hbq_buf);
- } else {
- hbqno = hbq_buf->tag >> 16;
- if (hbqno >= LPFC_MAX_HBQS)
- (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
- (phba, hbq_buf);
- else
- (phba->hbqs[hbqno].hbq_free_buffer)(phba,
- hbq_buf);
- }
- }
/* Mark the HBQs not in use */
phba->hbq_in_use = 0;
@@ -1802,7 +1841,7 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
- hbqe->bde.tus.f.bdeSize = hbq_buf->size;
+ hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
hbqe->bde.tus.f.bdeFlags = 0;
hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
@@ -1834,17 +1873,23 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
int rc;
struct lpfc_rqe hrqe;
struct lpfc_rqe drqe;
+ struct lpfc_queue *hrq;
+ struct lpfc_queue *drq;
+
+ if (hbqno != LPFC_ELS_HBQ)
+ return 1;
+ hrq = phba->sli4_hba.hdr_rq;
+ drq = phba->sli4_hba.dat_rq;
lockdep_assert_held(&phba->hbalock);
hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
- rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
- &hrqe, &drqe);
+ rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
if (rc < 0)
return rc;
- hbq_buf->tag = rc;
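+ /* encode the owning HBQ number in the upper 16 bits of the buffer tag */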
+ hbq_buf->tag = (rc | (hbqno << 16));
list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
return 0;
}
@@ -1861,22 +1906,9 @@ static struct lpfc_hbq_init lpfc_els_hbq = {
.add_count = 40,
};
-/* HBQ for the extra ring if needed */
-static struct lpfc_hbq_init lpfc_extra_hbq = {
- .rn = 1,
- .entry_count = 200,
- .mask_count = 0,
- .profile = 0,
- .ring_mask = (1 << LPFC_EXTRA_RING),
- .buffer_count = 0,
- .init_count = 0,
- .add_count = 5,
-};
-
/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
&lpfc_els_hbq,
- &lpfc_extra_hbq,
};
/**
@@ -1998,6 +2030,29 @@ lpfc_sli_hbqbuf_get(struct list_head *rb_list)
}
/**
+ * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
+ * @phba: Pointer to HBA context object.
+ * @hrq: Pointer to the header receive queue to take the buffer from.
+ *
+ * This function removes the first RQ buffer on an RQ buffer list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct rqb_dmabuf *
+lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
+{
+ struct lpfc_dmabuf *h_buf;
+ struct lpfc_rqb *rqbp;
+
+ rqbp = hrq->rqbp;
+ list_remove_head(&rqbp->rqb_buffer_list, h_buf,
+ struct lpfc_dmabuf, list);
+ if (!h_buf)
+ return NULL;
+ rqbp->buffer_count--;
+ return container_of(h_buf, struct rqb_dmabuf, hbuf);
+}
+
+/**
* lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
* @phba: Pointer to HBA context object.
* @tag: Tag of the hbq buffer.
@@ -2463,6 +2518,14 @@ lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
{
int i;
+ switch (fch_type) {
+ case FC_TYPE_NVME:
+ lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
+ return 1;
+ default:
+ break;
+ }
+
/* unSolicited Responses */
if (pring->prt[0].profile) {
if (pring->prt[0].lpfc_sli_rcv_unsol_event)
@@ -2713,7 +2776,7 @@ static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
struct lpfc_sli_ring *pring, uint16_t iotag)
{
- struct lpfc_iocbq *cmd_iocb;
+ struct lpfc_iocbq *cmd_iocb = NULL;
lockdep_assert_held(&phba->hbalock);
if (iotag != 0 && iotag <= phba->sli.last_iotag) {
@@ -2727,8 +2790,10 @@ lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
}
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0372 iotag x%x is out of range: max iotag (x%x)\n",
- iotag, phba->sli.last_iotag);
+ "0372 iotag x%x lookup error: max iotag (x%x) "
+ "iocb_flag x%x\n",
+ iotag, phba->sli.last_iotag,
+ cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
return NULL;
}
@@ -3598,6 +3663,33 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
}
/**
+ * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function issues an abort wqe for every iocb command outstanding on the
+ * given ring's txcmplq. The iocbs in the txcmplq are not guaranteed to
+ * complete before the return of this function. The caller is not required to
+ * hold any locks.
+ **/
+void
+lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+ LIST_HEAD(completions);
+ struct lpfc_iocbq *iocb, *next_iocb;
+
+ if (pring->ringno == LPFC_ELS_RING)
+ lpfc_fabric_abort_hba(phba);
+
+ spin_lock_irq(&phba->hbalock);
+ /* Next issue ABTS for everything on the txcmplq */
+ list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+ lpfc_sli4_abort_nvme_io(phba, pring, iocb);
+ spin_unlock_irq(&phba->hbalock);
+}
+
+
+/**
* lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
* @phba: Pointer to HBA context object.
* @pring: Pointer to driver SLI ring object.
@@ -3617,15 +3709,40 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
- pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+ pring = phba->sli4_hba.fcp_wq[i]->pring;
lpfc_sli_abort_iocb_ring(phba, pring);
}
} else {
- pring = &psli->ring[psli->fcp_ring];
+ pring = &psli->sli3_ring[LPFC_FCP_RING];
lpfc_sli_abort_iocb_ring(phba, pring);
}
}
+/**
+ * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function aborts all wqes in NVME rings. This function issues an
+ * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
+ * the txcmplq are not guaranteed to complete before the return of this
+ * function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+ uint32_t i;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return;
+
+ /* Abort all IO on each NVME ring. */
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+ lpfc_sli_abort_wqe_ring(phba, pring);
+ }
+}
+
/**
* lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
@@ -3654,7 +3771,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
/* Look on all the FCP Rings for the iotag */
if (phba->sli_rev >= LPFC_SLI_REV4) {
for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
- pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+ pring = phba->sli4_hba.fcp_wq[i]->pring;
spin_lock_irq(&pring->ring_lock);
/* Retrieve everything on txq */
@@ -3675,7 +3792,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
IOERR_SLI_DOWN);
}
} else {
- pring = &psli->ring[psli->fcp_ring];
+ pring = &psli->sli3_ring[LPFC_FCP_RING];
spin_lock_irq(&phba->hbalock);
/* Retrieve everything on txq */
@@ -3696,6 +3813,51 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
+ * @phba: Pointer to HBA context object.
+ *
+ * This function flushes all wqes in the nvme rings and frees all resources
+ * in the txcmplq. This function does not issue abort wqes for the IO
+ * commands in the txcmplq; they are simply completed locally with
+ * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
+ * slot has been permanently disabled.
+ **/
+void
+lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
+{
+ LIST_HEAD(txcmplq);
+ struct lpfc_sli_ring *pring;
+ uint32_t i;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return;
+
+ /* Hint to other driver operations that a flush is in progress. */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* Cycle through all NVME rings and complete each IO with
+ * a local driver reason code. This is a flush so no
+ * abort exchange to FW.
+ */
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+
+ /* Retrieve everything on the txcmplq */
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txcmplq, &txcmplq);
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
+
+ /* Flush the txcmplq */
+ lpfc_sli_cancel_iocbs(phba, &txcmplq,
+ IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_DOWN);
+ }
+}
+
+/**
* lpfc_sli_brdready_s3 - Check for sli3 host ready status
* @phba: Pointer to HBA context object.
* @mask: Bit mask to be checked.
@@ -4069,7 +4231,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
/* Initialize relevant SLI info */
for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
+ pring = &psli->sli3_ring[i];
pring->flag = 0;
pring->sli.sli3.rspidx = 0;
pring->sli.sli3.next_cmdidx = 0;
@@ -4498,10 +4660,11 @@ static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
phba->hbq_in_use = 1;
- phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
+ phba->hbqs[LPFC_ELS_HBQ].entry_count =
+ lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
phba->hbq_count = 1;
+ lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
/* Initially populate or replenish the HBQs */
- lpfc_sli_hbqbuf_init_hbqs(phba, 0);
return 0;
}
@@ -5107,26 +5270,38 @@ out_free_mboxq:
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
- int fcp_eqidx;
+ int qidx;
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
- fcp_eqidx = 0;
- if (phba->sli4_hba.fcp_cq) {
- do {
- lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
- LPFC_QUEUE_REARM);
- } while (++fcp_eqidx < phba->cfg_fcp_io_channel);
- }
+ if (phba->sli4_hba.nvmels_cq)
+ lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
+ LPFC_QUEUE_REARM);
+
+ if (phba->sli4_hba.fcp_cq)
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
+ lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
+ LPFC_QUEUE_REARM);
+
+ if (phba->sli4_hba.nvme_cq)
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
+ lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
+ LPFC_QUEUE_REARM);
if (phba->cfg_fof)
lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
- if (phba->sli4_hba.hba_eq) {
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
- fcp_eqidx++)
- lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
- LPFC_QUEUE_REARM);
+ if (phba->sli4_hba.hba_eq)
+ for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
+ lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
+ LPFC_QUEUE_REARM);
+
+ if (phba->nvmet_support) {
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
+ lpfc_sli4_cq_release(
+ phba->sli4_hba.nvmet_cqset[qidx],
+ LPFC_QUEUE_REARM);
+ }
}
if (phba->cfg_fof)
@@ -5560,9 +5735,13 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
rsrc_blks->rsrc_size = rsrc_size;
list_add_tail(&rsrc_blks->list, ext_blk_list);
rsrc_start = rsrc_id;
- if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
+ if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
phba->sli4_hba.scsi_xri_start = rsrc_start +
- lpfc_sli4_get_els_iocb_cnt(phba);
+ lpfc_sli4_get_iocb_cnt(phba);
+ phba->sli4_hba.nvme_xri_start =
+ phba->sli4_hba.scsi_xri_start +
+ phba->sli4_hba.scsi_xri_max;
+ }
while (rsrc_id < (rsrc_start + rsrc_size)) {
ids[j] = rsrc_id;
@@ -5578,6 +5757,8 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
return rc;
}
+
+
/**
* lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
* @phba: Pointer to HBA context object.
@@ -6156,42 +6337,45 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
}
/**
- * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block
+ * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as a block
* @phba: pointer to lpfc hba data structure.
+ * @pring: Pointer to driver SLI ring object.
+ * @sgl_list: linked link of sgl buffers to post
+ * @cnt: number of linked list buffers
*
- * This routine walks the list of els buffers that have been allocated and
+ * This routine walks the list of buffers that have been allocated and
* repost them to the port by using SGL block post. This is needed after a
* pci_function_reset/warm_start or start. It attempts to construct blocks
- * of els buffer sgls which contains contiguous xris and uses the non-embedded
- * SGL block post mailbox commands to post them to the port. For single els
+ * of buffer sgls which contains contiguous xris and uses the non-embedded
+ * SGL block post mailbox commands to post them to the port. For single
* buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
* mailbox command for posting.
*
* Returns: 0 = success, non-zero failure.
**/
static int
-lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
+lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
+ struct list_head *sgl_list, int cnt)
{
struct lpfc_sglq *sglq_entry = NULL;
struct lpfc_sglq *sglq_entry_next = NULL;
struct lpfc_sglq *sglq_entry_first = NULL;
- int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
+ int status, total_cnt;
+ int post_cnt = 0, num_posted = 0, block_cnt = 0;
int last_xritag = NO_XRI;
- struct lpfc_sli_ring *pring;
LIST_HEAD(prep_sgl_list);
LIST_HEAD(blck_sgl_list);
LIST_HEAD(allc_sgl_list);
LIST_HEAD(post_sgl_list);
LIST_HEAD(free_sgl_list);
- pring = &phba->sli.ring[LPFC_ELS_RING];
spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
- spin_unlock(&pring->ring_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(sgl_list, &allc_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
- total_cnt = phba->sli4_hba.els_xri_cnt;
+ total_cnt = cnt;
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
&allc_sgl_list, list) {
list_del_init(&sglq_entry->list);
@@ -6220,8 +6404,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* keep track of last sgl's xritag */
last_xritag = sglq_entry->sli4_xritag;
- /* end of repost sgl list condition for els buffers */
- if (num_posted == phba->sli4_hba.els_xri_cnt) {
+ /* end of repost sgl list condition for buffers */
+ if (num_posted == total_cnt) {
if (post_cnt == 0) {
list_splice_init(&prep_sgl_list,
&blck_sgl_list);
@@ -6238,7 +6422,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
/* Failure, put sgl to free list */
lpfc_printf_log(phba, KERN_WARNING,
LOG_SLI,
- "3159 Failed to post els "
+ "3159 Failed to post "
"sgl, xritag:x%x\n",
sglq_entry->sli4_xritag);
list_add_tail(&sglq_entry->list,
@@ -6252,9 +6436,9 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
if (post_cnt == 0)
continue;
- /* post the els buffer list sgls as a block */
- status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
- post_cnt);
+ /* post the buffer list sgls as a block */
+ status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
+ post_cnt);
if (!status) {
/* success, put sgl list to posted sgl list */
@@ -6265,7 +6449,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
struct lpfc_sglq,
list);
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3160 Failed to post els sgl-list, "
+ "3160 Failed to post sgl-list, "
"xritag:x%x-x%x\n",
sglq_entry_first->sli4_xritag,
(sglq_entry_first->sli4_xritag +
@@ -6278,29 +6462,28 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
if (block_cnt == 0)
last_xritag = NO_XRI;
- /* reset els sgl post count for next round of posting */
+ /* reset sgl post count for next round of posting */
post_cnt = 0;
}
- /* update the number of XRIs posted for ELS */
- phba->sli4_hba.els_xri_cnt = total_cnt;
- /* free the els sgls failed to post */
+ /* free the sgls failed to post */
lpfc_free_sgl_list(phba, &free_sgl_list);
- /* push els sgls posted to the availble list */
+ /* push sgls posted to the available list */
if (!list_empty(&post_sgl_list)) {
spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&post_sgl_list,
- &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&pring->ring_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&post_sgl_list, sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "3161 Failure to post els sgl to port.\n");
+ "3161 Failure to post sgl to port.\n");
return -EIO;
}
- return 0;
+
+ /* return the number of XRIs actually posted */
+ return total_cnt;
}
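+
+/*
+ * Usage sketch, mirroring the caller changes later in this patch: a negative
+ * return from the routine above is treated as failure; otherwise the returned
+ * count of posted XRIs is recorded, e.g. phba->sli4_hba.els_xri_cnt = rc for
+ * the ELS sgl list.
+ */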
void
@@ -6335,7 +6518,7 @@ lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
- int rc;
+ int rc, i;
LPFC_MBOXQ_t *mboxq;
struct lpfc_mqe *mqe;
uint8_t *vpd;
@@ -6344,6 +6527,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
struct lpfc_vport *vport = phba->pport;
struct lpfc_dmabuf *mp;
+ struct lpfc_rqb *rqbp;
/* Perform a PCI function reset to start from clean */
rc = lpfc_pci_function_reset(phba);
@@ -6622,35 +6806,141 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
- /* update host els and scsi xri-sgl sizes and mappings */
- rc = lpfc_sli4_xri_sgl_update(phba);
+ /* Create all the SLI4 queues */
+ rc = lpfc_sli4_queue_create(phba);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3089 Failed to allocate queues\n");
+ rc = -ENODEV;
+ goto out_free_mbox;
+ }
+ /* Set up all the queues to the device */
+ rc = lpfc_sli4_queue_setup(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0381 Error %d during queue setup.\n ", rc);
+ goto out_stop_timers;
+ }
+ /* Initialize the driver internal SLI layer lists. */
+ lpfc_sli4_setup(phba);
+ lpfc_sli4_queue_init(phba);
+
+ /* update host els xri-sgl sizes and mappings */
+ rc = lpfc_sli4_els_sgl_update(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"1400 Failed to update xri-sgl size and "
"mapping: %d\n", rc);
- goto out_free_mbox;
+ goto out_destroy_queue;
}
/* register the els sgl pool to the port */
- rc = lpfc_sli4_repost_els_sgl_list(phba);
- if (unlikely(rc)) {
+ rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
+ phba->sli4_hba.els_xri_cnt);
+ if (unlikely(rc < 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0582 Error %d during els sgl post "
"operation\n", rc);
rc = -ENODEV;
- goto out_free_mbox;
+ goto out_destroy_queue;
}
+ phba->sli4_hba.els_xri_cnt = rc;
- /* register the allocated scsi sgl pool to the port */
- rc = lpfc_sli4_repost_scsi_sgl_list(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0383 Error %d during scsi sgl post "
- "operation\n", rc);
- /* Some Scsi buffers were moved to the abort scsi list */
- /* A pci function reset will repost them */
- rc = -ENODEV;
- goto out_free_mbox;
+ if (phba->nvmet_support) {
+ /* update host nvmet xri-sgl sizes and mappings */
+ rc = lpfc_sli4_nvmet_sgl_update(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "6308 Failed to update nvmet-sgl size "
+ "and mapping: %d\n", rc);
+ goto out_destroy_queue;
+ }
+
+ /* register the nvmet sgl pool to the port */
+ rc = lpfc_sli4_repost_sgl_list(
+ phba,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list,
+ phba->sli4_hba.nvmet_xri_cnt);
+ if (unlikely(rc < 0)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "3117 Error %d during nvmet "
+ "sgl post\n", rc);
+ rc = -ENODEV;
+ goto out_destroy_queue;
+ }
+ phba->sli4_hba.nvmet_xri_cnt = rc;
+ lpfc_nvmet_create_targetport(phba);
+ } else {
+ /* update host scsi xri-sgl sizes and mappings */
+ rc = lpfc_sli4_scsi_sgl_update(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "6309 Failed to update scsi-sgl size "
+ "and mapping: %d\n", rc);
+ goto out_destroy_queue;
+ }
+
+ /* update host nvme xri-sgl sizes and mappings */
+ rc = lpfc_sli4_nvme_sgl_update(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "6082 Failed to update nvme-sgl size "
+ "and mapping: %d\n", rc);
+ goto out_destroy_queue;
+ }
+ }
+
+ if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
+
+ /* Post initial buffers to all RQs created */
+ for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
+ rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
+ INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
+ rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
+ rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
+ rqbp->entry_count = 256;
+ rqbp->buffer_count = 0;
+
+ /* Divide by 4 and round down to multiple of 16 */
+ rc = (phba->cfg_nvmet_mrq_post >> 2) & 0xfff8;
+ phba->sli4_hba.nvmet_mrq_hdr[i]->entry_repost = rc;
+ phba->sli4_hba.nvmet_mrq_data[i]->entry_repost = rc;
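+ /*
+ * For illustration, with a hypothetical cfg_nvmet_mrq_post of 512:
+ * (512 >> 2) & 0xfff8 = 128, so entry_repost is set to 128 for both
+ * the header and data RQs.
+ */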
+
+ lpfc_post_rq_buffer(
+ phba, phba->sli4_hba.nvmet_mrq_hdr[i],
+ phba->sli4_hba.nvmet_mrq_data[i],
+ phba->cfg_nvmet_mrq_post);
+ }
+ }
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* register the allocated scsi sgl pool to the port */
+ rc = lpfc_sli4_repost_scsi_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "0383 Error %d during scsi sgl post "
+ "operation\n", rc);
+ /* Some Scsi buffers were moved to abort scsi list */
+ /* A pci function reset will repost them */
+ rc = -ENODEV;
+ goto out_destroy_queue;
+ }
+ }
+
+ if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+ (phba->nvmet_support == 0)) {
+
+ /* register the allocated nvme sgl pool to the port */
+ rc = lpfc_repost_nvme_sgl_list(phba);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+ "6116 Error %d during nvme sgl post "
+ "operation\n", rc);
+ /* Some NVME buffers were moved to abort nvme list */
+ /* A pci function reset will repost them */
+ rc = -ENODEV;
+ goto out_destroy_queue;
+ }
}
/* Post the rpi header region to the device. */
@@ -6660,24 +6950,46 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"0393 Error %d during rpi post operation\n",
rc);
rc = -ENODEV;
- goto out_free_mbox;
+ goto out_destroy_queue;
}
lpfc_sli4_node_prep(phba);
- /* Create all the SLI4 queues */
- rc = lpfc_sli4_queue_create(phba);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3089 Failed to allocate queues\n");
- rc = -ENODEV;
- goto out_stop_timers;
- }
- /* Set up all the queues to the device */
- rc = lpfc_sli4_queue_setup(phba);
- if (unlikely(rc)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "0381 Error %d during queue setup.\n ", rc);
- goto out_destroy_queue;
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
+ /*
+ * The FC Port needs to register FCFI (index 0)
+ */
+ lpfc_reg_fcfi(phba, mboxq);
+ mboxq->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS)
+ goto out_unset_queue;
+ rc = 0;
+ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
+ &mboxq->u.mqe.un.reg_fcfi);
+ } else {
+ /* We are in NVME Target mode with MRQ > 1 */
+
+ /* First register the FCFI */
+ lpfc_reg_fcfi_mrq(phba, mboxq, 0);
+ mboxq->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS)
+ goto out_unset_queue;
+ rc = 0;
+ phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
+ &mboxq->u.mqe.un.reg_fcfi_mrq);
+
+ /* Next register the MRQs */
+ lpfc_reg_fcfi_mrq(phba, mboxq, 1);
+ mboxq->vport = phba->pport;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS)
+ goto out_unset_queue;
+ rc = 0;
+ }
+ /* Check if the port is configured to be disabled */
+ lpfc_sli_read_link_ste(phba);
}
/* Arm the CQs and then EQs on device */
@@ -6731,23 +7043,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = 0;
}
- if (!(phba->hba_flag & HBA_FCOE_MODE)) {
- /*
- * The FC Port needs to register FCFI (index 0)
- */
- lpfc_reg_fcfi(phba, mboxq);
- mboxq->vport = phba->pport;
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (rc != MBX_SUCCESS)
- goto out_unset_queue;
- rc = 0;
- phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
- &mboxq->u.mqe.un.reg_fcfi);
-
- /* Check if the port is configured to be disabled */
- lpfc_sli_read_link_ste(phba);
- }
-
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
@@ -6884,7 +7179,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
/* Find the eq associated with the mcq */
if (phba->sli4_hba.hba_eq)
- for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
+ for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
phba->sli4_hba.mbx_cq->assoc_qid) {
fpeq = phba->sli4_hba.hba_eq[eqidx];
@@ -7243,16 +7538,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
- if (pmbox->in_ext_byte_len && pmbox->context2) {
+ if (pmbox->in_ext_byte_len && pmbox->context2)
lpfc_memcpy_to_slim(phba->MBslimaddr +
MAILBOX_HBA_EXT_OFFSET,
pmbox->context2, pmbox->in_ext_byte_len);
- }
- if (mbx->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT)
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
- }
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
+ MAILBOX_CMD_SIZE);
/* First copy mbox command data to HBA SLIM, skip past first
word */
@@ -7266,10 +7560,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
writel(ldata, to_slim);
readl(to_slim); /* flush */
- if (mbx->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT)
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
- }
}
wmb();
@@ -7368,7 +7661,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
+ MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -7378,7 +7672,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
} else {
/* First copy command data */
lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
- MAILBOX_CMD_SIZE);
+ MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_memcpy_from_slim(pmbox->context2,
@@ -8059,7 +8353,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
{
struct lpfc_iocbq *nextiocb;
IOCB_t *iocb;
- struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+ struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
lockdep_assert_held(&phba->hbalock);
@@ -8133,7 +8427,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
* For FCP commands, we must be in a state where we can process link
* attention events.
*/
- } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
+ } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
!(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
goto iocb_busy;
}
@@ -8870,9 +9164,21 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
union lpfc_wqe *wqe;
union lpfc_wqe128 wqe128;
struct lpfc_queue *wq;
- struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+ struct lpfc_sli_ring *pring;
- lockdep_assert_held(&phba->hbalock);
+ /* Get the WQ */
+ if ((piocb->iocb_flag & LPFC_IO_FCP) ||
+ (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
+ if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
+ wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
+ else
+ wq = phba->sli4_hba.oas_wq;
+ } else {
+ wq = phba->sli4_hba.els_wq;
+ }
+
+ /* Get corresponding ring */
+ pring = wq->pring;
/*
* The WQE can be either 64 or 128 bytes,
@@ -8880,6 +9186,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
*/
wqe = (union lpfc_wqe *)&wqe128;
+ lockdep_assert_held(&phba->hbalock);
+
if (piocb->sli4_xritag == NO_XRI) {
if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
@@ -8894,7 +9202,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
return IOCB_BUSY;
}
} else {
- sglq = __lpfc_sli_get_sglq(phba, piocb);
+ sglq = __lpfc_sli_get_els_sglq(phba, piocb);
if (!sglq) {
if (!(flag & SLI_IOCB_RET_IOCB)) {
__lpfc_sli_ringtx_put(phba,
@@ -8906,10 +9214,10 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
}
}
}
- } else if (piocb->iocb_flag & LPFC_IO_FCP) {
+ } else if (piocb->iocb_flag & LPFC_IO_FCP)
/* These IO's already have an XRI and a mapped sgl. */
sglq = NULL;
- } else {
+ else {
/*
* This is a continuation of a commandi,(CX) so this
* sglq is on the active list
@@ -8929,21 +9237,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
return IOCB_ERROR;
- if ((piocb->iocb_flag & LPFC_IO_FCP) ||
- (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
- if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
- wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
- } else {
- wq = phba->sli4_hba.oas_wq;
- }
- if (lpfc_sli4_wq_put(wq, wqe))
- return IOCB_ERROR;
- } else {
- if (unlikely(!phba->sli4_hba.els_wq))
- return IOCB_ERROR;
- if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
- return IOCB_ERROR;
- }
+ if (lpfc_sli4_wq_put(wq, wqe))
+ return IOCB_ERROR;
lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
return 0;
@@ -9001,46 +9296,44 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
}
/**
- * lpfc_sli_calc_ring - Calculates which ring to use
+ * lpfc_sli4_calc_ring - Calculates which ring to use
* @phba: Pointer to HBA context object.
- * @ring_number: Initial ring
* @piocb: Pointer to command iocb.
*
- * For SLI4, FCP IO can deferred to one fo many WQs, based on
- * fcp_wqidx, thus we need to calculate the corresponding ring.
+ * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
+ * hba_wqidx, thus we need to calculate the corresponding ring.
* Since ABORTS must go on the same WQ of the command they are
- * aborting, we use command's fcp_wqidx.
+ * aborting, we use command's hba_wqidx.
*/
-static int
-lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
- struct lpfc_iocbq *piocb)
+struct lpfc_sli_ring *
+lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
- if (phba->sli_rev < LPFC_SLI_REV4)
- return ring_number;
-
- if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+ if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
if (!(phba->cfg_fof) ||
- (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+ (!(piocb->iocb_flag & LPFC_IO_FOF))) {
if (unlikely(!phba->sli4_hba.fcp_wq))
- return LPFC_HBA_ERROR;
+ return NULL;
/*
- * for abort iocb fcp_wqidx should already
+ * for abort iocb hba_wqidx should already
* be setup based on what work queue we used.
*/
if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
- piocb->fcp_wqidx =
+ piocb->hba_wqidx =
lpfc_sli4_scmd_to_wqidx_distr(phba,
piocb->context1);
- ring_number = MAX_SLI3_CONFIGURED_RINGS +
- piocb->fcp_wqidx;
+ return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
} else {
if (unlikely(!phba->sli4_hba.oas_wq))
- return LPFC_HBA_ERROR;
- piocb->fcp_wqidx = 0;
- ring_number = LPFC_FCP_OAS_RING;
+ return NULL;
+ piocb->hba_wqidx = 0;
+ return phba->sli4_hba.oas_wq->pring;
}
+ } else {
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return NULL;
+ piocb->hba_wqidx = 0;
+ return phba->sli4_hba.els_wq->pring;
}
- return ring_number;
}
/**
@@ -9060,7 +9353,7 @@ int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
struct lpfc_iocbq *piocb, uint32_t flag)
{
- struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_sli_ring *pring;
struct lpfc_queue *fpeq;
struct lpfc_eqe *eqe;
@@ -9068,21 +9361,19 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
int rc, idx;
if (phba->sli_rev == LPFC_SLI_REV4) {
- ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
- if (unlikely(ring_number == LPFC_HBA_ERROR))
+ pring = lpfc_sli4_calc_ring(phba, piocb);
+ if (unlikely(pring == NULL))
return IOCB_ERROR;
- idx = piocb->fcp_wqidx;
- pring = &phba->sli.ring[ring_number];
spin_lock_irqsave(&pring->ring_lock, iflags);
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
- fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+ idx = piocb->hba_wqidx;
+ hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
- if (atomic_dec_and_test(&fcp_eq_hdl->
- fcp_eq_in_use)) {
+ if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
/* Get associated EQ with this index */
fpeq = phba->sli4_hba.hba_eq[idx];
@@ -9103,7 +9394,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
lpfc_sli4_eq_release(fpeq,
LPFC_QUEUE_REARM);
}
- atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ atomic_inc(&hba_eq_hdl->hba_eq_in_use);
}
} else {
/* For now, SLI2/3 will still use hbalock */
@@ -9123,7 +9414,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
* only when driver needs to support target mode functionality
* or IP over FC functionalities.
*
- * This function is called with no lock held.
+ * This function is called with no lock held. SLI3 only.
**/
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
@@ -9136,14 +9427,14 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
/* Adjust cmd/rsp ring iocb entries more evenly */
/* Take some away from the FCP ring */
- pring = &psli->ring[psli->fcp_ring];
+ pring = &psli->sli3_ring[LPFC_FCP_RING];
pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
/* and give them to the extra ring */
- pring = &psli->ring[psli->extra_ring];
+ pring = &psli->sli3_ring[LPFC_EXTRA_RING];
pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
@@ -9328,7 +9619,7 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
/**
- * lpfc_sli_setup - SLI ring setup function
+ * lpfc_sli4_setup - SLI ring setup function
* @phba: Pointer to HBA context object.
*
* lpfc_sli_setup sets up rings of the SLI interface with
@@ -9339,6 +9630,51 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
* This function always returns 0.
**/
int
+lpfc_sli4_setup(struct lpfc_hba *phba)
+{
+ struct lpfc_sli_ring *pring;
+
+ pring = phba->sli4_hba.els_wq->pring;
+ pring->num_mask = LPFC_MAX_RING_MASK;
+ pring->prt[0].profile = 0; /* Mask 0 */
+ pring->prt[0].rctl = FC_RCTL_ELS_REQ;
+ pring->prt[0].type = FC_TYPE_ELS;
+ pring->prt[0].lpfc_sli_rcv_unsol_event =
+ lpfc_els_unsol_event;
+ pring->prt[1].profile = 0; /* Mask 1 */
+ pring->prt[1].rctl = FC_RCTL_ELS_REP;
+ pring->prt[1].type = FC_TYPE_ELS;
+ pring->prt[1].lpfc_sli_rcv_unsol_event =
+ lpfc_els_unsol_event;
+ pring->prt[2].profile = 0; /* Mask 2 */
+ /* NameServer Inquiry */
+ pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
+ /* NameServer */
+ pring->prt[2].type = FC_TYPE_CT;
+ pring->prt[2].lpfc_sli_rcv_unsol_event =
+ lpfc_ct_unsol_event;
+ pring->prt[3].profile = 0; /* Mask 3 */
+ /* NameServer response */
+ pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
+ /* NameServer */
+ pring->prt[3].type = FC_TYPE_CT;
+ pring->prt[3].lpfc_sli_rcv_unsol_event =
+ lpfc_ct_unsol_event;
+ return 0;
+}
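+
+/*
+ * For illustration: with the masks above, an unsolicited frame carrying
+ * rctl FC_RCTL_ELS_REQ and type FC_TYPE_ELS matches prt[0] and is handed to
+ * lpfc_els_unsol_event(), while a NameServer response (FC_RCTL_DD_SOL_CTL,
+ * FC_TYPE_CT) matches prt[3] and goes to lpfc_ct_unsol_event().
+ */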
+
+/**
+ * lpfc_sli_setup - SLI ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_setup sets up rings of the SLI interface with
+ * number of iocbs per ring and iotags. This function is
+ * called while driver attach to the HBA and before the
+ * interrupts are enabled. So there is no need for locking.
+ *
+ * This function always returns 0. SLI3 only.
+ **/
+int
lpfc_sli_setup(struct lpfc_hba *phba)
{
int i, totiocbsize = 0;
@@ -9346,19 +9682,14 @@ lpfc_sli_setup(struct lpfc_hba *phba)
struct lpfc_sli_ring *pring;
psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
- if (phba->sli_rev == LPFC_SLI_REV4)
- psli->num_rings += phba->cfg_fcp_io_channel;
psli->sli_flag = 0;
- psli->fcp_ring = LPFC_FCP_RING;
- psli->next_ring = LPFC_FCP_NEXT_RING;
- psli->extra_ring = LPFC_EXTRA_RING;
psli->iocbq_lookup = NULL;
psli->iocbq_lookup_len = 0;
psli->last_iotag = 0;
for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
+ pring = &psli->sli3_ring[i];
switch (i) {
case LPFC_FCP_RING: /* ring 0 - FCP */
/* numCiocb and numRiocb are used in config_port */
@@ -9457,18 +9788,90 @@ lpfc_sli_setup(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_queue_setup - Queue initialization function
+ * lpfc_sli4_queue_init - Queue initialization function
* @phba: Pointer to HBA context object.
*
- * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
+ * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
* ring. This function also initializes ring indices of each ring.
* This function is called during the initialization of the SLI
* interface of an HBA.
* This function is called with no lock held and always returns
* 1.
**/
-int
-lpfc_sli_queue_setup(struct lpfc_hba *phba)
+void
+lpfc_sli4_queue_init(struct lpfc_hba *phba)
+{
+ struct lpfc_sli *psli;
+ struct lpfc_sli_ring *pring;
+ int i;
+
+ psli = &phba->sli;
+ spin_lock_irq(&phba->hbalock);
+ INIT_LIST_HEAD(&psli->mboxq);
+ INIT_LIST_HEAD(&psli->mboxq_cmpl);
+ /* Initialize list headers for txq and txcmplq as double linked lists */
+ for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+ pring = phba->sli4_hba.fcp_wq[i]->pring;
+ pring->flag = 0;
+ pring->ringno = LPFC_FCP_RING;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ spin_lock_init(&pring->ring_lock);
+ }
+ for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
+ pring = phba->sli4_hba.nvme_wq[i]->pring;
+ pring->flag = 0;
+ pring->ringno = LPFC_FCP_RING;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ spin_lock_init(&pring->ring_lock);
+ }
+ pring = phba->sli4_hba.els_wq->pring;
+ pring->flag = 0;
+ pring->ringno = LPFC_ELS_RING;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ spin_lock_init(&pring->ring_lock);
+
+ if (phba->cfg_nvme_io_channel) {
+ pring = phba->sli4_hba.nvmels_wq->pring;
+ pring->flag = 0;
+ pring->ringno = LPFC_ELS_RING;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ spin_lock_init(&pring->ring_lock);
+ }
+
+ if (phba->cfg_fof) {
+ pring = phba->sli4_hba.oas_wq->pring;
+ pring->flag = 0;
+ pring->ringno = LPFC_FCP_RING;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
+ INIT_LIST_HEAD(&pring->iocb_continueq);
+ spin_lock_init(&pring->ring_lock);
+ }
+
+ spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli_queue_init - Queue initialization function
+ * @phba: Pointer to HBA context object.
+ *
+ * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
+ * ring. This function also initializes ring indices of each ring.
+ * This function is called during the initialization of the SLI
+ * interface of an HBA.
+ * This function is called with no lock held.
+ **/
+void
+lpfc_sli_queue_init(struct lpfc_hba *phba)
{
struct lpfc_sli *psli;
struct lpfc_sli_ring *pring;
@@ -9480,21 +9883,20 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
INIT_LIST_HEAD(&psli->mboxq_cmpl);
/* Initialize list headers for txq and txcmplq as double linked lists */
for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
+ pring = &psli->sli3_ring[i];
pring->ringno = i;
pring->sli.sli3.next_cmdidx = 0;
pring->sli.sli3.local_getidx = 0;
pring->sli.sli3.cmdidx = 0;
- pring->flag = 0;
- INIT_LIST_HEAD(&pring->txq);
- INIT_LIST_HEAD(&pring->txcmplq);
INIT_LIST_HEAD(&pring->iocb_continueq);
INIT_LIST_HEAD(&pring->iocb_continue_saveq);
INIT_LIST_HEAD(&pring->postbufq);
+ pring->flag = 0;
+ INIT_LIST_HEAD(&pring->txq);
+ INIT_LIST_HEAD(&pring->txcmplq);
spin_lock_init(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
- return 1;
}
/**
@@ -9566,6 +9968,7 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
LIST_HEAD(completions);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *iocb, *next_iocb;
int i;
@@ -9575,36 +9978,64 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
lpfc_cleanup_discovery_resources(vport);
spin_lock_irqsave(&phba->hbalock, flags);
- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
- prev_pring_flag = pring->flag;
- /* Only slow rings */
- if (pring->ringno == LPFC_ELS_RING) {
- pring->flag |= LPFC_DEFERRED_RING_EVENT;
- /* Set the lpfc data pending flag */
- set_bit(LPFC_DATA_READY, &phba->data_flags);
- }
- /*
- * Error everything on the txq since these iocbs have not been
- * given to the FW yet.
- */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
- if (iocb->vport != vport)
- continue;
- list_move_tail(&iocb->list, &completions);
- }
- /* Next issue ABTS for everything on the txcmplq */
- list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
- list) {
- if (iocb->vport != vport)
+ /*
+ * Error everything on the txq since these iocbs
+ * have not been given to the FW yet.
+ * Also issue ABTS for everything on the txcmplq
+ */
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->sli3_ring[i];
+ prev_pring_flag = pring->flag;
+ /* Only slow rings */
+ if (pring->ringno == LPFC_ELS_RING) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txq, list) {
+ if (iocb->vport != vport)
+ continue;
+ list_move_tail(&iocb->list, &completions);
+ }
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list) {
+ if (iocb->vport != vport)
+ continue;
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
+ pring->flag = prev_pring_flag;
+ }
+ } else {
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
continue;
- lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ if (pring == phba->sli4_hba.els_wq->pring) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
+ prev_pring_flag = pring->flag;
+ spin_lock_irq(&pring->ring_lock);
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txq, list) {
+ if (iocb->vport != vport)
+ continue;
+ list_move_tail(&iocb->list, &completions);
+ }
+ spin_unlock_irq(&pring->ring_lock);
+ list_for_each_entry_safe(iocb, next_iocb,
+ &pring->txcmplq, list) {
+ if (iocb->vport != vport)
+ continue;
+ lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+ }
+ pring->flag = prev_pring_flag;
}
-
- pring->flag = prev_pring_flag;
}
-
spin_unlock_irqrestore(&phba->hbalock, flags);
/* Cancel all the IOCBs from the completions list */
@@ -9633,6 +10064,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
struct lpfc_dmabuf *buf_ptr;
unsigned long flags = 0;
@@ -9646,20 +10078,36 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
lpfc_fabric_abort_hba(phba);
spin_lock_irqsave(&phba->hbalock, flags);
- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
- /* Only slow rings */
- if (pring->ringno == LPFC_ELS_RING) {
- pring->flag |= LPFC_DEFERRED_RING_EVENT;
- /* Set the lpfc data pending flag */
- set_bit(LPFC_DATA_READY, &phba->data_flags);
- }
- /*
- * Error everything on the txq since these iocbs have not been
- * given to the FW yet.
- */
- list_splice_init(&pring->txq, &completions);
+ /*
+ * Error everything on the txq since these iocbs
+ * have not been given to the FW yet.
+ */
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->sli3_ring[i];
+ /* Only slow rings */
+ if (pring->ringno == LPFC_ELS_RING) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
+ list_splice_init(&pring->txq, &completions);
+ }
+ } else {
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
+ continue;
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txq, &completions);
+ spin_unlock_irq(&pring->ring_lock);
+ if (pring == phba->sli4_hba.els_wq->pring) {
+ pring->flag |= LPFC_DEFERRED_RING_EVENT;
+ /* Set the lpfc data pending flag */
+ set_bit(LPFC_DATA_READY, &phba->data_flags);
+ }
+ }
}
spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -9986,7 +10434,6 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *abtsiocbp;
IOCB_t *icmd = NULL;
IOCB_t *iabt = NULL;
- int ring_number;
int retval;
unsigned long iflags;
@@ -10026,7 +10473,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
iabt->ulpClass = icmd->ulpClass;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
+ abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
if (cmdiocb->iocb_flag & LPFC_IO_FCP)
abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
if (cmdiocb->iocb_flag & LPFC_IO_FOF)
@@ -10048,11 +10495,9 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->iotag);
if (phba->sli_rev == LPFC_SLI_REV4) {
- ring_number =
- lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
- if (unlikely(ring_number == LPFC_HBA_ERROR))
+ pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
+ if (unlikely(pring == NULL))
return 0;
- pring = &phba->sli.ring[ring_number];
/* Note: both hbalock and ring_lock need to be set here */
spin_lock_irqsave(&pring->ring_lock, iflags);
retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
@@ -10134,6 +10579,108 @@ abort_iotag_exit:
}
/**
+ * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Other than the case the outstanding command iocb is an abort
+ * request, this function issues abort out unconditionally. This function is
+ * called with hbalock held. The function returns 0 when it fails due to
+ * memory allocation failure or when the command iocb is an abort request.
+ **/
+static int
+lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ struct lpfc_iocbq *cmdiocb)
+{
+ struct lpfc_vport *vport = cmdiocb->vport;
+ struct lpfc_iocbq *abtsiocbp;
+ union lpfc_wqe *abts_wqe;
+ int retval;
+
+ /*
+ * There are certain command types we don't want to abort. And we
+ * don't want to abort commands that are already in the process of
+ * being aborted.
+ */
+ if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+ cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
+ (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+ return 0;
+
+ /* issue ABTS for this io based on iotag */
+ abtsiocbp = __lpfc_sli_get_iocbq(phba);
+ if (abtsiocbp == NULL)
+ return 0;
+
+ /* This signals the response to set the correct status
+ * before calling the completion handler
+ */
+ cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
+
+ /* Complete prepping the abort wqe and issue to the FW. */
+ abts_wqe = &abtsiocbp->wqe;
+ bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
+ bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
+
+ /* Explicitly set reserved fields to zero. */
+ abts_wqe->abort_cmd.rsrvd4 = 0;
+ abts_wqe->abort_cmd.rsrvd5 = 0;
+
+ /* WQE Common - word 6. Context is XRI tag. Set 0. */
+ bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
+
+ /* word 7 */
+ bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
+ bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+ bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
+ cmdiocb->iocb.ulpClass);
+
+ /* word 8 - tell the FW to abort the IO associated with this
+ * outstanding exchange ID.
+ */
+ abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
+
+ /* word 9 - this is the iotag for the abts_wqe completion. */
+ bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
+ abtsiocbp->iotag);
+
+ /* word 10 */
+ bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
+ bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
+
+ /* word 11 */
+ bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
+ bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
+ bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
+
+ /* ABTS WQE must go to the same WQ as the WQE to be aborted */
+ abtsiocbp->iocb_flag |= LPFC_IO_NVME;
+ abtsiocbp->vport = vport;
+ abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
+ retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
+ if (retval == IOCB_ERROR) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6147 Failed abts issue_wqe with status x%x "
+ "for oxid x%x\n",
+ retval, cmdiocb->sli4_xritag);
+ lpfc_sli_release_iocbq(phba, abtsiocbp);
+ return retval;
+ }
+
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
+ "6148 Drv Abort NVME Request Issued for "
+ "ox_id x%x on reqtag x%x\n",
+ cmdiocb->sli4_xritag,
+ abtsiocbp->iotag);
+
+ return retval;
+}
+
+/**
* lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
* @phba: pointer to lpfc HBA data structure.
*
@@ -10144,10 +10691,20 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
struct lpfc_sli_ring *pring;
+ struct lpfc_queue *qp = NULL;
int i;
- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->sli3_ring[i];
+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
+ return;
+ }
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
+ continue;
lpfc_sli_abort_iocb_ring(phba, pring);
}
}
@@ -10351,7 +10908,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocb->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
+ abtsiocb->hba_wqidx = iocbq->hba_wqidx;
if (iocbq->iocb_flag & LPFC_IO_FCP)
abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
if (iocbq->iocb_flag & LPFC_IO_FOF)
@@ -10411,7 +10968,6 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
int sum, i, ret_val;
unsigned long iflags;
struct lpfc_sli_ring *pring_s4;
- uint32_t ring_number;
spin_lock_irq(&phba->hbalock);
@@ -10454,7 +11010,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
abtsiocbq->vport = vport;
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
- abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
+ abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
if (iocbq->iocb_flag & LPFC_IO_FCP)
abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
if (iocbq->iocb_flag & LPFC_IO_FOF)
@@ -10479,9 +11035,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
if (phba->sli_rev == LPFC_SLI_REV4) {
- ring_number = MAX_SLI3_CONFIGURED_RINGS +
- iocbq->fcp_wqidx;
- pring_s4 = &phba->sli.ring[ring_number];
+ pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
+ if (pring_s4 == NULL)
+ continue;
/* Note: both hbalock and ring_lock must be set here */
spin_lock_irqsave(&pring_s4->ring_lock, iflags);
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
@@ -10643,10 +11199,14 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
struct lpfc_iocbq *iocb;
int txq_cnt = 0;
int txcmplq_cnt = 0;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
unsigned long iflags;
bool iocb_completed = true;
+ if (phba->sli_rev >= LPFC_SLI_REV4)
+ pring = lpfc_sli4_calc_ring(phba, piocb);
+ else
+ pring = &phba->sli.sli3_ring[ring_number];
/*
* If the caller has provided a response iocbq buffer, then context2
* is NULL or its an error.
@@ -11441,6 +12001,7 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
uint32_t ha_copy;
unsigned long status;
unsigned long iflag;
+ struct lpfc_sli_ring *pring;
/* Get the driver's phba structure from the dev_id and
* assume the HBA is not interrupting.
@@ -11485,10 +12046,9 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
status >>= (4*LPFC_FCP_RING);
+ pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
if (status & HA_RXMASK)
- lpfc_sli_handle_fast_ring_event(phba,
- &phba->sli.ring[LPFC_FCP_RING],
- status);
+ lpfc_sli_handle_fast_ring_event(phba, pring, status);
if (phba->cfg_multi_ring_support == 2) {
/*
@@ -11499,7 +12059,7 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id)
status >>= (4*LPFC_EXTRA_RING);
if (status & HA_RXMASK) {
lpfc_sli_handle_fast_ring_event(phba,
- &phba->sli.ring[LPFC_EXTRA_RING],
+ &phba->sli.sli3_ring[LPFC_EXTRA_RING],
status);
}
}
@@ -11812,11 +12372,13 @@ static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
struct lpfc_iocbq *irspiocbq)
{
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct lpfc_iocbq *cmdiocbq;
struct lpfc_wcqe_complete *wcqe;
unsigned long iflags;
+ pring = lpfc_phba_elsring(phba);
+
wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
spin_lock_irqsave(&pring->ring_lock, iflags);
pring->stats.iocb_event++;
@@ -12052,8 +12614,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
txq_cnt++;
if (!list_empty(&pring->txcmplq))
txcmplq_cnt++;
- if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
- fcp_txcmplq_cnt++;
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
@@ -12172,6 +12732,7 @@ static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
bool workposted = false;
+ struct fc_frame_header *fc_hdr;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct hbq_dmabuf *dma_buf;
@@ -12206,6 +12767,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
}
hrq->RQ_rcv_buf++;
memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
+
+ /* If an NVME LS event (type 0x28), treat it as Fast path */
+ fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
+
/* save off the frame for the word thread to process */
list_add_tail(&dma_buf->cq_event.list,
&phba->sli4_hba.sp_queue_event);
@@ -12324,6 +12889,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
return;
}
+ /* Save EQ associated with this CQ */
+ cq->assoc_qp = speq;
+
/* Process all the entries to the CQ */
switch (cq->type) {
case LPFC_MCQ:
@@ -12336,8 +12904,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
break;
case LPFC_WCQ:
while ((cqe = lpfc_sli4_cq_get(cq))) {
- if (cq->subtype == LPFC_FCP)
- workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
+ if ((cq->subtype == LPFC_FCP) ||
+ (cq->subtype == LPFC_NVME))
+ workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
cqe);
else
workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
@@ -12424,7 +12993,23 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
bf_get(lpfc_wcqe_c_request_tag, wcqe));
return;
}
- if (unlikely(!cmdiocbq->iocb_cmpl)) {
+
+ if (cq->assoc_qp)
+ cmdiocbq->isr_timestamp =
+ cq->assoc_qp->isr_timestamp;
+
+ if (cmdiocbq->iocb_cmpl == NULL) {
+ if (cmdiocbq->wqe_cmpl) {
+ if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ }
+
+ /* Pass the cmd_iocb and the wcqe to the upper layer */
+ (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
+ return;
+ }
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"0375 FCP cmdiocb not callback function "
"iotag: (%d)\n",
@@ -12460,12 +13045,12 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
{
struct lpfc_queue *childwq;
bool wqid_matched = false;
- uint16_t fcp_wqid;
+ uint16_t hba_wqid;
/* Check for fast-path FCP work queue release */
- fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+ hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
list_for_each_entry(childwq, &cq->child_list, list) {
- if (childwq->queue_id == fcp_wqid) {
+ if (childwq->queue_id == hba_wqid) {
lpfc_sli4_wq_release(childwq,
bf_get(lpfc_wcqe_r_wqe_index, wcqe));
wqid_matched = true;
@@ -12476,11 +13061,108 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
if (wqid_matched != true)
lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
"2580 Fast-path wqe consume event carries "
- "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
+ "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
+}
+
+/**
+ * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @rcqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine processes a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+ struct lpfc_rcqe *rcqe)
+{
+ bool workposted = false;
+ struct lpfc_queue *hrq;
+ struct lpfc_queue *drq;
+ struct rqb_dmabuf *dma_buf;
+ struct fc_frame_header *fc_hdr;
+ uint32_t status, rq_id;
+ unsigned long iflags;
+ uint32_t fctl, idx;
+
+ if ((phba->nvmet_support == 0) ||
+ (phba->sli4_hba.nvmet_cqset == NULL))
+ return workposted;
+
+ idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
+ hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
+ drq = phba->sli4_hba.nvmet_mrq_data[idx];
+
+ /* sanity check on queue memory */
+ if (unlikely(!hrq) || unlikely(!drq))
+ return workposted;
+
+ if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
+ rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
+ else
+ rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
+
+ if ((phba->nvmet_support == 0) ||
+ (rq_id != hrq->queue_id))
+ return workposted;
+
+ status = bf_get(lpfc_rcqe_status, rcqe);
+ switch (status) {
+ case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6126 Receive Frame Truncated!!\n");
+ hrq->RQ_buf_trunc++;
+ break;
+ case FC_STATUS_RQ_SUCCESS:
+ lpfc_sli4_rq_release(hrq, drq);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
+ if (!dma_buf) {
+ hrq->RQ_no_buf_found++;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ hrq->RQ_rcv_buf++;
+ fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
+
+ /* Just some basic sanity checks on FCP Command frame */
+ fctl = (fc_hdr->fh_f_ctl[0] << 16 |
+ fc_hdr->fh_f_ctl[1] << 8 |
+ fc_hdr->fh_f_ctl[2]);
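+ /*
+ * For reference, assuming the standard FC_FC_* bit definitions: a typical
+ * single-sequence FCP command carries f_ctl 0x29 0x00 0x00, i.e. fctl ==
+ * 0x290000 == FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, so it
+ * passes the check below.
+ */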
+ if (((fctl &
+ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
+ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
+ (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
+ goto drop;
+
+ if (fc_hdr->fh_type == FC_TYPE_FCP) {
+ dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
+ lpfc_nvmet_unsol_fcp_event(
+ phba, phba->sli4_hba.els_wq->pring, dma_buf,
+ cq->assoc_qp->isr_timestamp);
+ return false;
+ }
+drop:
+ lpfc_in_buf_free(phba, &dma_buf->dbuf);
+ break;
+ case FC_STATUS_INSUFF_BUF_NEED_BUF:
+ case FC_STATUS_INSUFF_BUF_FRM_DISC:
+ hrq->RQ_no_posted_buf++;
+ /* Post more buffers if possible */
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
+ }
+out:
+ return workposted;
}
/**
- * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
+ * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
* @cq: Pointer to the completion queue.
* @eqe: Pointer to fast-path completion queue entry.
*
@@ -12488,7 +13170,7 @@ lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
* event queue for FCP command response completion.
**/
static int
-lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
struct lpfc_cqe *cqe)
{
struct lpfc_wcqe_release wcqe;
@@ -12500,10 +13182,15 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
/* Check and process for different type of WCQE and dispatch */
switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
case CQE_CODE_COMPL_WQE:
+ case CQE_CODE_NVME_ERSP:
cq->CQ_wq++;
/* Process the WQ complete event */
phba->last_completion_time = jiffies;
- lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
+ if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
+ lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
+ (struct lpfc_wcqe_complete *)&wcqe);
+ if (cq->subtype == LPFC_NVME_LS)
+ lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
(struct lpfc_wcqe_complete *)&wcqe);
break;
case CQE_CODE_RELEASE_WQE:
@@ -12519,9 +13206,17 @@ lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
(struct sli4_wcqe_xri_aborted *)&wcqe);
break;
+ case CQE_CODE_RECEIVE_V1:
+ case CQE_CODE_RECEIVE:
+ phba->last_completion_time = jiffies;
+ if (cq->subtype == LPFC_NVMET) {
+ workposted = lpfc_sli4_nvmet_handle_rcqe(
+ phba, cq, (struct lpfc_rcqe *)&wcqe);
+ }
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0144 Not a valid WCQE code: x%x\n",
+ "0144 Not a valid CQE code: x%x\n",
bf_get(lpfc_wcqe_c_code, &wcqe));
break;
}
@@ -12544,10 +13239,10 @@ static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
uint32_t qidx)
{
- struct lpfc_queue *cq;
+ struct lpfc_queue *cq = NULL;
struct lpfc_cqe *cqe;
bool workposted = false;
- uint16_t cqid;
+ uint16_t cqid, id;
int ecount = 0;
if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
@@ -12562,28 +13257,42 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Get the reference to the corresponding CQ */
cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
- /* Check if this is a Slow path event */
- if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
- lpfc_sli4_sp_handle_eqe(phba, eqe,
- phba->sli4_hba.hba_eq[qidx]);
- return;
+ if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
+ id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
+ if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
+ /* Process NVMET unsol rcv */
+ cq = phba->sli4_hba.nvmet_cqset[cqid - id];
+ goto process_cq;
+ }
}
- if (unlikely(!phba->sli4_hba.fcp_cq)) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3146 Fast-path completion queues "
- "does not exist\n");
- return;
+ if (phba->sli4_hba.nvme_cq_map &&
+ (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
+ /* Process NVME / NVMET command completion */
+ cq = phba->sli4_hba.nvme_cq[qidx];
+ goto process_cq;
}
- cq = phba->sli4_hba.fcp_cq[qidx];
- if (unlikely(!cq)) {
- if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0367 Fast-path completion queue "
- "(%d) does not exist\n", qidx);
+
+ if (phba->sli4_hba.fcp_cq_map &&
+ (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
+ /* Process FCP command completion */
+ cq = phba->sli4_hba.fcp_cq[qidx];
+ goto process_cq;
+ }
+
+ if (phba->sli4_hba.nvmels_cq &&
+ (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
+ /* Process NVME unsol rcv */
+ cq = phba->sli4_hba.nvmels_cq;
+ }
+
+ /* Otherwise this is a Slow path event */
+ if (cq == NULL) {
+ lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]);
return;
}
+process_cq:
if (unlikely(cqid != cq->queue_id)) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0368 Miss-matched fast-path completion "
@@ -12592,9 +13301,12 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
return;
}
+ /* Save EQ associated with this CQ */
+ cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
+
/* Process all the entries to the CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
if (!(++ecount % cq->entry_repost))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -12685,7 +13397,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Process all the entries to the OAS CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
- workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+ workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
if (!(++ecount % cq->entry_repost))
lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
@@ -12733,15 +13445,15 @@ irqreturn_t
lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
- struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *eq;
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
/* Get the driver's phba structure from the dev_id */
- fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
- phba = fcp_eq_hdl->phba;
+ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+ phba = hba_eq_hdl->phba;
if (unlikely(!phba))
return IRQ_NONE;
@@ -12827,17 +13539,17 @@ irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
struct lpfc_hba *phba;
- struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+ struct lpfc_hba_eq_hdl *hba_eq_hdl;
struct lpfc_queue *fpeq;
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- int fcp_eqidx;
+ int hba_eqidx;
/* Get the driver's phba structure from the dev_id */
- fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
- phba = fcp_eq_hdl->phba;
- fcp_eqidx = fcp_eq_hdl->idx;
+ hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
+ phba = hba_eq_hdl->phba;
+ hba_eqidx = hba_eq_hdl->idx;
if (unlikely(!phba))
return IRQ_NONE;
@@ -12845,15 +13557,20 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
return IRQ_NONE;
/* Get to the EQ struct associated with this vector */
- fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
+ fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
if (unlikely(!fpeq))
return IRQ_NONE;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (phba->ktime_on)
+ fpeq->isr_timestamp = ktime_get_ns();
+#endif
+
if (lpfc_fcp_look_ahead) {
- if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
+ if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
lpfc_sli4_eq_clr_intr(fpeq);
else {
- atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ atomic_inc(&hba_eq_hdl->hba_eq_in_use);
return IRQ_NONE;
}
}
@@ -12868,7 +13585,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
lpfc_sli4_eq_flush(phba, fpeq);
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (lpfc_fcp_look_ahead)
- atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ atomic_inc(&hba_eq_hdl->hba_eq_in_use);
return IRQ_NONE;
}
@@ -12879,7 +13596,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
if (eqe == NULL)
break;
- lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
+ lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
if (!(++ecount % fpeq->entry_repost))
lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
fpeq->EQ_processed++;
@@ -12896,7 +13613,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
fpeq->EQ_no_entry++;
if (lpfc_fcp_look_ahead) {
- atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ atomic_inc(&hba_eq_hdl->hba_eq_in_use);
return IRQ_NONE;
}
@@ -12910,7 +13627,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
}
if (lpfc_fcp_look_ahead)
- atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
+ atomic_inc(&hba_eq_hdl->hba_eq_in_use);
+
return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
@@ -12937,7 +13655,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
struct lpfc_hba *phba;
irqreturn_t hba_irq_rc;
bool hba_handled = false;
- int fcp_eqidx;
+ int qidx;
/* Get the driver's phba structure from the dev_id */
phba = (struct lpfc_hba *)dev_id;
@@ -12948,16 +13666,16 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
/*
* Invoke fast-path host attention interrupt handling as appropriate.
*/
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
+ for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
- &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
+ &phba->sli4_hba.hba_eq_hdl[qidx]);
if (hba_irq_rc == IRQ_HANDLED)
hba_handled |= true;
}
if (phba->cfg_fof) {
hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
- &phba->sli4_hba.fcp_eq_hdl[0]);
+ &phba->sli4_hba.hba_eq_hdl[qidx]);
if (hba_irq_rc == IRQ_HANDLED)
hba_handled |= true;
}
@@ -12988,6 +13706,11 @@ lpfc_sli4_queue_free(struct lpfc_queue *queue)
dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
}
+ if (queue->rqbp) {
+ lpfc_free_rq_buffer(queue->phba, queue);
+ kfree(queue->rqbp);
+ }
+ kfree(queue->pring);
kfree(queue);
return;
}
@@ -13021,7 +13744,13 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
return NULL;
queue->page_count = (ALIGN(entry_size * entry_count,
hw_page_size))/hw_page_size;
+
+ /* If needed, Adjust page count to match the max the adapter supports */
+ if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
+ queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
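+
+ /*
+ * For illustration (hypothetical sizes): 1024 entries of 64 bytes with a
+ * 4K hw_page_size give ALIGN(65536, 4096) / 4096 = 16 pages, which is then
+ * clamped to pc_sli4_params.wqpcnt if the adapter reports a smaller max.
+ */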
+
INIT_LIST_HEAD(&queue->list);
+ INIT_LIST_HEAD(&queue->wq_list);
INIT_LIST_HEAD(&queue->page_list);
INIT_LIST_HEAD(&queue->child_list);
for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
@@ -13093,7 +13822,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
}
/**
- * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
+ * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on HBA EQs
* @phba: HBA structure that indicates port to create a queue on.
* @startq: The starting FCP EQ to modify
*
@@ -13109,7 +13838,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
-lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
+lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
@@ -13117,11 +13846,11 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
int cnt, rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
uint32_t result;
- int fcp_eqidx;
+ int qidx;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
- if (startq >= phba->cfg_fcp_io_channel)
+ if (startq >= phba->io_channel_irqs)
return 0;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -13135,16 +13864,15 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay = &mbox->u.mqe.un.eq_delay;
/* Calculate delay multiper from maximum interrupt per second */
- result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
- if (result > LPFC_DMULT_CONST)
+ result = phba->cfg_fcp_imax / phba->io_channel_irqs;
+ if (result > LPFC_DMULT_CONST || result == 0)
dmult = 0;
else
dmult = LPFC_DMULT_CONST/result - 1;
cnt = 0;
- for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
- fcp_eqidx++) {
- eq = phba->sli4_hba.hba_eq[fcp_eqidx];
+ for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
+ eq = phba->sli4_hba.hba_eq[qidx];
if (!eq)
continue;
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
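
To make the delay-multiplier arithmetic above concrete, here is a small standalone sketch (not part of the patch). It assumes LPFC_DMULT_CONST is 651042, the value defined in lpfc_hw4.h, and uses 150000 interrupts per second spread over four IRQ channels; both numbers are illustrative only.

/* Standalone illustration of the dmult computation shown above.
 * Assumption: LPFC_DMULT_CONST == 651042 (from lpfc_hw4.h). */
#include <stdint.h>
#include <stdio.h>

#define LPFC_DMULT_CONST 651042

int main(void)
{
        uint32_t cfg_fcp_imax = 150000;  /* max interrupts per second */
        uint32_t io_channel_irqs = 4;    /* EQs sharing that budget */
        uint32_t result = cfg_fcp_imax / io_channel_irqs;  /* 37500 */
        uint32_t dmult;

        /* An out-of-range or zero result disables coalescing. */
        if (result > LPFC_DMULT_CONST || result == 0)
                dmult = 0;
        else
                dmult = LPFC_DMULT_CONST / result - 1;      /* 16 */

        printf("result=%u dmult=%u\n", result, dmult);
        return 0;
}

Every EQ from startq onward is programmed with this same dmult value, so lowering cfg_fcp_imax or adding IRQ channels raises the multiplier and lengthens the interrupt coalescing delay.
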
@@ -13359,8 +14087,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
switch (cq->entry_count) {
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0361 Unsupported CQ count. (%d)\n",
- cq->entry_count);
+ "0361 Unsupported CQ count: "
+ "entry cnt %d sz %d pg cnt %d repost %d\n",
+ cq->entry_count, cq->entry_size,
+ cq->page_count, cq->entry_repost);
if (cq->entry_count < 256) {
status = -EINVAL;
goto out;
@@ -13420,6 +14150,234 @@ out:
}
/**
+ * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cqp: The queue structure array to use to create the completion queues.
+ * @eqp: The event queue array to bind these completion queues to.
+ * @type: Queue type (completion queue).
+ * @subtype: Functional subtype of the queues (for example, LPFC_NVMET).
+ *
+ * This function creates a set of completion queues to support MRQ, as
+ * detailed in @cqp, on a port described by @phba, by sending a
+ * CREATE_CQ_SET mailbox command to the HBA.
+ *
+ * The @phba struct is used to send the mailbox command to the HBA. The @cqp
+ * array is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for these queues. The
+ * @eqp array indicates which event queue each completion queue is bound to.
+ * This function sends the CREATE_CQ_SET mailbox command to the HBA to set up
+ * the completion queue set, and waits for the mailbox command to finish
+ * before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
+ struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
+{
+ struct lpfc_queue *cq;
+ struct lpfc_queue *eq;
+ struct lpfc_mbx_cq_create_set *cq_set;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, alloclen, status = 0;
+ int cnt, idx, numcq, page_idx = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ /* sanity check on queue memory */
+ numcq = phba->cfg_nvmet_mrq;
+ if (!cqp || !eqp || !numcq)
+ return -ENODEV;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ length = sizeof(struct lpfc_mbx_cq_create_set);
+ length += ((numcq * cqp[0]->page_count) *
+ sizeof(struct dma_address));
+ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < length) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3098 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory size "
+ "(%d)\n", alloclen, length);
+ status = -ENOMEM;
+ goto out;
+ }
+ cq_set = mbox->sge_array->addr[0];
+ shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
+ bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
+
+ for (idx = 0; idx < numcq; idx++) {
+ cq = cqp[idx];
+ eq = eqp[idx];
+ if (!cq || !eq) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ switch (idx) {
+ case 0:
+ bf_set(lpfc_mbx_cq_create_set_page_size,
+ &cq_set->u.request,
+ (hw_page_size / SLI4_PAGE_SIZE));
+ bf_set(lpfc_mbx_cq_create_set_num_pages,
+ &cq_set->u.request, cq->page_count);
+ bf_set(lpfc_mbx_cq_create_set_evt,
+ &cq_set->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_set_valid,
+ &cq_set->u.request, 1);
+ bf_set(lpfc_mbx_cq_create_set_cqe_size,
+ &cq_set->u.request, 0);
+ bf_set(lpfc_mbx_cq_create_set_num_cq,
+ &cq_set->u.request, numcq);
+ switch (cq->entry_count) {
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3118 Bad CQ count. (%d)\n",
+ cq->entry_count);
+ if (cq->entry_count < 256) {
+ status = -EINVAL;
+ goto out;
+ }
+ /* otherwise default to smallest (drop thru) */
+ case 256:
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request, LPFC_CQ_CNT_256);
+ break;
+ case 512:
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request, LPFC_CQ_CNT_512);
+ break;
+ case 1024:
+ bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
+ &cq_set->u.request, LPFC_CQ_CNT_1024);
+ break;
+ }
+ bf_set(lpfc_mbx_cq_create_set_eq_id0,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 1:
+ bf_set(lpfc_mbx_cq_create_set_eq_id1,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 2:
+ bf_set(lpfc_mbx_cq_create_set_eq_id2,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 3:
+ bf_set(lpfc_mbx_cq_create_set_eq_id3,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 4:
+ bf_set(lpfc_mbx_cq_create_set_eq_id4,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 5:
+ bf_set(lpfc_mbx_cq_create_set_eq_id5,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 6:
+ bf_set(lpfc_mbx_cq_create_set_eq_id6,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 7:
+ bf_set(lpfc_mbx_cq_create_set_eq_id7,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 8:
+ bf_set(lpfc_mbx_cq_create_set_eq_id8,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 9:
+ bf_set(lpfc_mbx_cq_create_set_eq_id9,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 10:
+ bf_set(lpfc_mbx_cq_create_set_eq_id10,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 11:
+ bf_set(lpfc_mbx_cq_create_set_eq_id11,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 12:
+ bf_set(lpfc_mbx_cq_create_set_eq_id12,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 13:
+ bf_set(lpfc_mbx_cq_create_set_eq_id13,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 14:
+ bf_set(lpfc_mbx_cq_create_set_eq_id14,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ case 15:
+ bf_set(lpfc_mbx_cq_create_set_eq_id15,
+ &cq_set->u.request, eq->queue_id);
+ break;
+ }
+
+ /* link the cq onto the parent eq child list */
+ list_add_tail(&cq->list, &eq->child_list);
+ /* Set up completion queue's type and subtype */
+ cq->type = type;
+ cq->subtype = subtype;
+ cq->assoc_qid = eq->queue_id;
+ cq->host_index = 0;
+ cq->hba_index = 0;
+
+ rc = 0;
+ list_for_each_entry(dmabuf, &cq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ cnt = page_idx + dmabuf->buffer_tag;
+ cq_set->u.request.page[cnt].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ cq_set->u.request.page[cnt].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ rc++;
+ }
+ page_idx += rc;
+ }
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3119 CQ_CREATE_SET mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
+ if (rc == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+
+ for (idx = 0; idx < numcq; idx++) {
+ cq = cqp[idx];
+ cq->queue_id = rc + idx;
+ }
+
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return status;
+}
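
As a hedged usage sketch (not taken from the patch), the NVMET MRQ completion-queue set might be created like this during queue setup, binding CQ[idx] to EQ[idx]. LPFC_WCQ is assumed to be the completion-queue type constant from lpfc_sli4.h; the nvmet_cqset and hba_eq arrays are the sli4_hba fields added in the lpfc_sli4.h hunks further down.

/* Hedged sketch: create the NVMET CQ set, one CQ per MRQ. */
static int example_create_nvmet_cq_set(struct lpfc_hba *phba)
{
        int rc;

        /* On success each CQ gets queue_id = base_id + idx, where
         * base_id comes from the CREATE_CQ_SET mailbox response. */
        rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
                                phba->sli4_hba.hba_eq,
                                LPFC_WCQ, LPFC_NVMET);
        if (rc)
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "example: CQ set create failed rc x%x\n", rc);
        return rc;
}
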
+
+/**
* lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
* @phba: HBA structure that indicates port to create a queue on.
* @mq: The queue structure to use to create the mailbox queue.
@@ -13722,7 +14680,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_WQ_WQE_SIZE_128);
bf_set(lpfc_mbx_wq_create_page_size,
&wq_create->u.request_1,
- (PAGE_SIZE/SLI4_PAGE_SIZE));
+ LPFC_WQ_PAGE_SIZE_4096);
page = wq_create->u.request_1.page;
break;
}
@@ -13748,8 +14706,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_WQ_WQE_SIZE_128);
break;
}
- bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
- (PAGE_SIZE/SLI4_PAGE_SIZE));
+ bf_set(lpfc_mbx_wq_create_page_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_PAGE_SIZE_4096);
page = wq_create->u.request_1.page;
break;
default:
@@ -13825,6 +14784,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->db_format = LPFC_DB_LIST_FORMAT;
wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
}
+ wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
+ if (wq->pring == NULL) {
+ status = -ENOMEM;
+ goto out;
+ }
wq->type = LPFC_WQ;
wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
@@ -13935,7 +14899,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
LPFC_RQE_SIZE_8);
bf_set(lpfc_rq_context_page_size,
&rq_create->u.request.context,
- (PAGE_SIZE/SLI4_PAGE_SIZE));
+ LPFC_RQ_PAGE_SIZE_4096);
} else {
switch (hrq->entry_count) {
default:
@@ -14144,6 +15108,197 @@ out:
}
/**
+ * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrqp: The queue structure array to use to create the header receive queues.
+ * @drqp: The queue structure array to use to create the data receive queues.
+ * @cqp: The completion queue array to bind these receive queues to.
+ * @subtype: Functional subtype of the queues (for example, LPFC_NVMET).
+ *
+ * This function creates receive buffer queue pairs, as detailed in @hrqp and
+ * @drqp, on a port described by @phba, by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
+ * and @drqp arrays are used to get the entry counts needed to determine the
+ * number of pages to use for each queue. The @cqp array indicates which
+ * completion queue the buffers posted to each receive queue pair are bound
+ * to. This function sends the RQ_CREATE mailbox command to the HBA to set up
+ * the receive queue pairs, and waits for the mailbox command to finish
+ * before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+int
+lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
+ struct lpfc_queue **drqp, struct lpfc_queue **cqp,
+ uint32_t subtype)
+{
+ struct lpfc_queue *hrq, *drq, *cq;
+ struct lpfc_mbx_rq_create_v2 *rq_create;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ int rc, length, alloclen, status = 0;
+ int cnt, idx, numrq, page_idx = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+
+ numrq = phba->cfg_nvmet_mrq;
+ /* sanity check on array memory */
+ if (!hrqp || !drqp || !cqp || !numrq)
+ return -ENODEV;
+ if (!phba->sli4_hba.pc_sli4_params.supported)
+ hw_page_size = SLI4_PAGE_SIZE;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+
+ length = sizeof(struct lpfc_mbx_rq_create_v2);
+ length += ((2 * numrq * hrqp[0]->page_count) *
+ sizeof(struct dma_address));
+
+ alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < length) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3099 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory size "
+ "(%d)\n", alloclen, length);
+ status = -ENOMEM;
+ goto out;
+ }
+
+
+
+ rq_create = mbox->sge_array->addr[0];
+ shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
+
+ bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
+ cnt = 0;
+
+ for (idx = 0; idx < numrq; idx++) {
+ hrq = hrqp[idx];
+ drq = drqp[idx];
+ cq = cqp[idx];
+
+ /* sanity check on queue memory before any dereference */
+ if (!hrq || !drq || !cq) {
+ status = -ENODEV;
+ goto out;
+ }
+
+ if (hrq->entry_count != drq->entry_count) {
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (idx == 0) {
+ bf_set(lpfc_mbx_rq_create_num_pages,
+ &rq_create->u.request,
+ hrq->page_count);
+ bf_set(lpfc_mbx_rq_create_rq_cnt,
+ &rq_create->u.request, (numrq * 2));
+ bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
+ 1);
+ bf_set(lpfc_rq_context_base_cq,
+ &rq_create->u.request.context,
+ cq->queue_id);
+ bf_set(lpfc_rq_context_data_size,
+ &rq_create->u.request.context,
+ LPFC_DATA_BUF_SIZE);
+ bf_set(lpfc_rq_context_hdr_size,
+ &rq_create->u.request.context,
+ LPFC_HDR_BUF_SIZE);
+ bf_set(lpfc_rq_context_rqe_count_1,
+ &rq_create->u.request.context,
+ hrq->entry_count);
+ bf_set(lpfc_rq_context_rqe_size,
+ &rq_create->u.request.context,
+ LPFC_RQE_SIZE_8);
+ bf_set(lpfc_rq_context_page_size,
+ &rq_create->u.request.context,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ }
+ rc = 0;
+ list_for_each_entry(dmabuf, &hrq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ cnt = page_idx + dmabuf->buffer_tag;
+ rq_create->u.request.page[cnt].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ rq_create->u.request.page[cnt].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ rc++;
+ }
+ page_idx += rc;
+
+ rc = 0;
+ list_for_each_entry(dmabuf, &drq->page_list, list) {
+ memset(dmabuf->virt, 0, hw_page_size);
+ cnt = page_idx + dmabuf->buffer_tag;
+ rq_create->u.request.page[cnt].addr_lo =
+ putPaddrLow(dmabuf->phys);
+ rq_create->u.request.page[cnt].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ rc++;
+ }
+ page_idx += rc;
+
+ hrq->db_format = LPFC_DB_RING_FORMAT;
+ hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+ hrq->type = LPFC_HRQ;
+ hrq->assoc_qid = cq->queue_id;
+ hrq->subtype = subtype;
+ hrq->host_index = 0;
+ hrq->hba_index = 0;
+
+ drq->db_format = LPFC_DB_RING_FORMAT;
+ drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+ drq->type = LPFC_DRQ;
+ drq->assoc_qid = cq->queue_id;
+ drq->subtype = subtype;
+ drq->host_index = 0;
+ drq->hba_index = 0;
+
+ list_add_tail(&hrq->list, &cq->child_list);
+ list_add_tail(&drq->list, &cq->child_list);
+ }
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ /* The IOCTL status is embedded in the mailbox subheader. */
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3120 RQ_CREATE mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ goto out;
+ }
+ rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+ if (rc == 0xFFFF) {
+ status = -ENXIO;
+ goto out;
+ }
+
+ /* Initialize all RQs with associated queue id */
+ for (idx = 0; idx < numrq; idx++) {
+ hrq = hrqp[idx];
+ hrq->queue_id = rc + (2 * idx);
+ drq = drqp[idx];
+ drq->queue_id = rc + (2 * idx) + 1;
+ }
+
+out:
+ lpfc_sli4_mbox_cmd_free(phba, mbox);
+ return status;
+}
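
The queue-id assignment at the end of the function interleaves header and data RQ ids; a trivial standalone illustration (the base id and MRQ count are made-up example values):

/* Illustration only: header/data RQ id interleaving from lpfc_mrq_create.
 * base (700) and numrq (4) are hypothetical example values. */
#include <stdio.h>

int main(void)
{
        int base = 700;   /* id from the RQ_CREATE mailbox response */
        int numrq = 4;    /* phba->cfg_nvmet_mrq */
        int idx;

        for (idx = 0; idx < numrq; idx++)
                printf("MRQ %d: hrq id %d, drq id %d\n",
                       idx, base + 2 * idx, base + 2 * idx + 1);
        return 0;
}
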
+
+/**
* lpfc_eq_destroy - Destroy an event Queue on the HBA
* @eq: The queue structure associated with the queue to destroy.
*
@@ -14609,7 +15764,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
+ * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
* @phba: pointer to lpfc hba data structure.
* @post_sgl_list: pointer to els sgl entry list.
* @count: number of els sgl entries on the list.
@@ -14620,7 +15775,7 @@ lpfc_sli4_next_xritag(struct lpfc_hba *phba)
* stopped.
**/
static int
-lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
+lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
struct list_head *post_sgl_list,
int post_cnt)
{
@@ -14636,14 +15791,15 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
- reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
+ reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > SLI4_PAGE_SIZE) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2559 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
return -ENOMEM;
}
+
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
@@ -14687,8 +15843,9 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
/* Complete initialization and perform endian conversion. */
bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
- bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
+ bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
sgl->word0 = cpu_to_le32(sgl->word0);
+
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
@@ -14823,6 +15980,9 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
return rc;
}
+static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
+static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;
+
/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
@@ -14837,8 +15997,6 @@ static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
/* make rctl_names static to save stack space */
- static char *rctl_names[] = FC_RCTL_NAMES_INIT;
- char *type_names[] = FC_TYPE_NAMES_INIT;
struct fc_vft_header *fc_vft_hdr;
uint32_t *header = (uint32_t *) fc_hdr;
@@ -14883,6 +16041,7 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
case FC_TYPE_ELS:
case FC_TYPE_FCP:
case FC_TYPE_CT:
+ case FC_TYPE_NVME:
break;
case FC_TYPE_IP:
case FC_TYPE_ILS:
@@ -14893,8 +16052,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
- rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
- type_names[fc_hdr->fh_type], fc_hdr->fh_type,
+ lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
+ lpfc_type_names[fc_hdr->fh_type], fc_hdr->fh_type,
be32_to_cpu(header[0]), be32_to_cpu(header[1]),
be32_to_cpu(header[2]), be32_to_cpu(header[3]),
be32_to_cpu(header[4]), be32_to_cpu(header[5]),
@@ -14903,8 +16062,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2539 Dropped frame rctl:%s type:%s\n",
- rctl_names[fc_hdr->fh_r_ctl],
- type_names[fc_hdr->fh_type]);
+ lpfc_rctl_names[fc_hdr->fh_r_ctl],
+ lpfc_type_names[fc_hdr->fh_type]);
return 1;
}
@@ -14940,14 +16099,11 @@ lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
**/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
- uint16_t fcfi)
+ uint16_t fcfi, uint32_t did)
{
struct lpfc_vport **vports;
struct lpfc_vport *vport = NULL;
int i;
- uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
- fc_hdr->fh_d_id[1] << 8 |
- fc_hdr->fh_d_id[2]);
if (did == Fabric_DID)
return phba->pport;
@@ -14956,7 +16112,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
return phba->pport;
vports = lpfc_create_vport_work_array(phba);
- if (vports != NULL)
+ if (vports != NULL) {
for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
if (phba->fcf.fcfi == fcfi &&
vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
@@ -14965,6 +16121,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
break;
}
}
+ }
lpfc_destroy_vport_work_array(phba, vports);
return vport;
}
@@ -15394,7 +16551,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
* a BA_RJT.
*/
if ((fctl & FC_FC_EX_CTX) &&
- (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
+ (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -15571,6 +16728,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
/* Initialize the first IOCB. */
first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+ first_iocbq->vport = vport;
/* Check FC Header to see what TYPE of frame we are rcv'ing */
if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
@@ -15683,7 +16841,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
return;
}
if (!lpfc_complete_unsol_iocb(phba,
- &phba->sli.ring[LPFC_ELS_RING],
+ phba->sli4_hba.els_wq->pring,
iocbq, fc_hdr->fh_r_ctl,
fc_hdr->fh_type))
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -15708,8 +16866,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
* This function is called with no lock held. This function processes all
* the received buffers and gives it to upper layers when a received buffer
* indicates that it is the final frame in the sequence. The interrupt
- * service routine processes received buffers at interrupt contexts and adds
- * received dma buffers to the rb_pend_list queue and signals the worker thread.
+ * service routine processes received buffers in interrupt context.
* Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
* appropriate receive function when the final frame in a sequence is received.
**/
@@ -15725,11 +16882,13 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
/* Process each received buffer */
fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+
/* check to see if this a valid type of frame */
if (lpfc_fc_frame_check(phba, fc_hdr)) {
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
+
if ((bf_get(lpfc_cqe_code,
&dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
@@ -15738,16 +16897,16 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
fcfi = bf_get(lpfc_rcqe_fcf_id,
&dmabuf->cq_event.cqe.rcqe_cmpl);
- vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+ /* d_id this frame is directed to */
+ did = sli4_did_from_fc_hdr(fc_hdr);
+
+ vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
if (!vport) {
/* throw out the frame */
lpfc_in_buf_free(phba, &dmabuf->dbuf);
return;
}
- /* d_id this frame is directed to */
- did = sli4_did_from_fc_hdr(fc_hdr);
-
/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
(did != Fabric_DID)) {
@@ -17225,7 +18384,7 @@ uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
LIST_HEAD(completions);
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ struct lpfc_sli_ring *pring;
struct lpfc_iocbq *piocbq = NULL;
unsigned long iflags = 0;
char *fail_msg = NULL;
@@ -17234,6 +18393,8 @@ lpfc_drain_txq(struct lpfc_hba *phba)
union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
uint32_t txq_cnt = 0;
+ pring = lpfc_phba_elsring(phba);
+
spin_lock_irqsave(&pring->ring_lock, iflags);
list_for_each_entry(piocbq, &pring->txq, list) {
txq_cnt++;
@@ -17255,7 +18416,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
txq_cnt);
break;
}
- sglq = __lpfc_sli_get_sglq(phba, piocbq);
+ sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
if (!sglq) {
__lpfc_sli_ringtx_put(phba, pring, piocbq);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -17295,3 +18456,217 @@ lpfc_drain_txq(struct lpfc_hba *phba)
return txq_cnt;
}
+
+/**
+ * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @pwqeq: Pointer to the iocbq carrying the command WQE.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the WQE
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the WQE contains a BPL then the list of BDE's is
+ * converted to sli4_sge's. If the WQE contains a single
+ * BDE then it is converted to a single sli_sge.
+ * The WQE is still in cpu endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+ */
+static uint16_t
+lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
+ struct lpfc_sglq *sglq)
+{
+ uint16_t xritag = NO_XRI;
+ struct ulp_bde64 *bpl = NULL;
+ struct ulp_bde64 bde;
+ struct sli4_sge *sgl = NULL;
+ struct lpfc_dmabuf *dmabuf;
+ union lpfc_wqe *wqe;
+ int numBdes = 0;
+ int i = 0;
+ uint32_t offset = 0; /* accumulated offset in the sg request list */
+ int inbound = 0; /* number of sg reply entries inbound from firmware */
+ uint32_t cmd;
+
+ if (!pwqeq || !sglq)
+ return xritag;
+
+ sgl = (struct sli4_sge *)sglq->sgl;
+ wqe = &pwqeq->wqe;
+ pwqeq->iocb.ulpIoTag = pwqeq->iotag;
+
+ cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
+ if (cmd == CMD_XMIT_BLS_RSP64_WQE)
+ return sglq->sli4_xritag;
+ numBdes = pwqeq->rsvd2;
+ if (numBdes) {
+ /* The addrHigh and addrLow fields within the WQE
+ * have not been byteswapped yet so there is no
+ * need to swap them back.
+ */
+ if (pwqeq->context3)
+ dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
+ else
+ return xritag;
+
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
+ if (!bpl)
+ return xritag;
+
+ for (i = 0; i < numBdes; i++) {
+ /* Should already be byte swapped. */
+ sgl->addr_hi = bpl->addrHigh;
+ sgl->addr_lo = bpl->addrLow;
+
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ if ((i+1) == numBdes)
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ else
+ bf_set(lpfc_sli4_sge_last, sgl, 0);
+ /* swap the size field back to the cpu so we
+ * can assign it to the sgl.
+ */
+ bde.tus.w = le32_to_cpu(bpl->tus.w);
+ sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
+ /* The offsets in the sgl need to be accumulated
+ * separately for the request and reply lists.
+ * The request is always first, the reply follows.
+ */
+ switch (cmd) {
+ case CMD_GEN_REQUEST64_WQE:
+ /* add up the reply sg entries */
+ if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
+ inbound++;
+ /* first inbound? reset the offset */
+ if (inbound == 1)
+ offset = 0;
+ bf_set(lpfc_sli4_sge_offset, sgl, offset);
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ offset += bde.tus.f.bdeSize;
+ break;
+ case CMD_FCP_TRSP64_WQE:
+ bf_set(lpfc_sli4_sge_offset, sgl, 0);
+ bf_set(lpfc_sli4_sge_type, sgl,
+ LPFC_SGE_TYPE_DATA);
+ break;
+ case CMD_FCP_TSEND64_WQE:
+ case CMD_FCP_TRECEIVE64_WQE:
+ bf_set(lpfc_sli4_sge_type, sgl,
+ bpl->tus.f.bdeFlags);
+ if (i < 3)
+ offset = 0;
+ else
+ offset += bde.tus.f.bdeSize;
+ bf_set(lpfc_sli4_sge_offset, sgl, offset);
+ break;
+ }
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ bpl++;
+ sgl++;
+ }
+ } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
+ /* The addrHigh and addrLow fields of the BDE have not
+ * been byteswapped yet so they need to be swapped
+ * before putting them in the sgl.
+ */
+ sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
+ sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
+ sgl->word2 = le32_to_cpu(sgl->word2);
+ bf_set(lpfc_sli4_sge_last, sgl, 1);
+ sgl->word2 = cpu_to_le32(sgl->word2);
+ sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
+ }
+ return sglq->sli4_xritag;
+}
+
+/**
+ * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
+ * @phba: Pointer to HBA context object.
+ * @ring_number: Base sli ring number
+ * @pwqe: Pointer to command WQE.
+ **/
+int
+lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
+ struct lpfc_iocbq *pwqe)
+{
+ union lpfc_wqe *wqe = &pwqe->wqe;
+ struct lpfc_nvmet_rcv_ctx *ctxp;
+ struct lpfc_queue *wq;
+ struct lpfc_sglq *sglq;
+ struct lpfc_sli_ring *pring;
+ unsigned long iflags;
+
+ /* NVME_LS and NVME_LS ABTS requests. */
+ if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
+ pring = phba->sli4_hba.nvmels_wq->pring;
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
+ if (!sglq) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return WQE_BUSY;
+ }
+ pwqe->sli4_lxritag = sglq->sli4_lxritag;
+ pwqe->sli4_xritag = sglq->sli4_xritag;
+ if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return WQE_ERROR;
+ }
+ bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
+ pwqe->sli4_xritag);
+ if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return WQE_ERROR;
+ }
+ lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return 0;
+ }
+
+ /* NVME_FCREQ and NVME_ABTS requests */
+ if (pwqe->iocb_flag & LPFC_IO_NVME) {
+ /* Get the IO distribution (hba_wqidx) for WQ assignment. */
+ pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
+ bf_set(wqe_cqid, &wqe->generic.wqe_com,
+ phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+ if (lpfc_sli4_wq_put(wq, wqe)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return WQE_ERROR;
+ }
+ lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return 0;
+ }
+
+ /* NVMET requests */
+ if (pwqe->iocb_flag & LPFC_IO_NVMET) {
+ /* Get the IO distribution (hba_wqidx) for WQ assignment. */
+ pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
+
+ spin_lock_irqsave(&pring->ring_lock, iflags);
+ ctxp = pwqe->context2;
+ sglq = ctxp->rqb_buffer->sglq;
+ if (pwqe->sli4_xritag == NO_XRI) {
+ pwqe->sli4_lxritag = sglq->sli4_lxritag;
+ pwqe->sli4_xritag = sglq->sli4_xritag;
+ }
+ bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
+ pwqe->sli4_xritag);
+ wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
+ bf_set(wqe_cqid, &wqe->generic.wqe_com,
+ phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
+ if (lpfc_sli4_wq_put(wq, wqe)) {
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return WQE_ERROR;
+ }
+ lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
+ spin_unlock_irqrestore(&pring->ring_lock, iflags);
+ return 0;
+ }
+ return WQE_ERROR;
+}
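
A hedged sketch of how a fast-path NVME command might be pushed through the new entry point (not code from the patch): the caller builds the WQE in pwqe->wqe, picks an I/O channel in hba_wqidx, and tags the request with LPFC_IO_NVME. LPFC_FCP_RING is used here only as an illustrative ring number; the routine selects the actual WQ/CQ pair from hba_wqidx.

/* Hedged sketch of a fast-path NVME WQE submission. */
static int example_issue_nvme_wqe(struct lpfc_hba *phba,
                                  struct lpfc_iocbq *pwqe, uint16_t idx)
{
        int rc;

        pwqe->iocb_flag |= LPFC_IO_NVME;
        pwqe->hba_wqidx = idx;          /* selects nvme_wq[idx]/nvme_cq[idx] */

        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, pwqe);
        if (rc == WQE_BUSY)
                return -EBUSY;          /* WQ full or no sglq; caller may retry */
        return rc ? -EIO : 0;
}
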
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 74227a28bd56..9085306ddd78 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -54,9 +56,16 @@ struct lpfc_iocbq {
uint16_t iotag; /* pre-assigned IO tag */
uint16_t sli4_lxritag; /* logical pre-assigned XRI. */
uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */
+ uint16_t hba_wqidx; /* index to HBA work queue */
struct lpfc_cq_event cq_event;
+ struct lpfc_wcqe_complete wcqe_cmpl; /* WQE cmpl */
+ uint64_t isr_timestamp;
- IOCB_t iocb; /* IOCB cmd */
+ /* Be careful here */
+ union lpfc_wqe wqe; /* WQE cmd */
+ IOCB_t iocb; /* For IOCB cmd or if we want 128 byte WQE */
+
+ uint8_t rsvd2;
uint8_t priority; /* OAS priority */
uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint32_t iocb_flag;
@@ -82,9 +91,13 @@ struct lpfc_iocbq {
#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
#define LPFC_IO_LOOPBACK 0x40000 /* Loopback IO */
+#define LPFC_PRLI_NVME_REQ 0x80000 /* This is an NVME PRLI. */
+#define LPFC_PRLI_FCP_REQ 0x100000 /* This is an FCP PRLI. */
+#define LPFC_IO_NVME 0x200000 /* NVME FCP command */
+#define LPFC_IO_NVME_LS 0x400000 /* NVME LS command */
+#define LPFC_IO_NVMET 0x800000 /* NVMET command */
uint32_t drvrTimeout; /* driver timeout in seconds */
- uint32_t fcp_wqidx; /* index to FCP work queue */
struct lpfc_vport *vport;/* virtual port pointer */
void *context1; /* caller context information */
void *context2; /* caller context information */
@@ -97,12 +110,14 @@ struct lpfc_iocbq {
struct lpfc_node_rrq *rrq;
} context_un;
- void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ void (*fabric_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
- void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ void (*wait_iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
- void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+ void (*wqe_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_wcqe_complete *);
};
#define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */
@@ -112,6 +127,14 @@ struct lpfc_iocbq {
#define IOCB_ERROR 2
#define IOCB_TIMEDOUT 3
+#define SLI_WQE_RET_WQE 1 /* Return WQE if cmd ring full */
+
+#define WQE_SUCCESS 0
+#define WQE_BUSY 1
+#define WQE_ERROR 2
+#define WQE_TIMEDOUT 3
+#define WQE_ABORTED 4
+
#define LPFC_MBX_WAKE 1
#define LPFC_MBX_IMED_UNREG 2
@@ -297,12 +320,9 @@ struct lpfc_sli {
#define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
+#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
- struct lpfc_sli_ring *ring;
- int fcp_ring; /* ring used for FCP initiator commands */
- int next_ring;
-
- int extra_ring; /* extra ring used for other protocols */
+ struct lpfc_sli_ring *sli3_ring;
struct lpfc_sli_stat slistat; /* SLI statistical info */
struct list_head mboxq;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 0b88b5703e0f..91153c9f6d18 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -35,9 +37,10 @@
#define LPFC_NEMBED_MBOX_SGL_CNT 254
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
-#define LPFC_FCP_IO_CHAN_DEF 4
-#define LPFC_FCP_IO_CHAN_MIN 1
-#define LPFC_FCP_IO_CHAN_MAX 16
+#define LPFC_HBA_IO_CHAN_MIN 0
+#define LPFC_HBA_IO_CHAN_MAX 32
+#define LPFC_FCP_IO_CHAN_DEF 4
+#define LPFC_NVME_IO_CHAN_DEF 0
/* Number of channels used for Flash Optimized Fabric (FOF) operations */
@@ -107,6 +110,9 @@ enum lpfc_sli4_queue_subtype {
LPFC_MBOX,
LPFC_FCP,
LPFC_ELS,
+ LPFC_NVME,
+ LPFC_NVMET,
+ LPFC_NVME_LS,
LPFC_USOL
};
@@ -125,25 +131,41 @@ union sli4_qe {
struct lpfc_rqe *rqe;
};
+/* RQ buffer list */
+struct lpfc_rqb {
+ uint16_t entry_count; /* Current number of RQ slots */
+ uint16_t buffer_count; /* Current number of buffers posted */
+ struct list_head rqb_buffer_list; /* buffers assigned to this RQ */
+ /* Callback for RQ buffer allocation */
+ struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *);
+ /* Callback for RQ buffer free */
+ void (*rqb_free_buffer)(struct lpfc_hba *,
+ struct rqb_dmabuf *);
+};
+
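
A hedged sketch of how the rqbp hook-up above might look when a header RQ is brought up (the alloc/free callback names are hypothetical placeholders, not functions defined in this patch):

/* Hedged sketch: attach an RQ buffer list to a freshly created header RQ.
 * example_rqb_alloc/example_rqb_free are hypothetical callbacks. */
static void example_init_rqb(struct lpfc_queue *hrq)
{
        struct lpfc_rqb *rqbp;

        rqbp = kzalloc(sizeof(*rqbp), GFP_KERNEL);
        if (!rqbp)
                return;

        INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
        rqbp->rqb_alloc_buffer = example_rqb_alloc;   /* hypothetical */
        rqbp->rqb_free_buffer  = example_rqb_free;    /* hypothetical */
        rqbp->entry_count = hrq->entry_count;
        rqbp->buffer_count = 0;

        hrq->rqbp = rqbp;   /* lpfc_sli4_queue_free() will release it */
}
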
struct lpfc_queue {
struct list_head list;
+ struct list_head wq_list;
enum lpfc_sli4_queue_type type;
enum lpfc_sli4_queue_subtype subtype;
struct lpfc_hba *phba;
struct list_head child_list;
+ struct list_head page_list;
+ struct list_head sgl_list;
uint32_t entry_count; /* Number of entries to support on the queue */
uint32_t entry_size; /* Size of each queue entry. */
uint32_t entry_repost; /* Count of entries before doorbell is rung */
#define LPFC_QUEUE_MIN_REPOST 8
uint32_t queue_id; /* Queue ID assigned by the hardware */
uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
- struct list_head page_list;
uint32_t page_count; /* Number of pages allocated for this queue */
uint32_t host_index; /* The host's index for putting or getting */
uint32_t hba_index; /* The last known hba index for get or put */
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+ struct lpfc_rqb *rqbp; /* ptr to RQ buffers */
+ uint16_t sgl_list_cnt;
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -176,6 +198,8 @@ struct lpfc_queue {
#define RQ_buf_trunc q_cnt_3
#define RQ_rcv_buf q_cnt_4
+ uint64_t isr_timestamp;
+ struct lpfc_queue *assoc_qp;
union sli4_qe qe[1]; /* array to index entries (must be last) */
};
@@ -338,6 +362,7 @@ struct lpfc_bmbx {
#define LPFC_CQE_DEF_COUNT 1024
#define LPFC_WQE_DEF_COUNT 256
#define LPFC_WQE128_DEF_COUNT 128
+#define LPFC_WQE128_MAX_COUNT 256
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -379,10 +404,14 @@ struct lpfc_max_cfg_param {
struct lpfc_hba;
/* SLI4 HBA multi-fcp queue handler struct */
-struct lpfc_fcp_eq_hdl {
+struct lpfc_hba_eq_hdl {
uint32_t idx;
struct lpfc_hba *phba;
- atomic_t fcp_eq_in_use;
+ atomic_t hba_eq_in_use;
+ struct cpumask *cpumask;
+ /* CPU affinitized to, or 0xffffffff if multiple */
+ uint32_t cpu;
+#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
};
/* Port Capabilities for SLI4 Parameters */
@@ -427,6 +456,7 @@ struct lpfc_pc_sli4_params {
uint8_t wqsize;
#define LPFC_WQ_SZ64_SUPPORT 1
#define LPFC_WQ_SZ128_SUPPORT 2
+ uint8_t wqpcnt;
};
struct lpfc_iov {
@@ -445,7 +475,7 @@ struct lpfc_sli4_lnk_info {
uint8_t optic_state;
};
-#define LPFC_SLI4_HANDLER_CNT (LPFC_FCP_IO_CHAN_MAX+ \
+#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ 16
@@ -515,23 +545,34 @@ struct lpfc_sli4_hba {
uint32_t ue_to_rp;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
- struct msix_entry *msix_entries;
uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
- struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+ struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */
/* Pointers to the constructed SLI4 queues */
- struct lpfc_queue **hba_eq;/* Event queues for HBA */
- struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
- struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+ struct lpfc_queue **hba_eq; /* Event queues for HBA */
+ struct lpfc_queue **fcp_cq; /* Fast-path FCP compl queue */
+ struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
+ struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
+ struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
+ struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
+ struct lpfc_queue **fcp_wq; /* Fast-path FCP work queue */
+ struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
uint16_t *fcp_cq_map;
+ uint16_t *nvme_cq_map;
+ struct list_head lpfc_wq_list;
struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+ struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */
struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+ struct lpfc_queue *nvmels_wq; /* NVME LS work queue */
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+ struct lpfc_name wwnn;
+ struct lpfc_name wwpn;
+
uint32_t fw_func_mode; /* FW function protocol mode */
uint32_t ulp0_mode; /* ULP0 protocol mode */
uint32_t ulp1_mode; /* ULP1 protocol mode */
@@ -568,14 +609,20 @@ struct lpfc_sli4_hba {
uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
+ uint16_t nvme_xri_max;
+ uint16_t nvme_xri_cnt;
+ uint16_t nvme_xri_start;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
- uint16_t els_xri_cnt;
uint16_t scsi_xri_start;
- struct list_head lpfc_free_sgl_list;
- struct list_head lpfc_sgl_list;
+ uint16_t els_xri_cnt;
+ uint16_t nvmet_xri_cnt;
+ struct list_head lpfc_els_sgl_list;
struct list_head lpfc_abts_els_sgl_list;
+ struct list_head lpfc_nvmet_sgl_list;
+ struct list_head lpfc_abts_nvmet_sgl_list;
struct list_head lpfc_abts_scsi_buf_list;
+ struct list_head lpfc_abts_nvme_buf_list;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
unsigned long *rpi_bmask;
@@ -602,8 +649,10 @@ struct lpfc_sli4_hba {
#define LPFC_SLI4_PPNAME_NON 0
#define LPFC_SLI4_PPNAME_GET 1
struct lpfc_iov iov;
+ spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
- spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+ spinlock_t sgl_list_lock; /* list of aborted els IOs */
+ spinlock_t nvmet_io_lock;
uint32_t physical_port;
/* CPU to vector mapping information */
@@ -611,11 +660,14 @@ struct lpfc_sli4_hba {
uint16_t num_online_cpu;
uint16_t num_present_cpu;
uint16_t curr_disp_cpu;
+
+ uint16_t nvmet_mrq_post_idx;
};
enum lpfc_sge_type {
GEN_BUFF_TYPE,
- SCSI_BUFF_TYPE
+ SCSI_BUFF_TYPE,
+ NVMET_BUFF_TYPE
};
enum lpfc_sgl_state {
@@ -694,15 +746,21 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint32_t);
+int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
+int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
+ struct lpfc_queue **eqp, uint32_t type,
+ uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t);
int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
+ struct lpfc_queue **drqp, struct lpfc_queue **cqp,
+ uint32_t subtype);
void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
@@ -714,6 +772,7 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
@@ -746,6 +805,7 @@ int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
int lpfc_sli4_init_vpi(struct lpfc_vport *);
uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0ee0623a354c..86c6c9b26b82 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* *
* This program is free software; you can redistribute it and/or *
* modify it under the terms of version 2 of the GNU General *
@@ -18,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.4"
+#define LPFC_DRIVER_VERSION "11.2.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -30,4 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright(c) 2004-2016 Emulex. All rights reserved."
+#define LPFC_COPYRIGHT "Copyright (C) 2017 Broadcom. All Rights Reserved. " \
+ "The term \"Broadcom\" refers to Broadcom Limited " \
+ "and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e18bbc66e83b..9a0339dbc024 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -28,11 +30,13 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/sched/signal.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
+
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
@@ -402,6 +406,22 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
}
+ if ((phba->nvmet_support == 0) &&
+ ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
+ (phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) {
+ /* Create NVME binding with nvme_fc_transport. This
+ * ensures the vport is initialized.
+ */
+ rc = lpfc_nvme_create_localport(vport);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6003 %s status x%x\n",
+ "NVME registration failed, ",
+ rc);
+ goto error_out;
+ }
+ }
+
/*
* In SLI4, the vpi must be activated before it can be used
* by the port.
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h
index 6b2c94eb8134..62295971f66c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.h
+++ b/drivers/scsi/lpfc/lpfc_vport.h
@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2006 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a3fe1fb55c17..5b7aec5d575a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1148,7 +1148,7 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
- synchronize_irq(reply_q->vector);
+ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
}
}
@@ -1837,11 +1837,8 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
list_del(&reply_q->list);
- if (smp_affinity_enable) {
- irq_set_affinity_hint(reply_q->vector, NULL);
- free_cpumask_var(reply_q->affinity_hint);
- }
- free_irq(reply_q->vector, reply_q);
+ free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
+ reply_q);
kfree(reply_q);
}
}
@@ -1850,13 +1847,13 @@ _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
* _base_request_irq - request irq
* @ioc: per adapter object
* @index: msix index into vector table
- * @vector: irq vector
*
* Inserting respective reply_queue into the list.
*/
static int
-_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
+_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
+ struct pci_dev *pdev = ioc->pdev;
struct adapter_reply_queue *reply_q;
int r;
@@ -1868,14 +1865,6 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
}
reply_q->ioc = ioc;
reply_q->msix_index = index;
- reply_q->vector = vector;
-
- if (smp_affinity_enable) {
- if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
- kfree(reply_q);
- return -ENOMEM;
- }
- }
atomic_set(&reply_q->busy, 0);
if (ioc->msix_enable)
@@ -1884,12 +1873,11 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
else
snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
ioc->driver_name, ioc->id);
- r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
- reply_q);
+ r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
+ IRQF_SHARED, reply_q->name, reply_q);
if (r) {
pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
- reply_q->name, vector);
- free_cpumask_var(reply_q->affinity_hint);
+ reply_q->name, pci_irq_vector(pdev, index));
kfree(reply_q);
return -EBUSY;
}
@@ -1925,6 +1913,21 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
if (!nr_msix)
return;
+ if (smp_affinity_enable) {
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
+ reply_q->msix_index);
+ if (!mask) {
+ pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
+ ioc->name, reply_q->msix_index);
+ continue;
+ }
+
+ for_each_cpu(cpu, mask)
+ ioc->cpu_msix_table[cpu] = reply_q->msix_index;
+ }
+ return;
+ }
cpu = cpumask_first(cpu_online_mask);
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
@@ -1938,18 +1941,9 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
group++;
for (i = 0 ; i < group ; i++) {
- ioc->cpu_msix_table[cpu] = index;
- if (smp_affinity_enable)
- cpumask_or(reply_q->affinity_hint,
- reply_q->affinity_hint, get_cpu_mask(cpu));
+ ioc->cpu_msix_table[cpu] = reply_q->msix_index;
cpu = cpumask_next(cpu, cpu_online_mask);
}
- if (smp_affinity_enable)
- if (irq_set_affinity_hint(reply_q->vector,
- reply_q->affinity_hint))
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "Err setting affinity hint to irq vector %d\n",
- ioc->name, reply_q->vector));
index++;
}
}
@@ -1976,10 +1970,10 @@ _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
- struct msix_entry *entries, *a;
int r;
int i, local_max_msix_vectors;
u8 try_msix = 0;
+ unsigned int irq_flags = PCI_IRQ_MSIX;
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@@ -1991,7 +1985,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
goto try_ioapic;
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
- ioc->msix_vector_count);
+ ioc->msix_vector_count);
printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
@@ -2002,56 +1996,51 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
else
local_max_msix_vectors = max_msix_vectors;
- if (local_max_msix_vectors > 0) {
+ if (local_max_msix_vectors > 0)
ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
ioc->reply_queue_count);
- ioc->msix_vector_count = ioc->reply_queue_count;
- } else if (local_max_msix_vectors == 0)
+ else if (local_max_msix_vectors == 0)
goto try_ioapic;
if (ioc->msix_vector_count < ioc->cpu_count)
smp_affinity_enable = 0;
- entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
- GFP_KERNEL);
- if (!entries) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "kcalloc failed @ at %s:%d/%s() !!!\n",
- ioc->name, __FILE__, __LINE__, __func__));
- goto try_ioapic;
- }
+ if (smp_affinity_enable)
+ irq_flags |= PCI_IRQ_AFFINITY;
- for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
- a->entry = i;
-
- r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
- if (r) {
+ r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
+ irq_flags);
+ if (r < 0) {
dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_enable_msix_exact failed (r=%d) !!!\n",
+ "pci_alloc_irq_vectors failed (r=%d) !!!\n",
ioc->name, r));
- kfree(entries);
goto try_ioapic;
}
ioc->msix_enable = 1;
- for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
- r = _base_request_irq(ioc, i, a->vector);
+ ioc->reply_queue_count = r;
+ for (i = 0; i < ioc->reply_queue_count; i++) {
+ r = _base_request_irq(ioc, i);
if (r) {
_base_free_irq(ioc);
_base_disable_msix(ioc);
- kfree(entries);
goto try_ioapic;
}
}
- kfree(entries);
return 0;
/* failback to io_apic interrupt routing */
try_ioapic:
ioc->reply_queue_count = 1;
- r = _base_request_irq(ioc, 0, ioc->pdev->irq);
+ r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
+ if (r < 0) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+ ioc->name, r));
+ } else
+ r = _base_request_irq(ioc, 0);
return r;
}
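
The conversion above follows the generic managed-IRQ pattern: allocate MSI-X vectors with PCI_IRQ_AFFINITY, look vectors up by index with pci_irq_vector() instead of caching them, and derive the CPU-to-queue map from pci_irq_get_affinity(). A minimal generic sketch of that pattern (not mpt3sas code):

/* Generic sketch of the pci_alloc_irq_vectors()/pci_irq_vector() pattern. */
#include <linux/pci.h>
#include <linux/interrupt.h>

static int example_setup_vectors(struct pci_dev *pdev, int want,
                                 irq_handler_t handler, void *data)
{
        int nvec, i, rc;

        nvec = pci_alloc_irq_vectors(pdev, 1, want,
                                     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (nvec < 0)
                return nvec;    /* caller may fall back to legacy INTx */

        for (i = 0; i < nvec; i++) {
                rc = request_irq(pci_irq_vector(pdev, i), handler,
                                 IRQF_SHARED, "example", data);
                if (rc)
                        goto out_free;
        }
        return nvec;

out_free:
        while (--i >= 0)
                free_irq(pci_irq_vector(pdev, i), data);
        pci_free_irq_vectors(pdev);
        return rc;
}

On teardown the vectors are released with pci_free_irq_vectors() after all handlers have been freed, which replaces the old msix_entry bookkeeping and affinity-hint management.
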
@@ -2222,7 +2211,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
- "IO-APIC enabled"), reply_q->vector);
+ "IO-APIC enabled"),
+ pci_irq_vector(ioc->pdev, reply_q->msix_index));
pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
@@ -5357,7 +5347,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->reply_post_host_index) {
dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
- "for cpu_msix_table failed!!!\n", ioc->name));
+ "for reply_post_host_index failed!!!\n",
+ ioc->name));
r = -ENOMEM;
goto out_free_resources;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 4ab634fc27df..7fe7e6ed595b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -731,12 +731,10 @@ struct _event_ack_list {
struct adapter_reply_queue {
struct MPT3SAS_ADAPTER *ioc;
u8 msix_index;
- unsigned int vector;
u32 reply_post_host_index;
Mpi2ReplyDescriptorsUnion_t *reply_post_free;
char name[MPT_NAME_LENGTH];
atomic_t busy;
- cpumask_var_t affinity_hint;
struct list_head list;
};
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 243eab3d10d0..e0ce5d2fd14d 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(osduld_device_same);
static int __detect_osd(struct osd_uld_device *oud)
{
struct scsi_device *scsi_device = oud->od.scsi_device;
+ struct scsi_sense_hdr sense_hdr;
char caps[OSD_CAP_LEN];
int error;
@@ -380,7 +381,7 @@ static int __detect_osd(struct osd_uld_device *oud)
*/
OSD_DEBUG("start scsi_test_unit_ready %p %p %p\n",
oud, scsi_device, scsi_device->request_queue);
- error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, NULL);
+ error = scsi_test_unit_ready(scsi_device, 10*HZ, 5, &sense_hdr);
if (error)
OSD_ERR("warning: scsi_test_unit_ready failed\n");
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 75ac662793a3..c47f4b349bac 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -35,7 +35,7 @@ static const char * osst_version = "0.99.4";
#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/drivers/scsi/qedf/Kconfig b/drivers/scsi/qedf/Kconfig
new file mode 100644
index 000000000000..943f5ee45807
--- /dev/null
+++ b/drivers/scsi/qedf/Kconfig
@@ -0,0 +1,11 @@
+config QEDF
+ tristate "QLogic QEDF 25/40/100Gb FCoE Initiator Driver Support"
+ depends on PCI && SCSI
+ depends on QED
+ depends on LIBFC
+ depends on LIBFCOE
+ select QED_LL2
+ select QED_FCOE
+ ---help---
+ This driver supports FCoE offload for the QLogic FastLinQ
+ 41000 Series Converged Network Adapters.
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
new file mode 100644
index 000000000000..64e9f507ce32
--- /dev/null
+++ b/drivers/scsi/qedf/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_QEDF) := qedf.o
+qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
+ qedf_attr.o qedf_els.o
+
+qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
new file mode 100644
index 000000000000..96346a1b1515
--- /dev/null
+++ b/drivers/scsi/qedf/qedf.h
@@ -0,0 +1,545 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QEDFC_H_
+#define _QEDFC_H_
+
+#include <scsi/libfcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fc2.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc_encode.h>
+#include <linux/version.h>
+
+
+/* qedf_hsi.h needs to be included before any qed includes */
+#include "qedf_hsi.h"
+
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_fcoe_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qedf_version.h"
+#include "qedf_dbg.h"
+
+/* Helpers to extract upper and lower 32-bits of pointer */
+#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
+#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
+
+#define QEDF_DESCR "QLogic FCoE Offload Driver"
+#define QEDF_MODULE_NAME "qedf"
+
+#define QEDF_MIN_XID 0
+#define QEDF_MAX_SCSI_XID (NUM_TASKS_PER_CONNECTION - 1)
+#define QEDF_MAX_ELS_XID 4095
+#define QEDF_FLOGI_RETRY_CNT 3
+#define QEDF_RPORT_RETRY_CNT 255
+#define QEDF_MAX_SESSIONS 1024
+#define QEDF_MAX_PAYLOAD 2048
+#define QEDF_MAX_BDS_PER_CMD 256
+#define QEDF_MAX_BD_LEN 0xffff
+#define QEDF_BD_SPLIT_SZ 0x1000
+#define QEDF_PAGE_SIZE 4096
+#define QED_HW_DMA_BOUNDARY 0xfff
+#define QEDF_MAX_SGLEN_FOR_CACHESGL ((1U << 16) - 1)
+#define QEDF_MFS (QEDF_MAX_PAYLOAD + \
+ sizeof(struct fc_frame_header))
+#define QEDF_MAX_NPIV 64
+#define QEDF_TM_TIMEOUT 10
+#define QEDF_ABORT_TIMEOUT 10
+#define QEDF_CLEANUP_TIMEOUT 10
+#define QEDF_MAX_CDB_LEN 16
+
+#define UPSTREAM_REMOVE 1
+#define UPSTREAM_KEEP 1
+
+struct qedf_mp_req {
+ uint8_t tm_flags;
+
+ uint32_t req_len;
+ void *req_buf;
+ dma_addr_t req_buf_dma;
+ struct fcoe_sge *mp_req_bd;
+ dma_addr_t mp_req_bd_dma;
+ struct fc_frame_header req_fc_hdr;
+
+ uint32_t resp_len;
+ void *resp_buf;
+ dma_addr_t resp_buf_dma;
+ struct fcoe_sge *mp_resp_bd;
+ dma_addr_t mp_resp_bd_dma;
+ struct fc_frame_header resp_fc_hdr;
+};
+
+struct qedf_els_cb_arg {
+ struct qedf_ioreq *aborted_io_req;
+ struct qedf_ioreq *io_req;
+ u8 op; /* Used to keep track of ELS op */
+ uint16_t l2_oxid;
+ u32 offset; /* Used for sequence cleanup */
+ u8 r_ctl; /* Used for sequence cleanup */
+};
+
+enum qedf_ioreq_event {
+ QEDF_IOREQ_EV_ABORT_SUCCESS,
+ QEDF_IOREQ_EV_ABORT_FAILED,
+ QEDF_IOREQ_EV_SEND_RRQ,
+ QEDF_IOREQ_EV_ELS_TMO,
+ QEDF_IOREQ_EV_ELS_ERR_DETECT,
+ QEDF_IOREQ_EV_ELS_FLUSH,
+ QEDF_IOREQ_EV_CLEANUP_SUCCESS,
+ QEDF_IOREQ_EV_CLEANUP_FAILED,
+};
+
+#define FC_GOOD 0
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+struct qedf_ioreq {
+ struct list_head link;
+ uint16_t xid;
+ struct scsi_cmnd *sc_cmd;
+ bool use_slowpath; /* Use slow SGL for this I/O */
+#define QEDF_SCSI_CMD 1
+#define QEDF_TASK_MGMT_CMD 2
+#define QEDF_ABTS 3
+#define QEDF_ELS 4
+#define QEDF_CLEANUP 5
+#define QEDF_SEQ_CLEANUP 6
+ u8 cmd_type;
+#define QEDF_CMD_OUTSTANDING 0x0
+#define QEDF_CMD_IN_ABORT 0x1
+#define QEDF_CMD_IN_CLEANUP 0x2
+#define QEDF_CMD_SRR_SENT 0x3
+ u8 io_req_flags;
+ struct qedf_rport *fcport;
+ unsigned long flags;
+ enum qedf_ioreq_event event;
+ size_t data_xfer_len;
+ struct kref refcount;
+ struct qedf_cmd_mgr *cmd_mgr;
+ struct io_bdt *bd_tbl;
+ struct delayed_work timeout_work;
+ struct completion tm_done;
+ struct completion abts_done;
+ struct fcoe_task_context *task;
+ int idx;
+/*
+ * Need to allocate enough room for both sense data and FCP response data
+ * which has a max length of 8 bytes according to spec.
+ */
+#define QEDF_SCSI_SENSE_BUFFERSIZE (SCSI_SENSE_BUFFERSIZE + 8)
+ uint8_t *sense_buffer;
+ dma_addr_t sense_buffer_dma;
+ u32 fcp_resid;
+ u32 fcp_rsp_len;
+ u32 fcp_sns_len;
+ u8 cdb_status;
+ u8 fcp_status;
+ u8 fcp_rsp_code;
+ u8 scsi_comp_flags;
+#define QEDF_MAX_REUSE 0xfff
+ u16 reuse_count;
+ struct qedf_mp_req mp_req;
+ void (*cb_func)(struct qedf_els_cb_arg *cb_arg);
+ struct qedf_els_cb_arg *cb_arg;
+ int fp_idx;
+ unsigned int cpu;
+ unsigned int int_cpu;
+#define QEDF_IOREQ_SLOW_SGE 0
+#define QEDF_IOREQ_SINGLE_SGE 1
+#define QEDF_IOREQ_FAST_SGE 2
+ u8 sge_type;
+ struct delayed_work rrq_work;
+
+ /* Used for sequence level recovery; i.e. REC/SRR */
+ uint32_t rx_buf_off;
+ uint32_t tx_buf_off;
+ uint32_t rx_id;
+ uint32_t task_retry_identifier;
+
+ /*
+ * Used to tell if we need to return a SCSI command
+ * during some form of error processing.
+ */
+ bool return_scsi_cmd_on_abts;
+};
+
+extern struct workqueue_struct *qedf_io_wq;
+
+struct qedf_rport {
+ spinlock_t rport_lock;
+#define QEDF_RPORT_SESSION_READY 1
+#define QEDF_RPORT_UPLOADING_CONNECTION 2
+ unsigned long flags;
+ unsigned long retry_delay_timestamp;
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ struct qedf_ctx *qedf;
+ u32 handle; /* Handle from qed */
+ u32 fw_cid; /* fw_cid from qed */
+ void __iomem *p_doorbell;
+ /* Send queue management */
+ atomic_t free_sqes;
+ atomic_t num_active_ios;
+ struct fcoe_wqe *sq;
+ dma_addr_t sq_dma;
+ u16 sq_prod_idx;
+ u16 fw_sq_prod_idx;
+ u16 sq_con_idx;
+ u32 sq_mem_size;
+ void *sq_pbl;
+ dma_addr_t sq_pbl_dma;
+ u32 sq_pbl_size;
+ u32 sid;
+#define QEDF_RPORT_TYPE_DISK 1
+#define QEDF_RPORT_TYPE_TAPE 2
+ uint dev_type; /* Disk or tape */
+ struct list_head peers;
+};
+
+/* Used to contain LL2 skb's in ll2_skb_list */
+struct qedf_skb_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ struct qedf_ctx *qedf;
+};
+
+struct qedf_fastpath {
+#define QEDF_SB_ID_NULL 0xffff
+ u16 sb_id;
+ struct qed_sb_info *sb_info;
+ struct qedf_ctx *qedf;
+ /* Keep track of number of completions on this fastpath */
+ unsigned long completions;
+ uint32_t cq_num_entries;
+};
+
+/* Used to pass fastpath information needed to process CQEs */
+struct qedf_io_work {
+ struct work_struct work;
+ struct fcoe_cqe cqe;
+ struct qedf_ctx *qedf;
+ struct fc_frame *fp;
+};
+
+struct qedf_glbl_q_params {
+ u64 hw_p_cq; /* Completion queue PBL */
+ u64 hw_p_rq; /* Request queue PBL */
+ u64 hw_p_cmdq; /* Command queue PBL */
+};
+
+struct global_queue {
+ struct fcoe_cqe *cq;
+ dma_addr_t cq_dma;
+ u32 cq_mem_size;
+ u32 cq_cons_idx; /* Completion queue consumer index */
+ u32 cq_prod_idx;
+
+ void *cq_pbl;
+ dma_addr_t cq_pbl_dma;
+ u32 cq_pbl_size;
+};
+
+/* I/O tracing entry */
+#define QEDF_IO_TRACE_SIZE 2048
+struct qedf_io_log {
+#define QEDF_IO_TRACE_REQ 0
+#define QEDF_IO_TRACE_RSP 1
+ uint8_t direction;
+ uint16_t task_id;
+ uint32_t port_id; /* Remote port fabric ID */
+ int lun;
+ char op; /* SCSI CDB */
+ uint8_t lba[4];
+ unsigned int bufflen; /* SCSI buffer length */
+ unsigned int sg_count; /* Number of SG elements */
+ int result; /* Result passed back to mid-layer */
+ unsigned long jiffies; /* Time stamp when I/O logged */
+ int refcount; /* Reference count for task id */
+ unsigned int req_cpu; /* CPU that the task is queued on */
+ unsigned int int_cpu; /* Interrupt CPU that the task is received on */
+ unsigned int rsp_cpu; /* CPU that task is returned on */
+ u8 sge_type; /* Did we take the slow, single or fast SGE path */
+};
+
+/* Number of entries in BDQ */
+#define QEDF_BDQ_SIZE 256
+#define QEDF_BDQ_BUF_SIZE 2072
+
+/* DMA coherent buffers for BDQ */
+struct qedf_bdq_buf {
+ void *buf_addr;
+ dma_addr_t buf_dma;
+};
+
+/* Main adapter struct */
+struct qedf_ctx {
+ struct qedf_dbg_ctx dbg_ctx;
+ struct fcoe_ctlr ctlr;
+ struct fc_lport *lport;
+ u8 data_src_addr[ETH_ALEN];
+#define QEDF_LINK_DOWN 0
+#define QEDF_LINK_UP 1
+ atomic_t link_state;
+#define QEDF_DCBX_PENDING 0
+#define QEDF_DCBX_DONE 1
+ atomic_t dcbx;
+ uint16_t max_scsi_xid;
+ uint16_t max_els_xid;
+#define QEDF_NULL_VLAN_ID -1
+#define QEDF_FALLBACK_VLAN 1002
+#define QEDF_DEFAULT_PRIO 3
+ int vlan_id;
+ uint vlan_hw_insert:1;
+ struct qed_dev *cdev;
+ struct qed_dev_fcoe_info dev_info;
+ struct qed_int_info int_info;
+ uint16_t last_command;
+ spinlock_t hba_lock;
+ struct pci_dev *pdev;
+ u64 wwnn;
+ u64 wwpn;
+ u8 __aligned(16) mac[ETH_ALEN];
+ struct list_head fcports;
+ atomic_t num_offloads;
+ unsigned int curr_conn_id;
+ struct workqueue_struct *ll2_recv_wq;
+ struct workqueue_struct *link_update_wq;
+ struct delayed_work link_update;
+ struct delayed_work link_recovery;
+ struct completion flogi_compl;
+ struct completion fipvlan_compl;
+
+ /*
+ * Used to tell if we're in the window where we are waiting for
+	 * the link to come back up before informing fcoe that the link is
+	 * down.
+ */
+ atomic_t link_down_tmo_valid;
+#define QEDF_TIMER_INTERVAL (1 * HZ)
+ struct timer_list timer; /* One second book keeping timer */
+#define QEDF_DRAIN_ACTIVE 1
+#define QEDF_LL2_STARTED 2
+#define QEDF_UNLOADING 3
+#define QEDF_GRCDUMP_CAPTURE 4
+#define QEDF_IN_RECOVERY 5
+#define QEDF_DBG_STOP_IO 6
+ unsigned long flags; /* Miscellaneous state flags */
+ int fipvlan_retries;
+ u8 num_queues;
+ struct global_queue **global_queues;
+ /* Pointer to array of queue structures */
+ struct qedf_glbl_q_params *p_cpuq;
+ /* Physical address of array of queue structures */
+ dma_addr_t hw_p_cpuq;
+
+ struct qedf_bdq_buf bdq[QEDF_BDQ_SIZE];
+ void *bdq_pbl;
+ dma_addr_t bdq_pbl_dma;
+ size_t bdq_pbl_mem_size;
+ void *bdq_pbl_list;
+ dma_addr_t bdq_pbl_list_dma;
+ u8 bdq_pbl_list_num_entries;
+ void __iomem *bdq_primary_prod;
+ void __iomem *bdq_secondary_prod;
+ uint16_t bdq_prod_idx;
+
+	/* Array holding all the fastpath structures for this qedf_ctx */
+ struct qedf_fastpath *fp_array;
+ struct qed_fcoe_tid tasks;
+ struct qedf_cmd_mgr *cmd_mgr;
+	/* Holds the PF parameters we pass to qed to start the FCoE function */
+ struct qed_pf_params pf_params;
+ /* Used to time middle path ELS and TM commands */
+ struct workqueue_struct *timer_work_queue;
+
+#define QEDF_IO_WORK_MIN 64
+ mempool_t *io_mempool;
+ struct workqueue_struct *dpc_wq;
+
+ u32 slow_sge_ios;
+ u32 fast_sge_ios;
+ u32 single_sge_ios;
+
+ uint8_t *grcdump;
+ uint32_t grcdump_size;
+
+ struct qedf_io_log io_trace_buf[QEDF_IO_TRACE_SIZE];
+ spinlock_t io_trace_lock;
+ uint16_t io_trace_idx;
+
+ bool stop_io_on_error;
+
+ u32 flogi_cnt;
+ u32 flogi_failed;
+
+ /* Used for fc statistics */
+ u64 input_requests;
+ u64 output_requests;
+ u64 control_requests;
+ u64 packet_aborts;
+ u64 alloc_failures;
+};
+
+struct io_bdt {
+ struct qedf_ioreq *io_req;
+ struct fcoe_sge *bd_tbl;
+ dma_addr_t bd_tbl_dma;
+ u16 bd_valid;
+};
+
+struct qedf_cmd_mgr {
+ struct qedf_ctx *qedf;
+ u16 idx;
+ struct io_bdt **io_bdt_pool;
+#define FCOE_PARAMS_NUM_TASKS 4096
+ struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
+ spinlock_t lock;
+ atomic_t free_list_cnt;
+};
+
+/* Stolen from qed_cxt_api.h and adapted for qed_fcoe_info
+ * Usage:
+ *
+ * void *ptr;
+ * ptr = qedf_get_task_mem(&qedf->tasks, 128);
+ */
+static inline void *qedf_get_task_mem(struct qed_fcoe_tid *info, u32 tid)
+{
+ return (void *)(info->blocks[tid / info->num_tids_per_block] +
+ (tid % info->num_tids_per_block) * info->size);
+}
+
+static inline void qedf_stop_all_io(struct qedf_ctx *qedf)
+{
+ set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
+}
+
+/*
+ * Externs
+ */
+#define QEDF_DEFAULT_LOG_MASK 0x3CFB6
+extern const struct qed_fcoe_ops *qed_ops;
+extern uint qedf_dump_frames;
+extern uint qedf_io_tracing;
+extern uint qedf_stop_io_on_error;
+extern uint qedf_link_down_tmo;
+#define QEDF_RETRY_DELAY_MAX 20 /* 2 seconds */
+extern bool qedf_retry_delay;
+extern uint qedf_debug;
+
+extern struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf);
+extern void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr);
+extern int qedf_queuecommand(struct Scsi_Host *host,
+ struct scsi_cmnd *sc_cmd);
+extern void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
+extern void qedf_update_src_mac(struct fc_lport *lport, u8 *addr);
+extern u8 *qedf_get_src_mac(struct fc_lport *lport);
+extern void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb);
+extern void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf);
+extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req);
+extern void qedf_process_warning_compl(struct qedf_ctx *qedf,
+ struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
+extern void qedf_process_error_detect(struct qedf_ctx *qedf,
+ struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
+extern void qedf_flush_active_ios(struct qedf_rport *fcport, int lun);
+extern void qedf_release_cmd(struct kref *ref);
+extern int qedf_initiate_abts(struct qedf_ioreq *io_req,
+ bool return_scsi_cmd_on_abts);
+extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req);
+extern struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport,
+ u8 cmd_type);
+
+extern struct device_attribute *qedf_host_attrs[];
+extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
+ unsigned int timer_msec);
+extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
+extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
+ struct fcoe_task_context *task_ctx);
+extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
+ u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
+extern void qedf_ring_doorbell(struct qedf_rport *fcport);
+extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *els_req);
+extern int qedf_send_rrq(struct qedf_ioreq *aborted_io_req);
+extern int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp);
+extern int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
+ bool return_scsi_cmd_on_abts);
+extern void qedf_process_cleanup_compl(struct qedf_ctx *qedf,
+ struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
+extern int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags);
+extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req);
+extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
+extern void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
+ int result);
+extern void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id);
+extern void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf);
+extern void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf);
+extern void qedf_capture_grc_dump(struct qedf_ctx *qedf);
+extern void qedf_wait_for_upload(struct qedf_ctx *qedf);
+extern void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
+ struct fcoe_cqe *cqe);
+extern void qedf_restart_rport(struct qedf_rport *fcport);
+extern int qedf_send_rec(struct qedf_ioreq *orig_io_req);
+extern int qedf_post_io_req(struct qedf_rport *fcport,
+ struct qedf_ioreq *io_req);
+extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
+ struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
+extern int qedf_send_flogi(struct qedf_ctx *qedf);
+extern void qedf_fp_io_handler(struct work_struct *work);
+
+#define FCOE_WORD_TO_BYTE 4
+#define QEDF_MAX_TASK_NUM 0xFFFF
+
+struct fip_vlan {
+ struct ethhdr eth;
+ struct fip_header fip;
+ struct {
+ struct fip_mac_desc mac;
+ struct fip_wwn_desc wwnn;
+ } desc;
+};
+
+/* SQ/CQ Sizes */
+#define GBL_RSVD_TASKS 16
+#define NUM_TASKS_PER_CONNECTION 1024
+#define NUM_RW_TASKS_PER_CONNECTION 512
+#define FCOE_PARAMS_CQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
+
+#define FCOE_PARAMS_CMDQ_NUM_ENTRIES FCOE_PARAMS_NUM_TASKS
+#define SQ_NUM_ENTRIES NUM_TASKS_PER_CONNECTION
+
+#define QEDF_FCOE_PARAMS_GL_RQ_PI 0
+#define QEDF_FCOE_PARAMS_GL_CMD_PI 1
+
+#define QEDF_READ (1 << 1)
+#define QEDF_WRITE (1 << 0)
+#define MAX_FIBRE_LUNS 0xffffffff
+
+#define QEDF_MAX_NUM_CQS 8
+
+/*
+ * PCI function probe defines
+ */
+/* Probe/remove called during normal PCI probe */
+#define QEDF_MODE_NORMAL 0
+/* Probe/remove called from qed error recovery */
+#define QEDF_MODE_RECOVERY 1
+
+#define SUPPORTED_25000baseKR_Full (1<<27)
+#define SUPPORTED_50000baseKR2_Full (1<<28)
+#define SUPPORTED_100000baseKR4_Full (1<<29)
+#define SUPPORTED_100000baseCR4_Full (1<<30)
+
+#endif
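The U64_HI/U64_LO helpers near the top of this header split a 64-bit DMA address into the two 32-bit halves that the firmware interface fields expect. A userspace-style sketch of the split, with a made-up address value (not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	#define U64_HI(val) ((uint32_t)(((uint64_t)(val)) >> 32))
	#define U64_LO(val) ((uint32_t)(((uint64_t)(val)) & 0xffffffff))

	int main(void)
	{
		uint64_t dma = 0x0000123456789abcULL;	/* made-up DMA address */

		/* Firmware queue parameters take the address as two u32 fields */
		printf("hi=0x%08x lo=0x%08x\n", U64_HI(dma), U64_LO(dma));
		return 0;
	}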
diff --git a/drivers/scsi/qedf/qedf_attr.c b/drivers/scsi/qedf/qedf_attr.c
new file mode 100644
index 000000000000..47720611ad2c
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_attr.c
@@ -0,0 +1,165 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "qedf.h"
+
+static ssize_t
+qedf_fcoe_mac_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fc_lport *lport = shost_priv(class_to_shost(dev));
+ u32 port_id;
+ u8 lport_src_id[3];
+ u8 fcoe_mac[6];
+
+ port_id = fc_host_port_id(lport->host);
+ lport_src_id[2] = (port_id & 0x000000FF);
+ lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
+ lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
+ fc_fcoe_set_mac(fcoe_mac, lport_src_id);
+
+ return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
+}
+
+static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
+
+struct device_attribute *qedf_host_attrs[] = {
+ &dev_attr_fcoe_mac,
+ NULL,
+};
+
+extern const struct qed_fcoe_ops *qed_ops;
+
+inline bool qedf_is_vport(struct qedf_ctx *qedf)
+{
+ return (!(qedf->lport->vport == NULL));
+}
+
+/* Get base qedf for physical port from vport */
+static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
+{
+ struct fc_lport *lport;
+ struct fc_lport *base_lport;
+
+ if (!(qedf_is_vport(qedf)))
+ return NULL;
+
+ lport = qedf->lport;
+ base_lport = shost_priv(vport_to_shost(lport->vport));
+ return (struct qedf_ctx *)(lport_priv(base_lport));
+}
+
+void qedf_capture_grc_dump(struct qedf_ctx *qedf)
+{
+ struct qedf_ctx *base_qedf;
+
+ /* Make sure we use the base qedf to take the GRC dump */
+ if (qedf_is_vport(qedf))
+ base_qedf = qedf_get_base_qedf(qedf);
+ else
+ base_qedf = qedf;
+
+ if (test_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags)) {
+ QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_INFO,
+ "GRC Dump already captured.\n");
+ return;
+ }
+
+
+ qedf_get_grc_dump(base_qedf->cdev, qed_ops->common,
+ &base_qedf->grcdump, &base_qedf->grcdump_size);
+ QEDF_ERR(&(base_qedf->dbg_ctx), "GRC Dump captured.\n");
+ set_bit(QEDF_GRCDUMP_CAPTURE, &base_qedf->flags);
+ qedf_uevent_emit(base_qedf->lport->host, QEDF_UEVENT_CODE_GRCDUMP,
+ NULL);
+}
+
+static ssize_t
+qedf_sysfs_read_grcdump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ ssize_t ret = 0;
+ struct fc_lport *lport = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qedf_ctx *qedf = lport_priv(lport);
+
+ if (test_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags)) {
+ ret = memory_read_from_buffer(buf, count, &off,
+ qedf->grcdump, qedf->grcdump_size);
+ } else {
+ QEDF_ERR(&(qedf->dbg_ctx), "GRC Dump not captured!\n");
+ }
+
+ return ret;
+}
+
+static ssize_t
+qedf_sysfs_write_grcdump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct fc_lport *lport = NULL;
+ struct qedf_ctx *qedf = NULL;
+ long reading;
+ int ret = 0;
+ char msg[40];
+
+ if (off != 0)
+ return ret;
+
+
+ lport = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ qedf = lport_priv(lport);
+
+ buf[1] = 0;
+ ret = kstrtol(buf, 10, &reading);
+ if (ret) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Invalid input, err(%d)\n", ret);
+ return ret;
+ }
+
+ memset(msg, 0, sizeof(msg));
+ switch (reading) {
+ case 0:
+ memset(qedf->grcdump, 0, qedf->grcdump_size);
+ clear_bit(QEDF_GRCDUMP_CAPTURE, &qedf->flags);
+ break;
+ case 1:
+ qedf_capture_grc_dump(qedf);
+ break;
+ }
+
+ return count;
+}
+
+static struct bin_attribute sysfs_grcdump_attr = {
+ .attr = {
+ .name = "grcdump",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qedf_sysfs_read_grcdump,
+ .write = qedf_sysfs_write_grcdump,
+};
+
+static struct sysfs_bin_attrs bin_file_entries[] = {
+ {"grcdump", &sysfs_grcdump_attr},
+ {NULL},
+};
+
+void qedf_create_sysfs_ctx_attr(struct qedf_ctx *qedf)
+{
+ qedf_create_sysfs_attr(qedf->lport->host, bin_file_entries);
+}
+
+void qedf_remove_sysfs_ctx_attr(struct qedf_ctx *qedf)
+{
+ qedf_remove_sysfs_attr(qedf->lport->host, bin_file_entries);
+}
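qedf_fcoe_mac_show() above rebuilds the fabric-provided FCoE MAC from the lport's 24-bit port ID via fc_fcoe_set_mac(): the default FC-MAP prefix followed by the three port-ID bytes. A standalone sketch of that construction (the function name and the assumption of the default 0x0EFC00 map are mine, not from the patch):

	#include <stdint.h>

	/* Fabric-provided MAC: default FC-MAP (0x0E:FC:00) + 24-bit port ID */
	static void example_fcoe_mac(uint32_t port_id, uint8_t mac[6])
	{
		mac[0] = 0x0e;
		mac[1] = 0xfc;
		mac[2] = 0x00;
		mac[3] = (port_id >> 16) & 0xff;	/* domain */
		mac[4] = (port_id >> 8) & 0xff;		/* area */
		mac[5] = port_id & 0xff;		/* port */
	}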
diff --git a/drivers/scsi/qedf/qedf_dbg.c b/drivers/scsi/qedf/qedf_dbg.c
new file mode 100644
index 000000000000..e023f5d0dc12
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_dbg.c
@@ -0,0 +1,195 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "qedf_dbg.h"
+#include <linux/vmalloc.h>
+
+void
+qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (likely(qedf) && likely(qedf->pdev))
+ pr_err("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
+ nfunc, line, qedf->host_no, &vaf);
+ else
+ pr_err("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ va_end(va);
+}
+
+void
+qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedf_debug & QEDF_LOG_WARN))
+ goto ret;
+
+ if (likely(qedf) && likely(qedf->pdev))
+ pr_warn("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
+ nfunc, line, qedf->host_no, &vaf);
+ else
+ pr_warn("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ret:
+ va_end(va);
+}
+
+void
+qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedf_debug & QEDF_LOG_NOTICE))
+ goto ret;
+
+ if (likely(qedf) && likely(qedf->pdev))
+ pr_notice("[%s]:[%s:%d]:%d: %pV",
+ dev_name(&(qedf->pdev->dev)), nfunc, line,
+ qedf->host_no, &vaf);
+ else
+ pr_notice("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ret:
+ va_end(va);
+}
+
+void
+qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+ u32 level, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char nfunc[32];
+
+ memset(nfunc, 0, sizeof(nfunc));
+ memcpy(nfunc, func, sizeof(nfunc) - 1);
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (!(qedf_debug & level))
+ goto ret;
+
+ if (likely(qedf) && likely(qedf->pdev))
+ pr_info("[%s]:[%s:%d]:%d: %pV", dev_name(&(qedf->pdev->dev)),
+ nfunc, line, qedf->host_no, &vaf);
+ else
+ pr_info("[0000:00:00.0]:[%s:%d]: %pV", nfunc, line, &vaf);
+
+ret:
+ va_end(va);
+}
+
+int
+qedf_alloc_grc_dump_buf(u8 **buf, uint32_t len)
+{
+ *buf = vmalloc(len);
+ if (!(*buf))
+ return -ENOMEM;
+
+ memset(*buf, 0, len);
+ return 0;
+}
+
+void
+qedf_free_grc_dump_buf(uint8_t **buf)
+{
+ vfree(*buf);
+ *buf = NULL;
+}
+
+int
+qedf_get_grc_dump(struct qed_dev *cdev, const struct qed_common_ops *common,
+ u8 **buf, uint32_t *grcsize)
+{
+ if (!*buf)
+ return -EINVAL;
+
+ return common->dbg_grc(cdev, *buf, grcsize);
+}
+
+void
+qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg)
+{
+ char event_string[40];
+ char *envp[] = {event_string, NULL};
+
+ memset(event_string, 0, sizeof(event_string));
+ switch (code) {
+ case QEDF_UEVENT_CODE_GRCDUMP:
+ if (msg)
+ strncpy(event_string, msg, strlen(msg));
+ else
+ sprintf(event_string, "GRCDUMP=%u", shost->host_no);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ kobject_uevent_env(&shost->shost_gendev.kobj, KOBJ_CHANGE, envp);
+}
+
+int
+qedf_create_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ int ret = 0;
+
+ for (; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&shost->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ pr_err("Unable to create sysfs %s attr, err(%d).\n",
+ iter->name, ret);
+ }
+ return ret;
+}
+
+void
+qedf_remove_sysfs_attr(struct Scsi_Host *shost, struct sysfs_bin_attrs *iter)
+{
+ for (; iter->name; iter++)
+ sysfs_remove_bin_file(&shost->shost_gendev.kobj, iter->attr);
+}
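All four qedf_dbg_* helpers above rely on the printk %pV extension: the caller's format string and arguments are wrapped in a struct va_format so the prefix and the message are emitted in a single printk call. A minimal sketch of the idiom (the function name is illustrative, not part of the patch):

	#include <stdarg.h>
	#include <linux/printk.h>

	static void example_log(const char *prefix, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list va;

		va_start(va, fmt);
		vaf.fmt = fmt;
		vaf.va = &va;
		/* %pV expands the wrapped format/arguments inside this printk */
		pr_info("[%s]: %pV", prefix, &vaf);
		va_end(va);
	}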
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
new file mode 100644
index 000000000000..23bd70628a2f
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -0,0 +1,154 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QEDF_DBG_H_
+#define _QEDF_DBG_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <scsi/scsi_transport.h>
+#include <linux/fs.h>
+
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/qed_if.h>
+
+extern uint qedf_debug;
+
+/* Debug print level definitions */
+#define QEDF_LOG_DEFAULT 0x1 /* Set default logging mask */
+#define QEDF_LOG_INFO 0x2 /*
+ * Informational logs,
+ * MAC address, WWPN, WWNN
+ */
+#define QEDF_LOG_DISC 0x4 /* Init, discovery, rport */
+#define QEDF_LOG_LL2 0x8 /* LL2, VLAN logs */
+#define QEDF_LOG_CONN 0x10 /* Connection setup, cleanup */
+#define QEDF_LOG_EVT 0x20 /* Events, link, mtu */
+#define QEDF_LOG_TIMER 0x40 /* Timer events */
+#define QEDF_LOG_MP_REQ 0x80 /* Middle Path (MP) logs */
+#define QEDF_LOG_SCSI_TM 0x100 /* SCSI Aborts, Task Mgmt */
+#define QEDF_LOG_UNSOL 0x200 /* unsolicited event logs */
+#define QEDF_LOG_IO 0x400 /* scsi cmd, completion */
+#define QEDF_LOG_MQ 0x800 /* Multi Queue logs */
+#define QEDF_LOG_BSG 0x1000 /* BSG logs */
+#define QEDF_LOG_DEBUGFS 0x2000 /* debugFS logs */
+#define QEDF_LOG_LPORT 0x4000 /* lport logs */
+#define QEDF_LOG_ELS 0x8000 /* ELS logs */
+#define QEDF_LOG_NPIV 0x10000 /* NPIV logs */
+#define QEDF_LOG_SESS		0x20000 /* Connection setup, cleanup */
+#define QEDF_LOG_TID 0x80000 /*
+ * FW TID context acquire
+ * free
+ */
+#define QEDF_TRACK_TID 0x100000 /*
+ * Track TID state. To be
+ * enabled only at module load
+ * and not run-time.
+ */
+#define QEDF_TRACK_CMD_LIST 0x300000 /*
+ * Track active cmd list nodes,
+ * done with reference to TID,
+ * hence TRACK_TID also enabled.
+ */
+#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
+#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
+
+/* Debug context structure */
+struct qedf_dbg_ctx {
+ unsigned int host_no;
+ struct pci_dev *pdev;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *bdf_dentry;
+#endif
+};
+
+#define QEDF_ERR(pdev, fmt, ...) \
+ qedf_dbg_err(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDF_WARN(pdev, fmt, ...) \
+ qedf_dbg_warn(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDF_NOTICE(pdev, fmt, ...) \
+ qedf_dbg_notice(pdev, __func__, __LINE__, fmt, ## __VA_ARGS__)
+#define QEDF_INFO(pdev, level, fmt, ...) \
+ qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
+ ## __VA_ARGS__)
+
+extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+ const char *fmt, ...);
+extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+ const char *, ...);
+extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
+ u32 line, const char *, ...);
+extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+ u32 info, const char *fmt, ...);
+
+/* GRC Dump related defines */
+
+struct Scsi_Host;
+
+#define QEDF_UEVENT_CODE_GRCDUMP 0
+
+struct sysfs_bin_attrs {
+ char *name;
+ struct bin_attribute *attr;
+};
+
+extern int qedf_alloc_grc_dump_buf(uint8_t **buf, uint32_t len);
+extern void qedf_free_grc_dump_buf(uint8_t **buf);
+extern int qedf_get_grc_dump(struct qed_dev *cdev,
+ const struct qed_common_ops *common, uint8_t **buf,
+ uint32_t *grcsize);
+extern void qedf_uevent_emit(struct Scsi_Host *shost, u32 code, char *msg);
+extern int qedf_create_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+extern void qedf_remove_sysfs_attr(struct Scsi_Host *shost,
+ struct sysfs_bin_attrs *iter);
+
+#ifdef CONFIG_DEBUG_FS
+/* DebugFS related code */
+struct qedf_list_of_funcs {
+ char *oper_str;
+ ssize_t (*oper_func)(struct qedf_dbg_ctx *qedf);
+};
+
+struct qedf_debugfs_ops {
+ char *name;
+ struct qedf_list_of_funcs *qedf_funcs;
+};
+
+#define qedf_dbg_fileops(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = simple_open, \
+ .read = drv##_dbg_##ops##_cmd_read, \
+ .write = drv##_dbg_##ops##_cmd_write \
+}
+
+/* Used for debugfs sequential files */
+#define qedf_dbg_fileops_seq(drv, ops) \
+{ \
+ .owner = THIS_MODULE, \
+ .open = drv##_dbg_##ops##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+extern void qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
+ struct qedf_debugfs_ops *dops,
+ struct file_operations *fops);
+extern void qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf);
+extern void qedf_dbg_init(char *drv_name);
+extern void qedf_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
+
+#endif /* _QEDF_DBG_H_ */
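For reference, the default mask defined as QEDF_DEFAULT_LOG_MASK (0x3CFB6) in qedf.h is the OR of the level bits above minus QEDF_LOG_DEFAULT, LL2, TIMER, BSG, DEBUGFS, the TID-tracking bits, NOTICE and WARN. A quick userspace check of that arithmetic (not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* INFO|DISC|CONN|EVT|MP_REQ|SCSI_TM|UNSOL|IO|MQ|LPORT|ELS|NPIV|SESS */
		uint32_t mask = 0x2 | 0x4 | 0x10 | 0x20 | 0x80 | 0x100 | 0x200 |
				0x400 | 0x800 | 0x4000 | 0x8000 | 0x10000 | 0x20000;

		assert(mask == 0x3CFB6);	/* QEDF_DEFAULT_LOG_MASK */
		return 0;
	}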
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
new file mode 100644
index 000000000000..cb08b625c594
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -0,0 +1,460 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+
+#include "qedf.h"
+#include "qedf_dbg.h"
+
+static struct dentry *qedf_dbg_root;
+
+/**
+ * qedf_dbg_host_init - set up the debugfs files for the PF
+ * @qedf: debug context of the PF that is starting up
+ **/
+void
+qedf_dbg_host_init(struct qedf_dbg_ctx *qedf,
+ struct qedf_debugfs_ops *dops,
+ struct file_operations *fops)
+{
+ char host_dirname[32];
+ struct dentry *file_dentry = NULL;
+
+ QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Creating debugfs host node\n");
+ /* create pf dir */
+ sprintf(host_dirname, "host%u", qedf->host_no);
+ qedf->bdf_dentry = debugfs_create_dir(host_dirname, qedf_dbg_root);
+ if (!qedf->bdf_dentry)
+ return;
+
+ /* create debugfs files */
+ while (dops) {
+ if (!(dops->name))
+ break;
+
+ file_dentry = debugfs_create_file(dops->name, 0600,
+ qedf->bdf_dentry, qedf,
+ fops);
+ if (!file_dentry) {
+ QEDF_INFO(qedf, QEDF_LOG_DEBUGFS,
+ "Debugfs entry %s creation failed\n",
+ dops->name);
+ debugfs_remove_recursive(qedf->bdf_dentry);
+ return;
+ }
+ dops++;
+ fops++;
+ }
+}
+
+/**
+ * qedf_dbg_host_exit - clear out the pf's debugfs entries
+ * @pf: the pf that is stopping
+ **/
+void
+qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf)
+{
+ QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host "
+ "entry\n");
+ /* remove debugfs entries of this PF */
+ debugfs_remove_recursive(qedf->bdf_dentry);
+ qedf->bdf_dentry = NULL;
+}
+
+/**
+ * qedf_dbg_init - start up debugfs for the driver
+ **/
+void
+qedf_dbg_init(char *drv_name)
+{
+ QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Creating debugfs root node\n");
+
+ /* create qed dir in root of debugfs. NULL means debugfs root */
+ qedf_dbg_root = debugfs_create_dir(drv_name, NULL);
+ if (!qedf_dbg_root)
+ QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Init of debugfs "
+ "failed\n");
+}
+
+/**
+ * qedf_dbg_exit - clean out the driver's debugfs entries
+ **/
+void
+qedf_dbg_exit(void)
+{
+ QEDF_INFO(NULL, QEDF_LOG_DEBUGFS, "Destroying debugfs root "
+ "entry\n");
+
+ /* remove qed dir in root of debugfs */
+ debugfs_remove_recursive(qedf_dbg_root);
+ qedf_dbg_root = NULL;
+}
+
+struct qedf_debugfs_ops qedf_debugfs_ops[] = {
+ { "fp_int", NULL },
+ { "io_trace", NULL },
+ { "debug", NULL },
+ { "stop_io_on_error", NULL},
+ { "driver_stats", NULL},
+ { "clear_stats", NULL},
+ { "offload_stats", NULL},
+ /* This must be last */
+ { NULL, NULL }
+};
+
+DECLARE_PER_CPU(struct qedf_percpu_iothread_s, qedf_percpu_iothreads);
+
+static ssize_t
+qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ size_t cnt = 0;
+ int id;
+ struct qedf_fastpath *fp = NULL;
+ struct qedf_dbg_ctx *qedf_dbg =
+ (struct qedf_dbg_ctx *)filp->private_data;
+ struct qedf_ctx *qedf = container_of(qedf_dbg,
+ struct qedf_ctx, dbg_ctx);
+
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+
+ cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
+
+ for (id = 0; id < qedf->num_queues; id++) {
+ fp = &(qedf->fp_array[id]);
+ if (fp->sb_id == QEDF_SB_ID_NULL)
+ continue;
+ cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
+ fp->completions);
+ }
+
+ cnt = min_t(int, count, cnt - *ppos);
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t
+qedf_dbg_fp_int_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ if (!count || *ppos)
+ return 0;
+
+ return count;
+}
+
+static ssize_t
+qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ int cnt;
+ struct qedf_dbg_ctx *qedf =
+ (struct qedf_dbg_ctx *)filp->private_data;
+
+ QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n");
+ cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
+
+ cnt = min_t(int, count, cnt - *ppos);
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t
+qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ uint32_t val;
+ void *kern_buf;
+ int rval;
+ struct qedf_dbg_ctx *qedf =
+ (struct qedf_dbg_ctx *)filp->private_data;
+
+ if (!count || *ppos)
+ return 0;
+
+ kern_buf = memdup_user(buffer, count);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ rval = kstrtouint(kern_buf, 10, &val);
+ kfree(kern_buf);
+ if (rval)
+ return rval;
+
+ if (val == 1)
+ qedf_debug = QEDF_DEFAULT_LOG_MASK;
+ else
+ qedf_debug = val;
+
+ QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val);
+ return count;
+}
+
+static ssize_t
+qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int cnt;
+ struct qedf_dbg_ctx *qedf_dbg =
+ (struct qedf_dbg_ctx *)filp->private_data;
+ struct qedf_ctx *qedf = container_of(qedf_dbg,
+ struct qedf_ctx, dbg_ctx);
+
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+ cnt = sprintf(buffer, "%s\n",
+ qedf->stop_io_on_error ? "true" : "false");
+
+ cnt = min_t(int, count, cnt - *ppos);
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t
+qedf_dbg_stop_io_on_error_cmd_write(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ void *kern_buf;
+ struct qedf_dbg_ctx *qedf_dbg =
+ (struct qedf_dbg_ctx *)filp->private_data;
+ struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
+ dbg_ctx);
+
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+
+ if (!count || *ppos)
+ return 0;
+
+ kern_buf = memdup_user(buffer, 6);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ if (strncmp(kern_buf, "false", 5) == 0)
+ qedf->stop_io_on_error = false;
+ else if (strncmp(kern_buf, "true", 4) == 0)
+ qedf->stop_io_on_error = true;
+ else if (strncmp(kern_buf, "now", 3) == 0)
+ /* Trigger from user to stop all I/O on this host */
+ set_bit(QEDF_DBG_STOP_IO, &qedf->flags);
+
+ kfree(kern_buf);
+ return count;
+}
+
+static int
+qedf_io_trace_show(struct seq_file *s, void *unused)
+{
+ int i, idx = 0;
+ struct qedf_ctx *qedf = s->private;
+ struct qedf_dbg_ctx *qedf_dbg = &qedf->dbg_ctx;
+ struct qedf_io_log *io_log;
+ unsigned long flags;
+
+ if (!qedf_io_tracing) {
+ seq_puts(s, "I/O tracing not enabled.\n");
+ goto out;
+ }
+
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
+
+ spin_lock_irqsave(&qedf->io_trace_lock, flags);
+ idx = qedf->io_trace_idx;
+ for (i = 0; i < QEDF_IO_TRACE_SIZE; i++) {
+ io_log = &qedf->io_trace_buf[idx];
+ seq_printf(s, "%d:", io_log->direction);
+ seq_printf(s, "0x%x:", io_log->task_id);
+ seq_printf(s, "0x%06x:", io_log->port_id);
+ seq_printf(s, "%d:", io_log->lun);
+ seq_printf(s, "0x%02x:", io_log->op);
+ seq_printf(s, "0x%02x%02x%02x%02x:", io_log->lba[0],
+ io_log->lba[1], io_log->lba[2], io_log->lba[3]);
+ seq_printf(s, "%d:", io_log->bufflen);
+ seq_printf(s, "%d:", io_log->sg_count);
+ seq_printf(s, "0x%08x:", io_log->result);
+ seq_printf(s, "%lu:", io_log->jiffies);
+ seq_printf(s, "%d:", io_log->refcount);
+ seq_printf(s, "%d:", io_log->req_cpu);
+ seq_printf(s, "%d:", io_log->int_cpu);
+ seq_printf(s, "%d:", io_log->rsp_cpu);
+ seq_printf(s, "%d\n", io_log->sge_type);
+
+ idx++;
+ if (idx == QEDF_IO_TRACE_SIZE)
+ idx = 0;
+ }
+ spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
+
+out:
+ return 0;
+}
+
+static int
+qedf_dbg_io_trace_open(struct inode *inode, struct file *file)
+{
+ struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
+ struct qedf_ctx *qedf = container_of(qedf_dbg,
+ struct qedf_ctx, dbg_ctx);
+
+ return single_open(file, qedf_io_trace_show, qedf);
+}
+
+static int
+qedf_driver_stats_show(struct seq_file *s, void *unused)
+{
+ struct qedf_ctx *qedf = s->private;
+ struct qedf_rport *fcport;
+ struct fc_rport_priv *rdata;
+
+ seq_printf(s, "cmg_mgr free io_reqs: %d\n",
+ atomic_read(&qedf->cmd_mgr->free_list_cnt));
+ seq_printf(s, "slow SGEs: %d\n", qedf->slow_sge_ios);
+ seq_printf(s, "single SGEs: %d\n", qedf->single_sge_ios);
+ seq_printf(s, "fast SGEs: %d\n\n", qedf->fast_sge_ios);
+
+ seq_puts(s, "Offloaded ports:\n\n");
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+ rdata = fcport->rdata;
+ if (rdata == NULL)
+ continue;
+ seq_printf(s, "%06x: free_sqes: %d, num_active_ios: %d\n",
+ rdata->ids.port_id, atomic_read(&fcport->free_sqes),
+ atomic_read(&fcport->num_active_ios));
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int
+qedf_dbg_driver_stats_open(struct inode *inode, struct file *file)
+{
+ struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
+ struct qedf_ctx *qedf = container_of(qedf_dbg,
+ struct qedf_ctx, dbg_ctx);
+
+ return single_open(file, qedf_driver_stats_show, qedf);
+}
+
+static ssize_t
+qedf_dbg_clear_stats_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int cnt = 0;
+
+ /* Essentially a read stub */
+ cnt = min_t(int, count, cnt - *ppos);
+ *ppos += cnt;
+ return cnt;
+}
+
+static ssize_t
+qedf_dbg_clear_stats_cmd_write(struct file *filp,
+ const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct qedf_dbg_ctx *qedf_dbg =
+ (struct qedf_dbg_ctx *)filp->private_data;
+ struct qedf_ctx *qedf = container_of(qedf_dbg, struct qedf_ctx,
+ dbg_ctx);
+
+ QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Clearing stat counters.\n");
+
+ if (!count || *ppos)
+ return 0;
+
+ /* Clear stat counters exposed by 'stats' node */
+ qedf->slow_sge_ios = 0;
+ qedf->single_sge_ios = 0;
+ qedf->fast_sge_ios = 0;
+
+ return count;
+}
+
+static int
+qedf_offload_stats_show(struct seq_file *s, void *unused)
+{
+ struct qedf_ctx *qedf = s->private;
+ struct qed_fcoe_stats *fw_fcoe_stats;
+
+ fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
+ if (!fw_fcoe_stats) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
+ "fw_fcoe_stats.\n");
+ goto out;
+ }
+
+ /* Query firmware for offload stats */
+ qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
+
+ seq_printf(s, "fcoe_rx_byte_cnt=%llu\n"
+ "fcoe_rx_data_pkt_cnt=%llu\n"
+ "fcoe_rx_xfer_pkt_cnt=%llu\n"
+ "fcoe_rx_other_pkt_cnt=%llu\n"
+ "fcoe_silent_drop_pkt_cmdq_full_cnt=%u\n"
+ "fcoe_silent_drop_pkt_crc_error_cnt=%u\n"
+ "fcoe_silent_drop_pkt_task_invalid_cnt=%u\n"
+ "fcoe_silent_drop_total_pkt_cnt=%u\n"
+ "fcoe_silent_drop_pkt_rq_full_cnt=%u\n"
+ "fcoe_tx_byte_cnt=%llu\n"
+ "fcoe_tx_data_pkt_cnt=%llu\n"
+ "fcoe_tx_xfer_pkt_cnt=%llu\n"
+ "fcoe_tx_other_pkt_cnt=%llu\n",
+ fw_fcoe_stats->fcoe_rx_byte_cnt,
+ fw_fcoe_stats->fcoe_rx_data_pkt_cnt,
+ fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt,
+ fw_fcoe_stats->fcoe_rx_other_pkt_cnt,
+ fw_fcoe_stats->fcoe_silent_drop_pkt_cmdq_full_cnt,
+ fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt,
+ fw_fcoe_stats->fcoe_silent_drop_pkt_task_invalid_cnt,
+ fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt,
+ fw_fcoe_stats->fcoe_silent_drop_pkt_rq_full_cnt,
+ fw_fcoe_stats->fcoe_tx_byte_cnt,
+ fw_fcoe_stats->fcoe_tx_data_pkt_cnt,
+ fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt,
+ fw_fcoe_stats->fcoe_tx_other_pkt_cnt);
+
+ kfree(fw_fcoe_stats);
+out:
+ return 0;
+}
+
+static int
+qedf_dbg_offload_stats_open(struct inode *inode, struct file *file)
+{
+ struct qedf_dbg_ctx *qedf_dbg = inode->i_private;
+ struct qedf_ctx *qedf = container_of(qedf_dbg,
+ struct qedf_ctx, dbg_ctx);
+
+ return single_open(file, qedf_offload_stats_show, qedf);
+}
+
+
+const struct file_operations qedf_dbg_fops[] = {
+ qedf_dbg_fileops(qedf, fp_int),
+ qedf_dbg_fileops_seq(qedf, io_trace),
+ qedf_dbg_fileops(qedf, debug),
+ qedf_dbg_fileops(qedf, stop_io_on_error),
+ qedf_dbg_fileops_seq(qedf, driver_stats),
+ qedf_dbg_fileops(qedf, clear_stats),
+ qedf_dbg_fileops_seq(qedf, offload_stats),
+ /* This must be last */
+ { NULL, NULL },
+};
+
+#else /* CONFIG_DEBUG_FS */
+void qedf_dbg_host_init(struct qedf_dbg_ctx *);
+void qedf_dbg_host_exit(struct qedf_dbg_ctx *);
+void qedf_dbg_init(char *);
+void qedf_dbg_exit(void);
+#endif /* CONFIG_DEBUG_FS */
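qedf_debugfs_ops[] and qedf_dbg_fops[] are consumed in lockstep by qedf_dbg_host_init() (dops++/fops++), so the two tables must stay index-aligned, with the ops table NULL-terminated. A sketch of the expected per-host wiring; the real call sites live in qedf_main.c, outside this hunk, so the wrapper below is hypothetical:

	/* Hypothetical wrapper around the per-host debugfs setup/teardown.
	 * The cast matches the non-const fops parameter declared in qedf_dbg.h.
	 */
	static void example_host_debugfs(struct qedf_ctx *qedf, bool create)
	{
		if (create)
			qedf_dbg_host_init(&qedf->dbg_ctx, qedf_debugfs_ops,
					   (struct file_operations *)qedf_dbg_fops);
		else
			qedf_dbg_host_exit(&qedf->dbg_ctx);
	}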
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
new file mode 100644
index 000000000000..59f3e5c73a13
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -0,0 +1,949 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include "qedf.h"
+
+/* It's assumed that the lock is held when calling this function. */
+static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
+ void *data, uint32_t data_len,
+ void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
+ struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
+{
+ struct qedf_ctx *qedf = fcport->qedf;
+ struct fc_lport *lport = qedf->lport;
+ struct qedf_ioreq *els_req;
+ struct qedf_mp_req *mp_req;
+ struct fc_frame_header *fc_hdr;
+ struct fcoe_task_context *task;
+ int rc = 0;
+ uint32_t did, sid;
+ uint16_t xid;
+ uint32_t start_time = jiffies / HZ;
+ uint32_t current_time;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
+
+ rc = fc_remote_port_chkready(fcport->rport);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
+ rc = -EAGAIN;
+ goto els_err;
+ }
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
+ op);
+ rc = -EAGAIN;
+ goto els_err;
+ }
+
+ if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
+ rc = -EINVAL;
+ goto els_err;
+ }
+
+retry_els:
+ els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
+ if (!els_req) {
+ current_time = jiffies / HZ;
+ if ((current_time - start_time) > 10) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "els: Failed els 0x%x\n", op);
+ rc = -ENOMEM;
+ goto els_err;
+ }
+ mdelay(20 * USEC_PER_MSEC);
+ goto retry_els;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
+ "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
+ els_req->xid);
+ els_req->sc_cmd = NULL;
+ els_req->cmd_type = QEDF_ELS;
+ els_req->fcport = fcport;
+ els_req->cb_func = cb_func;
+ cb_arg->io_req = els_req;
+ cb_arg->op = op;
+ els_req->cb_arg = cb_arg;
+ els_req->data_xfer_len = data_len;
+
+ /* Record which cpu this request is associated with */
+ els_req->cpu = smp_processor_id();
+
+ mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
+ rc = qedf_init_mp_req(els_req);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
+ kref_put(&els_req->refcount, qedf_release_cmd);
+ goto els_err;
+ } else {
+ rc = 0;
+ }
+
+ /* Fill ELS Payload */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
+ memcpy(mp_req->req_buf, data, data_len);
+ } else {
+ QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
+ els_req->cb_func = NULL;
+ els_req->cb_arg = NULL;
+ kref_put(&els_req->refcount, qedf_release_cmd);
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ goto els_err;
+
+ /* Fill FC header */
+ fc_hdr = &(mp_req->req_fc_hdr);
+
+ did = fcport->rdata->ids.port_id;
+ sid = fcport->sid;
+
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, sid, did,
+ FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+
+ /* Obtain exchange id */
+ xid = els_req->xid;
+
+ /* Initialize task context for this IO request */
+ task = qedf_get_task_mem(&qedf->tasks, xid);
+ qedf_init_mp_task(els_req, task);
+
+ /* Put timer on original I/O request */
+ if (timer_msec)
+ qedf_cmd_timer_set(qedf, els_req, timer_msec);
+
+ qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
+
+ /* Ring doorbell */
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
+ "req\n");
+ qedf_ring_doorbell(fcport);
+els_err:
+ return rc;
+}
+
+void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *els_req)
+{
+ struct fcoe_task_context *task_ctx;
+ struct scsi_cmnd *sc_cmd;
+ uint16_t xid;
+ struct fcoe_cqe_midpath_info *mp_info;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
+ " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
+
+ /* Kill the ELS timer */
+ cancel_delayed_work(&els_req->timeout_work);
+
+ xid = els_req->xid;
+ task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
+ sc_cmd = els_req->sc_cmd;
+
+ /* Get ELS response length from CQE */
+ mp_info = &cqe->cqe_info.midpath_info;
+ els_req->mp_req.resp_len = mp_info->data_placement_size;
+
+ /* Parse ELS response */
+ if ((els_req->cb_func) && (els_req->cb_arg)) {
+ els_req->cb_func(els_req->cb_arg);
+ els_req->cb_arg = NULL;
+ }
+
+ kref_put(&els_req->refcount, qedf_release_cmd);
+}
+
+static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
+{
+ struct qedf_ioreq *orig_io_req;
+ struct qedf_ioreq *rrq_req;
+ struct qedf_ctx *qedf;
+ int refcount;
+
+ rrq_req = cb_arg->io_req;
+ qedf = rrq_req->fcport->qedf;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered.\n");
+
+ orig_io_req = cb_arg->aborted_io_req;
+
+ if (!orig_io_req)
+ goto out_free;
+
+ if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
+ rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
+ cancel_delayed_work_sync(&orig_io_req->timeout_work);
+
+ refcount = kref_read(&orig_io_req->refcount);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
+ " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
+ orig_io_req, orig_io_req->xid, rrq_req->xid, refcount);
+
+ /* This should return the aborted io_req to the command pool */
+ if (orig_io_req)
+ kref_put(&orig_io_req->refcount, qedf_release_cmd);
+
+out_free:
+ kfree(cb_arg);
+}
+
+/* Assumes kref is already held by caller */
+int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
+{
+
+ struct fc_els_rrq rrq;
+ struct qedf_rport *fcport;
+ struct fc_lport *lport;
+ struct qedf_els_cb_arg *cb_arg = NULL;
+ struct qedf_ctx *qedf;
+ uint32_t sid;
+ uint32_t r_a_tov;
+ int rc;
+
+ if (!aborted_io_req) {
+ QEDF_ERR(NULL, "abort_io_req is NULL.\n");
+ return -EINVAL;
+ }
+
+ fcport = aborted_io_req->fcport;
+
+ /* Check that fcport is still offloaded */
+ if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
+ return -EINVAL;
+ }
+
+ if (!fcport->qedf) {
+ QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
+ return -EINVAL;
+ }
+
+ qedf = fcport->qedf;
+ lport = qedf->lport;
+ sid = fcport->sid;
+ r_a_tov = lport->r_a_tov;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending RRQ orig "
+ "io = %p, orig_xid = 0x%x\n", aborted_io_req,
+ aborted_io_req->xid);
+ memset(&rrq, 0, sizeof(rrq));
+
+ cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
+ "RRQ\n");
+ rc = -ENOMEM;
+ goto rrq_err;
+ }
+
+ cb_arg->aborted_io_req = aborted_io_req;
+
+ rrq.rrq_cmd = ELS_RRQ;
+ hton24(rrq.rrq_s_id, sid);
+ rrq.rrq_ox_id = htons(aborted_io_req->xid);
+ rrq.rrq_rx_id =
+ htons(aborted_io_req->task->tstorm_st_context.read_write.rx_id);
+
+ rc = qedf_initiate_els(fcport, ELS_RRQ, &rrq, sizeof(rrq),
+ qedf_rrq_compl, cb_arg, r_a_tov);
+
+rrq_err:
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "RRQ failed - release orig io "
+ "req 0x%x\n", aborted_io_req->xid);
+ kfree(cb_arg);
+ kref_put(&aborted_io_req->refcount, qedf_release_cmd);
+ }
+ return rc;
+}
+
+static void qedf_process_l2_frame_compl(struct qedf_rport *fcport,
+ struct fc_frame *fp,
+ u16 l2_oxid)
+{
+ struct fc_lport *lport = fcport->qedf->lport;
+ struct fc_frame_header *fh;
+ u32 crc;
+
+ fh = (struct fc_frame_header *)fc_frame_header_get(fp);
+
+ /* Set the OXID we return to what libfc used */
+ if (l2_oxid != FC_XID_UNKNOWN)
+ fh->fh_ox_id = htons(l2_oxid);
+
+ /* Setup header fields */
+ fh->fh_r_ctl = FC_RCTL_ELS_REP;
+ fh->fh_type = FC_TYPE_ELS;
+ /* Last sequence, end sequence */
+ fh->fh_f_ctl[0] = 0x98;
+ hton24(fh->fh_d_id, lport->port_id);
+ hton24(fh->fh_s_id, fcport->rdata->ids.port_id);
+ fh->fh_rx_id = 0xffff;
+
+ /* Set frame attributes */
+ crc = fcoe_fc_crc(fp);
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_crc(fp) = cpu_to_le32(~crc);
+
+ /* Send completed request to libfc */
+ fc_exch_recv(lport, fp);
+}
+
+/*
+ * In instances where an ELS command times out we may need to restart the
+ * rport by logging out and then logging back in.
+ */
+void qedf_restart_rport(struct qedf_rport *fcport)
+{
+ struct fc_lport *lport;
+ struct fc_rport_priv *rdata;
+ u32 port_id;
+
+ if (!fcport)
+ return;
+
+ rdata = fcport->rdata;
+ if (rdata) {
+ lport = fcport->qedf->lport;
+ port_id = rdata->ids.port_id;
+ QEDF_ERR(&(fcport->qedf->dbg_ctx),
+ "LOGO port_id=%x.\n", port_id);
+ fc_rport_logoff(rdata);
+ /* Recreate the rport and log back in */
+ rdata = fc_rport_create(lport, port_id);
+ if (rdata)
+ fc_rport_login(rdata);
+ }
+}
+
+static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg)
+{
+ struct qedf_ioreq *els_req;
+ struct qedf_rport *fcport;
+ struct qedf_mp_req *mp_req;
+ struct fc_frame *fp;
+ struct fc_frame_header *fh, *mp_fc_hdr;
+ void *resp_buf, *fc_payload;
+ u32 resp_len;
+ u16 l2_oxid;
+
+ l2_oxid = cb_arg->l2_oxid;
+ els_req = cb_arg->io_req;
+
+ if (!els_req) {
+ QEDF_ERR(NULL, "els_req is NULL.\n");
+ goto free_arg;
+ }
+
+ /*
+ * If we are flushing the command just free the cb_arg as none of the
+ * response data will be valid.
+ */
+ if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
+ goto free_arg;
+
+ fcport = els_req->fcport;
+ mp_req = &(els_req->mp_req);
+ mp_fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ /*
+	 * If a middle path ELS command times out, don't try to return
+	 * the command; instead do any internal cleanup and then let libfc
+	 * time out the command and clean up its internal resources.
+ */
+ if (els_req->event == QEDF_IOREQ_EV_ELS_TMO) {
+ /*
+ * If ADISC times out, libfc will timeout the exchange and then
+ * try to send a PLOGI which will timeout since the session is
+ * still offloaded. Force libfc to logout the session which
+ * will offload the connection and allow the PLOGI response to
+ * flow over the LL2 path.
+ */
+ if (cb_arg->op == ELS_ADISC)
+ qedf_restart_rport(fcport);
+ return;
+ }
+
+ if (sizeof(struct fc_frame_header) + resp_len > QEDF_PAGE_SIZE) {
+ QEDF_ERR(&(fcport->qedf->dbg_ctx), "resp_len is "
+ "beyond page size.\n");
+ goto free_arg;
+ }
+
+ fp = fc_frame_alloc(fcport->qedf->lport, resp_len);
+ if (!fp) {
+ QEDF_ERR(&(fcport->qedf->dbg_ctx),
+ "fc_frame_alloc failure.\n");
+ return;
+ }
+
+ /* Copy frame header from firmware into fp */
+ fh = (struct fc_frame_header *)fc_frame_header_get(fp);
+ memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
+
+ /* Copy payload from firmware into fp */
+ fc_payload = fc_frame_payload_get(fp, resp_len);
+ memcpy(fc_payload, resp_buf, resp_len);
+
+ QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Completing OX_ID 0x%x back to libfc.\n", l2_oxid);
+ qedf_process_l2_frame_compl(fcport, fp, l2_oxid);
+
+free_arg:
+ kfree(cb_arg);
+}
+
+int qedf_send_adisc(struct qedf_rport *fcport, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+ struct fc_frame_header *fh;
+ struct fc_lport *lport = fcport->qedf->lport;
+ struct qedf_els_cb_arg *cb_arg = NULL;
+ struct qedf_ctx *qedf;
+ uint32_t r_a_tov = lport->r_a_tov;
+ int rc;
+
+ qedf = fcport->qedf;
+ fh = fc_frame_header_get(fp);
+
+ cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
+ "ADISC\n");
+ rc = -ENOMEM;
+ goto adisc_err;
+ }
+ cb_arg->l2_oxid = ntohs(fh->fh_ox_id);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Sending ADISC ox_id=0x%x.\n", cb_arg->l2_oxid);
+
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+
+ rc = qedf_initiate_els(fcport, ELS_ADISC, adisc, sizeof(*adisc),
+ qedf_l2_els_compl, cb_arg, r_a_tov);
+
+adisc_err:
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "ADISC failed.\n");
+ kfree(cb_arg);
+ }
+ return rc;
+}
+
+static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg)
+{
+ struct qedf_ioreq *orig_io_req;
+ struct qedf_ioreq *srr_req;
+ struct qedf_mp_req *mp_req;
+ struct fc_frame_header *mp_fc_hdr, *fh;
+ struct fc_frame *fp;
+ void *resp_buf, *fc_payload;
+ u32 resp_len;
+ struct fc_lport *lport;
+ struct qedf_ctx *qedf;
+ int refcount;
+ u8 opcode;
+
+ srr_req = cb_arg->io_req;
+ qedf = srr_req->fcport->qedf;
+ lport = qedf->lport;
+
+ orig_io_req = cb_arg->aborted_io_req;
+
+ if (!orig_io_req)
+ goto out_free;
+
+ clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
+
+ if (srr_req->event != QEDF_IOREQ_EV_ELS_TMO &&
+ srr_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
+ cancel_delayed_work_sync(&orig_io_req->timeout_work);
+
+ refcount = kref_read(&orig_io_req->refcount);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
+ " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
+ orig_io_req, orig_io_req->xid, srr_req->xid, refcount);
+
+ /* If a SRR times out, simply free resources */
+ if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
+ goto out_free;
+
+ /* Normalize response data into struct fc_frame */
+ mp_req = &(srr_req->mp_req);
+ mp_fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ resp_buf = mp_req->resp_buf;
+
+ fp = fc_frame_alloc(lport, resp_len);
+ if (!fp) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "fc_frame_alloc failure.\n");
+ goto out_free;
+ }
+
+ /* Copy frame header from firmware into fp */
+ fh = (struct fc_frame_header *)fc_frame_header_get(fp);
+ memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
+
+ /* Copy payload from firmware into fp */
+ fc_payload = fc_frame_payload_get(fp, resp_len);
+ memcpy(fc_payload, resp_buf, resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ switch (opcode) {
+ case ELS_LS_ACC:
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "SRR success.\n");
+ break;
+ case ELS_LS_RJT:
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_ELS,
+ "SRR rejected.\n");
+ qedf_initiate_abts(orig_io_req, true);
+ break;
+ }
+
+ fc_frame_free(fp);
+out_free:
+ /* Put reference for original command since SRR completed */
+ kref_put(&orig_io_req->refcount, qedf_release_cmd);
+ kfree(cb_arg);
+}
+
+static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
+{
+ struct fcp_srr srr;
+ struct qedf_ctx *qedf;
+ struct qedf_rport *fcport;
+ struct fc_lport *lport;
+ struct qedf_els_cb_arg *cb_arg = NULL;
+ u32 sid, r_a_tov;
+ int rc;
+
+ if (!orig_io_req) {
+ QEDF_ERR(NULL, "orig_io_req is NULL.\n");
+ return -EINVAL;
+ }
+
+ fcport = orig_io_req->fcport;
+
+ /* Check that fcport is still offloaded */
+ if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
+ return -EINVAL;
+ }
+
+ if (!fcport->qedf) {
+ QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
+ return -EINVAL;
+ }
+
+ /* Take reference until SRR command completion */
+ kref_get(&orig_io_req->refcount);
+
+ qedf = fcport->qedf;
+ lport = qedf->lport;
+ sid = fcport->sid;
+ r_a_tov = lport->r_a_tov;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending SRR orig_io=%p, "
+ "orig_xid=0x%x\n", orig_io_req, orig_io_req->xid);
+ memset(&srr, 0, sizeof(srr));
+
+ cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
+ "SRR\n");
+ rc = -ENOMEM;
+ goto srr_err;
+ }
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ srr.srr_op = ELS_SRR;
+ srr.srr_ox_id = htons(orig_io_req->xid);
+ srr.srr_rx_id = htons(orig_io_req->rx_id);
+ srr.srr_rel_off = htonl(offset);
+ srr.srr_r_ctl = r_ctl;
+
+ rc = qedf_initiate_els(fcport, ELS_SRR, &srr, sizeof(srr),
+ qedf_srr_compl, cb_arg, r_a_tov);
+
+srr_err:
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "SRR failed - release orig_io_req"
+ "=0x%x\n", orig_io_req->xid);
+ kfree(cb_arg);
+ /* If we fail to queue SRR, send ABTS to orig_io */
+ qedf_initiate_abts(orig_io_req, true);
+ kref_put(&orig_io_req->refcount, qedf_release_cmd);
+ } else
+ /* Tell other threads that SRR is in progress */
+ set_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags);
+
+ return rc;
+}
+
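+/*
+ * Post a sequence cleanup work request to the firmware for the original I/O.
+ * The SRR itself is issued from qedf_process_seq_cleanup_compl() once the
+ * cleanup completes; a timeout is handled in qedf_cmd_timeout().
+ */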
+static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
+ u32 offset, u8 r_ctl)
+{
+ struct qedf_rport *fcport;
+ unsigned long flags;
+ struct qedf_els_cb_arg *cb_arg;
+
+ fcport = orig_io_req->fcport;
+
+ QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Doing sequence cleanup for xid=0x%x offset=%u.\n",
+ orig_io_req->xid, offset);
+
+ cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to allocate cb_arg "
+ "for sequence cleanup\n");
+ return;
+ }
+
+ /* Get reference for cleanup request */
+ kref_get(&orig_io_req->refcount);
+
+ orig_io_req->cmd_type = QEDF_SEQ_CLEANUP;
+ cb_arg->offset = offset;
+ cb_arg->r_ctl = r_ctl;
+ orig_io_req->cb_arg = cb_arg;
+
+ qedf_cmd_timer_set(fcport->qedf, orig_io_req,
+ QEDF_CLEANUP_TIMEOUT * HZ);
+
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+
+ qedf_add_to_sq(fcport, orig_io_req->xid, 0,
+ FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
+ qedf_ring_doorbell(fcport);
+
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+}
+
+void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
+ struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
+{
+ int rc;
+ struct qedf_els_cb_arg *cb_arg;
+
+ cb_arg = io_req->cb_arg;
+
+ /* If we timed out just free resources */
+ if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe)
+ goto free;
+
+ /* Kill the timer we put on the request */
+ cancel_delayed_work_sync(&io_req->timeout_work);
+
+ rc = qedf_send_srr(io_req, cb_arg->offset, cb_arg->r_ctl);
+ if (rc)
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to send SRR, I/O will "
+ "abort, xid=0x%x.\n", io_req->xid);
+free:
+ kfree(cb_arg);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+}
+
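+/*
+ * Reissue a SCSI command that the target never received on a new exchange
+ * (new OX_ID) without completing it back to the SCSI midlayer, then abort
+ * the original exchange. Used when a REC LS_RJT indicates the command was
+ * lost (see qedf_rec_compl()).
+ */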
+static bool qedf_requeue_io_req(struct qedf_ioreq *orig_io_req)
+{
+ struct qedf_rport *fcport;
+ struct qedf_ioreq *new_io_req;
+ unsigned long flags;
+ bool rc = false;
+
+ fcport = orig_io_req->fcport;
+ if (!fcport) {
+ QEDF_ERR(NULL, "fcport is NULL.\n");
+ goto out;
+ }
+
+ if (!orig_io_req->sc_cmd) {
+ QEDF_ERR(&(fcport->qedf->dbg_ctx), "sc_cmd is NULL for "
+ "xid=0x%x.\n", orig_io_req->xid);
+ goto out;
+ }
+
+ new_io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
+ if (!new_io_req) {
+ QEDF_ERR(&(fcport->qedf->dbg_ctx), "Could not allocate new "
+ "io_req.\n");
+ goto out;
+ }
+
+ new_io_req->sc_cmd = orig_io_req->sc_cmd;
+
+ /*
+ * This keeps the sc_cmd struct from being returned to the tape
+ * driver and being requeued twice. We do need to put a reference
+ * for the original I/O request since we will not do a SCSI completion
+ * for it.
+ */
+ orig_io_req->sc_cmd = NULL;
+ kref_put(&orig_io_req->refcount, qedf_release_cmd);
+
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+
+ /* kref for new command released in qedf_post_io_req on error */
+ if (qedf_post_io_req(fcport, new_io_req)) {
+ QEDF_ERR(&(fcport->qedf->dbg_ctx), "Unable to post io_req\n");
+ /* Return SQE to pool */
+ atomic_inc(&fcport->free_sqes);
+ } else {
+ QEDF_INFO(&(fcport->qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Reissued SCSI command from orig_xid=0x%x on "
+ "new_xid=0x%x.\n", orig_io_req->xid, new_io_req->xid);
+ /*
+ * Abort the original I/O but do not return SCSI command as
+ * it has been reissued on another OX_ID.
+ */
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+ qedf_initiate_abts(orig_io_req, false);
+ goto out;
+ }
+
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+out:
+ return rc;
+}
+
+
+static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
+{
+ struct qedf_ioreq *orig_io_req;
+ struct qedf_ioreq *rec_req;
+ struct qedf_mp_req *mp_req;
+ struct fc_frame_header *mp_fc_hdr, *fh;
+ struct fc_frame *fp;
+ void *resp_buf, *fc_payload;
+ u32 resp_len;
+ struct fc_lport *lport;
+ struct qedf_ctx *qedf;
+ int refcount;
+ enum fc_rctl r_ctl;
+ struct fc_els_ls_rjt *rjt;
+ struct fc_els_rec_acc *acc;
+ u8 opcode;
+ u32 offset, e_stat;
+ struct scsi_cmnd *sc_cmd;
+ bool srr_needed = false;
+
+ rec_req = cb_arg->io_req;
+ qedf = rec_req->fcport->qedf;
+ lport = qedf->lport;
+
+ orig_io_req = cb_arg->aborted_io_req;
+
+ if (!orig_io_req)
+ goto out_free;
+
+ if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO &&
+ rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
+ cancel_delayed_work_sync(&orig_io_req->timeout_work);
+
+ refcount = kref_read(&orig_io_req->refcount);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered: orig_io=%p,"
+ " orig_io_xid=0x%x, rec_xid=0x%x, refcount=%d\n",
+ orig_io_req, orig_io_req->xid, rec_req->xid, refcount);
+
+ /* If a REC times out, free resources */
+ if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
+ goto out_free;
+
+ /* Normalize response data into struct fc_frame */
+ mp_req = &(rec_req->mp_req);
+ mp_fc_hdr = &(mp_req->resp_fc_hdr);
+ resp_len = mp_req->resp_len;
+ acc = resp_buf = mp_req->resp_buf;
+
+ fp = fc_frame_alloc(lport, resp_len);
+ if (!fp) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "fc_frame_alloc failure.\n");
+ goto out_free;
+ }
+
+ /* Copy frame header from firmware into fp */
+ fh = (struct fc_frame_header *)fc_frame_header_get(fp);
+ memcpy(fh, mp_fc_hdr, sizeof(struct fc_frame_header));
+
+ /* Copy payload from firmware into fp */
+ fc_payload = fc_frame_payload_get(fp, resp_len);
+ memcpy(fc_payload, resp_buf, resp_len);
+
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Received LS_RJT for REC: er_reason=0x%x, "
+ "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
+ /*
+ * The following response(s) mean that we need to reissue the
+ * request on another exchange. We need to do this without
+ * informing the upper layers lest it cause an application
+ * error.
+ */
+ if ((rjt->er_reason == ELS_RJT_LOGIC ||
+ rjt->er_reason == ELS_RJT_UNAB) &&
+ rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Handle CMD LOST case.\n");
+ qedf_requeue_io_req(orig_io_req);
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ offset = ntohl(acc->reca_fc4value);
+ e_stat = ntohl(acc->reca_e_stat);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Received LS_ACC for REC: offset=0x%x, e_stat=0x%x.\n",
+ offset, e_stat);
+ if (e_stat & ESB_ST_SEQ_INIT) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Target has the seq init\n");
+ goto out_free_frame;
+ }
+ sc_cmd = orig_io_req->sc_cmd;
+ if (!sc_cmd) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "sc_cmd is NULL for xid=0x%x.\n",
+ orig_io_req->xid);
+ goto out_free_frame;
+ }
+ /* SCSI write case */
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ if (offset == orig_io_req->data_xfer_len) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "WRITE - response lost.\n");
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ srr_needed = true;
+ offset = 0;
+ } else {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "WRITE - XFER_RDY/DATA lost.\n");
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ /* Use data from warning CQE instead of REC */
+ offset = orig_io_req->tx_buf_off;
+ }
+ /* SCSI read case */
+ } else {
+ if (orig_io_req->rx_buf_off ==
+ orig_io_req->data_xfer_len) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "READ - response lost.\n");
+ srr_needed = true;
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ offset = 0;
+ } else {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "READ - DATA lost.\n");
+ /*
+ * For read case we always set the offset to 0
+ * for sequence recovery task.
+ */
+ offset = 0;
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ }
+ }
+
+ if (srr_needed)
+ qedf_send_srr(orig_io_req, offset, r_ctl);
+ else
+ qedf_initiate_seq_cleanup(orig_io_req, offset, r_ctl);
+ }
+
+out_free_frame:
+ fc_frame_free(fp);
+out_free:
+ /* Put reference for original command since REC completed */
+ kref_put(&orig_io_req->refcount, qedf_release_cmd);
+ kfree(cb_arg);
+}
+
+/* Assumes kref is already held by caller */
+int qedf_send_rec(struct qedf_ioreq *orig_io_req)
+{
+
+ struct fc_els_rec rec;
+ struct qedf_rport *fcport;
+ struct fc_lport *lport;
+ struct qedf_els_cb_arg *cb_arg = NULL;
+ struct qedf_ctx *qedf;
+ uint32_t sid;
+ uint32_t r_a_tov;
+ int rc;
+
+ if (!orig_io_req) {
+ QEDF_ERR(NULL, "orig_io_req is NULL.\n");
+ return -EINVAL;
+ }
+
+ fcport = orig_io_req->fcport;
+
+ /* Check that fcport is still offloaded */
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
+ return -EINVAL;
+ }
+
+ if (!fcport->qedf) {
+ QEDF_ERR(NULL, "fcport->qedf is NULL.\n");
+ return -EINVAL;
+ }
+
+ /* Take reference until REC command completion */
+ kref_get(&orig_io_req->refcount);
+
+ qedf = fcport->qedf;
+ lport = qedf->lport;
+ sid = fcport->sid;
+ r_a_tov = lport->r_a_tov;
+
+ memset(&rec, 0, sizeof(rec));
+
+ cb_arg = kzalloc(sizeof(struct qedf_els_cb_arg), GFP_NOIO);
+ if (!cb_arg) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate cb_arg for "
+ "REC\n");
+ rc = -ENOMEM;
+ goto rec_err;
+ }
+
+ cb_arg->aborted_io_req = orig_io_req;
+
+ rec.rec_cmd = ELS_REC;
+ hton24(rec.rec_s_id, sid);
+ rec.rec_ox_id = htons(orig_io_req->xid);
+ rec.rec_rx_id =
+ htons(orig_io_req->task->tstorm_st_context.read_write.rx_id);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending REC orig_io=%p, "
+ "orig_xid=0x%x rx_id=0x%x\n", orig_io_req,
+ orig_io_req->xid, rec.rec_rx_id);
+ rc = qedf_initiate_els(fcport, ELS_REC, &rec, sizeof(rec),
+ qedf_rec_compl, cb_arg, r_a_tov);
+
+rec_err:
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "REC failed - release orig_io_req"
+ "=0x%x\n", orig_io_req->xid);
+ kfree(cb_arg);
+ kref_put(&orig_io_req->refcount, qedf_release_cmd);
+ }
+ return rc;
+}
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
new file mode 100644
index 000000000000..868d423380d1
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -0,0 +1,269 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include "qedf.h"
+
+extern const struct qed_fcoe_ops *qed_ops;
+/*
+ * FIP VLAN functions that will eventually move to libfcoe.
+ */
+
+void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf)
+{
+ struct sk_buff *skb;
+ char *eth_fr;
+ int fr_len;
+ struct fip_vlan *vlan;
+#define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
+ static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS;
+
+ skb = dev_alloc_skb(sizeof(struct fip_vlan));
+ if (!skb)
+ return;
+
+ fr_len = sizeof(*vlan);
+ eth_fr = (char *)skb->data;
+ vlan = (struct fip_vlan *)eth_fr;
+
+ memset(vlan, 0, sizeof(*vlan));
+ ether_addr_copy(vlan->eth.h_source, qedf->mac);
+ ether_addr_copy(vlan->eth.h_dest, my_fcoe_all_fcfs);
+ vlan->eth.h_proto = htons(ETH_P_FIP);
+
+ vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
+ vlan->fip.fip_op = htons(FIP_OP_VLAN);
+ vlan->fip.fip_subcode = FIP_SC_VL_REQ;
+ vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
+
+ vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
+ vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
+ ether_addr_copy(vlan->desc.mac.fd_mac, qedf->mac);
+
+ vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
+ vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
+ put_unaligned_be64(qedf->lport->wwnn, &vlan->desc.wwnn.fd_wwn);
+
+ skb_put(skb, sizeof(*vlan));
+ skb->protocol = htons(ETH_P_FIP);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Sending FIP VLAN "
+ "request.");
+
+ if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Cannot send vlan request "
+ "because link is not up.\n");
+
+ kfree_skb(skb);
+ return;
+ }
+ qed_ops->ll2->start_xmit(qedf->cdev, skb);
+}
+
+static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf,
+ struct sk_buff *skb)
+{
+ struct fip_header *fiph;
+ struct fip_desc *desc;
+ u16 vid = 0;
+ ssize_t rlen;
+ size_t dlen;
+
+ fiph = (struct fip_header *)(((void *)skb->data) + 2 * ETH_ALEN + 2);
+
+ rlen = ntohs(fiph->fip_dl_len) * 4;
+ desc = (struct fip_desc *)(fiph + 1);
+ while (rlen > 0) {
+ dlen = desc->fip_dlen * FIP_BPW;
+ switch (desc->fip_dtype) {
+ case FIP_DT_VLAN:
+ vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "VLAN response, "
+ "vid=0x%x.\n", vid);
+
+ if (vid > 0 && qedf->vlan_id != vid) {
+ qedf_set_vlan_id(qedf, vid);
+
+		/* Inform waiter that it's ok to call fcoe_ctlr_link_up() */
+ complete(&qedf->fipvlan_compl);
+ }
+}
+
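+/*
+ * Transmit callback for the FIP controller (fcoe_ctlr): software-insert an
+ * 802.1Q VLAN tag when the hardware does not do it, then hand the frame to
+ * the qed LL2 interface.
+ */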
+void qedf_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
+{
+ struct qedf_ctx *qedf = container_of(fip, struct qedf_ctx, ctlr);
+ struct ethhdr *eth_hdr;
+ struct vlan_ethhdr *vlan_hdr;
+ struct fip_header *fiph;
+ u16 op, vlan_tci = 0;
+ u8 sub;
+
+ if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
+ QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+ op = ntohs(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ if (!qedf->vlan_hw_insert) {
+ vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, sizeof(*vlan_hdr)
+ - sizeof(*eth_hdr));
+ memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
+ vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
+ vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
+ vlan_hdr->h_vlan_TCI = vlan_tci = htons(qedf->vlan_id);
+ }
+
+ /* Update eth_hdr since we added a VLAN tag */
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame send: "
+ "dest=%pM op=%x sub=%x vlan=%04x.", eth_hdr->h_dest, op, sub,
+ ntohs(vlan_tci));
+ if (qedf_dump_frames)
+ print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, false);
+
+ qed_ops->ll2->start_xmit(qedf->cdev, skb);
+}
+
+/* Process incoming FIP frames. */
+void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
+{
+ struct ethhdr *eth_hdr;
+ struct fip_header *fiph;
+ struct fip_desc *desc;
+ struct fip_mac_desc *mp;
+ struct fip_wwn_desc *wp;
+ struct fip_vn_desc *vp;
+ size_t rlen, dlen;
+ uint32_t cvl_port_id;
+ __u8 cvl_mac[ETH_ALEN];
+ u16 op;
+ u8 sub;
+
+ eth_hdr = (struct ethhdr *)skb_mac_header(skb);
+ fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
+ op = ntohs(fiph->fip_op);
+ sub = fiph->fip_subcode;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FIP frame received: "
+ "skb=%p fiph=%p source=%pM op=%x sub=%x", skb, fiph,
+ eth_hdr->h_source, op, sub);
+ if (qedf_dump_frames)
+ print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, false);
+
+ /* Handle FIP VLAN resp in the driver */
+ if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
+ qedf_fcoe_process_vlan_resp(qedf, skb);
+ qedf->vlan_hw_insert = 0;
+ kfree_skb(skb);
+ } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Clear virtual "
+ "link received.\n");
+
+ /* Check that an FCF has been selected by fcoe */
+ if (qedf->ctlr.sel_fcf == NULL) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Dropping CVL since FCF has not been selected "
+ "yet.");
+ return;
+ }
+
+ cvl_port_id = 0;
+ memset(cvl_mac, 0, ETH_ALEN);
+ /*
+ * We need to loop through the CVL descriptors to determine
+ * if we want to reset the fcoe link
+ */
+ rlen = ntohs(fiph->fip_dl_len) * FIP_BPW;
+ desc = (struct fip_desc *)(fiph + 1);
+ while (rlen >= sizeof(*desc)) {
+ dlen = desc->fip_dlen * FIP_BPW;
+ switch (desc->fip_dtype) {
+ case FIP_DT_MAC:
+ mp = (struct fip_mac_desc *)desc;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "fd_mac=%pM.\n", __func__, mp->fd_mac);
+ ether_addr_copy(cvl_mac, mp->fd_mac);
+ break;
+ case FIP_DT_NAME:
+ wp = (struct fip_wwn_desc *)desc;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "fc_wwpn=%016llx.\n",
+ get_unaligned_be64(&wp->fd_wwn));
+ break;
+ case FIP_DT_VN_ID:
+ vp = (struct fip_vn_desc *)desc;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
+ cvl_port_id = ntoh24(vp->fd_fc_id);
+ break;
+ default:
+ /* Ignore anything else */
+ break;
+ }
+ desc = (struct fip_desc *)((char *)desc + dlen);
+ rlen -= dlen;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id,
+ cvl_mac);
+ if (cvl_port_id == qedf->lport->port_id &&
+ ether_addr_equal(cvl_mac,
+ qedf->ctlr.sel_fcf->fcf_mac)) {
+ fcoe_ctlr_link_down(&qedf->ctlr);
+ qedf_wait_for_upload(qedf);
+ fcoe_ctlr_link_up(&qedf->ctlr);
+ }
+ kfree_skb(skb);
+ } else {
+ /* Everything else is handled by libfcoe */
+ __skb_pull(skb, ETH_HLEN);
+ fcoe_ctlr_recv(&qedf->ctlr, skb);
+ }
+}
+
+void qedf_update_src_mac(struct fc_lport *lport, u8 *addr)
+{
+ struct qedf_ctx *qedf = lport_priv(lport);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Setting data_src_addr=%pM.\n", addr);
+ ether_addr_copy(qedf->data_src_addr, addr);
+}
+
+u8 *qedf_get_src_mac(struct fc_lport *lport)
+{
+ u8 mac[ETH_ALEN];
+ u8 port_id[3];
+ struct qedf_ctx *qedf = lport_priv(lport);
+
+ /* We need to use the lport port_id to create the data_src_addr */
+ if (is_zero_ether_addr(qedf->data_src_addr)) {
+ hton24(port_id, lport->port_id);
+ fc_fcoe_set_mac(mac, port_id);
+ qedf->ctlr.update_mac(lport, mac);
+ }
+ return qedf->data_src_addr;
+}
diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h
new file mode 100644
index 000000000000..dfd65dec2874
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_hsi.h
@@ -0,0 +1,422 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef __QEDF_HSI__
+#define __QEDF_HSI__
+/*
+ * Add include to common target
+ */
+#include <linux/qed/common_hsi.h>
+
+/*
+ * Add include to common storage target
+ */
+#include <linux/qed/storage_common.h>
+
+/*
+ * Add include to common fcoe target for both eCore and protocol driver
+ */
+#include <linux/qed/fcoe_common.h>
+
+
+/*
+ * FCoE CQ element ABTS information
+ */
+struct fcoe_abts_info {
+ u8 r_ctl /* R_CTL in the ABTS response frame */;
+ u8 reserved0;
+ __le16 rx_id;
+ __le32 reserved2[2];
+ __le32 fc_payload[3] /* ABTS FC payload response frame */;
+};
+
+
+/*
+ * FCoE class type
+ */
+enum fcoe_class_type {
+ FCOE_TASK_CLASS_TYPE_3,
+ FCOE_TASK_CLASS_TYPE_2,
+ MAX_FCOE_CLASS_TYPE
+};
+
+
+/*
+ * FCoE CMDQ element control information
+ */
+struct fcoe_cmdqe_control {
+ __le16 conn_id;
+ u8 num_additional_cmdqes;
+ u8 cmdType;
+ /* true for ABTS request cmdqe. used in Target mode */
+#define FCOE_CMDQE_CONTROL_ABTSREQCMD_MASK 0x1
+#define FCOE_CMDQE_CONTROL_ABTSREQCMD_SHIFT 0
+#define FCOE_CMDQE_CONTROL_RESERVED1_MASK 0x7F
+#define FCOE_CMDQE_CONTROL_RESERVED1_SHIFT 1
+ u8 reserved2[4];
+};
+
+/*
+ * FCoE control + payload CMDQ element
+ */
+struct fcoe_cmdqe {
+ struct fcoe_cmdqe_control hdr;
+ u8 fc_header[24];
+ __le32 fcp_cmd_payload[8];
+};
+
+
+
+/*
+ * FCP RSP flags
+ */
+struct fcoe_fcp_rsp_flags {
+ u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_MASK 0x1
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_MASK 0x1
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_MASK 0x1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_MASK 0x1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_MASK 0x1
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_MASK 0x7
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+/*
+ * FCoE CQ element response information
+ */
+struct fcoe_cqe_rsp_info {
+ struct fcoe_fcp_rsp_flags rsp_flags;
+ u8 scsi_status_code;
+ __le16 retry_delay_timer;
+ __le32 fcp_resid;
+ __le32 fcp_sns_len;
+ __le32 fcp_rsp_len;
+ __le16 rx_id;
+ u8 fw_error_flags;
+#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_MASK 0x1 /* FW detected underrun */
+#define FCOE_CQE_RSP_INFO_FW_UNDERRUN_SHIFT 0
+#define FCOE_CQE_RSP_INFO_RESREVED_MASK 0x7F
+#define FCOE_CQE_RSP_INFO_RESREVED_SHIFT 1
+ u8 reserved;
+ __le32 fw_residual /* Residual bytes calculated by FW */;
+};
+
+/*
+ * FCoE CQ element Target completion information
+ */
+struct fcoe_cqe_target_info {
+ __le16 rx_id;
+ __le16 reserved0;
+ __le32 reserved1[5];
+};
+
+/*
+ * FCoE error/warning reporting entry
+ */
+struct fcoe_err_report_entry {
+ __le32 err_warn_bitmap_lo /* Error bitmap lower 32 bits */;
+ __le32 err_warn_bitmap_hi /* Error bitmap higher 32 bits */;
+	/* Buffer offset from the beginning of the Sequence last transmitted */
+ __le32 tx_buf_off;
+ /* Buffer offset from the beginning of the Sequence last received */
+ __le32 rx_buf_off;
+ __le16 rx_id /* RX_ID of the associated task */;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
+/*
+ * FCoE CQ element middle path information
+ */
+struct fcoe_cqe_midpath_info {
+ __le32 data_placement_size;
+ __le16 rx_id;
+ __le16 reserved0;
+ __le32 reserved1[4];
+};
+
+/*
+ * FCoE CQ element unsolicited information
+ */
+struct fcoe_unsolic_info {
+ /* BD information: Physical address and opaque data */
+ struct scsi_bd bd_info;
+ __le16 conn_id /* Connection ID the frame is associated to */;
+ __le16 pkt_len /* Packet length */;
+ u8 reserved1[4];
+};
+
+/*
+ * FCoE warning reporting entry
+ */
+struct fcoe_warning_report_entry {
+ /* BD information: Physical address and opaque data */
+ struct scsi_bd bd_info;
+	/* Buffer offset from the beginning of the Sequence last transmitted */
+ __le32 buf_off;
+ __le16 rx_id /* RX_ID of the associated task */;
+ __le16 reserved1;
+};
+
+/*
+ * FCoE CQ element information
+ */
+union fcoe_cqe_info {
+ struct fcoe_cqe_rsp_info rsp_info /* Response completion information */;
+ /* Target completion information */
+ struct fcoe_cqe_target_info target_info;
+ /* Error completion information */
+ struct fcoe_err_report_entry err_info;
+ struct fcoe_abts_info abts_info /* ABTS completion information */;
+ /* Middle path completion information */
+ struct fcoe_cqe_midpath_info midpath_info;
+ /* Unsolicited packet completion information */
+ struct fcoe_unsolic_info unsolic_info;
+ /* Warning completion information (Rec Tov expiration) */
+ struct fcoe_warning_report_entry warn_info;
+};
+
+/*
+ * FCoE CQ element
+ */
+struct fcoe_cqe {
+ __le32 cqe_data;
+ /* The task identifier (OX_ID) to be completed */
+#define FCOE_CQE_TASK_ID_MASK 0xFFFF
+#define FCOE_CQE_TASK_ID_SHIFT 0
+ /*
+	 * The CQE type: 0x0 indicates completion of a pending work request,
+	 * 0x1 indicates an unsolicited event notification
+	 * (use enum fcoe_cqe_type).
+ */
+#define FCOE_CQE_CQE_TYPE_MASK 0xF
+#define FCOE_CQE_CQE_TYPE_SHIFT 16
+#define FCOE_CQE_RESERVED0_MASK 0xFFF
+#define FCOE_CQE_RESERVED0_SHIFT 20
+ __le16 reserved1;
+ __le16 fw_cq_prod;
+ union fcoe_cqe_info cqe_info;
+};
+
+/*
+ * FCoE CQE type
+ */
+enum fcoe_cqe_type {
+ /* solicited response on a R/W or middle-path SQE */
+ FCOE_GOOD_COMPLETION_CQE_TYPE,
+ FCOE_UNSOLIC_CQE_TYPE /* unsolicited packet, RQ consumed */,
+ FCOE_ERROR_DETECTION_CQE_TYPE /* timer expiration, validation error */,
+ FCOE_WARNING_CQE_TYPE /* rec_tov or rr_tov timer expiration */,
+ FCOE_EXCH_CLEANUP_CQE_TYPE /* task cleanup completed */,
+ FCOE_ABTS_CQE_TYPE /* ABTS received and task cleaned */,
+ FCOE_DUMMY_CQE_TYPE /* just increment SQ CONS */,
+	/* Task was completed right after sending a packet to the target */
+ FCOE_LOCAL_COMP_CQE_TYPE,
+ MAX_FCOE_CQE_TYPE
+};
+
+
+/*
+ * FCoE device type
+ */
+enum fcoe_device_type {
+ FCOE_TASK_DEV_TYPE_DISK,
+ FCOE_TASK_DEV_TYPE_TAPE,
+ MAX_FCOE_DEVICE_TYPE
+};
+
+
+
+
+/*
+ * FCoE fast path error codes
+ */
+enum fcoe_fp_error_warning_code {
+ FCOE_ERROR_CODE_XFER_OOO_RO /* XFER error codes */,
+ FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED,
+ FCOE_ERROR_CODE_XFER_NULL_BURST_LEN,
+ FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS,
+ FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE,
+ FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE,
+ FCOE_ERROR_CODE_XFER_PEND_XFER_SET,
+ FCOE_ERROR_CODE_XFER_OPENED_SEQ,
+ FCOE_ERROR_CODE_XFER_FCTL,
+ FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET /* FCP RSP error codes */,
+ FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD,
+ FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD,
+ FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE,
+ FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET,
+ FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ,
+ FCOE_ERROR_CODE_FCP_RSP_FCTL,
+ FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET,
+ FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET,
+ FCOE_ERROR_CODE_DATA_OOO_RO /* FCP DATA error codes */,
+ FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE,
+ FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS,
+ FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET,
+ FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET,
+ FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET,
+ FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET,
+ FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ,
+ FCOE_ERROR_CODE_DATA_FCTL_INITIATIR,
+ FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE /* Middle path error codes */,
+ FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET,
+ FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET,
+ FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET,
+ FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET,
+ FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL,
+ FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY,
+ FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL,
+ FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD /* Common error codes */,
+ FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE,
+ FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH,
+ FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT,
+ FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH,
+ FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES,
+ FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR,
+ FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG,
+ FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED,
+ FCOE_ERROR_CODE_COMMON_TASK_DDF_RCTL_INFO_FIELD,
+ FCOE_ERROR_CODE_COMMON_TASK_INVALID_RCTL,
+ FCOE_ERROR_CODE_COMMON_TASK_RCTL_GENERAL_MISMATCH,
+ FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION /* Timer error codes */,
+ FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION /* Timer error codes */,
+ FCOE_ERROR_CODE_RR_TOV_TIMER_EXPIRATION /* Timer error codes */,
+	/* ABTS response packet arrived unexpectedly */
+ FCOE_ERROR_CODE_ABTS_REPLY_UNEXPECTED,
+ FCOE_ERROR_CODE_TARGET_MODE_FCP_RSP,
+ FCOE_ERROR_CODE_TARGET_MODE_FCP_XFER,
+ FCOE_ERROR_CODE_TARGET_MODE_DATA_TASK_TYPE_NOT_WRITE,
+ FCOE_ERROR_CODE_DATA_FCTL_TARGET,
+ FCOE_ERROR_CODE_TARGET_DATA_SIZE_NO_MATCH_XFER,
+ FCOE_ERROR_CODE_TARGET_DIF_CRC_CHECKSUM_ERROR,
+ FCOE_ERROR_CODE_TARGET_DIF_REF_TAG_ERROR,
+ FCOE_ERROR_CODE_TARGET_DIF_APP_TAG_ERROR,
+ MAX_FCOE_FP_ERROR_WARNING_CODE
+};
+
+
+/*
+ * FCoE RESPQ element
+ */
+struct fcoe_respqe {
+ __le16 ox_id /* OX_ID that is located in the FCP_RSP FC header */;
+ __le16 rx_id /* RX_ID that is located in the FCP_RSP FC header */;
+ __le32 additional_info;
+/* PARAM that is located in the FCP_RSP FC header */
+#define FCOE_RESPQE_PARAM_MASK 0xFFFFFF
+#define FCOE_RESPQE_PARAM_SHIFT 0
+/* Indication whether it is Target-auto-rsp mode or not */
+#define FCOE_RESPQE_TARGET_AUTO_RSP_MASK 0xFF
+#define FCOE_RESPQE_TARGET_AUTO_RSP_SHIFT 24
+};
+
+
+/*
+ * FCoE slow path error codes
+ */
+enum fcoe_sp_error_code {
+ /* Error codes for Error Reporting in slow path flows */
+ FCOE_ERROR_CODE_SLOW_PATH_TOO_MANY_FUNCS,
+ FCOE_ERROR_SLOW_PATH_CODE_NO_LICENSE,
+ MAX_FCOE_SP_ERROR_CODE
+};
+
+
+/*
+ * FCoE SQE request type
+ */
+enum fcoe_sqe_request_type {
+ SEND_FCOE_CMD,
+ SEND_FCOE_MIDPATH,
+ SEND_FCOE_ABTS_REQUEST,
+ FCOE_EXCHANGE_CLEANUP,
+ FCOE_SEQUENCE_RECOVERY,
+ SEND_FCOE_XFER_RDY,
+ SEND_FCOE_RSP,
+ SEND_FCOE_RSP_WITH_SENSE_DATA,
+ SEND_FCOE_TARGET_DATA,
+ SEND_FCOE_INITIATOR_DATA,
+ /*
+ * Xfer Continuation (==1) ready to be sent. Previous XFERs data
+ * received successfully.
+ */
+ SEND_FCOE_XFER_CONTINUATION_RDY,
+ SEND_FCOE_TARGET_ABTS_RSP,
+ MAX_FCOE_SQE_REQUEST_TYPE
+};
+
+
+/*
+ * FCoE task TX state
+ */
+enum fcoe_task_tx_state {
+	/* Initial state after driver has initialized the task */
+ FCOE_TASK_TX_STATE_NORMAL,
+ /* Updated by TX path after complete transmitting unsolicited packet */
+ FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED,
+ /*
+ * Updated by TX path after start processing the task requesting the
+ * cleanup/abort operation
+ */
+ FCOE_TASK_TX_STATE_CLEAN_REQ,
+ FCOE_TASK_TX_STATE_ABTS /* Updated by TX path during abort procedure */,
+ /* Updated by TX path during exchange cleanup procedure */
+ FCOE_TASK_TX_STATE_EXCLEANUP,
+ /*
+ * Updated by TX path during exchange cleanup continuation task
+ * procedure
+ */
+ FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_CONT,
+ /* Updated by TX path during exchange cleanup first xfer procedure */
+ FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE,
+ /* Updated by TX path during exchange cleanup read task in Target */
+ FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_READ_OR_RSP,
+ /* Updated by TX path during target exchange cleanup procedure */
+ FCOE_TASK_TX_STATE_EXCLEANUP_TARGET_WRITE_LAST_CYCLE,
+ /* Updated by TX path during sequence recovery procedure */
+ FCOE_TASK_TX_STATE_SEQRECOVERY,
+ MAX_FCOE_TASK_TX_STATE
+};
+
+
+/*
+ * FCoE task type
+ */
+enum fcoe_task_type {
+ FCOE_TASK_TYPE_WRITE_INITIATOR,
+ FCOE_TASK_TYPE_READ_INITIATOR,
+ FCOE_TASK_TYPE_MIDPATH,
+ FCOE_TASK_TYPE_UNSOLICITED,
+ FCOE_TASK_TYPE_ABTS,
+ FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
+ FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
+ FCOE_TASK_TYPE_WRITE_TARGET,
+ FCOE_TASK_TYPE_READ_TARGET,
+ FCOE_TASK_TYPE_RSP,
+ FCOE_TASK_TYPE_RSP_SENSE_DATA,
+ FCOE_TASK_TYPE_ABTS_TARGET,
+ FCOE_TASK_TYPE_ENUM_SIZE,
+ MAX_FCOE_TASK_TYPE
+};
+
+struct scsi_glbl_queue_entry {
+ /* Start physical address for the RQ (receive queue) PBL. */
+ struct regpair rq_pbl_addr;
+ /* Start physical address for the CQ (completion queue) PBL. */
+ struct regpair cq_pbl_addr;
+ /* Start physical address for the CMDQ (command queue) PBL. */
+ struct regpair cmdq_pbl_addr;
+};
+
+#endif /* __QEDF_HSI__ */
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
new file mode 100644
index 000000000000..ee0dcf9d3aba
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -0,0 +1,2282 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include "qedf.h"
+#include <scsi/scsi_tcq.h>
+
+void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
+ unsigned int timer_msec)
+{
+ queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
+ msecs_to_jiffies(timer_msec));
+}
+
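+/*
+ * Delayed-work handler for per-command timeouts (ABTS, ELS and sequence
+ * cleanup requests). Cleans up the timed-out task and drops the references
+ * that would otherwise have been released by the normal completion path.
+ */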
+static void qedf_cmd_timeout(struct work_struct *work)
+{
+
+ struct qedf_ioreq *io_req =
+ container_of(work, struct qedf_ioreq, timeout_work.work);
+ struct qedf_ctx *qedf = io_req->fcport->qedf;
+ struct qedf_rport *fcport = io_req->fcport;
+ u8 op = 0;
+
+ switch (io_req->cmd_type) {
+ case QEDF_ABTS:
+ QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
+ io_req->xid);
+ /* Cleanup timed out ABTS */
+ qedf_initiate_cleanup(io_req, true);
+ complete(&io_req->abts_done);
+
+ /*
+ * Need to call kref_put for reference taken when initiate_abts
+ * was called since abts_compl won't be called now that we've
+ * cleaned up the task.
+ */
+ kref_put(&io_req->refcount, qedf_release_cmd);
+
+ /*
+ * Now that the original I/O and the ABTS are complete see
+ * if we need to reconnect to the target.
+ */
+ qedf_restart_rport(fcport);
+ break;
+ case QEDF_ELS:
+ kref_get(&io_req->refcount);
+ /*
+		 * Don't attempt to clean an ELS timeout as any subsequent
+ * ABTS or cleanup requests just hang. For now just free
+ * the resources of the original I/O and the RRQ
+ */
+ QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
+ io_req->xid);
+ io_req->event = QEDF_IOREQ_EV_ELS_TMO;
+ /* Call callback function to complete command */
+ if (io_req->cb_func && io_req->cb_arg) {
+ op = io_req->cb_arg->op;
+ io_req->cb_func(io_req->cb_arg);
+ io_req->cb_arg = NULL;
+ }
+ qedf_initiate_cleanup(io_req, true);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ break;
+ case QEDF_SEQ_CLEANUP:
+ QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
+ "xid=0x%x.\n", io_req->xid);
+ qedf_initiate_cleanup(io_req, true);
+ io_req->event = QEDF_IOREQ_EV_ELS_TMO;
+ qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
+ break;
+ default:
+ break;
+ }
+}
+
+void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
+{
+ struct io_bdt *bdt_info;
+ struct qedf_ctx *qedf = cmgr->qedf;
+ size_t bd_tbl_sz;
+ u16 min_xid = QEDF_MIN_XID;
+ u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
+ int num_ios;
+ int i;
+ struct qedf_ioreq *io_req;
+
+ num_ios = max_xid - min_xid + 1;
+
+ /* Free fcoe_bdt_ctx structures */
+ if (!cmgr->io_bdt_pool)
+ goto free_cmd_pool;
+
+ bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ if (bdt_info->bd_tbl) {
+ dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
+ bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
+ bdt_info->bd_tbl = NULL;
+ }
+ }
+
+ /* Destroy io_bdt pool */
+ for (i = 0; i < num_ios; i++) {
+ kfree(cmgr->io_bdt_pool[i]);
+ cmgr->io_bdt_pool[i] = NULL;
+ }
+
+ kfree(cmgr->io_bdt_pool);
+ cmgr->io_bdt_pool = NULL;
+
+free_cmd_pool:
+
+ for (i = 0; i < num_ios; i++) {
+ io_req = &cmgr->cmds[i];
+ /* Make sure we free per command sense buffer */
+ if (io_req->sense_buffer)
+ dma_free_coherent(&qedf->pdev->dev,
+ QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
+ io_req->sense_buffer_dma);
+ cancel_delayed_work_sync(&io_req->rrq_work);
+ }
+
+ /* Free command manager itself */
+ vfree(cmgr);
+}
+
+static void qedf_handle_rrq(struct work_struct *work)
+{
+ struct qedf_ioreq *io_req =
+ container_of(work, struct qedf_ioreq, rrq_work.work);
+
+ qedf_send_rrq(io_req);
+
+}
+
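+/*
+ * Allocate the per-adapter command manager: one qedf_ioreq per XID in the
+ * range [QEDF_MIN_XID, FCOE_PARAMS_NUM_TASKS - 1], each with a DMA-coherent
+ * sense buffer and an io_bdt descriptor table for its SGEs.
+ */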
+struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
+{
+ struct qedf_cmd_mgr *cmgr;
+ struct io_bdt *bdt_info;
+ struct qedf_ioreq *io_req;
+ u16 xid;
+ int i;
+ int num_ios;
+ u16 min_xid = QEDF_MIN_XID;
+ u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
+
+ /* Make sure num_queues is already set before calling this function */
+ if (!qedf->num_queues) {
+ QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
+ return NULL;
+ }
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
+ "max_xid 0x%x.\n", min_xid, max_xid);
+ return NULL;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
+ "0x%x.\n", min_xid, max_xid);
+
+ num_ios = max_xid - min_xid + 1;
+
+ cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
+ if (!cmgr) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
+ return NULL;
+ }
+
+ cmgr->qedf = qedf;
+ spin_lock_init(&cmgr->lock);
+
+ /*
+ * Initialize list of qedf_ioreq.
+ */
+ xid = QEDF_MIN_XID;
+
+ for (i = 0; i < num_ios; i++) {
+ io_req = &cmgr->cmds[i];
+ INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
+
+ io_req->xid = xid++;
+
+ INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
+
+ /* Allocate DMA memory to hold sense buffer */
+ io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
+ GFP_KERNEL);
+ if (!io_req->sense_buffer)
+ goto mem_err;
+ }
+
+ /* Allocate pool of io_bdts - one for each qedf_ioreq */
+ cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
+ GFP_KERNEL);
+
+ if (!cmgr->io_bdt_pool) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
+ goto mem_err;
+ }
+
+ for (i = 0; i < num_ios; i++) {
+ cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
+ GFP_KERNEL);
+ if (!cmgr->io_bdt_pool[i]) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
+ "io_bdt_pool[%d].\n", i);
+ goto mem_err;
+ }
+ }
+
+ for (i = 0; i < num_ios; i++) {
+ bdt_info = cmgr->io_bdt_pool[i];
+ bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
+ &bdt_info->bd_tbl_dma, GFP_KERNEL);
+ if (!bdt_info->bd_tbl) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
+ "bdt_tbl[%d].\n", i);
+ goto mem_err;
+ }
+ }
+ atomic_set(&cmgr->free_list_cnt, num_ios);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "cmgr->free_list_cnt=%d.\n",
+ atomic_read(&cmgr->free_list_cnt));
+
+ return cmgr;
+
+mem_err:
+ qedf_cmd_mgr_free(cmgr);
+ return NULL;
+}
+
+struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
+{
+ struct qedf_ctx *qedf = fcport->qedf;
+ struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
+ struct qedf_ioreq *io_req = NULL;
+ struct io_bdt *bd_tbl;
+ u16 xid;
+ uint32_t free_sqes;
+ int i;
+ unsigned long flags;
+
+ free_sqes = atomic_read(&fcport->free_sqes);
+
+ if (!free_sqes) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Returning NULL, free_sqes=%d.\n ",
+ free_sqes);
+ goto out_failed;
+ }
+
+ /* Limit the number of outstanding R/W tasks */
+ if ((atomic_read(&fcport->num_active_ios) >=
+ NUM_RW_TASKS_PER_CONNECTION)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Returning NULL, num_active_ios=%d.\n",
+ atomic_read(&fcport->num_active_ios));
+ goto out_failed;
+ }
+
+	/* Ensure some global TIDs remain reserved for special tasks */
+ if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Returning NULL, free_list_cnt=%d.\n",
+ atomic_read(&cmd_mgr->free_list_cnt));
+ goto out_failed;
+ }
+
+ spin_lock_irqsave(&cmd_mgr->lock, flags);
+ for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
+ io_req = &cmd_mgr->cmds[cmd_mgr->idx];
+ cmd_mgr->idx++;
+ if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
+ cmd_mgr->idx = 0;
+
+ /* Check to make sure command was previously freed */
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
+ break;
+ }
+
+ if (i == FCOE_PARAMS_NUM_TASKS) {
+ spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+ goto out_failed;
+ }
+
+ set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+ spin_unlock_irqrestore(&cmd_mgr->lock, flags);
+
+ atomic_inc(&fcport->num_active_ios);
+ atomic_dec(&fcport->free_sqes);
+ xid = io_req->xid;
+ atomic_dec(&cmd_mgr->free_list_cnt);
+
+ io_req->cmd_mgr = cmd_mgr;
+ io_req->fcport = fcport;
+
+ /* Hold the io_req against deletion */
+ kref_init(&io_req->refcount);
+
+ /* Bind io_bdt for this io_req */
+ /* Have a static link between io_req and io_bdt_pool */
+ bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
+ if (bd_tbl == NULL) {
+ QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ goto out_failed;
+ }
+ bd_tbl->io_req = io_req;
+ io_req->cmd_type = cmd_type;
+
+ /* Reset sequence offset data */
+ io_req->rx_buf_off = 0;
+ io_req->tx_buf_off = 0;
+	io_req->rx_id = 0xffff; /* No RX_ID */
+
+ return io_req;
+
+out_failed:
+ /* Record failure for stats and return NULL to caller */
+ qedf->alloc_failures++;
+ return NULL;
+}
+
+static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
+{
+ struct qedf_mp_req *mp_req = &(io_req->mp_req);
+ struct qedf_ctx *qedf = io_req->fcport->qedf;
+ uint64_t sz = sizeof(struct fcoe_sge);
+
+ /* clear tm flags */
+ mp_req->tm_flags = 0;
+ if (mp_req->mp_req_bd) {
+ dma_free_coherent(&qedf->pdev->dev, sz,
+ mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
+ mp_req->mp_req_bd = NULL;
+ }
+ if (mp_req->mp_resp_bd) {
+ dma_free_coherent(&qedf->pdev->dev, sz,
+ mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
+ mp_req->mp_resp_bd = NULL;
+ }
+ if (mp_req->req_buf) {
+ dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
+ mp_req->req_buf, mp_req->req_buf_dma);
+ mp_req->req_buf = NULL;
+ }
+ if (mp_req->resp_buf) {
+ dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
+ mp_req->resp_buf, mp_req->resp_buf_dma);
+ mp_req->resp_buf = NULL;
+ }
+}
+
+void qedf_release_cmd(struct kref *ref)
+{
+ struct qedf_ioreq *io_req =
+ container_of(ref, struct qedf_ioreq, refcount);
+ struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
+ struct qedf_rport *fcport = io_req->fcport;
+
+ if (io_req->cmd_type == QEDF_ELS ||
+ io_req->cmd_type == QEDF_TASK_MGMT_CMD)
+ qedf_free_mp_resc(io_req);
+
+ atomic_inc(&cmd_mgr->free_list_cnt);
+ atomic_dec(&fcport->num_active_ios);
+ if (atomic_read(&fcport->num_active_ios) < 0)
+ QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
+
+ /* Increment task retry identifier now that the request is released */
+ io_req->task_retry_identifier++;
+
+ clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
+}
+
+static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
+ int bd_index)
+{
+ struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ int frag_size, sg_frags;
+
+ sg_frags = 0;
+ while (sg_len) {
+ if (sg_len > QEDF_BD_SPLIT_SZ)
+ frag_size = QEDF_BD_SPLIT_SZ;
+ else
+ frag_size = sg_len;
+ bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
+ bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
+ bd[bd_index + sg_frags].size = (uint16_t)frag_size;
+
+ addr += (u64)frag_size;
+ sg_frags++;
+ sg_len -= frag_size;
+ }
+ return sg_frags;
+}
+
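+/*
+ * DMA-map the scatterlist of a SCSI command into the firmware SGE (BD)
+ * table. A single small SGE is returned as a cached SGL, entries larger
+ * than QEDF_MAX_BD_LEN are split, and page-unaligned lists force the
+ * slow-path SGL mode. Returns the number of BDs used.
+ */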
+static int qedf_map_sg(struct qedf_ioreq *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct Scsi_Host *host = sc->device->host;
+ struct fc_lport *lport = shost_priv(host);
+ struct qedf_ctx *qedf = lport_priv(lport);
+ struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ struct scatterlist *sg;
+ int byte_count = 0;
+ int sg_count = 0;
+ int bd_count = 0;
+ int sg_frags;
+ unsigned int sg_len;
+ u64 addr, end_addr;
+ int i;
+
+ sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+
+ sg = scsi_sglist(sc);
+
+ /*
+	 * Send a single SGE as a cached SGL when its length is less
+	 * than 64k.
+ */
+ if ((sg_count == 1) && (sg_dma_len(sg) <=
+ QEDF_MAX_SGLEN_FOR_CACHESGL)) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+
+ bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
+ bd[bd_count].sge_addr.hi = (addr >> 32);
+ bd[bd_count].size = (u16)sg_len;
+
+ return ++bd_count;
+ }
+
+ scsi_for_each_sg(sc, sg, sg_count, i) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ end_addr = (u64)(addr + sg_len);
+
+ /*
+		 * First s/g element in the list, so check if the end_addr
+		 * is page aligned. Also check to make sure the length is
+		 * at least page size.
+ */
+ if ((i == 0) && (sg_count > 1) &&
+ ((end_addr % QEDF_PAGE_SIZE) ||
+ sg_len < QEDF_PAGE_SIZE))
+ io_req->use_slowpath = true;
+ /*
+		 * Last s/g element, so check if the start address is page
+ * aligned.
+ */
+ else if ((i == (sg_count - 1)) && (sg_count > 1) &&
+ (addr % QEDF_PAGE_SIZE))
+ io_req->use_slowpath = true;
+ /*
+		 * Intermediate s/g element, so check if the start and end
+		 * addresses are page aligned.
+ */
+ else if ((i != 0) && (i != (sg_count - 1)) &&
+ ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
+ io_req->use_slowpath = true;
+
+ if (sg_len > QEDF_MAX_BD_LEN) {
+ sg_frags = qedf_split_bd(io_req, addr, sg_len,
+ bd_count);
+ } else {
+ sg_frags = 1;
+ bd[bd_count].sge_addr.lo = U64_LO(addr);
+ bd[bd_count].sge_addr.hi = U64_HI(addr);
+ bd[bd_count].size = (uint16_t)sg_len;
+ }
+
+ bd_count += sg_frags;
+ byte_count += sg_len;
+ }
+
+ if (byte_count != scsi_bufflen(sc))
+ QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
+ "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
+ scsi_bufflen(sc), io_req->xid);
+
+ return bd_count;
+}
+
+static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+ struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+ int bd_count;
+
+ if (scsi_sg_count(sc)) {
+ bd_count = qedf_map_sg(io_req);
+ if (bd_count == 0)
+ return -ENOMEM;
+ } else {
+ bd_count = 0;
+ bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
+ bd[0].size = 0;
+ }
+ io_req->bd_tbl->bd_valid = bd_count;
+
+ return 0;
+}
+
+static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
+ struct fcp_cmnd *fcp_cmnd)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+
+ /* fcp_cmnd is 32 bytes */
+ memset(fcp_cmnd, 0, FCP_CMND_LEN);
+
+ /* 8 bytes: SCSI LUN info */
+ int_to_scsilun(sc_cmd->device->lun,
+ (struct scsi_lun *)&fcp_cmnd->fc_lun);
+
+ /* 4 bytes: flag info */
+ fcp_cmnd->fc_pri_ta = 0;
+ fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+ fcp_cmnd->fc_flags = io_req->io_req_flags;
+ fcp_cmnd->fc_cmdref = 0;
+
+ /* Populate data direction */
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
+ else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+ fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+
+ fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+
+ /* 16 bytes: CDB information */
+ memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+ /* 4 bytes: FCP data length */
+ fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+
+}
+
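+/*
+ * Initialize the firmware task context for an initiator read or write:
+ * fill the Y/T/M/U storm sections, select the SGL mode based on the BD
+ * list, and copy in the big-endian FCP_CMND payload and the sense buffer
+ * address.
+ */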
+static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
+ struct qedf_ioreq *io_req, u32 *ptu_invalidate,
+ struct fcoe_task_context *task_ctx)
+{
+ enum fcoe_task_type task_type;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct io_bdt *bd_tbl = io_req->bd_tbl;
+ union fcoe_data_desc_ctx *data_desc;
+ u32 *fcp_cmnd;
+ u32 tmp_fcp_cmnd[8];
+ int cnt, i;
+ int bd_count;
+ struct qedf_ctx *qedf = fcport->qedf;
+ uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
+ u8 tmp_sgl_mode = 0;
+ u8 mst_sgl_mode = 0;
+
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+ io_req->task = task_ctx;
+
+ if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+ task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
+ else
+ task_type = FCOE_TASK_TYPE_READ_INITIATOR;
+
+ /* Y Storm context */
+ task_ctx->ystorm_st_context.expect_first_xfer = 1;
+ task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
+ /* Check if this is required */
+ task_ctx->ystorm_st_context.ox_id = io_req->xid;
+ task_ctx->ystorm_st_context.task_rety_identifier =
+ io_req->task_retry_identifier;
+
+ /* T Storm ag context */
+ SET_FIELD(task_ctx->tstorm_ag_context.flags0,
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
+ task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
+
+ /* T Storm st context */
+ SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
+ 1);
+ task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
+
+ task_ctx->tstorm_st_context.read_only.dev_type =
+ FCOE_TASK_DEV_TYPE_DISK;
+ task_ctx->tstorm_st_context.read_only.conf_supported = 0;
+ task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
+
+ /* Completion queue for response. */
+ task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
+ task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
+ io_req->data_xfer_len;
+ task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
+ lport->e_d_tov;
+
+ task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
+ io_req->fp_idx = cq_idx;
+
+ bd_count = bd_tbl->bd_valid;
+ if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
+ /* Setup WRITE task */
+ struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ task_ctx->ystorm_st_context.task_type =
+ FCOE_TASK_TYPE_WRITE_INITIATOR;
+ data_desc = &task_ctx->ystorm_st_context.data_desc;
+
+ if (io_req->use_slowpath) {
+ SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ FCOE_SLOW_SGL);
+ data_desc->slow.base_sgl_addr.lo =
+ U64_LO(bd_tbl->bd_tbl_dma);
+ data_desc->slow.base_sgl_addr.hi =
+ U64_HI(bd_tbl->bd_tbl_dma);
+ data_desc->slow.remainder_num_sges = bd_count;
+ data_desc->slow.curr_sge_off = 0;
+ data_desc->slow.curr_sgl_index = 0;
+ qedf->slow_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ } else {
+ SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+ (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
+ FCOE_MUL_FAST_SGES);
+
+ if (bd_count == 1) {
+ data_desc->single_sge.sge_addr.lo =
+ fcoe_bd_tbl->sge_addr.lo;
+ data_desc->single_sge.sge_addr.hi =
+ fcoe_bd_tbl->sge_addr.hi;
+ data_desc->single_sge.size =
+ fcoe_bd_tbl->size;
+ data_desc->single_sge.is_valid_sge = 0;
+ qedf->single_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
+ } else {
+ data_desc->fast.sgl_start_addr.lo =
+ U64_LO(bd_tbl->bd_tbl_dma);
+ data_desc->fast.sgl_start_addr.hi =
+ U64_HI(bd_tbl->bd_tbl_dma);
+ data_desc->fast.sgl_byte_offset =
+ data_desc->fast.sgl_start_addr.lo &
+ (QEDF_PAGE_SIZE - 1);
+ if (data_desc->fast.sgl_byte_offset > 0)
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "byte_offset=%u for xid=0x%x.\n",
+					    data_desc->fast.sgl_byte_offset,
+					    io_req->xid);
+ data_desc->fast.task_reuse_cnt =
+ io_req->reuse_count;
+ io_req->reuse_count++;
+ if (io_req->reuse_count == QEDF_MAX_REUSE) {
+ *ptu_invalidate = 1;
+ io_req->reuse_count = 0;
+ }
+ qedf->fast_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
+ }
+ }
+
+ /* T Storm context */
+ task_ctx->tstorm_st_context.read_only.task_type =
+ FCOE_TASK_TYPE_WRITE_INITIATOR;
+
+ /* M Storm context */
+ tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
+ YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
+ SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
+ FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
+ tmp_sgl_mode);
+
+ } else {
+ /* Setup READ task */
+
+ /* M Storm context */
+ struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
+
+ data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
+ task_ctx->mstorm_st_context.fp.data_2_trns_rem =
+ io_req->data_xfer_len;
+
+ if (io_req->use_slowpath) {
+ SET_FIELD(
+ task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
+ FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
+ FCOE_SLOW_SGL);
+ data_desc->slow.base_sgl_addr.lo =
+ U64_LO(bd_tbl->bd_tbl_dma);
+ data_desc->slow.base_sgl_addr.hi =
+ U64_HI(bd_tbl->bd_tbl_dma);
+ data_desc->slow.remainder_num_sges =
+ bd_count;
+ data_desc->slow.curr_sge_off = 0;
+ data_desc->slow.curr_sgl_index = 0;
+ qedf->slow_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+ } else {
+ SET_FIELD(
+ task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
+ FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
+ (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
+ FCOE_MUL_FAST_SGES);
+
+ if (bd_count == 1) {
+ data_desc->single_sge.sge_addr.lo =
+ fcoe_bd_tbl->sge_addr.lo;
+ data_desc->single_sge.sge_addr.hi =
+ fcoe_bd_tbl->sge_addr.hi;
+ data_desc->single_sge.size =
+ fcoe_bd_tbl->size;
+ data_desc->single_sge.is_valid_sge = 0;
+ qedf->single_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
+ } else {
+ data_desc->fast.sgl_start_addr.lo =
+ U64_LO(bd_tbl->bd_tbl_dma);
+ data_desc->fast.sgl_start_addr.hi =
+ U64_HI(bd_tbl->bd_tbl_dma);
+ data_desc->fast.sgl_byte_offset = 0;
+ data_desc->fast.task_reuse_cnt =
+ io_req->reuse_count;
+ io_req->reuse_count++;
+ if (io_req->reuse_count == QEDF_MAX_REUSE) {
+ *ptu_invalidate = 1;
+ io_req->reuse_count = 0;
+ }
+ qedf->fast_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_FAST_SGE;
+ }
+ }
+
+ /* Y Storm context */
+ task_ctx->ystorm_st_context.expect_first_xfer = 0;
+ task_ctx->ystorm_st_context.task_type =
+ FCOE_TASK_TYPE_READ_INITIATOR;
+
+ /* T Storm context */
+ task_ctx->tstorm_st_context.read_only.task_type =
+ FCOE_TASK_TYPE_READ_INITIATOR;
+ mst_sgl_mode = GET_FIELD(
+ task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
+ FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
+ SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
+ mst_sgl_mode);
+ }
+
+ /* fill FCP_CMND IU */
+ fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
+ qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+
+ /* Swap fcp_cmnd since FC is big endian */
+ cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
+
+ for (i = 0; i < cnt; i++) {
+ *fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
+ fcp_cmnd++;
+ }
+
+ /* M Storm context - Sense buffer */
+ task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
+ U64_LO(io_req->sense_buffer_dma);
+ task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
+ U64_HI(io_req->sense_buffer_dma);
+}
+
+void qedf_init_mp_task(struct qedf_ioreq *io_req,
+ struct fcoe_task_context *task_ctx)
+{
+ struct qedf_mp_req *mp_req = &(io_req->mp_req);
+ struct qedf_rport *fcport = io_req->fcport;
+ struct qedf_ctx *qedf = io_req->fcport->qedf;
+ struct fc_frame_header *fc_hdr;
+ enum fcoe_task_type task_type = 0;
+ union fcoe_data_desc_ctx *data_desc;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
+ "for cmd_type = %d\n", io_req->cmd_type);
+
+ qedf->control_requests++;
+
+ /* Obtain task_type */
+ if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
+ (io_req->cmd_type == QEDF_ELS)) {
+ task_type = FCOE_TASK_TYPE_MIDPATH;
+ } else if (io_req->cmd_type == QEDF_ABTS) {
+ task_type = FCOE_TASK_TYPE_ABTS;
+ }
+
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+
+ /* Setup the task from io_req for easy reference */
+ io_req->task = task_ctx;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
+ task_type);
+
+ /* YSTORM only */
+ {
+ /* Initialize YSTORM task context */
+ struct fcoe_tx_mid_path_params *task_fc_hdr =
+ &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
+ memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
+ task_ctx->ystorm_st_context.task_rety_identifier =
+ io_req->task_retry_identifier;
+
+ /* Init SGL parameters */
+ if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
+ (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
+ data_desc = &task_ctx->ystorm_st_context.data_desc;
+ data_desc->slow.base_sgl_addr.lo =
+ U64_LO(mp_req->mp_req_bd_dma);
+ data_desc->slow.base_sgl_addr.hi =
+ U64_HI(mp_req->mp_req_bd_dma);
+ data_desc->slow.remainder_num_sges = 1;
+ data_desc->slow.curr_sge_off = 0;
+ data_desc->slow.curr_sgl_index = 0;
+ }
+
+ fc_hdr = &(mp_req->req_fc_hdr);
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ fc_hdr->fh_ox_id = io_req->xid;
+ fc_hdr->fh_rx_id = htons(0xffff);
+ } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
+ fc_hdr->fh_rx_id = io_req->xid;
+ }
+
+ /* Fill FC Header into middle path buffer */
+ task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
+ task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
+ task_fc_hdr->type = fc_hdr->fh_type;
+ task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
+ task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
+ task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
+ task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
+
+ task_ctx->ystorm_st_context.data_2_trns_rem =
+ io_req->data_xfer_len;
+ task_ctx->ystorm_st_context.task_type = task_type;
+ }
+
+ /* TSTORM ONLY */
+ {
+ task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
+ task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
+		/* Always send middle-path responses on CQ #0 */
+ task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
+ io_req->fp_idx = 0;
+ SET_FIELD(task_ctx->tstorm_ag_context.flags0,
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
+ PROTOCOLID_FCOE);
+ task_ctx->tstorm_st_context.read_only.task_type = task_type;
+ SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
+ FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
+ 1);
+ task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
+ }
+
+ /* MSTORM only */
+ {
+ if (task_type == FCOE_TASK_TYPE_MIDPATH) {
+ /* Initialize task context */
+ data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
+
+ /* Set cache sges address and length */
+ data_desc->slow.base_sgl_addr.lo =
+ U64_LO(mp_req->mp_resp_bd_dma);
+ data_desc->slow.base_sgl_addr.hi =
+ U64_HI(mp_req->mp_resp_bd_dma);
+ data_desc->slow.remainder_num_sges = 1;
+ data_desc->slow.curr_sge_off = 0;
+ data_desc->slow.curr_sgl_index = 0;
+
+ /*
+			 * Also need to fill in the non-fastpath response address
+ * for middle path commands.
+ */
+ task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
+ U64_LO(mp_req->mp_resp_bd_dma);
+ task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
+ U64_HI(mp_req->mp_resp_bd_dma);
+ }
+ }
+
+ /* USTORM ONLY */
+ {
+ task_ctx->ustorm_ag_context.global_cq_num = 0;
+ }
+
+ /* I/O stats. Middle path commands always use slow SGEs */
+ qedf->slow_sge_ios++;
+ io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+}
+
+void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
+ enum fcoe_task_type req_type, u32 offset)
+{
+ struct fcoe_wqe *sqe;
+ uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
+
+ sqe = &fcport->sq[fcport->sq_prod_idx];
+
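+	/*
+	 * sq_prod_idx wraps at the ring size; fw_sq_prod_idx is the
+	 * free-running 16-bit producer value written to the doorbell.
+	 */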
+ fcport->sq_prod_idx++;
+ fcport->fw_sq_prod_idx++;
+ if (fcport->sq_prod_idx == total_sqe)
+ fcport->sq_prod_idx = 0;
+
+ switch (req_type) {
+ case FCOE_TASK_TYPE_WRITE_INITIATOR:
+ case FCOE_TASK_TYPE_READ_INITIATOR:
+ SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
+ if (ptu_invalidate)
+ SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
+ break;
+ case FCOE_TASK_TYPE_MIDPATH:
+ SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
+ break;
+ case FCOE_TASK_TYPE_ABTS:
+ SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
+ SEND_FCOE_ABTS_REQUEST);
+ break;
+ case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
+ SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
+ FCOE_EXCHANGE_CLEANUP);
+ break;
+ case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
+ SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
+ FCOE_SEQUENCE_RECOVERY);
+ /* NOTE: offset param only used for sequence recovery */
+ sqe->additional_info_union.seq_rec_updated_offset = offset;
+ break;
+ case FCOE_TASK_TYPE_UNSOLICITED:
+ break;
+ default:
+ break;
+ }
+
+ sqe->task_id = xid;
+
+ /* Make sure SQ data is coherent */
+ wmb();
+
+}
+
+void qedf_ring_doorbell(struct qedf_rport *fcport)
+{
+ struct fcoe_db_data dbell = { 0 };
+
+ dbell.agg_flags = 0;
+
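+	/*
+	 * Doorbell data: destination XCM, aggregated-command SET, and the
+	 * XCM SQ producer selected as the aggregated value.
+	 */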
+ dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
+ dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
+ dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
+ FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
+
+ dbell.sq_prod = fcport->fw_sq_prod_idx;
+ writel(*(u32 *)&dbell, fcport->p_doorbell);
+	/* Make sure SQ index is updated so f/w processes requests in order */
+ wmb();
+ mmiowb();
+}
+
+static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
+ int8_t direction)
+{
+ struct qedf_ctx *qedf = fcport->qedf;
+ struct qedf_io_log *io_log;
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ unsigned long flags;
+ uint8_t op;
+
+ spin_lock_irqsave(&qedf->io_trace_lock, flags);
+
+ io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
+ io_log->direction = direction;
+ io_log->task_id = io_req->xid;
+ io_log->port_id = fcport->rdata->ids.port_id;
+ io_log->lun = sc_cmd->device->lun;
+ io_log->op = op = sc_cmd->cmnd[0];
+ io_log->lba[0] = sc_cmd->cmnd[2];
+ io_log->lba[1] = sc_cmd->cmnd[3];
+ io_log->lba[2] = sc_cmd->cmnd[4];
+ io_log->lba[3] = sc_cmd->cmnd[5];
+ io_log->bufflen = scsi_bufflen(sc_cmd);
+ io_log->sg_count = scsi_sg_count(sc_cmd);
+ io_log->result = sc_cmd->result;
+ io_log->jiffies = jiffies;
+ io_log->refcount = kref_read(&io_req->refcount);
+
+ if (direction == QEDF_IO_TRACE_REQ) {
+		/* For requests we only care about the submission CPU */
+ io_log->req_cpu = io_req->cpu;
+ io_log->int_cpu = 0;
+ io_log->rsp_cpu = 0;
+ } else if (direction == QEDF_IO_TRACE_RSP) {
+ io_log->req_cpu = io_req->cpu;
+ io_log->int_cpu = io_req->int_cpu;
+ io_log->rsp_cpu = smp_processor_id();
+ }
+
+ io_log->sge_type = io_req->sge_type;
+
+ qedf->io_trace_idx++;
+ if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
+ qedf->io_trace_idx = 0;
+
+ spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
+}
+
+int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct Scsi_Host *host = sc_cmd->device->host;
+ struct fc_lport *lport = shost_priv(host);
+ struct qedf_ctx *qedf = lport_priv(lport);
+ struct fcoe_task_context *task_ctx;
+ u16 xid;
+ enum fcoe_task_type req_type = 0;
+ u32 ptu_invalidate = 0;
+
+	/* Initialize rest of io_req fields */
+ io_req->data_xfer_len = scsi_bufflen(sc_cmd);
+ sc_cmd->SCp.ptr = (char *)io_req;
+ io_req->use_slowpath = false; /* Assume fast SGL by default */
+
+ /* Record which cpu this request is associated with */
+ io_req->cpu = smp_processor_id();
+
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ req_type = FCOE_TASK_TYPE_READ_INITIATOR;
+ io_req->io_req_flags = QEDF_READ;
+ qedf->input_requests++;
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
+ io_req->io_req_flags = QEDF_WRITE;
+ qedf->output_requests++;
+ } else {
+ io_req->io_req_flags = 0;
+ qedf->control_requests++;
+ }
+
+ xid = io_req->xid;
+
+ /* Build buffer descriptor list for firmware from sg list */
+ if (qedf_build_bd_list_from_sg(io_req)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ return -EAGAIN;
+ }
+
+ /* Get the task context */
+ task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
+ if (!task_ctx) {
+ QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
+ xid);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ return -EINVAL;
+ }
+
+ qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
+
+	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+		kref_put(&io_req->refcount, qedf_release_cmd);
+		return -EINVAL;
+	}
+
+ /* Obtain free SQ entry */
+ qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
+
+ /* Ring doorbell */
+ qedf_ring_doorbell(fcport);
+
+ if (qedf_io_tracing && io_req->sc_cmd)
+ qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
+
+ return false;
+}
+
+int
+qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(host);
+ struct qedf_ctx *qedf = lport_priv(lport);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct qedf_rport *fcport = rport->dd_data;
+ struct qedf_ioreq *io_req;
+ int rc = 0;
+ int rval;
+ unsigned long flags = 0;
+
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
+ test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
+ sc_cmd->result = DID_NO_CONNECT << 16;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ sc_cmd->scsi_done(sc_cmd);
+ return 0;
+ }
+
+ /* Retry command if we are doing a qed drain operation */
+ if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+ if (lport->state != LPORT_ST_READY ||
+ atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+	/* rport and fcport are allocated together, so fcport should be non-NULL */
+ fcport = (struct qedf_rport *)&rp[1];
+
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ /*
+ * Session is not offloaded yet. Let SCSI-ml retry
+ * the command.
+ */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+ if (fcport->retry_delay_timestamp) {
+ if (time_after(jiffies, fcport->retry_delay_timestamp)) {
+ fcport->retry_delay_timestamp = 0;
+ } else {
+ /* If retry_delay timer is active, flow off the ML */
+ rc = SCSI_MLQUEUE_TARGET_BUSY;
+ goto exit_qcmd;
+ }
+ }
+
+ io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
+ if (!io_req) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit_qcmd;
+ }
+
+ io_req->sc_cmd = sc_cmd;
+
+ /* Take fcport->rport_lock for posting to fcport send queue */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ if (qedf_post_io_req(fcport, io_req)) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
+ /* Return SQE to pool */
+ atomic_inc(&fcport->free_sqes);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ }
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
+exit_qcmd:
+ return rc;
+}
+
+static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
+ struct fcoe_cqe_rsp_info *fcp_rsp)
+{
+ struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+ struct qedf_ctx *qedf = io_req->fcport->qedf;
+ u8 rsp_flags = fcp_rsp->rsp_flags.flags;
+ int fcp_sns_len = 0;
+ int fcp_rsp_len = 0;
+ uint8_t *rsp_info, *sense_data;
+
+ io_req->fcp_status = FC_GOOD;
+ io_req->fcp_resid = 0;
+ if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
+ FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
+ io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+ io_req->scsi_comp_flags = rsp_flags;
+ CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+ fcp_rsp->scsi_status_code;
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
+ fcp_rsp_len = fcp_rsp->fcp_rsp_len;
+
+ if (rsp_flags &
+ FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
+ fcp_sns_len = fcp_rsp->fcp_sns_len;
+
+ io_req->fcp_rsp_len = fcp_rsp_len;
+ io_req->fcp_sns_len = fcp_sns_len;
+ rsp_info = sense_data = io_req->sense_buffer;
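+	/* rsp info and sense data share the buffer; sense follows rsp info */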
+
+ /* fetch fcp_rsp_code */
+ if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+ /* Only for task management function */
+ io_req->fcp_rsp_code = rsp_info[3];
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
+ /* Adjust sense-data location. */
+ sense_data += fcp_rsp_len;
+ }
+
+ if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Truncating sense buffer\n");
+ fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+ }
+
+ memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+ if (fcp_sns_len)
+ memcpy(sc_cmd->sense_buffer, sense_data,
+ fcp_sns_len);
+}
+
+static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
+{
+ struct scsi_cmnd *sc = io_req->sc_cmd;
+
+ if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
+ dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
+ scsi_sg_count(sc), sc->sc_data_direction);
+ io_req->bd_tbl->bd_valid = 0;
+ }
+}
+
+void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req)
+{
+ u16 xid, rval;
+ struct fcoe_task_context *task_ctx;
+ struct scsi_cmnd *sc_cmd;
+ struct fcoe_cqe_rsp_info *fcp_rsp;
+ struct qedf_rport *fcport;
+ int refcount;
+ u16 scope, qualifier = 0;
+ u8 fw_residual_flag = 0;
+
+ if (!io_req)
+ return;
+ if (!cqe)
+ return;
+
+ xid = io_req->xid;
+ task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
+ sc_cmd = io_req->sc_cmd;
+ fcp_rsp = &cqe->cqe_info.rsp_info;
+
+ if (!sc_cmd) {
+ QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
+ return;
+ }
+
+ if (!sc_cmd->SCp.ptr) {
+ QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
+ "another context.\n");
+ return;
+ }
+
+ if (!sc_cmd->request) {
+ QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
+ "sc_cmd=%p.\n", sc_cmd);
+ return;
+ }
+
+ if (!sc_cmd->request->special) {
+ QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
+ "request not valid, sc_cmd=%p.\n", sc_cmd);
+ return;
+ }
+
+ if (!sc_cmd->request->q) {
+ QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
+ "is not valid, sc_cmd=%p.\n", sc_cmd);
+ return;
+ }
+
+ fcport = io_req->fcport;
+
+ qedf_parse_fcp_rsp(io_req, fcp_rsp);
+
+ qedf_unmap_sg_list(qedf, io_req);
+
+ /* Check for FCP transport error */
+ if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
+ "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
+ io_req->fcp_rsp_code);
+ sc_cmd->result = DID_BUS_BUSY << 16;
+ goto out;
+ }
+
+ fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
+ FCOE_CQE_RSP_INFO_FW_UNDERRUN);
+ if (fw_residual_flag) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
+ "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
+ fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
+ cqe->cqe_info.rsp_info.fw_residual);
+
+ if (io_req->cdb_status == 0)
+ sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
+ else
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+ /* Abort the command since we did not get all the data */
+ init_completion(&io_req->abts_done);
+ rval = qedf_initiate_abts(io_req, true);
+ if (rval) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
+ sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
+ }
+
+ /*
+		 * Set resid to the whole buffer length so we won't try to reuse
+		 * any previously read data.
+ */
+ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+ goto out;
+ }
+
+ switch (io_req->fcp_status) {
+ case FC_GOOD:
+ if (io_req->cdb_status == 0) {
+ /* Good I/O completion */
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ refcount = kref_read(&io_req->refcount);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "%d:0:%d:%d xid=0x%0x op=0x%02x "
+ "lba=%02x%02x%02x%02x cdb_status=%d "
+ "fcp_resid=0x%x refcount=%d.\n",
+ qedf->lport->host->host_no, sc_cmd->device->id,
+ sc_cmd->device->lun, io_req->xid,
+ sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
+ sc_cmd->cmnd[4], sc_cmd->cmnd[5],
+ io_req->cdb_status, io_req->fcp_resid,
+ refcount);
+ sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
+
+ if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
+ io_req->cdb_status == SAM_STAT_BUSY) {
+ /*
+ * Check whether we need to set retry_delay at
+ * all based on retry_delay module parameter
+ * and the status qualifier.
+ */
+
+ /* Upper 2 bits */
+ scope = fcp_rsp->retry_delay_timer & 0xC000;
+ /* Lower 14 bits */
+ qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
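+				/*
+				 * The qualifier is in 100ms units; the
+				 * conversion below (qualifier * HZ / 10)
+				 * yields jiffies.
+				 */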
+
+ if (qedf_retry_delay &&
+ scope > 0 && qualifier > 0 &&
+ qualifier <= 0x3FEF) {
+ /* Check we don't go over the max */
+ if (qualifier > QEDF_RETRY_DELAY_MAX)
+ qualifier =
+ QEDF_RETRY_DELAY_MAX;
+ fcport->retry_delay_timestamp =
+ jiffies + (qualifier * HZ / 10);
+ }
+ }
+ }
+ if (io_req->fcp_resid)
+ scsi_set_resid(sc_cmd, io_req->fcp_resid);
+ break;
+ default:
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
+ io_req->fcp_status);
+ break;
+ }
+
+out:
+ if (qedf_io_tracing)
+ qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
+
+ io_req->sc_cmd = NULL;
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+}
+
+/* Return a SCSI command in some other context besides a normal completion */
+void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
+ int result)
+{
+ u16 xid;
+ struct scsi_cmnd *sc_cmd;
+ int refcount;
+
+ if (!io_req)
+ return;
+
+ xid = io_req->xid;
+ sc_cmd = io_req->sc_cmd;
+
+ if (!sc_cmd) {
+ QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
+ return;
+ }
+
+ if (!sc_cmd->SCp.ptr) {
+ QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
+ "another context.\n");
+ return;
+ }
+
+ qedf_unmap_sg_list(qedf, io_req);
+
+ sc_cmd->result = result << 16;
+ refcount = kref_read(&io_req->refcount);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+ "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
+ "allowed=%d retries=%d refcount=%d.\n",
+ qedf->lport->host->host_no, sc_cmd->device->id,
+ sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
+ sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
+ sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
+ refcount);
+
+ /*
+	 * Set resid to the whole buffer length so we won't try to reuse any
+ * previously read data
+ */
+ scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
+
+ if (qedf_io_tracing)
+ qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
+
+ io_req->sc_cmd = NULL;
+ sc_cmd->SCp.ptr = NULL;
+ sc_cmd->scsi_done(sc_cmd);
+ kref_put(&io_req->refcount, qedf_release_cmd);
+}
+
+/*
+ * Handle warning type CQE completions. This is mainly used for REC timer
+ * popping.
+ */
+void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req)
+{
+ int rval, i;
+ struct qedf_rport *fcport = io_req->fcport;
+ u64 err_warn_bit_map;
+ u8 err_warn = 0xff;
+
+ if (!cqe)
+ return;
+
+ QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
+ "xid=0x%x\n", io_req->xid);
+ QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
+ "err_warn_bitmap=%08x:%08x\n",
+ le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
+ le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
+ QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
+ "rx_buff_off=%08x, rx_id=%04x\n",
+ le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
+ le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
+ le32_to_cpu(cqe->cqe_info.err_info.rx_id));
+
+	/* Collapse the error/warning bitmap into a u64 and use the lowest set bit */
+ err_warn_bit_map = (u64)
+ ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
+ (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
+ for (i = 0; i < 64; i++) {
+ if (err_warn_bit_map & (u64)((u64)1 << i)) {
+ err_warn = i;
+ break;
+ }
+ }
+
+	/* If this is a tape device, check whether the REC TOV expired */
+ if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
+ if (err_warn ==
+ FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
+ QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
+ if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
+ io_req->rx_buf_off =
+ cqe->cqe_info.err_info.rx_buf_off;
+ io_req->tx_buf_off =
+ cqe->cqe_info.err_info.tx_buf_off;
+ io_req->rx_id = cqe->cqe_info.err_info.rx_id;
+ rval = qedf_send_rec(io_req);
+ /*
+ * We only want to abort the io_req if we
+ * can't queue the REC command as we want to
+ * keep the exchange open for recovery.
+ */
+ if (rval)
+ goto send_abort;
+ }
+ return;
+ }
+ }
+
+send_abort:
+ init_completion(&io_req->abts_done);
+ rval = qedf_initiate_abts(io_req, true);
+ if (rval)
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
+}
+
+/* Cleanup a command when we receive an error detection completion */
+void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req)
+{
+ int rval;
+
+ if (!cqe)
+ return;
+
+ QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
+ "xid=0x%x\n", io_req->xid);
+ QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
+ "err_warn_bitmap=%08x:%08x\n",
+ le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
+ le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
+ QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
+ "rx_buff_off=%08x, rx_id=%04x\n",
+ le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
+ le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
+ le32_to_cpu(cqe->cqe_info.err_info.rx_id));
+
+ if (qedf->stop_io_on_error) {
+ qedf_stop_all_io(qedf);
+ return;
+ }
+
+ init_completion(&io_req->abts_done);
+ rval = qedf_initiate_abts(io_req, true);
+ if (rval)
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
+}
+
+static void qedf_flush_els_req(struct qedf_ctx *qedf,
+ struct qedf_ioreq *els_req)
+{
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
+ kref_read(&els_req->refcount));
+
+ /*
+ * Need to distinguish this from a timeout when calling the
+ * els_req->cb_func.
+ */
+ els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
+
+ /* Cancel the timer */
+ cancel_delayed_work_sync(&els_req->timeout_work);
+
+ /* Call callback function to complete command */
+ if (els_req->cb_func && els_req->cb_arg) {
+ els_req->cb_func(els_req->cb_arg);
+ els_req->cb_arg = NULL;
+ }
+
+ /* Release kref for original initiate_els */
+ kref_put(&els_req->refcount, qedf_release_cmd);
+}
+
+/* A value of -1 for lun is a wild card that means flush all
+ * active SCSI I/Os for the target.
+ */
+void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
+{
+ struct qedf_ioreq *io_req;
+ struct qedf_ctx *qedf;
+ struct qedf_cmd_mgr *cmd_mgr;
+ int i, rc;
+
+ if (!fcport)
+ return;
+
+ qedf = fcport->qedf;
+ cmd_mgr = qedf->cmd_mgr;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
+
+ for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
+ io_req = &cmd_mgr->cmds[i];
+
+ if (!io_req)
+ continue;
+ if (io_req->fcport != fcport)
+ continue;
+ if (io_req->cmd_type == QEDF_ELS) {
+ rc = kref_get_unless_zero(&io_req->refcount);
+ if (!rc) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Could not get kref for io_req=0x%p.\n",
+ io_req);
+ continue;
+ }
+ qedf_flush_els_req(qedf, io_req);
+ /*
+ * Release the kref and go back to the top of the
+ * loop.
+ */
+ goto free_cmd;
+ }
+
+ if (!io_req->sc_cmd)
+ continue;
+		if (lun > -1) {
+ if (io_req->sc_cmd->device->lun !=
+ (u64)lun)
+ continue;
+ }
+
+ /*
+ * Use kref_get_unless_zero in the unlikely case the command
+ * we're about to flush was completed in the normal SCSI path
+ */
+ rc = kref_get_unless_zero(&io_req->refcount);
+ if (!rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
+ "io_req=0x%p\n", io_req);
+ continue;
+ }
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Cleanup xid=0x%x.\n", io_req->xid);
+
+ /* Cleanup task and return I/O mid-layer */
+ qedf_initiate_cleanup(io_req, true);
+
+free_cmd:
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ }
+}
+
+/*
+ * Initiate an ABTS middle path command. Note that we don't have to initialize
+ * the task context for an ABTS task.
+ */
+int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
+{
+ struct fc_lport *lport;
+ struct qedf_rport *fcport = io_req->fcport;
+ struct fc_rport_priv *rdata = fcport->rdata;
+ struct qedf_ctx *qedf = fcport->qedf;
+ u16 xid;
+ u32 r_a_tov = 0;
+ int rc = 0;
+ unsigned long flags;
+
+ r_a_tov = rdata->r_a_tov;
+ lport = qedf->lport;
+
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
+ rc = 1;
+ goto abts_err;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
+ rc = 1;
+ goto abts_err;
+ }
+
+ if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
+ QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
+ rc = 1;
+ goto abts_err;
+ }
+
+ /* Ensure room on SQ */
+ if (!atomic_read(&fcport->free_sqes)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
+ rc = 1;
+ goto abts_err;
+ }
+
+
+ kref_get(&io_req->refcount);
+
+ xid = io_req->xid;
+ qedf->control_requests++;
+ qedf->packet_aborts++;
+
+ /* Set the return CPU to be the same as the request one */
+ io_req->cpu = smp_processor_id();
+
+ /* Set the command type to abort */
+ io_req->cmd_type = QEDF_ABTS;
+ io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
+
+ set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
+ "0x%x\n", xid);
+
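+	/* Arm the abort timeout before the ABTS is posted to the SQ */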
+ qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
+
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+
+ /* Add ABTS to send queue */
+ qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
+
+ /* Ring doorbell */
+ qedf_ring_doorbell(fcport);
+
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
+ return rc;
+abts_err:
+ /*
+ * If the ABTS task fails to queue then we need to cleanup the
+	 * If the ABTS task fails to queue then we need to clean up the
+ */
+ qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
+ return rc;
+}
+
+void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req)
+{
+ uint32_t r_ctl;
+ uint16_t xid;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
+ "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
+
+ cancel_delayed_work(&io_req->timeout_work);
+
+ xid = io_req->xid;
+ r_ctl = cqe->cqe_info.abts_info.r_ctl;
+
+ switch (r_ctl) {
+ case FC_RCTL_BA_ACC:
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
+ "ABTS response - ACC Send RRQ after R_A_TOV\n");
+ io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
+ /*
+		 * Don't release this cmd yet. It will be released
+		 * after we get the RRQ response.
+ */
+ kref_get(&io_req->refcount);
+ queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
+ msecs_to_jiffies(qedf->lport->r_a_tov));
+ break;
+ /* For error cases let the cleanup return the command */
+ case FC_RCTL_BA_RJT:
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
+ "ABTS response - RJT\n");
+ io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
+ break;
+ default:
+ QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
+ break;
+ }
+
+ clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
+
+ if (io_req->sc_cmd) {
+ if (io_req->return_scsi_cmd_on_abts)
+ qedf_scsi_done(qedf, io_req, DID_ERROR);
+ }
+
+ /* Notify eh_abort handler that ABTS is complete */
+ complete(&io_req->abts_done);
+
+ kref_put(&io_req->refcount, qedf_release_cmd);
+}
+
+int qedf_init_mp_req(struct qedf_ioreq *io_req)
+{
+ struct qedf_mp_req *mp_req;
+ struct fcoe_sge *mp_req_bd;
+ struct fcoe_sge *mp_resp_bd;
+ struct qedf_ctx *qedf = io_req->fcport->qedf;
+ dma_addr_t addr;
+ uint64_t sz;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
+
+ mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
+ memset(mp_req, 0, sizeof(struct qedf_mp_req));
+
+ if (io_req->cmd_type != QEDF_ELS) {
+ mp_req->req_len = sizeof(struct fcp_cmnd);
+ io_req->data_xfer_len = mp_req->req_len;
+ } else
+ mp_req->req_len = io_req->data_xfer_len;
+
+ mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
+ &mp_req->req_buf_dma, GFP_KERNEL);
+ if (!mp_req->req_buf) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
+ qedf_free_mp_resc(io_req);
+ return -ENOMEM;
+ }
+
+ mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
+ if (!mp_req->resp_buf) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
+ "buffer\n");
+ qedf_free_mp_resc(io_req);
+ return -ENOMEM;
+ }
+
+ /* Allocate and map mp_req_bd and mp_resp_bd */
+ sz = sizeof(struct fcoe_sge);
+ mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
+ &mp_req->mp_req_bd_dma, GFP_KERNEL);
+ if (!mp_req->mp_req_bd) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
+ qedf_free_mp_resc(io_req);
+ return -ENOMEM;
+ }
+
+ mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
+ &mp_req->mp_resp_bd_dma, GFP_KERNEL);
+ if (!mp_req->mp_resp_bd) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
+ qedf_free_mp_resc(io_req);
+ return -ENOMEM;
+ }
+
+ /* Fill bd table */
+ addr = mp_req->req_buf_dma;
+ mp_req_bd = mp_req->mp_req_bd;
+ mp_req_bd->sge_addr.lo = U64_LO(addr);
+ mp_req_bd->sge_addr.hi = U64_HI(addr);
+ mp_req_bd->size = QEDF_PAGE_SIZE;
+
+ /*
+ * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd
+ * entry in the bd table
+ */
+ mp_resp_bd = mp_req->mp_resp_bd;
+ addr = mp_req->resp_buf_dma;
+ mp_resp_bd->sge_addr.lo = U64_LO(addr);
+ mp_resp_bd->sge_addr.hi = U64_HI(addr);
+ mp_resp_bd->size = QEDF_PAGE_SIZE;
+
+ return 0;
+}
+
+/*
+ * Last ditch effort to clear the port if it's stuck. Used only after a
+ * cleanup task times out.
+ */
+static void qedf_drain_request(struct qedf_ctx *qedf)
+{
+ if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
+ return;
+ }
+
+ /* Set bit to return all queuecommand requests as busy */
+ set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
+
+ /* Call qed drain request for function. Should be synchronous */
+ qed_ops->common->drain(qedf->cdev);
+
+ /* Settle time for CQEs to be returned */
+ msleep(100);
+
+ /* Unplug and continue */
+ clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
+}
+
+/*
+ * Returns SUCCESS if the cleanup task does not timeout, otherwise return
+ * FAILURE.
+ */
+int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
+ bool return_scsi_cmd_on_abts)
+{
+ struct qedf_rport *fcport;
+ struct qedf_ctx *qedf;
+ uint16_t xid;
+ struct fcoe_task_context *task;
+ int tmo = 0;
+ int rc = SUCCESS;
+ unsigned long flags;
+
+ fcport = io_req->fcport;
+ if (!fcport) {
+ QEDF_ERR(NULL, "fcport is NULL.\n");
+ return SUCCESS;
+ }
+
+ qedf = fcport->qedf;
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is NULL.\n");
+ return SUCCESS;
+ }
+
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+ test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
+ "cleanup processing or already completed.\n",
+ io_req->xid);
+ return SUCCESS;
+ }
+
+ /* Ensure room on SQ */
+ if (!atomic_read(&fcport->free_sqes)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
+ return FAILED;
+ }
+
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
+ io_req->xid);
+
+ /* Cleanup cmds re-use the same TID as the original I/O */
+ xid = io_req->xid;
+ io_req->cmd_type = QEDF_CLEANUP;
+ io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
+
+ /* Set the return CPU to be the same as the request one */
+ io_req->cpu = smp_processor_id();
+
+ set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+
+ task = qedf_get_task_mem(&qedf->tasks, xid);
+
+ init_completion(&io_req->tm_done);
+
+ /* Obtain free SQ entry */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
+
+ /* Ring doorbell */
+ qedf_ring_doorbell(fcport);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
+ tmo = wait_for_completion_timeout(&io_req->tm_done,
+ QEDF_CLEANUP_TIMEOUT * HZ);
+
+ if (!tmo) {
+ rc = FAILED;
+ /* Timeout case */
+ QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
+ "xid=%x.\n", io_req->xid);
+ clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+ /* Issue a drain request if cleanup task times out */
+ QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
+ qedf_drain_request(qedf);
+ }
+
+ if (io_req->sc_cmd) {
+ if (io_req->return_scsi_cmd_on_abts)
+ qedf_scsi_done(qedf, io_req, DID_ERROR);
+ }
+
+ if (rc == SUCCESS)
+ io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
+ else
+ io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
+
+ return rc;
+}
+
+void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req)
+{
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
+ io_req->xid);
+
+ clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+
+ /* Complete so we can finish cleaning up the I/O */
+ complete(&io_req->tm_done);
+}
+
+static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
+ uint8_t tm_flags)
+{
+ struct qedf_ioreq *io_req;
+ struct qedf_mp_req *tm_req;
+ struct fcoe_task_context *task;
+ struct fc_frame_header *fc_hdr;
+ struct fcp_cmnd *fcp_cmnd;
+ struct qedf_ctx *qedf = fcport->qedf;
+ int rc = 0;
+ uint16_t xid;
+ uint32_t sid, did;
+ int tmo = 0;
+ unsigned long flags;
+
+ if (!sc_cmd) {
+ QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
+ return FAILED;
+ }
+
+ if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+ QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
+ rc = FAILED;
+ return FAILED;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "portid = 0x%x "
+ "tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
+
+ io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
+ if (!io_req) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
+ rc = -EAGAIN;
+ goto reset_tmf_err;
+ }
+
+ /* Initialize rest of io_req fields */
+ io_req->sc_cmd = sc_cmd;
+ io_req->fcport = fcport;
+ io_req->cmd_type = QEDF_TASK_MGMT_CMD;
+
+ /* Set the return CPU to be the same as the request one */
+ io_req->cpu = smp_processor_id();
+
+ tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
+
+ rc = qedf_init_mp_req(io_req);
+ if (rc == FAILED) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
+ "failed\n");
+ kref_put(&io_req->refcount, qedf_release_cmd);
+ goto reset_tmf_err;
+ }
+
+ /* Set TM flags */
+ io_req->io_req_flags = 0;
+ tm_req->tm_flags = tm_flags;
+
+ /* Default is to return a SCSI command when an error occurs */
+ io_req->return_scsi_cmd_on_abts = true;
+
+ /* Fill FCP_CMND */
+ qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
+ fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
+ memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
+ fcp_cmnd->fc_dl = 0;
+
+ /* Fill FC header */
+ fc_hdr = &(tm_req->req_fc_hdr);
+ sid = fcport->sid;
+ did = fcport->rdata->ids.port_id;
+ __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
+ FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ /* Obtain exchange id */
+ xid = io_req->xid;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
+ "0x%x\n", xid);
+
+ /* Initialize task context for this IO request */
+ task = qedf_get_task_mem(&qedf->tasks, xid);
+ qedf_init_mp_task(io_req, task);
+
+ init_completion(&io_req->tm_done);
+
+ /* Obtain free SQ entry */
+ spin_lock_irqsave(&fcport->rport_lock, flags);
+ qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
+
+ /* Ring doorbell */
+ qedf_ring_doorbell(fcport);
+ spin_unlock_irqrestore(&fcport->rport_lock, flags);
+
+ tmo = wait_for_completion_timeout(&io_req->tm_done,
+ QEDF_TM_TIMEOUT * HZ);
+
+ if (!tmo) {
+ rc = FAILED;
+ QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
+ } else {
+ /* Check TMF response code */
+ if (io_req->fcp_rsp_code == 0)
+ rc = SUCCESS;
+ else
+ rc = FAILED;
+ }
+
+ if (tm_flags == FCP_TMF_LUN_RESET)
+ qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
+ else
+ qedf_flush_active_ios(fcport, -1);
+
+ kref_put(&io_req->refcount, qedf_release_cmd);
+
+ if (rc != SUCCESS) {
+ QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
+ rc = FAILED;
+ } else {
+ QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
+ rc = SUCCESS;
+ }
+reset_tmf_err:
+ return rc;
+}
+
+int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
+ struct qedf_ctx *qedf;
+ struct fc_lport *lport;
+ int rc = SUCCESS;
+ int rval;
+
+ rval = fc_remote_port_chkready(rport);
+
+ if (rval) {
+ QEDF_ERR(NULL, "device_reset rport not ready\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ if (fcport == NULL) {
+ QEDF_ERR(NULL, "device_reset: rport is NULL\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ qedf = fcport->qedf;
+ lport = qedf->lport;
+
+ if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
+ test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
+ rc = SUCCESS;
+ goto tmf_err;
+ }
+
+ if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
+ rc = FAILED;
+ goto tmf_err;
+ }
+
+ rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
+
+tmf_err:
+ return rc;
+}
+
+void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
+ struct qedf_ioreq *io_req)
+{
+ struct fcoe_cqe_rsp_info *fcp_rsp;
+ struct fcoe_cqe_midpath_info *mp_info;
+
+
+ /* Get TMF response length from CQE */
+ mp_info = &cqe->cqe_info.midpath_info;
+ io_req->mp_req.resp_len = mp_info->data_placement_size;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
+ "Response len is %d.\n", io_req->mp_req.resp_len);
+
+ fcp_rsp = &cqe->cqe_info.rsp_info;
+ qedf_parse_fcp_rsp(io_req, fcp_rsp);
+
+ io_req->sc_cmd = NULL;
+ complete(&io_req->tm_done);
+}
+
+void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
+ struct fcoe_cqe *cqe)
+{
+ unsigned long flags;
+ uint16_t tmp;
+ uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
+ u32 payload_len, crc;
+ struct fc_frame_header *fh;
+ struct fc_frame *fp;
+ struct qedf_io_work *io_work;
+ u32 bdq_idx;
+ void *bdq_addr;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
+ "address.hi=%x address.lo=%x opaque_data.hi=%x "
+ "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
+ le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
+ le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
+ le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
+ le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
+ qedf->bdq_prod_idx, pktlen);
+
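+	/* opaque.lo carries the index of the BDQ buffer the firmware used */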
+ bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
+ if (bdq_idx >= QEDF_BDQ_SIZE) {
+ QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
+ bdq_idx);
+ goto increment_prod;
+ }
+
+ bdq_addr = qedf->bdq[bdq_idx].buf_addr;
+ if (!bdq_addr) {
+ QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
+ "unsolicited packet.\n");
+ goto increment_prod;
+ }
+
+ if (qedf_dump_frames) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
+ "BDQ frame is at addr=%p.\n", bdq_addr);
+ print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
+ (void *)bdq_addr, pktlen, false);
+ }
+
+ /* Allocate frame */
+ payload_len = pktlen - sizeof(struct fc_frame_header);
+ fp = fc_frame_alloc(qedf->lport, payload_len);
+ if (!fp) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
+ goto increment_prod;
+ }
+
+ /* Copy data from BDQ buffer into fc_frame struct */
+ fh = (struct fc_frame_header *)fc_frame_header_get(fp);
+ memcpy(fh, (void *)bdq_addr, pktlen);
+
+ /* Initialize the frame so libfc sees it as a valid frame */
+ crc = fcoe_fc_crc(fp);
+ fc_frame_init(fp);
+ fr_dev(fp) = qedf->lport;
+ fr_sof(fp) = FC_SOF_I3;
+ fr_eof(fp) = FC_EOF_T;
+ fr_crc(fp) = cpu_to_le32(~crc);
+
+ /*
+ * We need to return the frame back up to libfc in a non-atomic
+ * context
+ */
+ io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
+ if (!io_work) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
+ "work for I/O completion.\n");
+ fc_frame_free(fp);
+ goto increment_prod;
+ }
+ memset(io_work, 0, sizeof(struct qedf_io_work));
+
+ INIT_WORK(&io_work->work, qedf_fp_io_handler);
+
+ /* Copy contents of CQE for deferred processing */
+ memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
+
+ io_work->qedf = qedf;
+ io_work->fp = fp;
+
+ queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
+increment_prod:
+ spin_lock_irqsave(&qedf->hba_lock, flags);
+
+ /* Increment producer to let f/w know we've handled the frame */
+ qedf->bdq_prod_idx++;
+
+ /* Producer index wraps at uint16_t boundary */
+ if (qedf->bdq_prod_idx == 0xffff)
+ qedf->bdq_prod_idx = 0;
+
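+	/*
+	 * Update both the primary and secondary BDQ producers; the readbacks
+	 * flush the posted writes.
+	 */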
+ writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
+ tmp = readw(qedf->bdq_primary_prod);
+ writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
+ tmp = readw(qedf->bdq_secondary_prod);
+
+ spin_unlock_irqrestore(&qedf->hba_lock, flags);
+}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
new file mode 100644
index 000000000000..d9d7a86b5f8b
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -0,0 +1,3336 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/crc32.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <scsi/libfc.h>
+#include <scsi/scsi_host.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+#include "qedf.h"
+
+const struct qed_fcoe_ops *qed_ops;
+
+static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void qedf_remove(struct pci_dev *pdev);
+
+extern struct qedf_debugfs_ops qedf_debugfs_ops;
+extern struct file_operations qedf_dbg_fops;
+
+/*
+ * Driver module parameters.
+ */
+static unsigned int qedf_dev_loss_tmo = 60;
+module_param_named(dev_loss_tmo, qedf_dev_loss_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
+ "remote ports (default 60)");
+
+uint qedf_debug = QEDF_LOG_INFO;
+module_param_named(debug, qedf_debug, uint, S_IRUGO);
+MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
+ " mask");
+
+static uint qedf_fipvlan_retries = 30;
+module_param_named(fipvlan_retries, qedf_fipvlan_retries, int, S_IRUGO);
+MODULE_PARM_DESC(fipvlan_retries, " Number of FIP VLAN requests to attempt "
+ "before giving up (default 30)");
+
+static uint qedf_fallback_vlan = QEDF_FALLBACK_VLAN;
+module_param_named(fallback_vlan, qedf_fallback_vlan, int, S_IRUGO);
+MODULE_PARM_DESC(fallback_vlan, " VLAN ID to try if fip vlan request fails "
+ "(default 1002).");
+
+static uint qedf_default_prio = QEDF_DEFAULT_PRIO;
+module_param_named(default_prio, qedf_default_prio, int, S_IRUGO);
+MODULE_PARM_DESC(default_prio, " Default 802.1q priority for FIP and FCoE"
+ " traffic (default 3).");
+
+uint qedf_dump_frames;
+module_param_named(dump_frames, qedf_dump_frames, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dump_frames, " Print the skb data of FIP and FCoE frames "
+ "(default off)");
+
+static uint qedf_queue_depth;
+module_param_named(queue_depth, qedf_queue_depth, int, S_IRUGO);
+MODULE_PARM_DESC(queue_depth, " Sets the queue depth for all LUNs discovered "
+ "by the qedf driver. Default is 0 (use OS default).");
+
+uint qedf_io_tracing;
+module_param_named(io_tracing, qedf_io_tracing, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(io_tracing, " Enable logging of SCSI requests/completions "
+ "into trace buffer. (default off).");
+
+static uint qedf_max_lun = MAX_FIBRE_LUNS;
+module_param_named(max_lun, qedf_max_lun, int, S_IRUGO);
+MODULE_PARM_DESC(max_lun, " Sets the maximum luns per target that the driver "
+ "supports. (default 0xffffffff)");
+
+uint qedf_link_down_tmo;
+module_param_named(link_down_tmo, qedf_link_down_tmo, int, S_IRUGO);
+MODULE_PARM_DESC(link_down_tmo, " Delays informing the fcoe transport that the "
+ "link is down by N seconds.");
+
+bool qedf_retry_delay;
+module_param_named(retry_delay, qedf_retry_delay, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(retry_delay, " Enable/disable handling of the FCP_RSP IU "
+	"retry delay timer (default off).");
+
+static uint qedf_dp_module;
+module_param_named(dp_module, qedf_dp_module, uint, S_IRUGO);
+MODULE_PARM_DESC(dp_module, " Bit flags controlling verbose printks passed "
+	"to the qed module during probe.");
+
+static uint qedf_dp_level;
+module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
+MODULE_PARM_DESC(dp_level, " Printk verbosity control passed to the qed "
+	"module during probe (0-3; 0 is most verbose).");
+
+struct workqueue_struct *qedf_io_wq;
+
+static struct fcoe_percpu_s qedf_global;
+static DEFINE_SPINLOCK(qedf_global_lock);
+
+static struct kmem_cache *qedf_io_work_cache;
+
+void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
+{
+ qedf->vlan_id = vlan_id;
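+	/* Fold the default 802.1p priority into the upper bits of the tag */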
+ qedf->vlan_id |= qedf_default_prio << VLAN_PRIO_SHIFT;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x "
+ "prio=%d.\n", vlan_id, qedf_default_prio);
+}
+
+/* Returns true if we have a valid vlan, false otherwise */
+static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
+{
+ int rc;
+
+ if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Link not up.\n");
+ return false;
+ }
+
+ while (qedf->fipvlan_retries--) {
+ if (qedf->vlan_id > 0)
+ return true;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Retry %d.\n", qedf->fipvlan_retries);
+ init_completion(&qedf->fipvlan_compl);
+ qedf_fcoe_send_vlan_req(qedf);
+ rc = wait_for_completion_timeout(&qedf->fipvlan_compl,
+ 1 * HZ);
+ if (rc > 0) {
+ fcoe_ctlr_link_up(&qedf->ctlr);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void qedf_handle_link_update(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, link_update.work);
+ int rc;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Entered.\n");
+
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
+ rc = qedf_initiate_fipvlan_req(qedf);
+ if (rc)
+ return;
+ /*
+		 * If we get here then we never received a response to our
+ * fip vlan request so set the vlan_id to the default and
+ * tell FCoE that the link is up
+ */
+ QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
+ "response, falling back to default VLAN %d.\n",
+ qedf_fallback_vlan);
+		qedf_set_vlan_id(qedf, qedf_fallback_vlan);
+
+ /*
+ * Zero out data_src_addr so we'll update it with the new
+ * lport port_id
+ */
+ eth_zero_addr(qedf->data_src_addr);
+ fcoe_ctlr_link_up(&qedf->ctlr);
+ } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
+ /*
+ * If we hit here and link_down_tmo_valid is still 1 it means
+ * that link_down_tmo timed out so set it to 0 to make sure any
+ * other readers have accurate state.
+ */
+ atomic_set(&qedf->link_down_tmo_valid, 0);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Calling fcoe_ctlr_link_down().\n");
+ fcoe_ctlr_link_down(&qedf->ctlr);
+ qedf_wait_for_upload(qedf);
+ /* Reset the number of FIP VLAN retries */
+ qedf->fipvlan_retries = qedf_fipvlan_retries;
+ }
+}
+
+static void qedf_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
+ void *arg)
+{
+ struct fc_exch *exch = fc_seq_exch(seq);
+ struct fc_lport *lport = exch->lp;
+ struct qedf_ctx *qedf = lport_priv(lport);
+
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is NULL.\n");
+ return;
+ }
+
+ /*
+ * If ERR_PTR is set then don't try to stat anything as it will cause
+ * a crash when we access fp.
+ */
+ if (IS_ERR(fp)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "fp has IS_ERR() set.\n");
+ goto skip_stat;
+ }
+
+ /* Log stats for FLOGI reject */
+ if (fc_frame_payload_op(fp) == ELS_LS_RJT)
+ qedf->flogi_failed++;
+
+ /* Complete flogi_compl so we can proceed to sending ADISCs */
+ complete(&qedf->flogi_compl);
+
+skip_stat:
+ /* Report response to libfc */
+ fc_lport_flogi_resp(seq, fp, lport);
+}
+
+static struct fc_seq *qedf_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timeout)
+{
+ struct qedf_ctx *qedf = lport_priv(lport);
+
+ /*
+	 * Intercept FLOGI for statistics purposes. Note we use the resp
+ * callback to tell if this is really a flogi.
+ */
+ if (resp == fc_lport_flogi_resp) {
+ qedf->flogi_cnt++;
+ return fc_elsct_send(lport, did, fp, op, qedf_flogi_resp,
+ arg, timeout);
+ }
+
+ return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
+}
+
+int qedf_send_flogi(struct qedf_ctx *qedf)
+{
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+
+ lport = qedf->lport;
+
+ if (!lport->tt.elsct_send)
+ return -EINVAL;
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp) {
+ QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
+ "Sending FLOGI to reestablish session with switch.\n");
+ lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
+ ELS_FLOGI, qedf_flogi_resp, lport, lport->r_a_tov);
+
+ init_completion(&qedf->flogi_compl);
+
+ return 0;
+}
+
+struct qedf_tmp_rdata_item {
+ struct fc_rport_priv *rdata;
+ struct list_head list;
+};
+
+/*
+ * This function is called if link_down_tmo is in use. If we get a link up and
+ * link_down_tmo has not expired then use just FLOGI/ADISC to recover our
+ * sessions with targets. Otherwise, just call fcoe_ctlr_link_up().
+ */
+static void qedf_link_recovery(struct work_struct *work)
+{
+ struct qedf_ctx *qedf =
+ container_of(work, struct qedf_ctx, link_recovery.work);
+ struct qedf_rport *fcport;
+ struct fc_rport_priv *rdata;
+ struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item;
+ bool rc;
+ int retries = 30;
+ int rval, i;
+ struct list_head rdata_login_list;
+
+ INIT_LIST_HEAD(&rdata_login_list);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Link down tmo did not expire.\n");
+
+ /*
+ * Essentially reset the fcoe_ctlr here without affecting the state
+ * of the libfc structs.
+ */
+ qedf->ctlr.state = FIP_ST_LINK_WAIT;
+ fcoe_ctlr_link_down(&qedf->ctlr);
+
+ /*
+ * Bring the link up before we send the fipvlan request so libfcoe
+ * can select a new fcf in parallel
+ */
+ fcoe_ctlr_link_up(&qedf->ctlr);
+
+	/* The link went down and came back up, so verify which VLAN we're on */
+ qedf->fipvlan_retries = qedf_fipvlan_retries;
+ rc = qedf_initiate_fipvlan_req(qedf);
+ if (!rc)
+ return;
+
+ /*
+	 * We need to wait for an FCF to be selected after the
+	 * fcoe_ctlr_link_up() call, otherwise the FLOGI will be rejected.
+ */
+ while (retries > 0) {
+ if (qedf->ctlr.sel_fcf) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "FCF reselected, proceeding with FLOGI.\n");
+ break;
+ }
+ msleep(500);
+ retries--;
+ }
+
+ if (retries < 1) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
+ "FCF selection.\n");
+ return;
+ }
+
+ rval = qedf_send_flogi(qedf);
+ if (rval)
+ return;
+
+ /* Wait for FLOGI completion before proceeding with sending ADISCs */
+ i = wait_for_completion_timeout(&qedf->flogi_compl,
+ qedf->lport->r_a_tov);
+ if (i == 0) {
+ QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
+ return;
+ }
+
+ /*
+ * Call lport->tt.rport_login which will cause libfc to send an
+ * ADISC since the rport is in state ready.
+ */
+ rcu_read_lock();
+ list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+ rdata = fcport->rdata;
+ if (rdata == NULL)
+ continue;
+ rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item),
+ GFP_ATOMIC);
+ if (!rdata_item)
+ continue;
+ if (kref_get_unless_zero(&rdata->kref)) {
+ rdata_item->rdata = rdata;
+ list_add(&rdata_item->list, &rdata_login_list);
+ } else
+ kfree(rdata_item);
+ }
+ rcu_read_unlock();
+ /*
+ * Do the fc_rport_login outside of the rcu lock so we don't take a
+ * mutex in an atomic context.
+ */
+ list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list,
+ list) {
+ list_del(&rdata_item->list);
+ fc_rport_login(rdata_item->rdata);
+ kref_put(&rdata_item->rdata->kref, fc_rport_destroy);
+ kfree(rdata_item);
+ }
+}
+
+static void qedf_update_link_speed(struct qedf_ctx *qedf,
+ struct qed_link_output *link)
+{
+ struct fc_lport *lport = qedf->lport;
+
+ lport->link_speed = FC_PORTSPEED_UNKNOWN;
+ lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
+
+ /* Set fc_host link speed */
+ switch (link->speed) {
+ case 10000:
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ break;
+ case 25000:
+ lport->link_speed = FC_PORTSPEED_25GBIT;
+ break;
+ case 40000:
+ lport->link_speed = FC_PORTSPEED_40GBIT;
+ break;
+ case 50000:
+ lport->link_speed = FC_PORTSPEED_50GBIT;
+ break;
+ case 100000:
+ lport->link_speed = FC_PORTSPEED_100GBIT;
+ break;
+ default:
+ lport->link_speed = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+
+ /*
+ * Set supported link speed by querying the supported
+ * capabilities of the link.
+ */
+ if (link->supported_caps & SUPPORTED_10000baseKR_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+ if (link->supported_caps & SUPPORTED_25000baseKR_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
+ if (link->supported_caps & SUPPORTED_40000baseLR4_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
+ if (link->supported_caps & SUPPORTED_50000baseKR2_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
+ if (link->supported_caps & SUPPORTED_100000baseKR4_Full)
+ lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
+ fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+}
+
+static void qedf_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+
+ if (link->link_up) {
+ QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
+ link->speed / 1000);
+
+ /* Cancel any pending link down work */
+ cancel_delayed_work(&qedf->link_update);
+
+ atomic_set(&qedf->link_state, QEDF_LINK_UP);
+ qedf_update_link_speed(qedf, link);
+
+ if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
+ QEDF_ERR(&(qedf->dbg_ctx), "DCBx done.\n");
+ if (atomic_read(&qedf->link_down_tmo_valid) > 0)
+ queue_delayed_work(qedf->link_update_wq,
+ &qedf->link_recovery, 0);
+ else
+ queue_delayed_work(qedf->link_update_wq,
+ &qedf->link_update, 0);
+ atomic_set(&qedf->link_down_tmo_valid, 0);
+ }
+
+ } else {
+ QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
+
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ /*
+ * Flag that we're waiting for the link to come back up before
+ * informing the fcoe layer of the event.
+ */
+ if (qedf_link_down_tmo > 0) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Starting link down tmo.\n");
+ atomic_set(&qedf->link_down_tmo_valid, 1);
+ }
+ qedf->vlan_id = 0;
+ qedf_update_link_speed(qedf, link);
+ queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
+ qedf_link_down_tmo * HZ);
+ }
+}
+
+
+static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
+
+ QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
+ "prio=%d.\n", get->operational.valid, get->operational.enabled,
+ get->operational.app_prio.fcoe);
+
+ if (get->operational.enabled && get->operational.valid) {
+ /* If DCBX was already negotiated on link up then just exit */
+ if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "DCBX already set on link up.\n");
+ return;
+ }
+
+ atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
+
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
+ if (atomic_read(&qedf->link_down_tmo_valid) > 0)
+ queue_delayed_work(qedf->link_update_wq,
+ &qedf->link_recovery, 0);
+ else
+ queue_delayed_work(qedf->link_update_wq,
+ &qedf->link_update, 0);
+ atomic_set(&qedf->link_down_tmo_valid, 0);
+ }
+ }
+
+}
+
+static u32 qedf_get_login_failures(void *cookie)
+{
+ struct qedf_ctx *qedf;
+
+ qedf = (struct qedf_ctx *)cookie;
+ return qedf->flogi_failed;
+}
+
+static struct qed_fcoe_cb_ops qedf_cb_ops = {
+ {
+ .link_update = qedf_link_update,
+ .dcbx_aen = qedf_dcbx_handler,
+ }
+};
+
+/*
+ * Various transport templates.
+ */
+
+static struct scsi_transport_template *qedf_fc_transport_template;
+static struct scsi_transport_template *qedf_fc_vport_transport_template;
+
+/*
+ * SCSI EH handlers
+ */
+static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct qedf_rport *fcport;
+ struct fc_lport *lport;
+ struct qedf_ctx *qedf;
+ struct qedf_ioreq *io_req;
+ int rc = FAILED;
+ int rval;
+
+ if (fc_remote_port_chkready(rport)) {
+ QEDF_ERR(NULL, "rport not ready\n");
+ goto out;
+ }
+
+ lport = shost_priv(sc_cmd->device->host);
+ qedf = (struct qedf_ctx *)lport_priv(lport);
+
+ if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "link not ready.\n");
+ goto out;
+ }
+
+ fcport = (struct qedf_rport *)&rp[1];
+
+ io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
+ if (!io_req) {
+ QEDF_ERR(&(qedf->dbg_ctx), "io_req is NULL.\n");
+ rc = SUCCESS;
+ goto out;
+ }
+
+ if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
+ test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
+ test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
+ "cleanup or abort processing or already "
+ "completed.\n", io_req->xid);
+ rc = SUCCESS;
+ goto out;
+ }
+
+ QEDF_ERR(&(qedf->dbg_ctx), "Aborting io_req sc_cmd=%p xid=0x%x "
+ "fp_idx=%d.\n", sc_cmd, io_req->xid, io_req->fp_idx);
+
+ if (qedf->stop_io_on_error) {
+ qedf_stop_all_io(qedf);
+ rc = SUCCESS;
+ goto out;
+ }
+
+ init_completion(&io_req->abts_done);
+ rval = qedf_initiate_abts(io_req, true);
+ if (rval) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
+ goto out;
+ }
+
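+	/* abts_done is completed by qedf_process_abts_compl() */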
+ wait_for_completion(&io_req->abts_done);
+
+ if (io_req->event == QEDF_IOREQ_EV_ABORT_SUCCESS ||
+ io_req->event == QEDF_IOREQ_EV_ABORT_FAILED ||
+ io_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS) {
+ /*
+		 * If we get a response to the abort this is success from
+ * the perspective that all references to the command have
+ * been removed from the driver and firmware
+ */
+ rc = SUCCESS;
+ } else {
+ /* If the abort and cleanup failed then return a failure */
+ rc = FAILED;
+ }
+
+ if (rc == SUCCESS)
+ QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
+ io_req->xid);
+ else
+ QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
+ io_req->xid);
+
+out:
+ return rc;
+}
+
+static int qedf_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+ QEDF_ERR(NULL, "TARGET RESET Issued...");
+ return qedf_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+static int qedf_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ QEDF_ERR(NULL, "LUN RESET Issued...\n");
+ return qedf_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+void qedf_wait_for_upload(struct qedf_ctx *qedf)
+{
+ while (1) {
+ if (atomic_read(&qedf->num_offloads))
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Waiting for all uploads to complete.\n");
+ else
+ break;
+ msleep(500);
+ }
+}
+
+/* Reset the host by gracefully logging out and then logging back in */
+static int qedf_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport;
+ struct qedf_ctx *qedf;
+
+ lport = shost_priv(sc_cmd->device->host);
+
+ if (lport->vport) {
+ QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+ return SUCCESS;
+ }
+
+ qedf = (struct qedf_ctx *)lport_priv(lport);
+
+ if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
+ test_bit(QEDF_UNLOADING, &qedf->flags) ||
+ test_bit(QEDF_DBG_STOP_IO, &qedf->flags))
+ return FAILED;
+
+ QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...");
+
+ /* For host reset, essentially do a soft link up/down */
+ atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
+ atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
+ queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
+ 0);
+ qedf_wait_for_upload(qedf);
+ atomic_set(&qedf->link_state, QEDF_LINK_UP);
+ qedf->vlan_id = 0;
+ queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
+ 0);
+
+ return SUCCESS;
+}
+
+static int qedf_slave_configure(struct scsi_device *sdev)
+{
+ if (qedf_queue_depth)
+ scsi_change_queue_depth(sdev, qedf_queue_depth);
+
+ return 0;
+}
+
+static struct scsi_host_template qedf_host_template = {
+ .module = THIS_MODULE,
+ .name = QEDF_MODULE_NAME,
+ .this_id = -1,
+ .cmd_per_lun = 3,
+ .use_clustering = ENABLE_CLUSTERING,
+ .max_sectors = 0xffff,
+ .queuecommand = qedf_queuecommand,
+ .shost_attrs = qedf_host_attrs,
+ .eh_abort_handler = qedf_eh_abort,
+ .eh_device_reset_handler = qedf_eh_device_reset, /* lun reset */
+ .eh_target_reset_handler = qedf_eh_target_reset, /* target reset */
+ .eh_host_reset_handler = qedf_eh_host_reset,
+ .slave_configure = qedf_slave_configure,
+ .dma_boundary = QED_HW_DMA_BOUNDARY,
+ .sg_tablesize = QEDF_MAX_BDS_PER_CMD,
+ .can_queue = FCOE_PARAMS_NUM_TASKS,
+};
+
+static int qedf_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+ int rc;
+
+ spin_lock(&qedf_global_lock);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, &qedf_global);
+ spin_unlock(&qedf_global_lock);
+
+ return rc;
+}
+
+static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
+{
+ struct qedf_rport *fcport;
+ struct fc_rport_priv *rdata;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
+ rdata = fcport->rdata;
+ if (rdata == NULL)
+ continue;
+ if (rdata->ids.port_id == port_id) {
+ rcu_read_unlock();
+ return fcport;
+ }
+ }
+ rcu_read_unlock();
+
+ /* Return NULL to caller to let them know fcport was not found */
+ return NULL;
+}
+
+/* Transmits an ELS frame over an offloaded session */
+static int qedf_xmit_l2_frame(struct qedf_rport *fcport, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ int rc = 0;
+
+ fh = fc_frame_header_get(fp);
+ if ((fh->fh_type == FC_TYPE_ELS) &&
+ (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_ADISC:
+ qedf_send_adisc(fcport, fp);
+ rc = 1;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * qedf_xmit - qedf FCoE frame transmit function
+ * @lport: local port the frame is being sent on
+ * @fp: FC frame to transmit
+ */
+static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_lport *base_lport;
+ struct qedf_ctx *qedf;
+ struct ethhdr *eh;
+ struct fcoe_crc_eof *cp;
+ struct sk_buff *skb;
+ struct fc_frame_header *fh;
+ struct fcoe_hdr *hp;
+ u8 sof, eof;
+ u32 crc;
+ unsigned int hlen, tlen, elen;
+ int wlen;
+ struct fc_stats *stats;
+ struct fc_lport *tmp_lport;
+ struct fc_lport *vn_port = NULL;
+ struct qedf_rport *fcport;
+ int rc;
+ u16 vlan_tci = 0;
+
+ qedf = (struct qedf_ctx *)lport_priv(lport);
+
+ fh = fc_frame_header_get(fp);
+ skb = fp_skb(fp);
+
+ /* Filter out traffic to other NPIV ports on the same host */
+ if (lport->vport)
+ base_lport = shost_priv(vport_to_shost(lport->vport));
+ else
+ base_lport = lport;
+
+ /* Flag if the destination is the base port */
+ if (base_lport->port_id == ntoh24(fh->fh_d_id)) {
+ vn_port = base_lport;
+ } else {
+ /* Go through the list of vports attached to the base_lport
+ * and see if we have a match with the destination address.
+ */
+ list_for_each_entry(tmp_lport, &base_lport->vports, list) {
+ if (tmp_lport->port_id == ntoh24(fh->fh_d_id)) {
+ vn_port = tmp_lport;
+ break;
+ }
+ }
+ }
+ if (vn_port && ntoh24(fh->fh_d_id) != FC_FID_FLOGI) {
+ struct fc_rport_priv *rdata = NULL;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "Dropping FCoE frame to %06x.\n", ntoh24(fh->fh_d_id));
+ kfree_skb(skb);
+ rdata = fc_rport_lookup(lport, ntoh24(fh->fh_d_id));
+ if (rdata)
+ rdata->retries = lport->max_rport_retry_count;
+ return -EINVAL;
+ }
+ /* End NPIV filtering */
+
+ if (!qedf->ctlr.sel_fcf) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
+ QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
+ QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+ if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
+ return 0;
+ }
+
+ /* Check to see if this needs to be sent on an offloaded session */
+ fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
+
+ if (fcport && test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ rc = qedf_xmit_l2_frame(fcport, fp);
+ /*
+ * If the frame was successfully sent over the middle path
+ * then do not try to also send it over the LL2 path
+ */
+ if (rc)
+ return 0;
+ }
+
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+
+ elen = sizeof(struct ethhdr);
+ hlen = sizeof(struct fcoe_hdr);
+ tlen = sizeof(struct fcoe_crc_eof);
+ wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ crc = fcoe_fc_crc(fp);
+
+ /* copy port crc and eof to the skb buff */
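+ /*
+ * For a nonlinear skb the trailer is placed in the last page
+ * fragment, which has to be mapped before it can be written.
+ */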
+ if (skb_is_nonlinear(skb)) {
+ skb_frag_t *frag;
+
+ if (qedf_get_paged_crc_eof(skb, tlen)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+ } else {
+ cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+ }
+
+ memset(cp, 0, sizeof(*cp));
+ cp->fcoe_eof = eof;
+ cp->fcoe_crc32 = cpu_to_le32(~crc);
+ if (skb_is_nonlinear(skb)) {
+ kunmap_atomic(cp);
+ cp = NULL;
+ }
+
+
+ /* adjust skb network/transport offsets to match mac/fcoe/port */
+ skb_push(skb, elen + hlen);
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb->mac_len = elen;
+ skb->protocol = htons(ETH_P_FCOE);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
+
+ /* fill up mac and fcoe headers */
+ eh = eth_hdr(skb);
+ eh->h_proto = htons(ETH_P_FCOE);
+ if (qedf->ctlr.map_dest)
+ fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+ else
+ /* insert GW address */
+ ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
+
+ /* Set the source MAC address */
+ fc_fcoe_set_mac(eh->h_source, fh->fh_s_id);
+
+ hp = (struct fcoe_hdr *)(eh + 1);
+ memset(hp, 0, sizeof(*hp));
+ if (FC_FCOE_VER)
+ FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+ hp->fcoe_sof = sof;
+
+ /* update tx stats */
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->TxFrames++;
+ stats->TxWords += wlen;
+ put_cpu();
+
+ /* Get VLAN ID from skb for printing purposes */
+ __vlan_hwaccel_get_tag(skb, &vlan_tci);
+
+ /* send down to lld */
+ fr_dev(fp) = lport;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
+ "src=%06x dest=%06x r_ctl=%x type=%x vlan=%04x.\n",
+ ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, fh->fh_type,
+ vlan_tci);
+ if (qedf_dump_frames)
+ print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
+ 1, skb->data, skb->len, false);
+ qed_ops->ll2->start_xmit(qedf->cdev, skb);
+
+ return 0;
+}
+
+static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
+{
+ int rval = 0;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ /* Calculate appropriate queue and PBL sizes */
+ fcport->sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
+ fcport->sq_mem_size = ALIGN(fcport->sq_mem_size, QEDF_PAGE_SIZE);
+ fcport->sq_pbl_size = (fcport->sq_mem_size / QEDF_PAGE_SIZE) *
+ sizeof(void *);
+ fcport->sq_pbl_size = fcport->sq_pbl_size + QEDF_PAGE_SIZE;
+
+ fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+ &fcport->sq_dma, GFP_KERNEL);
+ if (!fcport->sq) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
+ "queue.\n");
+ rval = 1;
+ goto out;
+ }
+ memset(fcport->sq, 0, fcport->sq_mem_size);
+
+ fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+ fcport->sq_pbl_size, &fcport->sq_pbl_dma, GFP_KERNEL);
+ if (!fcport->sq_pbl) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send "
+ "queue PBL.\n");
+ rval = 1;
+ goto out_free_sq;
+ }
+ memset(fcport->sq_pbl, 0, fcport->sq_pbl_size);
+
+ /* Create PBL */
+ num_pages = fcport->sq_mem_size / QEDF_PAGE_SIZE;
+ page = fcport->sq_dma;
+ pbl = (u32 *)fcport->sq_pbl;
+
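+ /*
+ * Each PBL entry is the 64-bit DMA address of one SQ page,
+ * stored as the low 32 bits followed by the high 32 bits.
+ */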
+ while (num_pages--) {
+ *pbl = U64_LO(page);
+ pbl++;
+ *pbl = U64_HI(page);
+ pbl++;
+ page += QEDF_PAGE_SIZE;
+ }
+
+ return rval;
+
+out_free_sq:
+ dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
+ fcport->sq_dma);
+out:
+ return rval;
+}
+
+static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
+{
+ if (fcport->sq_pbl)
+ dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
+ fcport->sq_pbl, fcport->sq_pbl_dma);
+ if (fcport->sq)
+ dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
+ fcport->sq, fcport->sq_dma);
+}
+
+static int qedf_offload_connection(struct qedf_ctx *qedf,
+ struct qedf_rport *fcport)
+{
+ struct qed_fcoe_params_offload conn_info;
+ u32 port_id;
+ u8 lport_src_id[3];
+ int rval;
+ uint16_t total_sqe = (fcport->sq_mem_size / sizeof(struct fcoe_wqe));
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
+ "portid=%06x.\n", fcport->rdata->ids.port_id);
+ rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
+ &fcport->fw_cid, &fcport->p_doorbell);
+ if (rval) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
+ "for portid=%06x.\n", fcport->rdata->ids.port_id);
+ rval = 1; /* For some reason qed returns 0 on failure here */
+ goto out;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
+ "fw_cid=%08x handle=%d.\n", fcport->rdata->ids.port_id,
+ fcport->fw_cid, fcport->handle);
+
+ memset(&conn_info, 0, sizeof(struct qed_fcoe_params_offload));
+
+ /* Fill in the offload connection info */
+ conn_info.sq_pbl_addr = fcport->sq_pbl_dma;
+
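+ /*
+ * The first two 64-bit entries of the SQ PBL are passed as the
+ * current and next SQ page addresses.
+ */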
+ conn_info.sq_curr_page_addr = (dma_addr_t)(*(u64 *)fcport->sq_pbl);
+ conn_info.sq_next_page_addr =
+ (dma_addr_t)(*(u64 *)(fcport->sq_pbl + 8));
+
+ /* Need to use our FCoE MAC for the offload session */
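+ /*
+ * Pack the 24-bit port ID MSB-first so fc_fcoe_set_mac() can
+ * derive the FCoE-mapped MAC (FC-MAP OUI + FC_ID) from it.
+ */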
+ port_id = fc_host_port_id(qedf->lport->host);
+ lport_src_id[2] = (port_id & 0x000000FF);
+ lport_src_id[1] = (port_id & 0x0000FF00) >> 8;
+ lport_src_id[0] = (port_id & 0x00FF0000) >> 16;
+ fc_fcoe_set_mac(conn_info.src_mac, lport_src_id);
+
+ ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
+
+ conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
+ conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
+ conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
+ conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
+
+ /* Set VLAN data */
+ conn_info.vlan_tag = qedf->vlan_id <<
+ FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
+ conn_info.vlan_tag |=
+ qedf_default_prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
+ conn_info.flags |= (FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK <<
+ FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT);
+
+ /* Set host port source id */
+ port_id = fc_host_port_id(qedf->lport->host);
+ fcport->sid = port_id;
+ conn_info.s_id.addr_hi = (port_id & 0x000000FF);
+ conn_info.s_id.addr_mid = (port_id & 0x0000FF00) >> 8;
+ conn_info.s_id.addr_lo = (port_id & 0x00FF0000) >> 16;
+
+ conn_info.max_conc_seqs_c3 = fcport->rdata->max_seq;
+
+ /* Set remote port destination id */
+ port_id = fcport->rdata->rport->port_id;
+ conn_info.d_id.addr_hi = (port_id & 0x000000FF);
+ conn_info.d_id.addr_mid = (port_id & 0x0000FF00) >> 8;
+ conn_info.d_id.addr_lo = (port_id & 0x00FF0000) >> 16;
+
+ conn_info.def_q_idx = 0; /* Default index for send queue? */
+
+ /* Set FC-TAPE specific flags if needed */
+ if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
+ "Enable CONF, REC for portid=%06x.\n",
+ fcport->rdata->ids.port_id);
+ conn_info.flags |= 1 <<
+ FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT;
+ conn_info.flags |=
+ ((fcport->rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
+ FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT;
+ }
+
+ rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
+ if (rval) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
+ "for portid=%06x.\n", fcport->rdata->ids.port_id);
+ goto out_free_conn;
+ } else
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
+ "succeeded portid=%06x total_sqe=%d.\n",
+ fcport->rdata->ids.port_id, total_sqe);
+
+ spin_lock_init(&fcport->rport_lock);
+ atomic_set(&fcport->free_sqes, total_sqe);
+ return 0;
+out_free_conn:
+ qed_ops->release_conn(qedf->cdev, fcport->handle);
+out:
+ return rval;
+}
+
+#define QEDF_TERM_BUFF_SIZE 10
+static void qedf_upload_connection(struct qedf_ctx *qedf,
+ struct qedf_rport *fcport)
+{
+ void *term_params;
+ dma_addr_t term_params_dma;
+
+ /* Term params need to be a DMA-coherent buffer as qed shares the
+ * physical DMA address with the firmware. The buffer may be used in
+ * the receive path so we may eventually have to move this.
+ */
+ term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
+ &term_params_dma, GFP_KERNEL);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
+ "port_id=%06x.\n", fcport->rdata->ids.port_id);
+
+ qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
+ qed_ops->release_conn(qedf->cdev, fcport->handle);
+
+ dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
+ term_params_dma);
+}
+
+static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
+ struct qedf_rport *fcport)
+{
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
+ fcport->rdata->ids.port_id);
+
+ /* Flush any remaining i/o's before we upload the connection */
+ qedf_flush_active_ios(fcport, -1);
+
+ if (test_and_clear_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))
+ qedf_upload_connection(qedf, fcport);
+ qedf_free_sq(qedf, fcport);
+ fcport->rdata = NULL;
+ fcport->qedf = NULL;
+}
+
+/**
+ * This event_callback is called after successful completion of libfc
+ * initiated target login. qedf can proceed with initiating the session
+ * establishment.
+ */
+static void qedf_rport_event_handler(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ struct qedf_ctx *qedf = lport_priv(lport);
+ struct fc_rport *rport = rdata->rport;
+ struct fc_rport_libfc_priv *rp;
+ struct qedf_rport *fcport;
+ u32 port_id;
+ int rval;
+ unsigned long flags;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
+ "port_id = 0x%x\n", event, rdata->ids.port_id);
+
+ switch (event) {
+ case RPORT_EV_READY:
+ if (!rport) {
+ QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
+ break;
+ }
+
+ rp = rport->dd_data;
+ fcport = (struct qedf_rport *)&rp[1];
+ fcport->qedf = qedf;
+
+ if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
+ "portid=0x%x as max number of offloaded sessions "
+ "reached.\n", rdata->ids.port_id);
+ return;
+ }
+
+ /*
+ * Don't try to offload the session again. Can happen when we
+ * get an ADISC
+ */
+ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Session already "
+ "offloaded, portid=0x%x.\n",
+ rdata->ids.port_id);
+ return;
+ }
+
+ if (rport->port_id == FC_FID_DIR_SERV) {
+ /*
+ * qedf_rport structure doesn't exist for
+ * directory server.
+ * We should not come here, as lport will
+ * take care of fabric login
+ */
+ QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
+ "exist for dir server port_id=%x\n",
+ rdata->ids.port_id);
+ break;
+ }
+
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Not offlading since since spp type isn't FCP\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Not FCP target so not offloading\n");
+ break;
+ }
+
+ fcport->rdata = rdata;
+ fcport->rport = rport;
+
+ rval = qedf_alloc_sq(qedf, fcport);
+ if (rval) {
+ qedf_cleanup_fcport(qedf, fcport);
+ break;
+ }
+
+ /* Set device type */
+ if (rdata->flags & FC_RP_FLAGS_RETRY &&
+ rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+ !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
+ fcport->dev_type = QEDF_RPORT_TYPE_TAPE;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "portid=%06x is a TAPE device.\n",
+ rdata->ids.port_id);
+ } else {
+ fcport->dev_type = QEDF_RPORT_TYPE_DISK;
+ }
+
+ rval = qedf_offload_connection(qedf, fcport);
+ if (rval) {
+ qedf_cleanup_fcport(qedf, fcport);
+ break;
+ }
+
+ /* Add fcport to the qedf_ctx list of offloaded ports */
+ spin_lock_irqsave(&qedf->hba_lock, flags);
+ list_add_rcu(&fcport->peers, &qedf->fcports);
+ spin_unlock_irqrestore(&qedf->hba_lock, flags);
+
+ /*
+ * Set the session ready bit to let everyone know that this
+ * connection is ready for I/O
+ */
+ set_bit(QEDF_RPORT_SESSION_READY, &fcport->flags);
+ atomic_inc(&qedf->num_offloads);
+
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ port_id = rdata->ids.port_id;
+ if (port_id == FC_FID_DIR_SERV)
+ break;
+
+ if (!rport) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "port_id=%x - rport notcreated Yet!!\n", port_id);
+ break;
+ }
+ rp = rport->dd_data;
+ /*
+ * Perform session upload. Note that rdata->peers is already
+ * removed from disc->rports list before we get this event.
+ */
+ fcport = (struct qedf_rport *)&rp[1];
+
+ /* Only free this fcport if it is offloaded already */
+ if (test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ set_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags);
+ qedf_cleanup_fcport(qedf, fcport);
+
+ /*
+ * Remove fcport from the qedf_ctx list of offloaded
+ * ports
+ */
+ spin_lock_irqsave(&qedf->hba_lock, flags);
+ list_del_rcu(&fcport->peers);
+ spin_unlock_irqrestore(&qedf->hba_lock, flags);
+
+ clear_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+ &fcport->flags);
+ atomic_dec(&qedf->num_offloads);
+ }
+
+ break;
+
+ case RPORT_EV_NONE:
+ break;
+ }
+}
+
+static void qedf_abort_io(struct fc_lport *lport)
+{
+ /* NO-OP but need to fill in the template */
+}
+
+static void qedf_fcp_cleanup(struct fc_lport *lport)
+{
+ /*
+ * NO-OP but need to fill in template to prevent a NULL
+ * function pointer dereference during link down. I/Os
+ * will be flushed when port is uploaded.
+ */
+}
+
+static struct libfc_function_template qedf_lport_template = {
+ .frame_send = qedf_xmit,
+ .fcp_abort_io = qedf_abort_io,
+ .fcp_cleanup = qedf_fcp_cleanup,
+ .rport_event_callback = qedf_rport_event_handler,
+ .elsct_send = qedf_elsct_send,
+};
+
+static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
+{
+ fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
+
+ qedf->ctlr.send = qedf_fip_send;
+ qedf->ctlr.update_mac = qedf_update_src_mac;
+ qedf->ctlr.get_src_addr = qedf_get_src_mac;
+ ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
+}
+
+static int qedf_lport_setup(struct qedf_ctx *qedf)
+{
+ struct fc_lport *lport = qedf->lport;
+
+ lport->link_up = 0;
+ lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
+ lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ lport->boot_time = jiffies;
+ lport->e_d_tov = 2 * 1000;
+ lport->r_a_tov = 10 * 1000;
+
+ /* Set NPIV support */
+ lport->does_npiv = 1;
+ fc_host_max_npiv_vports(lport->host) = QEDF_MAX_NPIV;
+
+ fc_set_wwnn(lport, qedf->wwnn);
+ fc_set_wwpn(lport, qedf->wwpn);
+
+ fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0);
+
+ /* Allocate the exchange manager */
+ fc_exch_mgr_alloc(lport, FC_CLASS_3, qedf->max_scsi_xid + 1,
+ qedf->max_els_xid, NULL);
+
+ if (fc_lport_init_stats(lport))
+ return -ENOMEM;
+
+ /* Finish lport config */
+ fc_lport_config(lport);
+
+ /* Set max frame size */
+ fc_set_mfs(lport, QEDF_MFS);
+ fc_host_maxframe_size(lport->host) = lport->mfs;
+
+ /* Set default dev_loss_tmo based on module parameter */
+ fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
+
+ /* Set symbolic node name */
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+
+ return 0;
+}
+
+/*
+ * NPIV functions
+ */
+
+static int qedf_vport_libfc_config(struct fc_vport *vport,
+ struct fc_lport *lport)
+{
+ lport->link_up = 0;
+ lport->qfull = 0;
+ lport->max_retry_count = QEDF_FLOGI_RETRY_CNT;
+ lport->max_rport_retry_count = QEDF_RPORT_RETRY_CNT;
+ lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+ FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+ lport->boot_time = jiffies;
+ lport->e_d_tov = 2 * 1000;
+ lport->r_a_tov = 10 * 1000;
+ lport->does_npiv = 1; /* Temporary until we add NPIV support */
+
+ /* Allocate stats for vport */
+ if (fc_lport_init_stats(lport))
+ return -ENOMEM;
+
+ /* Finish lport config */
+ fc_lport_config(lport);
+
+ /* offload related configuration */
+ lport->crc_offload = 0;
+ lport->seq_offload = 0;
+ lport->lro_enabled = 0;
+ lport->lro_xid = 0;
+ lport->lso_max = 0;
+
+ return 0;
+}
+
+static int qedf_vport_create(struct fc_vport *vport, bool disabled)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+ struct qedf_ctx *base_qedf = lport_priv(n_port);
+ struct qedf_ctx *vport_qedf;
+
+ char buf[32];
+ int rc = 0;
+
+ rc = fcoe_validate_vport_create(vport);
+ if (rc) {
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ QEDF_WARN(&(base_qedf->dbg_ctx), "Failed to create vport, "
+ "WWPN (0x%s) already exists.\n", buf);
+ goto err1;
+ }
+
+ if (atomic_read(&base_qedf->link_state) != QEDF_LINK_UP) {
+ QEDF_WARN(&(base_qedf->dbg_ctx), "Cannot create vport "
+ "because link is not up.\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ vn_port = libfc_vport_create(vport, sizeof(struct qedf_ctx));
+ if (!vn_port) {
+ QEDF_WARN(&(base_qedf->dbg_ctx), "Could not create lport "
+ "for vport.\n");
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+ QEDF_ERR(&(base_qedf->dbg_ctx), "Creating NPIV port, WWPN=%s.\n",
+ buf);
+
+ /* Copy some fields from base_qedf */
+ vport_qedf = lport_priv(vn_port);
+ memcpy(vport_qedf, base_qedf, sizeof(struct qedf_ctx));
+
+ /* Set qedf data specific to this vport */
+ vport_qedf->lport = vn_port;
+ /* Use same hba_lock as base_qedf */
+ vport_qedf->hba_lock = base_qedf->hba_lock;
+ vport_qedf->pdev = base_qedf->pdev;
+ vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
+ init_completion(&vport_qedf->flogi_compl);
+ INIT_LIST_HEAD(&vport_qedf->fcports);
+
+ rc = qedf_vport_libfc_config(vport, vn_port);
+ if (rc) {
+ QEDF_ERR(&(base_qedf->dbg_ctx), "Could not allocate memory "
+ "for lport stats.\n");
+ goto err2;
+ }
+
+ fc_set_wwnn(vn_port, vport->node_name);
+ fc_set_wwpn(vn_port, vport->port_name);
+ vport_qedf->wwnn = vn_port->wwnn;
+ vport_qedf->wwpn = vn_port->wwpn;
+
+ vn_port->host->transportt = qedf_fc_vport_transport_template;
+ vn_port->host->can_queue = QEDF_MAX_ELS_XID;
+ vn_port->host->max_lun = qedf_max_lun;
+ vn_port->host->sg_tablesize = QEDF_MAX_BDS_PER_CMD;
+ vn_port->host->max_cmd_len = QEDF_MAX_CDB_LEN;
+
+ rc = scsi_add_host(vn_port->host, &vport->dev);
+ if (rc) {
+ QEDF_WARN(&(base_qedf->dbg_ctx), "Error adding Scsi_Host.\n");
+ goto err2;
+ }
+
+ /* Set default dev_loss_tmo based on module parameter */
+ fc_host_dev_loss_tmo(vn_port->host) = qedf_dev_loss_tmo;
+
+ /* Init libfc stuff */
+ memcpy(&vn_port->tt, &qedf_lport_template,
+ sizeof(qedf_lport_template));
+ fc_exch_init(vn_port);
+ fc_elsct_init(vn_port);
+ fc_lport_init(vn_port);
+ fc_disc_init(vn_port);
+ fc_disc_config(vn_port, vn_port);
+
+
+ /* Allocate the exchange manager */
+ shost = vport_to_shost(vport);
+ n_port = shost_priv(shost);
+ fc_exch_mgr_list_clone(n_port, vn_port);
+
+ /* Set max frame size */
+ fc_set_mfs(vn_port, QEDF_MFS);
+
+ fc_host_port_type(vn_port->host) = FC_PORTTYPE_UNKNOWN;
+
+ if (disabled) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ } else {
+ vn_port->boot_time = jiffies;
+ fc_fabric_login(vn_port);
+ fc_vport_setlink(vn_port);
+ }
+
+ QEDF_INFO(&(base_qedf->dbg_ctx), QEDF_LOG_NPIV, "vn_port=%p.\n",
+ vn_port);
+
+ /* Set up debug context for vport */
+ vport_qedf->dbg_ctx.host_no = vn_port->host->host_no;
+ vport_qedf->dbg_ctx.pdev = base_qedf->pdev;
+
+err2:
+ scsi_host_put(vn_port->host);
+err1:
+ return rc;
+}
+
+static int qedf_vport_destroy(struct fc_vport *vport)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port = vport->dd_data;
+
+ mutex_lock(&n_port->lp_mutex);
+ list_del(&vn_port->list);
+ mutex_unlock(&n_port->lp_mutex);
+
+ fc_fabric_logoff(vn_port);
+ fc_lport_destroy(vn_port);
+
+ /* Detach from scsi-ml */
+ fc_remove_host(vn_port->host);
+ scsi_remove_host(vn_port->host);
+
+ /*
+ * Only try to release the exchange manager if the vn_port
+ * configuration is complete.
+ */
+ if (vn_port->state == LPORT_ST_READY)
+ fc_exch_mgr_free(vn_port);
+
+ /* Free memory used by statistical counters */
+ fc_lport_free_stats(vn_port);
+
+ /* Release Scsi_Host */
+ if (vn_port->host)
+ scsi_host_put(vn_port->host);
+
+ return 0;
+}
+
+static int qedf_vport_disable(struct fc_vport *vport, bool disable)
+{
+ struct fc_lport *lport = vport->dd_data;
+
+ if (disable) {
+ fc_vport_set_state(vport, FC_VPORT_DISABLED);
+ fc_fabric_logoff(lport);
+ } else {
+ lport->boot_time = jiffies;
+ fc_fabric_login(lport);
+ fc_vport_setlink(lport);
+ }
+ return 0;
+}
+
+/*
+ * During removal we need to wait for all the vports associated with a port
+ * to be destroyed so we avoid a race condition where libfc is still trying
+ * to reap vports while the driver remove function has already reaped the
+ * driver contexts associated with the physical port.
+ */
+static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
+{
+ struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
+ "Entered.\n");
+ while (fc_host->npiv_vports_inuse > 0) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
+ "Waiting for all vports to be reaped.\n");
+ msleep(1000);
+ }
+}
+
+/**
+ * qedf_fcoe_reset - Resets the fcoe
+ *
+ * @shost: shost the reset is from
+ *
+ * Returns: always 0
+ */
+static int qedf_fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_fabric_logoff(lport);
+ fc_fabric_login(lport);
+ return 0;
+}
+
+static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host
+ *shost)
+{
+ struct fc_host_statistics *qedf_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ struct qedf_ctx *qedf = lport_priv(lport);
+ struct qed_fcoe_stats *fw_fcoe_stats;
+
+ qedf_stats = fc_get_host_stats(shost);
+
+ /* We don't collect offload stats for specific NPIV ports */
+ if (lport->vport)
+ goto out;
+
+ fw_fcoe_stats = kmalloc(sizeof(struct qed_fcoe_stats), GFP_KERNEL);
+ if (!fw_fcoe_stats) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
+ "fw_fcoe_stats.\n");
+ goto out;
+ }
+
+ /* Query firmware for offload stats */
+ qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
+
+ /*
+ * The expectation is that we add our offload stats to the stats
+ * being maintained by libfc each time the fc_get_host_stats callback
+ * is invoked. The additions are not carried over between calls to
+ * the fc_get_host_stats callback.
+ */
+ qedf_stats->tx_frames += fw_fcoe_stats->fcoe_tx_data_pkt_cnt +
+ fw_fcoe_stats->fcoe_tx_xfer_pkt_cnt +
+ fw_fcoe_stats->fcoe_tx_other_pkt_cnt;
+ qedf_stats->rx_frames += fw_fcoe_stats->fcoe_rx_data_pkt_cnt +
+ fw_fcoe_stats->fcoe_rx_xfer_pkt_cnt +
+ fw_fcoe_stats->fcoe_rx_other_pkt_cnt;
+ qedf_stats->fcp_input_megabytes +=
+ do_div(fw_fcoe_stats->fcoe_rx_byte_cnt, 1000000);
+ qedf_stats->fcp_output_megabytes +=
+ do_div(fw_fcoe_stats->fcoe_tx_byte_cnt, 1000000);
+ qedf_stats->rx_words += fw_fcoe_stats->fcoe_rx_byte_cnt / 4;
+ qedf_stats->tx_words += fw_fcoe_stats->fcoe_tx_byte_cnt / 4;
+ qedf_stats->invalid_crc_count +=
+ fw_fcoe_stats->fcoe_silent_drop_pkt_crc_error_cnt;
+ qedf_stats->dumped_frames =
+ fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
+ qedf_stats->error_frames +=
+ fw_fcoe_stats->fcoe_silent_drop_total_pkt_cnt;
+ qedf_stats->fcp_input_requests += qedf->input_requests;
+ qedf_stats->fcp_output_requests += qedf->output_requests;
+ qedf_stats->fcp_control_requests += qedf->control_requests;
+ qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
+ qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
+
+ kfree(fw_fcoe_stats);
+out:
+ return qedf_stats;
+}
+
+static struct fc_function_template qedf_fc_transport_fn = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+
+ /*
+ * Tell FC transport to allocate enough space to store the backpointer
+ * for the associated qedf_rport struct.
+ */
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct qedf_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = qedf_fc_get_host_stats,
+ .issue_fc_host_lip = qedf_fcoe_reset,
+ .vport_create = qedf_vport_create,
+ .vport_delete = qedf_vport_destroy,
+ .vport_disable = qedf_vport_disable,
+ .bsg_request = fc_lport_bsg_request,
+};
+
+static struct fc_function_template qedf_fc_vport_transport_fn = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_port_id = 1,
+ .show_host_supported_speeds = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .show_host_port_type = 1,
+ .get_host_port_state = fc_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+ .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) +
+ sizeof(struct qedf_rport)),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = qedf_fcoe_reset,
+ .bsg_request = fc_lport_bsg_request,
+};
+
+static bool qedf_fp_has_work(struct qedf_fastpath *fp)
+{
+ struct qedf_ctx *qedf = fp->qedf;
+ struct global_queue *que;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ u16 prod_idx;
+
+ /* Get the pointer to the global CQ this completion is on */
+ que = qedf->global_queues[fp->sb_id];
+
+ /* Be sure all responses have been written to PI */
+ rmb();
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
+
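+ /*
+ * New completions are pending if the firmware producer index
+ * differs from the last producer index we processed.
+ */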
+ return (que->cq_prod_idx != prod_idx);
+}
+
+/*
+ * Interrupt handler code.
+ */
+
+/* Process completion queue and copy CQE contents for deferred processing
+ *
+ * Return true if we should wake the I/O thread, false if not.
+ */
+static bool qedf_process_completions(struct qedf_fastpath *fp)
+{
+ struct qedf_ctx *qedf = fp->qedf;
+ struct qed_sb_info *sb_info = fp->sb_info;
+ struct status_block *sb = sb_info->sb_virt;
+ struct global_queue *que;
+ u16 prod_idx;
+ struct fcoe_cqe *cqe;
+ struct qedf_io_work *io_work;
+ int num_handled = 0;
+ unsigned int cpu;
+ struct qedf_ioreq *io_req = NULL;
+ u16 xid;
+ u16 new_cqes;
+ u32 comp_type;
+
+ /* Get the current firmware producer index */
+ prod_idx = sb->pi_array[QEDF_FCOE_PARAMS_GL_RQ_PI];
+
+ /* Get the pointer to the global CQ this completion is on */
+ que = qedf->global_queues[fp->sb_id];
+
+ /* Calculate the amount of new elements since last processing */
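+ /* The producer index is 16 bits wide so account for wrap-around */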
+ new_cqes = (prod_idx >= que->cq_prod_idx) ?
+ (prod_idx - que->cq_prod_idx) :
+ 0x10000 - que->cq_prod_idx + prod_idx;
+
+ /* Save producer index */
+ que->cq_prod_idx = prod_idx;
+
+ while (new_cqes) {
+ fp->completions++;
+ num_handled++;
+ cqe = &que->cq[que->cq_cons_idx];
+
+ comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
+ FCOE_CQE_CQE_TYPE_MASK;
+
+ /*
+ * Process unsolicited CQEs directly in the interrupt handler
+ * since we need the fastpath ID
+ */
+ if (comp_type == FCOE_UNSOLIC_CQE_TYPE) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
+ "Unsolicated CQE.\n");
+ qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
+ /*
+ * Don't add a work list item. Increment the consumer
+ * index and move on.
+ */
+ goto inc_idx;
+ }
+
+ xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
+ io_req = &qedf->cmd_mgr->cmds[xid];
+
+ /*
+ * Figure out which percpu thread we should queue this I/O
+ * on.
+ */
+ if (!io_req) {
+ /* If there is no io_req associated with this CQE
+ * just queue it on CPU 0
+ */
+ cpu = 0;
+ } else {
+ cpu = io_req->cpu;
+ io_req->int_cpu = smp_processor_id();
+ }
+
+ io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
+ if (!io_work) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
+ "work for I/O completion.\n");
+ continue;
+ }
+ memset(io_work, 0, sizeof(struct qedf_io_work));
+
+ INIT_WORK(&io_work->work, qedf_fp_io_handler);
+
+ /* Copy contents of CQE for deferred processing */
+ memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
+
+ io_work->qedf = fp->qedf;
+ io_work->fp = NULL; /* Only used for unsolicited frames */
+
+ queue_work_on(cpu, qedf_io_wq, &io_work->work);
+
+inc_idx:
+ que->cq_cons_idx++;
+ if (que->cq_cons_idx == fp->cq_num_entries)
+ que->cq_cons_idx = 0;
+ new_cqes--;
+ }
+
+ return true;
+}
+
+
+/* MSI-X fastpath handler code */
+static irqreturn_t qedf_msix_handler(int irq, void *dev_id)
+{
+ struct qedf_fastpath *fp = dev_id;
+
+ if (!fp) {
+ QEDF_ERR(NULL, "fp is null.\n");
+ return IRQ_HANDLED;
+ }
+ if (!fp->sb_info) {
+ QEDF_ERR(NULL, "fp->sb_info in null.");
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * Disable interrupts for this status block while we process new
+ * completions
+ */
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+ while (1) {
+ qedf_process_completions(fp);
+
+ if (qedf_fp_has_work(fp) == 0) {
+ /* Update the sb information */
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* Check for more work */
+ rmb();
+
+ if (qedf_fp_has_work(fp) == 0) {
+ /* Re-enable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+ return IRQ_HANDLED;
+ }
+ }
+ }
+
+ /* Do we ever want to break out of above loop? */
+ return IRQ_HANDLED;
+}
+
+/* simd handler for MSI/INTa */
+static void qedf_simd_int_handler(void *cookie)
+{
+ /* Cookie is qedf_ctx struct */
+ struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
+
+ QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
+}
+
+#define QEDF_SIMD_HANDLER_NUM 0
+static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
+{
+ int i;
+
+ if (qedf->int_info.msix_cnt) {
+ for (i = 0; i < qedf->int_info.used_cnt; i++) {
+ synchronize_irq(qedf->int_info.msix[i].vector);
+ irq_set_affinity_hint(qedf->int_info.msix[i].vector,
+ NULL);
+ irq_set_affinity_notifier(qedf->int_info.msix[i].vector,
+ NULL);
+ free_irq(qedf->int_info.msix[i].vector,
+ &qedf->fp_array[i]);
+ }
+ } else
+ qed_ops->common->simd_handler_clean(qedf->cdev,
+ QEDF_SIMD_HANDLER_NUM);
+
+ qedf->int_info.used_cnt = 0;
+ qed_ops->common->set_fp_int(qedf->cdev, 0);
+}
+
+static int qedf_request_msix_irq(struct qedf_ctx *qedf)
+{
+ int i, rc, cpu;
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < qedf->num_queues; i++) {
+ rc = request_irq(qedf->int_info.msix[i].vector,
+ qedf_msix_handler, 0, "qedf", &qedf->fp_array[i]);
+
+ if (rc) {
+ QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
+ qedf_sync_free_irqs(qedf);
+ return rc;
+ }
+
+ qedf->int_info.used_cnt++;
+ rc = irq_set_affinity_hint(qedf->int_info.msix[i].vector,
+ get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ return 0;
+}
+
+static int qedf_setup_int(struct qedf_ctx *qedf)
+{
+ int rc = 0;
+
+ /*
+ * Learn interrupt configuration
+ */
+ rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
+
+ rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
+ if (rc)
+ return 0;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
+ "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
+ num_online_cpus());
+
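+ /* Use MSI-X when vectors are available, otherwise fall back to
+ * the simd handler for MSI/INTa.
+ */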
+ if (qedf->int_info.msix_cnt)
+ return qedf_request_msix_irq(qedf);
+
+ qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
+ QEDF_SIMD_HANDLER_NUM, qedf_simd_int_handler);
+ qedf->int_info.used_cnt = 1;
+
+ return 0;
+}
+
+/* Main function for libfc frame reception */
+static void qedf_recv_frame(struct qedf_ctx *qedf,
+ struct sk_buff *skb)
+{
+ u32 fr_len;
+ struct fc_lport *lport;
+ struct fc_frame_header *fh;
+ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ u8 *mac = NULL;
+ u8 *dest_mac = NULL;
+ struct fcoe_hdr *hp;
+ struct qedf_rport *fcport;
+
+ lport = qedf->lport;
+ if (lport == NULL || lport->state == LPORT_ST_DISABLED) {
+ QEDF_WARN(NULL, "Invalid lport struct or lport disabled.\n");
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb_is_nonlinear(skb))
+ skb_linearize(skb);
+ mac = eth_hdr(skb)->h_source;
+ dest_mac = eth_hdr(skb)->h_dest;
+
+ /* Pull the header */
+ hp = (struct fcoe_hdr *)skb->data;
+ fh = (struct fc_frame_header *) skb_transport_header(skb);
+ skb_pull(skb, sizeof(struct fcoe_hdr));
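+ /* Frame length excludes the FCoE header and the CRC/EOF trailer */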
+ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+ fp = (struct fc_frame *)skb;
+ fc_frame_init(fp);
+ fr_dev(fp) = lport;
+ fr_sof(fp) = hp->fcoe_sof;
+ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
+ kfree_skb(skb);
+ return;
+ }
+ fr_eof(fp) = crc_eof.fcoe_eof;
+ fr_crc(fp) = crc_eof.fcoe_crc32;
+ if (pskb_trim(skb, fr_len)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+
+ if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+ fh->fh_type == FC_TYPE_FCP) {
+ /* Drop FCP data. We don't handle it in the L2 path. */
+ kfree_skb(skb);
+ return;
+ }
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fh->fh_type == FC_TYPE_ELS) {
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LOGO:
+ if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
+ /* drop non-FIP LOGO */
+ kfree_skb(skb);
+ return;
+ }
+ break;
+ }
+ }
+
+ if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
+ /* Drop incoming ABTS */
+ kfree_skb(skb);
+ return;
+ }
+
+ /*
+ * If a connection is uploading, drop incoming FCoE frames as there
+ * is a small window where we could try to return a frame while libfc
+ * is trying to clean things up.
+ */
+
+ /* Get fcport associated with d_id if it exists */
+ fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
+
+ if (fcport && test_bit(QEDF_RPORT_UPLOADING_CONNECTION,
+ &fcport->flags)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
+ "Connection uploading, dropping fp=%p.\n", fp);
+ kfree_skb(skb);
+ return;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
+ "skb=%p fp=%p src=%06x dest=%06x r_ctl=%x fh_type=%x.\n", skb, fp,
+ ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
+ fh->fh_type);
+ if (qedf_dump_frames)
+ print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16,
+ 1, skb->data, skb->len, false);
+ fc_exch_recv(lport, fp);
+}
+
+static void qedf_ll2_process_skb(struct work_struct *work)
+{
+ struct qedf_skb_work *skb_work =
+ container_of(work, struct qedf_skb_work, work);
+ struct qedf_ctx *qedf = skb_work->qedf;
+ struct sk_buff *skb = skb_work->skb;
+ struct ethhdr *eh;
+
+ if (!qedf) {
+ QEDF_ERR(NULL, "qedf is NULL\n");
+ goto err_out;
+ }
+
+ eh = (struct ethhdr *)skb->data;
+
+ /* Undo VLAN encapsulation */
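+ /* Shift the MAC addresses up over the VLAN tag and pull the tag */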
+ if (eh->h_proto == htons(ETH_P_8021Q)) {
+ memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
+ eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ skb_reset_mac_header(skb);
+ }
+
+ /*
+ * Process either a FIP frame or FCoE frame based on the
+ * protocol value. If it's neither, just drop the
+ * frame.
+ */
+ if (eh->h_proto == htons(ETH_P_FIP)) {
+ qedf_fip_recv(qedf, skb);
+ goto out;
+ } else if (eh->h_proto == htons(ETH_P_FCOE)) {
+ __skb_pull(skb, ETH_HLEN);
+ qedf_recv_frame(qedf, skb);
+ goto out;
+ } else
+ goto err_out;
+
+err_out:
+ kfree_skb(skb);
+out:
+ kfree(skb_work);
+ return;
+}
+
+static int qedf_ll2_rx(void *cookie, struct sk_buff *skb,
+ u32 arg1, u32 arg2)
+{
+ struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
+ struct qedf_skb_work *skb_work;
+
+ skb_work = kzalloc(sizeof(struct qedf_skb_work), GFP_ATOMIC);
+ if (!skb_work) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
+ "dropping frame.\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ INIT_WORK(&skb_work->work, qedf_ll2_process_skb);
+ skb_work->skb = skb;
+ skb_work->qedf = qedf;
+ queue_work(qedf->ll2_recv_wq, &skb_work->work);
+
+ return 0;
+}
+
+static struct qed_ll2_cb_ops qedf_ll2_cb_ops = {
+ .rx_cb = qedf_ll2_rx,
+ .tx_cb = NULL,
+};
+
+/* Main thread to process I/O completions */
+void qedf_fp_io_handler(struct work_struct *work)
+{
+ struct qedf_io_work *io_work =
+ container_of(work, struct qedf_io_work, work);
+ u32 comp_type;
+
+ /*
+ * Deferred part of unsolicited CQE sends
+ * frame to libfc.
+ */
+ comp_type = (io_work->cqe.cqe_data >>
+ FCOE_CQE_CQE_TYPE_SHIFT) &
+ FCOE_CQE_CQE_TYPE_MASK;
+ if (comp_type == FCOE_UNSOLIC_CQE_TYPE &&
+ io_work->fp)
+ fc_exch_recv(io_work->qedf->lport, io_work->fp);
+ else
+ qedf_process_cqe(io_work->qedf, &io_work->cqe);
+
+ kfree(io_work);
+}
+
+static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int ret;
+
+ sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
+ sizeof(struct status_block), &sb_phys, GFP_KERNEL);
+
+ if (!sb_virt) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed "
+ "for id = %d.\n", sb_id);
+ return -ENOMEM;
+ }
+
+ ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
+ sb_id, QED_SB_TYPE_STORAGE);
+
+ if (ret) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization "
+ "failed for id = %d.\n", sb_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
+{
+ if (sb_info->sb_virt)
+ dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+}
+
+static void qedf_destroy_sb(struct qedf_ctx *qedf)
+{
+ int id;
+ struct qedf_fastpath *fp = NULL;
+
+ for (id = 0; id < qedf->num_queues; id++) {
+ fp = &(qedf->fp_array[id]);
+ if (fp->sb_id == QEDF_SB_ID_NULL)
+ break;
+ qedf_free_sb(qedf, fp->sb_info);
+ kfree(fp->sb_info);
+ }
+ kfree(qedf->fp_array);
+}
+
+static int qedf_prepare_sb(struct qedf_ctx *qedf)
+{
+ int id;
+ struct qedf_fastpath *fp;
+ int ret;
+
+ qedf->fp_array =
+ kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
+ GFP_KERNEL);
+
+ if (!qedf->fp_array) {
+ QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
+ "failed.\n");
+ return -ENOMEM;
+ }
+
+ for (id = 0; id < qedf->num_queues; id++) {
+ fp = &(qedf->fp_array[id]);
+ fp->sb_id = QEDF_SB_ID_NULL;
+ fp->sb_info = kcalloc(1, sizeof(*fp->sb_info), GFP_KERNEL);
+ if (!fp->sb_info) {
+ QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
+ "allocation failed.\n");
+ goto err;
+ }
+ ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
+ if (ret) {
+ QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
+ "initialization failed.\n");
+ goto err;
+ }
+ fp->sb_id = id;
+ fp->qedf = qedf;
+ fp->cq_num_entries =
+ qedf->global_queues[id]->cq_mem_size /
+ sizeof(struct fcoe_cqe);
+ }
+err:
+ return 0;
+}
+
+void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
+{
+ u16 xid;
+ struct qedf_ioreq *io_req;
+ struct qedf_rport *fcport;
+ u32 comp_type;
+
+ comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
+ FCOE_CQE_CQE_TYPE_MASK;
+
+ xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
+ io_req = &qedf->cmd_mgr->cmds[xid];
+
+ /* Completion not for a valid I/O anymore so just return */
+ if (!io_req)
+ return;
+
+ fcport = io_req->fcport;
+
+ if (fcport == NULL) {
+ QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n");
+ return;
+ }
+
+ /*
+ * Check that fcport is offloaded. If it isn't then the spinlock
+ * isn't valid and shouldn't be taken. We should just return.
+ */
+ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+ return;
+ }
+
+
+ switch (comp_type) {
+ case FCOE_GOOD_COMPLETION_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ switch (io_req->cmd_type) {
+ case QEDF_SCSI_CMD:
+ qedf_scsi_completion(qedf, cqe, io_req);
+ break;
+ case QEDF_ELS:
+ qedf_process_els_compl(qedf, cqe, io_req);
+ break;
+ case QEDF_TASK_MGMT_CMD:
+ qedf_process_tmf_compl(qedf, cqe, io_req);
+ break;
+ case QEDF_SEQ_CLEANUP:
+ qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
+ break;
+ }
+ break;
+ case FCOE_ERROR_DETECTION_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Error detect CQE.\n");
+ qedf_process_error_detect(qedf, cqe, io_req);
+ break;
+ case FCOE_EXCH_CLEANUP_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Cleanup CQE.\n");
+ qedf_process_cleanup_compl(qedf, cqe, io_req);
+ break;
+ case FCOE_ABTS_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Abort CQE.\n");
+ qedf_process_abts_compl(qedf, cqe, io_req);
+ break;
+ case FCOE_DUMMY_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Dummy CQE.\n");
+ break;
+ case FCOE_LOCAL_COMP_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Local completion CQE.\n");
+ break;
+ case FCOE_WARNING_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Warning CQE.\n");
+ qedf_process_warning_compl(qedf, cqe, io_req);
+ break;
+ case MAX_FCOE_CQE_TYPE:
+ atomic_inc(&fcport->free_sqes);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Max FCoE CQE.\n");
+ break;
+ default:
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
+ "Default CQE.\n");
+ break;
+ }
+}
+
+static void qedf_free_bdq(struct qedf_ctx *qedf)
+{
+ int i;
+
+ if (qedf->bdq_pbl_list)
+ dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
+ qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
+
+ if (qedf->bdq_pbl)
+ dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
+ qedf->bdq_pbl, qedf->bdq_pbl_dma);
+
+ for (i = 0; i < QEDF_BDQ_SIZE; i++) {
+ if (qedf->bdq[i].buf_addr) {
+ dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
+ qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
+ }
+ }
+}
+
+static void qedf_free_global_queues(struct qedf_ctx *qedf)
+{
+ int i;
+ struct global_queue **gl = qedf->global_queues;
+
+ for (i = 0; i < qedf->num_queues; i++) {
+ if (!gl[i])
+ continue;
+
+ if (gl[i]->cq)
+ dma_free_coherent(&qedf->pdev->dev,
+ gl[i]->cq_mem_size, gl[i]->cq, gl[i]->cq_dma);
+ if (gl[i]->cq_pbl)
+ dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
+ gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
+
+ kfree(gl[i]);
+ }
+
+ qedf_free_bdq(qedf);
+}
+
+static int qedf_alloc_bdq(struct qedf_ctx *qedf)
+{
+ int i;
+ struct scsi_bd *pbl;
+ u64 *list;
+ dma_addr_t page;
+
+ /* Alloc dma memory for BDQ buffers */
+ for (i = 0; i < QEDF_BDQ_SIZE; i++) {
+ qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
+ if (!qedf->bdq[i].buf_addr) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
+ "buffer %d.\n", i);
+ return -ENOMEM;
+ }
+ }
+
+ /* Alloc dma memory for BDQ page buffer list */
+ qedf->bdq_pbl_mem_size =
+ QEDF_BDQ_SIZE * sizeof(struct scsi_bd);
+ qedf->bdq_pbl_mem_size =
+ ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
+
+ qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
+ if (!qedf->bdq_pbl) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
+ return -ENOMEM;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
+ qedf->bdq_pbl_dma);
+
+ /*
+ * Populate BDQ PBL with the physical address and an opaque index
+ * for each individual BDQ buffer
+ */
+ pbl = (struct scsi_bd *)qedf->bdq_pbl;
+ for (i = 0; i < QEDF_BDQ_SIZE; i++) {
+ pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
+ pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
+ pbl->opaque.hi = 0;
+ /* Opaque lo data is an index into the BDQ array */
+ pbl->opaque.lo = cpu_to_le32(i);
+ pbl++;
+ }
+
+ /* Allocate list of PBL pages */
+ qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
+ QEDF_PAGE_SIZE, &qedf->bdq_pbl_list_dma, GFP_KERNEL);
+ if (!qedf->bdq_pbl_list) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL "
+ "pages.\n");
+ return -ENOMEM;
+ }
+ memset(qedf->bdq_pbl_list, 0, QEDF_PAGE_SIZE);
+
+ /*
+ * Now populate PBL list with pages that contain pointers to the
+ * individual buffers.
+ */
+ qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
+ QEDF_PAGE_SIZE;
+ list = (u64 *)qedf->bdq_pbl_list;
+ page = qedf->bdq_pbl_list_dma;
+ for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
+ *list = qedf->bdq_pbl_dma;
+ list++;
+ page += QEDF_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
+{
+ u32 *list;
+ int i;
+ int status = 0, rc;
+ u32 *pbl;
+ dma_addr_t page;
+ int num_pages;
+
+ /* Allocate and map CQs, RQs */
+ /*
+ * Number of global queues (CQ / RQ). This should
+ * be <= number of available MSIX vectors for the PF
+ */
+ if (!qedf->num_queues) {
+ QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
+ return 1;
+ }
+
+ /*
+ * Make sure we allocated the PBL that will contain the physical
+ * addresses of our queues
+ */
+ if (!qedf->p_cpuq) {
+ status = 1;
+ goto mem_alloc_failure;
+ }
+
+ qedf->global_queues = kzalloc((sizeof(struct global_queue *)
+ * qedf->num_queues), GFP_KERNEL);
+ if (!qedf->global_queues) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
+ "queues array ptr memory\n");
+ return -ENOMEM;
+ }
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "qedf->global_queues=%p.\n", qedf->global_queues);
+
+ /* Allocate DMA coherent buffers for BDQ */
+ rc = qedf_alloc_bdq(qedf);
+ if (rc)
+ goto mem_alloc_failure;
+
+ /* Allocate a CQ and an associated PBL for each MSI-X vector */
+ for (i = 0; i < qedf->num_queues; i++) {
+ qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
+ GFP_KERNEL);
+ if (!qedf->global_queues[i]) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocation "
+ "global queue %d.\n", i);
+ goto mem_alloc_failure;
+ }
+
+ qedf->global_queues[i]->cq_mem_size =
+ FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
+ qedf->global_queues[i]->cq_mem_size =
+ ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
+
+ qedf->global_queues[i]->cq_pbl_size =
+ (qedf->global_queues[i]->cq_mem_size /
+ PAGE_SIZE) * sizeof(void *);
+ qedf->global_queues[i]->cq_pbl_size =
+ ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
+
+ qedf->global_queues[i]->cq =
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_mem_size,
+ &qedf->global_queues[i]->cq_dma, GFP_KERNEL);
+
+ if (!qedf->global_queues[i]->cq) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
+ "cq.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedf->global_queues[i]->cq, 0,
+ qedf->global_queues[i]->cq_mem_size);
+
+ qedf->global_queues[i]->cq_pbl =
+ dma_alloc_coherent(&qedf->pdev->dev,
+ qedf->global_queues[i]->cq_pbl_size,
+ &qedf->global_queues[i]->cq_pbl_dma, GFP_KERNEL);
+
+ if (!qedf->global_queues[i]->cq_pbl) {
+ QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
+ "cq PBL.\n");
+ status = -ENOMEM;
+ goto mem_alloc_failure;
+ }
+ memset(qedf->global_queues[i]->cq_pbl, 0,
+ qedf->global_queues[i]->cq_pbl_size);
+
+ /* Create PBL */
+ num_pages = qedf->global_queues[i]->cq_mem_size /
+ QEDF_PAGE_SIZE;
+ page = qedf->global_queues[i]->cq_dma;
+ pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
+
+ while (num_pages--) {
+ *pbl = U64_LO(page);
+ pbl++;
+ *pbl = U64_HI(page);
+ pbl++;
+ page += QEDF_PAGE_SIZE;
+ }
+ /* Set the initial consumer index for cq */
+ qedf->global_queues[i]->cq_cons_idx = 0;
+ }
+
+ list = (u32 *)qedf->p_cpuq;
+
+ /*
+ * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
+ * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
+ * to the physical address which contains an array of pointers to
+ * the physical addresses of the specific queue pages.
+ */
+ for (i = 0; i < qedf->num_queues; i++) {
+ *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
+ list++;
+ *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
+ list++;
+ *list = U64_LO(0);
+ list++;
+ *list = U64_HI(0);
+ list++;
+ }
+
+ return 0;
+
+mem_alloc_failure:
+ qedf_free_global_queues(qedf);
+ return status;
+}
+
+static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
+{
+ u8 sq_num_pbl_pages;
+ u32 sq_mem_size;
+ u32 cq_mem_size;
+ u32 cq_num_entries;
+ int rval;
+
+ /*
+ * The number of completion queues/fastpath interrupts/status blocks
+ * we allocate is the minimum of:
+ *
+ * Number of CPUs
+ * Number of MSI-X vectors
+ * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+ */
+ qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
+ num_online_cpus());
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
+ qedf->num_queues);
+
+ qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+ qedf->num_queues * sizeof(struct qedf_glbl_q_params),
+ &qedf->hw_p_cpuq);
+
+ if (!qedf->p_cpuq) {
+ QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+ return 1;
+ }
+
+ rval = qedf_alloc_global_queues(qedf);
+ if (rval) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
+ "failed.\n");
+ return 1;
+ }
+
+ /* Calculate SQ PBL size in the same manner as in qedf_sq_alloc() */
+ sq_mem_size = SQ_NUM_ENTRIES * sizeof(struct fcoe_wqe);
+ sq_mem_size = ALIGN(sq_mem_size, QEDF_PAGE_SIZE);
+ sq_num_pbl_pages = (sq_mem_size / QEDF_PAGE_SIZE);
+
+ /* Calculate CQ num entries */
+ cq_mem_size = FCOE_PARAMS_CQ_NUM_ENTRIES * sizeof(struct fcoe_cqe);
+ cq_mem_size = ALIGN(cq_mem_size, QEDF_PAGE_SIZE);
+ cq_num_entries = cq_mem_size / sizeof(struct fcoe_cqe);
+
+ memset(&(qedf->pf_params), 0,
+ sizeof(qedf->pf_params));
+
+ /* Setup the value for fcoe PF */
+ qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
+ qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
+ qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
+ (u64)qedf->hw_p_cpuq;
+ qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
+
+ qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
+
+ qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
+ qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
+
+ /* log_page_size: 12 for 4KB pages */
+ qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
+
+ qedf->pf_params.fcoe_pf_params.mtu = 9000;
+ qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
+ qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
+
+ /* BDQ address and size */
+ qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
+ qedf->bdq_pbl_list_dma;
+ qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
+ qedf->bdq_pbl_list_num_entries;
+ qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "bdq_list=%p bdq_pbl_list_dma=%llx bdq_pbl_list_entries=%d.\n",
+ qedf->bdq_pbl_list,
+ qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
+ qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "cq_num_entries=%d.\n",
+ qedf->pf_params.fcoe_pf_params.cq_num_entries);
+
+ return 0;
+}
+
+/* Free DMA coherent memory for array of queue pointers we pass to qed */
+static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
+{
+ size_t size = 0;
+
+ if (qedf->p_cpuq) {
+ size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
+ pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+ qedf->hw_p_cpuq);
+ }
+
+ qedf_free_global_queues(qedf);
+
+ kfree(qedf->global_queues);
+}
+
+/*
+ * PCI driver functions
+ */
+
+static const struct pci_device_id qedf_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165c) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8080) },
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, qedf_pci_tbl);
+
+static struct pci_driver qedf_pci_driver = {
+ .name = QEDF_MODULE_NAME,
+ .id_table = qedf_pci_tbl,
+ .probe = qedf_probe,
+ .remove = qedf_remove,
+};
+
+static int __qedf_probe(struct pci_dev *pdev, int mode)
+{
+ int rc = -EINVAL;
+ struct fc_lport *lport;
+ struct qedf_ctx *qedf;
+ struct Scsi_Host *host;
+ bool is_vf = false;
+ struct qed_ll2_params params;
+ char host_buf[20];
+ struct qed_link_params link_params;
+ int status;
+ void *task_start, *task_end;
+ struct qed_slowpath_params slowpath_params;
+ struct qed_probe_params qed_params;
+ u16 tmp;
+
+ /*
+ * When doing error recovery we didn't reap the lport so don't try
+ * to reallocate it.
+ */
+ if (mode != QEDF_MODE_RECOVERY) {
+ lport = libfc_host_alloc(&qedf_host_template,
+ sizeof(struct qedf_ctx));
+
+ if (!lport) {
+ QEDF_ERR(NULL, "Could not allocate lport.\n");
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ /* Initialize qedf_ctx */
+ qedf = lport_priv(lport);
+ qedf->lport = lport;
+ qedf->ctlr.lp = lport;
+ qedf->pdev = pdev;
+ qedf->dbg_ctx.pdev = pdev;
+ qedf->dbg_ctx.host_no = lport->host->host_no;
+ spin_lock_init(&qedf->hba_lock);
+ INIT_LIST_HEAD(&qedf->fcports);
+ qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
+ atomic_set(&qedf->num_offloads, 0);
+ qedf->stop_io_on_error = false;
+ pci_set_drvdata(pdev, qedf);
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
+ "QLogic FastLinQ FCoE Module qedf %s, "
+ "FW %d.%d.%d.%d\n", QEDF_VERSION,
+ FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
+ FW_ENGINEERING_VERSION);
+ } else {
+ /* Init pointers during recovery */
+ qedf = pci_get_drvdata(pdev);
+ lport = qedf->lport;
+ }
+
+ host = lport->host;
+
+ /* Allocate mempool for qedf_io_work structs */
+ qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
+ qedf_io_work_cache);
+ if (qedf->io_mempool == NULL) {
+ QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
+ goto err1;
+ }
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
+ qedf->io_mempool);
+
+ sprintf(host_buf, "qedf_%u_link",
+ qedf->lport->host->host_no);
+ qedf->link_update_wq = create_singlethread_workqueue(host_buf);
+ INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
+ INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
+
+ qedf->fipvlan_retries = qedf_fipvlan_retries;
+
+ /*
+ * Common probe. Takes care of basic hardware init and pci_*
+ * functions.
+ */
+ memset(&qed_params, 0, sizeof(qed_params));
+ qed_params.protocol = QED_PROTOCOL_FCOE;
+ qed_params.dp_module = qedf_dp_module;
+ qed_params.dp_level = qedf_dp_level;
+ qed_params.is_vf = is_vf;
+ qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
+ if (!qedf->cdev) {
+ rc = -ENODEV;
+ goto err1;
+ }
+
+	/*
+	 * Queue allocation code should come here. The order should be:
+	 *	slowpath_start
+	 *	status block allocation
+	 *	interrupt registration (to get min number of queues)
+	 *	set_fcoe_pf_param
+	 *	qed_sp_fcoe_func_start
+	 */
+ rc = qedf_set_fcoe_pf_param(qedf);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
+ goto err2;
+ }
+ qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+
+ /* Learn information crucial for qedf to progress */
+ rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+ if (rc) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
+ goto err1;
+ }
+
+ /* Record BDQ producer doorbell addresses */
+ qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
+ qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
+ qedf->bdq_secondary_prod);
+
+ qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
+
+ rc = qedf_prepare_sb(qedf);
+	if (rc) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
+ goto err2;
+ }
+
+ /* Start the Slowpath-process */
+ slowpath_params.int_mode = QED_INT_MODE_MSIX;
+ slowpath_params.drv_major = QEDF_DRIVER_MAJOR_VER;
+ slowpath_params.drv_minor = QEDF_DRIVER_MINOR_VER;
+ slowpath_params.drv_rev = QEDF_DRIVER_REV_VER;
+ slowpath_params.drv_eng = QEDF_DRIVER_ENG_VER;
+	strlcpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
+ goto err2;
+ }
+
+ /*
+ * update_pf_params needs to be called before and after slowpath
+ * start
+ */
+ qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
+
+ /* Setup interrupts */
+ rc = qedf_setup_int(qedf);
+ if (rc)
+ goto err3;
+
+ rc = qed_ops->start(qedf->cdev, &qedf->tasks);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
+ goto err4;
+ }
+ task_start = qedf_get_task_mem(&qedf->tasks, 0);
+ task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
+ "end=%p block_size=%u.\n", task_start, task_end,
+ qedf->tasks.size);
+
+ /*
+ * We need to write the number of BDs in the BDQ we've preallocated so
+ * the f/w will do a prefetch and we'll get an unsolicited CQE when a
+ * packet arrives.
+ */
+ qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Writing %d to primary and secondary BDQ doorbell registers.\n",
+ qedf->bdq_prod_idx);
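+	/*
+	 * The read back of each doorbell flushes the posted MMIO write so the
+	 * new producer index reaches the adapter before we continue.
+	 */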
+ writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
+ tmp = readw(qedf->bdq_primary_prod);
+ writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
+ tmp = readw(qedf->bdq_secondary_prod);
+
+ qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
+
+	/*
+	 * Now that the dev_info struct has been filled in, set the MAC
+	 * address.
+	 */
+ ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
+ qedf->mac);
+
+ /* Set the WWNN and WWPN based on the MAC address */
+ qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
+ qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
+ "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
+
+ sprintf(host_buf, "host_%d", host->host_no);
+ qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION);
+
+
+ /* Set xid max values */
+ qedf->max_scsi_xid = QEDF_MAX_SCSI_XID;
+ qedf->max_els_xid = QEDF_MAX_ELS_XID;
+
+ /* Allocate cmd mgr */
+ qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
+ if (!qedf->cmd_mgr) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
+ goto err5;
+ }
+
+ if (mode != QEDF_MODE_RECOVERY) {
+ host->transportt = qedf_fc_transport_template;
+ host->can_queue = QEDF_MAX_ELS_XID;
+ host->max_lun = qedf_max_lun;
+ host->max_cmd_len = QEDF_MAX_CDB_LEN;
+ rc = scsi_add_host(host, &pdev->dev);
+ if (rc)
+ goto err6;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.mtu = 9000;
+ ether_addr_copy(params.ll2_mac_address, qedf->mac);
+
+ /* Start LL2 processing thread */
+ snprintf(host_buf, 20, "qedf_%d_ll2", host->host_no);
+ qedf->ll2_recv_wq =
+ create_singlethread_workqueue(host_buf);
+ if (!qedf->ll2_recv_wq) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
+ goto err7;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedf_dbg_host_init(&(qedf->dbg_ctx), &qedf_debugfs_ops,
+ &qedf_dbg_fops);
+#endif
+
+ /* Start LL2 */
+ qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
+ rc = qed_ops->ll2->start(qedf->cdev, &params);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
+ goto err7;
+ }
+ set_bit(QEDF_LL2_STARTED, &qedf->flags);
+
+	/* hw will be inserting vlan tag */
+ qedf->vlan_hw_insert = 1;
+ qedf->vlan_id = 0;
+
+ /*
+	 * No need to set up fcoe_ctlr or fc_lport objects during recovery since
+ * they were not reaped during the unload process.
+ */
+ if (mode != QEDF_MODE_RECOVERY) {
+		/* Set up the embedded fcoe controller */
+ qedf_fcoe_ctlr_setup(qedf);
+
+ /* Setup lport */
+ rc = qedf_lport_setup(qedf);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "qedf_lport_setup failed.\n");
+ goto err7;
+ }
+ }
+
+ sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
+ qedf->timer_work_queue =
+ create_singlethread_workqueue(host_buf);
+ if (!qedf->timer_work_queue) {
+ QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
+ "workqueue.\n");
+ goto err7;
+ }
+
+ /* DPC workqueue is not reaped during recovery unload */
+ if (mode != QEDF_MODE_RECOVERY) {
+ sprintf(host_buf, "qedf_%u_dpc",
+ qedf->lport->host->host_no);
+ qedf->dpc_wq = create_singlethread_workqueue(host_buf);
+ }
+
+ /*
+ * GRC dump and sysfs parameters are not reaped during the recovery
+ * unload process.
+ */
+ if (mode != QEDF_MODE_RECOVERY) {
+ qedf->grcdump_size = qed_ops->common->dbg_grc_size(qedf->cdev);
+ if (qedf->grcdump_size) {
+ rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
+ qedf->grcdump_size);
+ if (rc) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "GRC Dump buffer alloc failed.\n");
+ qedf->grcdump = NULL;
+ }
+
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "grcdump: addr=%p, size=%u.\n",
+ qedf->grcdump, qedf->grcdump_size);
+ }
+ qedf_create_sysfs_ctx_attr(qedf);
+
+ /* Initialize I/O tracing for this adapter */
+ spin_lock_init(&qedf->io_trace_lock);
+ qedf->io_trace_idx = 0;
+ }
+
+ init_completion(&qedf->flogi_compl);
+
+ memset(&link_params, 0, sizeof(struct qed_link_params));
+ link_params.link_up = true;
+ status = qed_ops->common->set_link(qedf->cdev, &link_params);
+ if (status)
+ QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
+
+ /* Start/restart discovery */
+ if (mode == QEDF_MODE_RECOVERY)
+ fcoe_ctlr_link_up(&qedf->ctlr);
+ else
+ fc_fabric_login(lport);
+
+ /* All good */
+ return 0;
+
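+/*
+ * Error unwind: the labels below fall through, so jumping to a given errN
+ * label also runs every teardown step after it, releasing resources in
+ * roughly the reverse order they were set up.
+ */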
+err7:
+ if (qedf->ll2_recv_wq)
+ destroy_workqueue(qedf->ll2_recv_wq);
+ fc_remove_host(qedf->lport->host);
+ scsi_remove_host(qedf->lport->host);
+#ifdef CONFIG_DEBUG_FS
+ qedf_dbg_host_exit(&(qedf->dbg_ctx));
+#endif
+err6:
+ qedf_cmd_mgr_free(qedf->cmd_mgr);
+err5:
+ qed_ops->stop(qedf->cdev);
+err4:
+ qedf_free_fcoe_pf_param(qedf);
+ qedf_sync_free_irqs(qedf);
+err3:
+ qed_ops->common->slowpath_stop(qedf->cdev);
+err2:
+ qed_ops->common->remove(qedf->cdev);
+err1:
+ scsi_host_put(lport->host);
+err0:
+ return rc;
+}
+
+static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ return __qedf_probe(pdev, QEDF_MODE_NORMAL);
+}
+
+static void __qedf_remove(struct pci_dev *pdev, int mode)
+{
+ struct qedf_ctx *qedf;
+
+ if (!pdev) {
+ QEDF_ERR(NULL, "pdev is NULL.\n");
+ return;
+ }
+
+ qedf = pci_get_drvdata(pdev);
+
+ /*
+ * Prevent race where we're in board disable work and then try to
+ * rmmod the module.
+ */
+ if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
+ QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
+ return;
+ }
+
+ if (mode != QEDF_MODE_RECOVERY)
+ set_bit(QEDF_UNLOADING, &qedf->flags);
+
+ /* Logoff the fabric to upload all connections */
+ if (mode == QEDF_MODE_RECOVERY)
+ fcoe_ctlr_link_down(&qedf->ctlr);
+ else
+ fc_fabric_logoff(qedf->lport);
+ qedf_wait_for_upload(qedf);
+
+#ifdef CONFIG_DEBUG_FS
+ qedf_dbg_host_exit(&(qedf->dbg_ctx));
+#endif
+
+ /* Stop any link update handling */
+ cancel_delayed_work_sync(&qedf->link_update);
+ destroy_workqueue(qedf->link_update_wq);
+ qedf->link_update_wq = NULL;
+
+ if (qedf->timer_work_queue)
+ destroy_workqueue(qedf->timer_work_queue);
+
+ /* Stop Light L2 */
+ clear_bit(QEDF_LL2_STARTED, &qedf->flags);
+ qed_ops->ll2->stop(qedf->cdev);
+ if (qedf->ll2_recv_wq)
+ destroy_workqueue(qedf->ll2_recv_wq);
+
+ /* Stop fastpath */
+ qedf_sync_free_irqs(qedf);
+ qedf_destroy_sb(qedf);
+
+ /*
+ * During recovery don't destroy OS constructs that represent the
+ * physical port.
+ */
+ if (mode != QEDF_MODE_RECOVERY) {
+ qedf_free_grc_dump_buf(&qedf->grcdump);
+ qedf_remove_sysfs_ctx_attr(qedf);
+
+ /* Remove all SCSI/libfc/libfcoe structures */
+ fcoe_ctlr_destroy(&qedf->ctlr);
+ fc_lport_destroy(qedf->lport);
+ fc_remove_host(qedf->lport->host);
+ scsi_remove_host(qedf->lport->host);
+ }
+
+ qedf_cmd_mgr_free(qedf->cmd_mgr);
+
+ if (mode != QEDF_MODE_RECOVERY) {
+ fc_exch_mgr_free(qedf->lport);
+ fc_lport_free_stats(qedf->lport);
+
+ /* Wait for all vports to be reaped */
+ qedf_wait_for_vport_destroy(qedf);
+ }
+
+ /*
+ * Now that all connections have been uploaded we can stop the
+ * rest of the qed operations
+ */
+ qed_ops->stop(qedf->cdev);
+
+ if (mode != QEDF_MODE_RECOVERY) {
+ if (qedf->dpc_wq) {
+ /* Stop general DPC handling */
+ destroy_workqueue(qedf->dpc_wq);
+ qedf->dpc_wq = NULL;
+ }
+ }
+
+ /* Final shutdown for the board */
+ qedf_free_fcoe_pf_param(qedf);
+ if (mode != QEDF_MODE_RECOVERY) {
+ qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
+ pci_set_drvdata(pdev, NULL);
+ }
+ qed_ops->common->slowpath_stop(qedf->cdev);
+ qed_ops->common->remove(qedf->cdev);
+
+ mempool_destroy(qedf->io_mempool);
+
+ /* Only reap the Scsi_host on a real removal */
+ if (mode != QEDF_MODE_RECOVERY)
+ scsi_host_put(qedf->lport->host);
+}
+
+static void qedf_remove(struct pci_dev *pdev)
+{
+ /* Check to make sure this function wasn't already disabled */
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
+ __qedf_remove(pdev, QEDF_MODE_NORMAL);
+}
+
+/*
+ * Module Init/Remove
+ */
+
+static int __init qedf_init(void)
+{
+ int ret;
+
+ /* If debug=1 passed, set the default log mask */
+ if (qedf_debug == QEDF_LOG_DEFAULT)
+ qedf_debug = QEDF_DEFAULT_LOG_MASK;
+
+ /* Print driver banner */
+ QEDF_INFO(NULL, QEDF_LOG_INFO, "%s v%s.\n", QEDF_DESCR,
+ QEDF_VERSION);
+
+ /* Create kmem_cache for qedf_io_work structs */
+ qedf_io_work_cache = kmem_cache_create("qedf_io_work_cache",
+ sizeof(struct qedf_io_work), 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (qedf_io_work_cache == NULL) {
+ QEDF_ERR(NULL, "qedf_io_work_cache is NULL.\n");
+ goto err1;
+ }
+ QEDF_INFO(NULL, QEDF_LOG_DISC, "qedf_io_work_cache=%p.\n",
+ qedf_io_work_cache);
+
+ qed_ops = qed_get_fcoe_ops();
+ if (!qed_ops) {
+ QEDF_ERR(NULL, "Failed to get qed fcoe operations\n");
+ goto err1;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ qedf_dbg_init("qedf");
+#endif
+
+ qedf_fc_transport_template =
+ fc_attach_transport(&qedf_fc_transport_fn);
+ if (!qedf_fc_transport_template) {
+ QEDF_ERR(NULL, "Could not register with FC transport\n");
+ goto err2;
+ }
+
+ qedf_fc_vport_transport_template =
+ fc_attach_transport(&qedf_fc_vport_transport_fn);
+ if (!qedf_fc_vport_transport_template) {
+ QEDF_ERR(NULL, "Could not register vport template with FC "
+ "transport\n");
+ goto err3;
+ }
+
+ qedf_io_wq = create_workqueue("qedf_io_wq");
+ if (!qedf_io_wq) {
+ QEDF_ERR(NULL, "Could not create qedf_io_wq.\n");
+ goto err4;
+ }
+
+ qedf_cb_ops.get_login_failures = qedf_get_login_failures;
+
+ ret = pci_register_driver(&qedf_pci_driver);
+ if (ret) {
+ QEDF_ERR(NULL, "Failed to register driver\n");
+ goto err5;
+ }
+
+ return 0;
+
+err5:
+ destroy_workqueue(qedf_io_wq);
+err4:
+ fc_release_transport(qedf_fc_vport_transport_template);
+err3:
+ fc_release_transport(qedf_fc_transport_template);
+err2:
+#ifdef CONFIG_DEBUG_FS
+ qedf_dbg_exit();
+#endif
+ qed_put_fcoe_ops();
+err1:
+ return -EINVAL;
+}
+
+static void __exit qedf_cleanup(void)
+{
+ pci_unregister_driver(&qedf_pci_driver);
+
+ destroy_workqueue(qedf_io_wq);
+
+ fc_release_transport(qedf_fc_vport_transport_template);
+ fc_release_transport(qedf_fc_transport_template);
+#ifdef CONFIG_DEBUG_FS
+ qedf_dbg_exit();
+#endif
+ qed_put_fcoe_ops();
+
+ kmem_cache_destroy(qedf_io_work_cache);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver");
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_VERSION(QEDF_VERSION);
+module_init(qedf_init);
+module_exit(qedf_cleanup);
diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h
new file mode 100644
index 000000000000..4ae5f537a440
--- /dev/null
+++ b/drivers/scsi/qedf/qedf_version.h
@@ -0,0 +1,15 @@
+/*
+ * QLogic FCoE Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#define QEDF_VERSION "8.10.7.0"
+#define QEDF_DRIVER_MAJOR_VER 8
+#define QEDF_DRIVER_MINOR_VER 10
+#define QEDF_DRIVER_REV_VER 7
+#define QEDF_DRIVER_ENG_VER 0
+
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index b1d3904ae8fd..c9f0ef4e11b3 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -165,10 +165,9 @@ static void qedi_tmf_resp_work(struct work_struct *work)
iscsi_block_session(session->cls_session);
rval = qedi_cleanup_all_io(qedi, qedi_conn, qedi_cmd->task, true);
if (rval) {
- clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
qedi_clear_task_idx(qedi, qedi_cmd->task_id);
iscsi_unblock_session(session->cls_session);
- return;
+ goto exit_tmf_resp;
}
iscsi_unblock_session(session->cls_session);
@@ -177,6 +176,8 @@ static void qedi_tmf_resp_work(struct work_struct *work)
spin_lock(&session->back_lock);
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr_ptr, NULL, 0);
spin_unlock(&session->back_lock);
+
+exit_tmf_resp:
kfree(resp_hdr_ptr);
clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f201f4099620..f610103994af 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2163,6 +2163,9 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
clear_bit(vha->vp_idx, ha->vp_idx_map);
mutex_unlock(&ha->vport_lock);
+ dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
+ vha->gnl.ldma);
+
if (vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 40ca75bbcb9d..84c9098cc089 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -13,28 +13,25 @@
/* BSG support for ELS/CT pass through */
void
-qla2x00_bsg_job_done(void *data, void *ptr, int res)
+qla2x00_bsg_job_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ srb_t *sp = ptr;
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_reply *bsg_reply = bsg_job->reply;
bsg_reply->result = res;
bsg_job_done(bsg_job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- sp->free(vha, sp);
+ sp->free(sp);
}
void
-qla2x00_bsg_sp_free(void *data, void *ptr)
+qla2x00_bsg_sp_free(void *ptr)
{
- srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = sp->fcport->vha;
+ srb_t *sp = ptr;
+ struct qla_hw_data *ha = sp->vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
struct fc_bsg_request *bsg_request = bsg_job->request;
-
- struct qla_hw_data *ha = vha->hw;
struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
if (sp->type == SRB_FXIOCB_BCMD) {
@@ -62,7 +59,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
sp->type == SRB_FXIOCB_BCMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
}
int
@@ -394,7 +391,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
rval = -EIO;
goto done_unmap_sg;
}
@@ -542,7 +539,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
rval = -EIO;
goto done_free_fcport;
}
@@ -2578,6 +2575,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job)
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- sp->free(vha, sp);
+ sp->free(sp);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 2f14adfab018..625d438e3cce 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -55,6 +55,8 @@
#include "qla_settings.h"
+#define MODE_DUAL (MODE_TARGET | MODE_INITIATOR)
+
/*
* Data bit definitions
*/
@@ -251,6 +253,14 @@
#define MAX_CMDSZ 16 /* SCSI maximum CDB size. */
#include "qla_fw.h"
+
+struct name_list_extended {
+ struct get_name_list_extended *l;
+ dma_addr_t ldma;
+ struct list_head fcports; /* protect by sess_list */
+ u32 size;
+ u8 sent;
+};
/*
* Timeout timer counts in seconds
*/
@@ -309,6 +319,17 @@ struct els_logo_payload {
uint8_t wwpn[WWN_SIZE];
};
+struct ct_arg {
+ void *iocb;
+ u16 nport_handle;
+ dma_addr_t req_dma;
+ dma_addr_t rsp_dma;
+ u32 req_size;
+ u32 rsp_size;
+ void *req;
+ void *rsp;
+};
+
/*
* SRB extensions.
*/
@@ -320,6 +341,7 @@ struct srb_iocb {
#define SRB_LOGIN_COND_PLOGI BIT_1
#define SRB_LOGIN_SKIP_PRLI BIT_2
uint16_t data[2];
+ u32 iop[2];
} logio;
struct {
#define ELS_DCMD_TIMEOUT 20
@@ -372,6 +394,16 @@ struct srb_iocb {
__le16 comp_status;
struct completion comp;
} abt;
+ struct ct_arg ctarg;
+ struct {
+ __le16 in_mb[28]; /* fr fw */
+ __le16 out_mb[28]; /* to fw */
+ void *out, *in;
+ dma_addr_t out_dma, in_dma;
+ } mbx;
+ struct {
+ struct imm_ntfy_from_isp *ntfy;
+ } nack;
} u;
struct timer_list timer;
@@ -392,23 +424,31 @@ struct srb_iocb {
#define SRB_FXIOCB_BCMD 11
#define SRB_ABT_CMD 12
#define SRB_ELS_DCMD 13
+#define SRB_MB_IOCB 14
+#define SRB_CT_PTHRU_CMD 15
+#define SRB_NACK_PLOGI 16
+#define SRB_NACK_PRLI 17
+#define SRB_NACK_LOGO 18
typedef struct srb {
atomic_t ref_count;
struct fc_port *fcport;
+ struct scsi_qla_host *vha;
uint32_t handle;
uint16_t flags;
uint16_t type;
char *name;
int iocbs;
struct qla_qpair *qpair;
+ u32 gen1; /* scratch */
+ u32 gen2; /* scratch */
union {
struct srb_iocb iocb_cmd;
struct bsg_job *bsg_job;
struct srb_cmd scmd;
} u;
- void (*done)(void *, void *, int);
- void (*free)(void *, void *);
+ void (*done)(void *, int);
+ void (*free)(void *);
} srb_t;
#define GET_CMD_SP(sp) (sp->u.scmd.cmd)
@@ -1794,6 +1834,7 @@ typedef struct {
#define SS_RESIDUAL_OVER BIT_10
#define SS_SENSE_LEN_VALID BIT_9
#define SS_RESPONSE_INFO_LEN_VALID BIT_8
+#define SS_SCSI_STATUS_BYTE 0xff
#define SS_RESERVE_CONFLICT (BIT_4 | BIT_3)
#define SS_BUSY_CONDITION BIT_3
@@ -1975,6 +2016,84 @@ struct mbx_entry {
uint8_t port_name[WWN_SIZE];
};
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ * This is sent by the ISP to the Target driver.
+ * This IOCB reports events sent by the initiator that need to be
+ * handled by the target driver immediately.
+ */
+struct imm_ntfy_from_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t lun;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t status_modifier;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT 0x5
+#define SRR_IU_STATUS 0x7
+ uint16_t srr_ox_id;
+ uint8_t reserved_2[28];
+ } isp2x;
+ struct {
+ uint32_t reserved;
+ uint16_t nport_handle;
+ uint16_t reserved_2;
+ uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t fw_handle;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_ox_id;
+ union {
+ struct {
+ uint8_t node_name[8];
+ } plogi; /* PLOGI/ADISC/PDISC */
+ struct {
+ /* PRLI word 3 bit 0-15 */
+ uint16_t wd3_lo;
+ uint8_t resv0[6];
+ } prli;
+ struct {
+ uint8_t port_id[3];
+ uint8_t resv1;
+ uint16_t nport_handle;
+ uint16_t resv2;
+ } req_els;
+ } u;
+ uint8_t port_name[8];
+ uint8_t resv3[3];
+ uint8_t vp_index;
+ uint32_t reserved_5;
+ uint8_t port_id[3];
+ uint8_t reserved_6;
+ } isp24;
+ } u;
+ uint16_t reserved_7;
+ uint16_t ox_id;
+} __packed;
+#endif
+
/*
* ISP request and response queue entry sizes
*/
@@ -2022,10 +2141,22 @@ typedef struct {
#define FC4_TYPE_OTHER 0x0
#define FC4_TYPE_UNKNOWN 0xff
+/* mailbox command 4G & above */
+struct mbx_24xx_entry {
+ uint8_t entry_type;
+ uint8_t entry_count;
+ uint8_t sys_define1;
+ uint8_t entry_status;
+ uint32_t handle;
+ uint16_t mb[28];
+};
+
+#define IOCB_SIZE 64
+
/*
* Fibre channel port type.
*/
- typedef enum {
+typedef enum {
FCT_UNKNOWN,
FCT_RSCN,
FCT_SWITCH,
@@ -2034,6 +2165,74 @@ typedef struct {
FCT_TARGET
} fc_port_type_t;
+enum qla_sess_deletion {
+ QLA_SESS_DELETION_NONE = 0,
+ QLA_SESS_DELETION_IN_PROGRESS,
+ QLA_SESS_DELETED,
+};
+
+enum qlt_plogi_link_t {
+ QLT_PLOGI_LINK_SAME_WWN,
+ QLT_PLOGI_LINK_CONFLICT,
+ QLT_PLOGI_LINK_MAX
+};
+
+struct qlt_plogi_ack_t {
+ struct list_head list;
+ struct imm_ntfy_from_isp iocb;
+ port_id_t id;
+ int ref_count;
+ void *fcport;
+};
+
+struct ct_sns_desc {
+ struct ct_sns_pkt *ct_sns;
+ dma_addr_t ct_sns_dma;
+};
+
+enum discovery_state {
+ DSC_DELETED,
+ DSC_GID_PN,
+ DSC_GNL,
+ DSC_LOGIN_PEND,
+ DSC_LOGIN_FAILED,
+ DSC_GPDB,
+ DSC_GPSC,
+ DSC_UPD_FCPORT,
+ DSC_LOGIN_COMPLETE,
+ DSC_DELETE_PEND,
+};
+
+enum login_state { /* FW control Target side */
+ DSC_LS_LLIOCB_SENT = 2,
+ DSC_LS_PLOGI_PEND,
+ DSC_LS_PLOGI_COMP,
+ DSC_LS_PRLI_PEND,
+ DSC_LS_PRLI_COMP,
+ DSC_LS_PORT_UNAVAIL,
+ DSC_LS_PRLO_PEND = 9,
+ DSC_LS_LOGO_PEND,
+};
+
+enum fcport_mgt_event {
+ FCME_RELOGIN = 1,
+ FCME_RSCN,
+ FCME_GIDPN_DONE,
+ FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
+ FCME_GNL_DONE,
+ FCME_GPSC_DONE,
+ FCME_GPDB_DONE,
+ FCME_GPNID_DONE,
+ FCME_DELETE_DONE,
+};
+
+enum rscn_addr_format {
+ RSCN_PORT_ADDR,
+ RSCN_AREA_ADDR,
+ RSCN_DOM_ADDR,
+ RSCN_FAB_ADDR,
+};
+
/*
* Fibre channel port structure.
*/
@@ -2047,6 +2246,29 @@ typedef struct fc_port {
uint16_t loop_id;
uint16_t old_loop_id;
+ unsigned int conf_compl_supported:1;
+ unsigned int deleted:2;
+ unsigned int local:1;
+ unsigned int logout_on_delete:1;
+ unsigned int logo_ack_needed:1;
+ unsigned int keep_nport_handle:1;
+ unsigned int send_els_logo:1;
+ unsigned int login_pause:1;
+ unsigned int login_succ:1;
+
+ struct fc_port *conflict;
+ unsigned char logout_completed;
+ int generation;
+
+ struct se_session *se_sess;
+ struct kref sess_kref;
+ struct qla_tgt *tgt;
+ unsigned long expires;
+ struct list_head del_list_entry;
+ struct work_struct free_work;
+
+ struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
+
uint16_t tgt_id;
uint16_t old_tgt_id;
@@ -2075,8 +2297,30 @@ typedef struct fc_port {
unsigned long retry_delay_timestamp;
struct qla_tgt_sess *tgt_session;
+ struct ct_sns_desc ct_desc;
+ enum discovery_state disc_state;
+ enum login_state fw_login_state;
+ u32 login_gen, last_login_gen;
+ u32 rscn_gen, last_rscn_gen;
+ u32 chip_reset;
+ struct list_head gnl_entry;
+ struct work_struct del_work;
+ u8 iocb[IOCB_SIZE];
} fc_port_t;
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+struct event_arg {
+ enum fcport_mgt_event event;
+ fc_port_t *fcport;
+ srb_t *sp;
+ port_id_t id;
+ u16 data[2], rc;
+ u8 port_name[WWN_SIZE];
+ u32 iop[2];
+};
+
#include "qla_mr.h"
/*
@@ -2154,6 +2398,10 @@ static const char * const port_state_str[] = {
#define GFT_ID_REQ_SIZE (16 + 4)
#define GFT_ID_RSP_SIZE (16 + 32)
+#define GID_PN_CMD 0x121
+#define GID_PN_REQ_SIZE (16 + 8)
+#define GID_PN_RSP_SIZE (16 + 4)
+
#define RFT_ID_CMD 0x217
#define RFT_ID_REQ_SIZE (16 + 4 + 32)
#define RFT_ID_RSP_SIZE 16
@@ -2479,6 +2727,10 @@ struct ct_sns_req {
uint8_t reserved;
uint8_t port_name[3];
} gff_id;
+
+ struct {
+ uint8_t port_name[8];
+ } gid_pn;
} req;
};
@@ -2558,6 +2810,10 @@ struct ct_sns_rsp {
struct {
uint8_t fc4_features[128];
} gff_id;
+ struct {
+ uint8_t reserved;
+ uint8_t port_id[3];
+ } gid_pn;
} rsp;
};
@@ -2699,11 +2955,11 @@ struct isp_operations {
uint16_t (*calc_req_entries) (uint16_t);
void (*build_iocbs) (srb_t *, cmd_entry_t *, uint16_t);
- void * (*prep_ms_iocb) (struct scsi_qla_host *, uint32_t, uint32_t);
- void * (*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
+ void *(*prep_ms_iocb) (struct scsi_qla_host *, struct ct_arg *);
+ void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t,
uint32_t);
- uint8_t * (*read_nvram) (struct scsi_qla_host *, uint8_t *,
+ uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *,
uint32_t, uint32_t);
int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t,
uint32_t);
@@ -2765,13 +3021,21 @@ enum qla_work_type {
QLA_EVT_AEN,
QLA_EVT_IDC_ACK,
QLA_EVT_ASYNC_LOGIN,
- QLA_EVT_ASYNC_LOGIN_DONE,
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_ASYNC_ADISC,
QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
+ QLA_EVT_GIDPN,
+ QLA_EVT_GPNID,
+ QLA_EVT_GPNID_DONE,
+ QLA_EVT_NEW_SESS,
+ QLA_EVT_GPDB,
+ QLA_EVT_GPSC,
+ QLA_EVT_UPD_FCPORT,
+ QLA_EVT_GNL,
+ QLA_EVT_NACK,
};
@@ -2807,6 +3071,23 @@ struct qla_work_evt {
struct {
srb_t *sp;
} iosb;
+ struct {
+ port_id_t id;
+ } gpnid;
+ struct {
+ port_id_t id;
+ u8 port_name[8];
+ void *pla;
+ } new_sess;
+	struct { /* Get PDB, Get Speed, update fcport, gnl, gidpn */
+ fc_port_t *fcport;
+ u8 opt;
+ } fcport;
+ struct {
+ fc_port_t *fcport;
+ u8 iocb[IOCB_SIZE];
+ int type;
+ } nack;
} u;
};
@@ -2943,6 +3224,7 @@ struct qla_qpair {
struct qla_hw_data *hw;
struct work_struct q_work;
struct list_head qp_list_elem; /* vha->qp_list */
+ struct scsi_qla_host *vha;
};
/* Place holder for FW buffer parameters */
@@ -2963,7 +3245,6 @@ struct qlt_hw_data {
/* Protected by hw lock */
uint32_t enable_class_2:1;
uint32_t enable_explicit_conf:1;
- uint32_t ini_mode_force_reverse:1;
uint32_t node_name_set:1;
dma_addr_t atio_dma; /* Physical address. */
@@ -3115,6 +3396,7 @@ struct qla_hw_data {
#define FLOGI_SP_SUPPORT BIT_13
uint8_t port_no; /* Physical port of adapter */
+ uint8_t exch_starvation;
/* Timeout timers. */
uint8_t loop_down_abort_time; /* port down timer */
@@ -3682,7 +3964,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
-#define SCR_PENDING 21 /* SCR in target mode */
+#define FREE_BIT 21
#define PORT_UPDATE_NEEDED 22
#define FX00_RESET_RECOVERY 23
#define FX00_TARGET_SCAN 24
@@ -3736,7 +4018,9 @@ typedef struct scsi_qla_host {
/* list of commands waiting on workqueue */
struct list_head qla_cmd_list;
struct list_head qla_sess_op_cmd_list;
+ struct list_head unknown_atio_list;
spinlock_t cmd_list_lock;
+ struct delayed_work unknown_atio_work;
/* Counter to detect races between ELS and RSCN events */
atomic_t generation_tick;
@@ -3788,6 +4072,10 @@ typedef struct scsi_qla_host {
struct qla8044_reset_template reset_tmplt;
struct qla_tgt_counters tgt_counters;
uint16_t bbcr;
+ struct name_list_extended gnl;
+ /* Count of active session/fcport */
+ int fcport_count;
+ wait_queue_head_t fcport_waitQ;
} scsi_qla_host_t;
struct qla27xx_image_status {
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 34272fde8a5b..b48cce696bac 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -18,7 +18,7 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
scsi_qla_host_t *vha = s->private;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
seq_printf(s, "%s\n",vha->host_str);
@@ -26,12 +26,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
seq_printf(s, "Port ID Port Name Handle\n");
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ list_for_each_entry(sess, &vha->vp_fcports, list)
seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
- sess->s_id.b.domain,sess->s_id.b.area,
- sess->s_id.b.al_pa, sess->port_name,
- sess->loop_id);
- }
+ sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa, sess->port_name,
+ sess->loop_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 8a2368b32dec..1f808928763b 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -72,6 +72,37 @@ struct port_database_24xx {
uint8_t reserved_3[24];
};
+/*
+ * MB 75h returns a list of DB entries similar to port_database_24xx (64B).
+ * However, in this case it returns only the first 40 bytes.
+ */
+struct get_name_list_extended {
+ __le16 flags;
+ u8 current_login_state;
+ u8 last_login_state;
+ u8 hard_address[3];
+ u8 reserved_1;
+ u8 port_id[3];
+ u8 sequence_id;
+ __le16 port_timer;
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 receive_data_size;
+ __le16 reserved_2;
+
+	/* PRLI SVC Params are big endian */
+ u8 prli_svc_param_word_0[2]; /* Bits 15-0 of word 0 */
+ u8 prli_svc_param_word_3[2]; /* Bits 15-0 of word 3 */
+ u8 port_name[WWN_SIZE];
+ u8 node_name[WWN_SIZE];
+};
+
+/* MB 75h: This is the short version of the database */
+struct get_name_list {
+ u8 port_node_name[WWN_SIZE]; /* B7 most sig, B0 least sig */
+ __le16 nport_handle;
+ u8 reserved;
+};
+
struct vp_database_24xx {
uint16_t vp_status;
uint8_t options;
@@ -1270,27 +1301,76 @@ struct vp_config_entry_24xx {
};
#define VP_RPT_ID_IOCB_TYPE 0x32 /* Report ID Acquisition entry. */
+enum VP_STATUS {
+ VP_STAT_COMPL,
+ VP_STAT_FAIL,
+ VP_STAT_ID_CHG,
+ VP_STAT_SNS_TO, /* timeout */
+ VP_STAT_SNS_RJT,
+ VP_STAT_SCR_TO, /* timeout */
+ VP_STAT_SCR_RJT,
+};
+
+enum VP_FLAGS {
+ VP_FLAGS_CON_FLOOP = 1,
+ VP_FLAGS_CON_P2P = 2,
+ VP_FLAGS_CON_FABRIC = 3,
+ VP_FLAGS_NAME_VALID = BIT_5,
+};
+
struct vp_rpt_id_entry_24xx {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
uint8_t sys_define; /* System defined. */
uint8_t entry_status; /* Entry Status. */
-
- uint32_t handle; /* System handle. */
-
- uint16_t vp_count; /* Format 0 -- | VP setup | VP acq |. */
- /* Format 1 -- | VP count |. */
- uint16_t vp_idx; /* Format 0 -- Reserved. */
- /* Format 1 -- VP status and index. */
+ uint32_t resv1;
+ uint8_t vp_acquired;
+ uint8_t vp_setup;
+ uint8_t vp_idx; /* Format 0=reserved */
+ uint8_t vp_status; /* Format 0=reserved */
uint8_t port_id[3];
uint8_t format;
-
- uint8_t vp_idx_map[16];
-
- uint8_t reserved_4[24];
- uint16_t bbcr;
- uint8_t reserved_5[6];
+ union {
+ struct {
+ /* format 0 loop */
+ uint8_t vp_idx_map[16];
+ uint8_t reserved_4[32];
+ } f0;
+ struct {
+ /* format 1 fabric */
+ uint8_t vpstat1_subcode; /* vp_status=1 subcode */
+ uint8_t flags;
+ uint16_t fip_flags;
+ uint8_t rsv2[12];
+
+ uint8_t ls_rjt_vendor;
+ uint8_t ls_rjt_explanation;
+ uint8_t ls_rjt_reason;
+ uint8_t rsv3[5];
+
+ uint8_t port_name[8];
+ uint8_t node_name[8];
+ uint16_t bbcr;
+ uint8_t reserved_5[6];
+ } f1;
+ struct { /* format 2: N2N direct connect */
+ uint8_t vpstat1_subcode;
+ uint8_t flags;
+ uint16_t rsv6;
+ uint8_t rsv2[12];
+
+ uint8_t ls_rjt_vendor;
+ uint8_t ls_rjt_explanation;
+ uint8_t ls_rjt_reason;
+ uint8_t rsv3[5];
+
+ uint8_t port_name[8];
+ uint8_t node_name[8];
+ uint32_t remote_nport_id;
+ uint32_t reserved_5;
+ } f2;
+ } u;
};
#define VF_EVFP_IOCB_TYPE 0x26 /* Exchange Virtual Fabric Parameters entry. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index afa0116a163b..b3d6441d1d90 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -73,6 +73,10 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
+struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
+ enum qla_work_type);
+extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
+int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e);
extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
@@ -94,6 +98,13 @@ extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
int, int);
extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
+void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *);
+int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
+ struct imm_ntfy_from_isp *, int);
+int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *,
+ void *);
+int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
/*
* Global Data in qla_os.c source file.
@@ -127,6 +138,7 @@ extern int ql2xmdenable;
extern int ql2xexlogins;
extern int ql2xexchoffld;
extern int ql2xfwholdabts;
+extern int ql2xmvasynctoatio;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -135,8 +147,6 @@ extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *,
- fc_port_t *, uint16_t *);
extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
@@ -176,9 +186,13 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla2x00_sp_compl(void *, void *, int);
-extern void qla2xxx_qpair_sp_free_dma(void *, void *);
-extern void qla2xxx_qpair_sp_compl(void *, void *, int);
+extern void qla2x00_sp_compl(void *, int);
+extern void qla2xxx_qpair_sp_free_dma(void *);
+extern void qla2xxx_qpair_sp_compl(void *, int);
+extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
/*
* Global Functions in qla_mid.c source file.
@@ -201,7 +215,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
-extern void qla2x00_sp_free_dma(void *, void *);
+extern void qla2x00_sp_free_dma(void *);
extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -302,9 +316,6 @@ extern int
qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int
-qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
-
-extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
@@ -483,6 +494,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
uint32_t);
extern irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id);
+fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t);
+fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8);
+fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -574,8 +588,8 @@ extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
/*
* Global Function Prototypes in qla_gs.c source file.
*/
-extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
-extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
+extern void *qla2x00_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *);
+extern void *qla24xx_prep_ms_iocb(scsi_qla_host_t *, struct ct_arg *);
extern int qla2x00_ga_nxt(scsi_qla_host_t *, fc_port_t *);
extern int qla2x00_gid_pt(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpn_id(scsi_qla_host_t *, sw_info_t *);
@@ -591,6 +605,23 @@ extern int qla2x00_fdmi_register(scsi_qla_host_t *);
extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
+extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
+ struct ct_sns_rsp *, const char *);
+extern void qla2x00_async_iocb_timeout(void *data);
+extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
+int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
+void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
+
+extern void qla2x00_free_fcport(fc_port_t *);
+
+extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
+extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
+void qla24xx_async_gpnid_done(scsi_qla_host_t *, srb_t*);
+void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
+
+int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
+int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_attr.c source file.
@@ -702,10 +733,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
/* IOCB related functions */
extern int qla82xx_start_scsi(srb_t *);
-extern void qla2x00_sp_free(void *, void *);
+extern void qla2x00_sp_free(void *);
extern void qla2x00_sp_timeout(unsigned long);
-extern void qla2x00_bsg_job_done(void *, void *, int);
-extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_bsg_job_done(void *, int);
+extern void qla2x00_bsg_sp_free(void *);
extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
@@ -803,4 +834,17 @@ extern int qla_get_exchoffld_status(scsi_qla_host_t *, uint16_t *, uint16_t *);
extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *, dma_addr_t);
extern void qlt_handle_abts_recv(struct scsi_qla_host *, response_t *);
+int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
+ struct imm_ntfy_from_isp *, int);
+void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
+void qlt_plogi_ack_link(struct scsi_qla_host *, struct qlt_plogi_ack_t *,
+ struct fc_port *, enum qlt_plogi_link_t);
+void qlt_plogi_ack_unref(struct scsi_qla_host *, struct qlt_plogi_ack_t *);
+extern void qlt_schedule_sess_for_deletion(struct fc_port *, bool);
+extern void qlt_schedule_sess_for_deletion_lock(struct fc_port *);
+extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
+ uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
+void qla24xx_delete_sess_fn(struct work_struct *);
+void qlt_unknown_atio_work_fn(struct work_struct *);
+
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index ee3df8794806..ab0f873fd6a1 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -24,12 +24,12 @@ static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
+qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
struct qla_hw_data *ha = vha->hw;
ms_iocb_entry_t *ms_pkt;
- ms_pkt = ha->ms_iocb;
+ ms_pkt = (ms_iocb_entry_t *)arg->iocb;
memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
ms_pkt->entry_type = MS_IOCB_TYPE;
@@ -39,15 +39,15 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ms_pkt->cmd_dsd_count = cpu_to_le16(1);
ms_pkt->total_dsd_count = cpu_to_le16(2);
- ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
- ms_pkt->req_bytecount = cpu_to_le32(req_size);
+ ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
+ ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
- ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
+ ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
- ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
+ ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
vha->qla_stats.control_requests++;
@@ -64,29 +64,29 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
* Returns a pointer to the @ha's ms_iocb.
*/
void *
-qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
+qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
{
struct qla_hw_data *ha = vha->hw;
struct ct_entry_24xx *ct_pkt;
- ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
+ ct_pkt = (struct ct_entry_24xx *)arg->iocb;
memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
ct_pkt->entry_type = CT_IOCB_TYPE;
ct_pkt->entry_count = 1;
- ct_pkt->nport_handle = cpu_to_le16(NPH_SNS);
+ ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
ct_pkt->cmd_dsd_count = cpu_to_le16(1);
ct_pkt->rsp_dsd_count = cpu_to_le16(1);
- ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
- ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
+ ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
+ ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
- ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
+ ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
- ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
+ ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
+ ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
ct_pkt->vp_index = vha->vp_idx;
@@ -117,7 +117,7 @@ qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
return &p->p.req;
}
-static int
+int
qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
struct ct_sns_rsp *ct_rsp, const char *routine)
{
@@ -183,14 +183,21 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_ga_nxt(vha, fcport);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GA_NXT_REQ_SIZE;
+ arg.rsp_size = GA_NXT_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue GA_NXT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GA_NXT_REQ_SIZE,
- GA_NXT_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
@@ -269,16 +276,24 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_gid_pt_data *gid_data;
struct qla_hw_data *ha = vha->hw;
uint16_t gid_pt_rsp_size;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gid_pt(vha, list);
gid_data = NULL;
gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
+
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GID_PT_REQ_SIZE;
+ arg.rsp_size = gid_pt_rsp_size;
+ arg.nport_handle = NPH_SNS;
+
/* Issue GID_PT */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
- gid_pt_rsp_size);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
@@ -344,15 +359,22 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gpn_id(vha, list);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GPN_ID_REQ_SIZE;
+ arg.rsp_size = GPN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
- GPN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
@@ -406,15 +428,22 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_gnn_id(vha, list);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GNN_ID_REQ_SIZE;
+ arg.rsp_size = GNN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
- GNN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
@@ -473,14 +502,21 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rft_id(vha);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = RFT_ID_REQ_SIZE;
+ arg.rsp_size = RFT_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RFT_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFT_ID_REQ_SIZE,
- RFT_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFT_ID_CMD,
@@ -526,6 +562,7 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2046,
@@ -533,10 +570,16 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = RFF_ID_REQ_SIZE;
+ arg.rsp_size = RFF_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RFF_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RFF_ID_REQ_SIZE,
- RFF_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RFF_ID_CMD,
@@ -584,14 +627,21 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return qla2x00_sns_rnn_id(vha);
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = RNN_ID_REQ_SIZE;
+ arg.rsp_size = RNN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RNN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, RNN_ID_REQ_SIZE,
- RNN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
@@ -651,6 +701,7 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
ql_dbg(ql_dbg_disc, vha, 0x2050,
@@ -658,10 +709,17 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
return (QLA_SUCCESS);
}
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = 0;
+ arg.rsp_size = RSNN_NN_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Issue RSNN_NN */
/* Prepare common MS IOCB */
/* Request size adjusted after CT preparation */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, 0, RSNN_NN_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, RSNN_NN_CMD,
@@ -1103,7 +1161,7 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-static int
+int
qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
{
int ret, rval;
@@ -2425,15 +2483,22 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GFPN_ID_REQ_SIZE;
+ arg.rsp_size = GFPN_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
- GFPN_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
@@ -2471,36 +2536,6 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
return (rval);
}
-static inline void *
-qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
- uint32_t rsp_size)
-{
- struct ct_entry_24xx *ct_pkt;
- struct qla_hw_data *ha = vha->hw;
- ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
- memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
-
- ct_pkt->entry_type = CT_IOCB_TYPE;
- ct_pkt->entry_count = 1;
- ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
- ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
- ct_pkt->cmd_dsd_count = cpu_to_le16(1);
- ct_pkt->rsp_dsd_count = cpu_to_le16(1);
- ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
- ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
-
- ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
-
- ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
- ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
- ct_pkt->vp_index = vha->vp_idx;
-
- return ct_pkt;
-}
-
static inline struct ct_sns_req *
qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
@@ -2530,9 +2565,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
int rval;
uint16_t i;
struct qla_hw_data *ha = vha->hw;
- ms_iocb_entry_t *ms_pkt;
+ ms_iocb_entry_t *ms_pkt;
struct ct_sns_req *ct_req;
struct ct_sns_rsp *ct_rsp;
+ struct ct_arg arg;
if (!IS_IIDMA_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -2543,11 +2579,17 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
if (rval)
return rval;
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GPSC_REQ_SIZE;
+ arg.rsp_size = GPSC_RSP_SIZE;
+ arg.nport_handle = vha->mgmt_svr_loop_id;
+
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Issue GFPN_ID */
/* Prepare common MS IOCB */
- ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
- GPSC_RSP_SIZE);
+ ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
@@ -2641,6 +2683,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
struct ct_sns_rsp *ct_rsp;
struct qla_hw_data *ha = vha->hw;
uint8_t fcp_scsi_features = 0;
+ struct ct_arg arg;
for (i = 0; i < ha->max_fibre_devices; i++) {
/* Set default FC4 Type as UNKNOWN so the default is to
@@ -2651,9 +2694,15 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
if (!IS_FWI2_CAPABLE(ha))
continue;
+ arg.iocb = ha->ms_iocb;
+ arg.req_dma = ha->ct_sns_dma;
+ arg.rsp_dma = ha->ct_sns_dma;
+ arg.req_size = GFF_ID_REQ_SIZE;
+ arg.rsp_size = GFF_ID_RSP_SIZE;
+ arg.nport_handle = NPH_SNS;
+
/* Prepare common MS IOCB */
- ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFF_ID_REQ_SIZE,
- GFF_ID_RSP_SIZE);
+ ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
/* Prepare CT request */
ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
@@ -2692,3 +2741,538 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
break;
}
}
+
+/* GID_PN completion processing. */
+void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC login state %d \n",
+ __func__, fcport->port_name, fcport->fw_login_state);
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* PLOGI/PRLI/LOGO came in while cmd was out.*/
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ __func__, fcport->port_name, fcport->last_rscn_gen,
+ fcport->rscn_gen, fcport->last_login_gen, fcport->login_gen);
+ return;
+ }
+
+ if (!ea->rc) {
+ if (ea->sp->gen1 == fcport->rscn_gen) {
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+
+ if (fcport->d_id.b24 == ea->id.b24) {
+ /* cable plugged into the same place */
+ switch (vha->host->active_mode) {
+ case MODE_TARGET:
+ /* NOOP. let the other guy login to us.*/
+ break;
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ default:
+ if (atomic_read(&fcport->state) ==
+ FCS_ONLINE)
+ break;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gnl_work(vha, fcport);
+ break;
+ }
+ } else { /* fcport->d_id.b24 != ea->id.b24 */
+ fcport->d_id.b24 = ea->id.b24;
+ if (fcport->deleted == QLA_SESS_DELETED) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ }
+ }
+ } else { /* ea->sp->gen1 != fcport->rscn_gen */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ /* rscn came in while cmd was out */
+ qla24xx_post_gidpn_work(vha, fcport);
+ }
+ } else { /* ea->rc */
+ /* cable pulled */
+ if (ea->sp->gen1 == fcport->rscn_gen) {
+ if (ea->sp->gen2 == fcport->login_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n", __func__,
+ __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC login\n", __func__, __LINE__,
+ fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gidpn\n", __func__, __LINE__,
+ fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ }
+ }
+} /* gidpn_event */
+
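+/* GID_PN completion callback: hand the returned port ID to the fcport event handler as FCME_GIDPN_DONE. */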
+static void qla2x00_async_gidpn_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ fc_port_t *fcport = sp->fcport;
+ u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
+ struct event_arg ea;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ memset(&ea, 0, sizeof(ea));
+ ea.fcport = fcport;
+ ea.id.b.domain = id[0];
+ ea.id.b.area = id[1];
+ ea.id.b.al_pa = id[2];
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GIDPN_DONE;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x, WWPN %8phC ID %3phC \n",
+ sp->name, res, fcport->port_name, id);
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
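+/* Issue an asynchronous GID_PN (get port ID by WWPN) query for this fcport. */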
+int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_GID_PN;
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gidpn";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
+ GID_PN_RSP_SIZE);
+
+ /* GIDPN req */
+ memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
+ WWN_SIZE);
+
+ /* req & rsp use the same buffer */
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gidpn_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0x206f,
+ "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
+ sp->name, fcport->port_name,
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
+int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+ int ls;
+
+ ls = atomic_read(&vha->loop_state);
+ if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
+ test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
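+/* GPSC completion callback: record the reported port speed, or disable further GPSC queries if the switch does not support the command. */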
+static void qla24xx_async_gpsc_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ fc_port_t *fcport = sp->fcport;
+ struct ct_sns_rsp *ct_rsp;
+ struct event_arg ea;
+
+ ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x, WWPN %8phC \n",
+ sp->name, res, fcport->port_name);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ if (res == (DID_ERROR << 16)) {
+ /* entry status error */
+ goto done;
+ } else if (res) {
+ if ((ct_rsp->header.reason_code ==
+ CT_REASON_INVALID_COMMAND_CODE) ||
+ (ct_rsp->header.reason_code ==
+ CT_REASON_COMMAND_UNSUPPORTED)) {
+ ql_dbg(ql_dbg_disc, vha, 0x205a,
+ "GPSC command unsupported, disabling "
+ "query.\n");
+ ha->flags.gpsc_supported = 0;
+ res = QLA_SUCCESS;
+ }
+ } else {
+ switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
+ case BIT_15:
+ fcport->fp_speed = PORT_SPEED_1GB;
+ break;
+ case BIT_14:
+ fcport->fp_speed = PORT_SPEED_2GB;
+ break;
+ case BIT_13:
+ fcport->fp_speed = PORT_SPEED_4GB;
+ break;
+ case BIT_12:
+ fcport->fp_speed = PORT_SPEED_10GB;
+ break;
+ case BIT_11:
+ fcport->fp_speed = PORT_SPEED_8GB;
+ break;
+ case BIT_10:
+ fcport->fp_speed = PORT_SPEED_16GB;
+ break;
+ case BIT_8:
+ fcport->fp_speed = PORT_SPEED_32GB;
+ break;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
+ sp->name,
+ fcport->fabric_port_name,
+ be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
+ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+ }
+done:
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_GPSC_DONE;
+ ea.rc = res;
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ sp->free(sp);
+}
+
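+/* Issue an asynchronous GPSC (get port speed) query via the fabric management server. */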
+int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+
+ if (!vha->flags.online)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpsc";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ /* CT_IU preamble */
+ ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
+ GPSC_RSP_SIZE);
+
+ /* GPSC req */
+ memcpy(ct_req->req.gpsc.port_name, fcport->port_name,
+ WWN_SIZE);
+
+ sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
+ sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
+ sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla24xx_async_gpsc_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->name, fcport->port_name, sp->handle,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
+int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
+{
+ struct qla_work_evt *e;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.gpnid.id = *id;
+ return qla2x00_post_work(vha, e);
+}
+
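+/* Release the CT request/response DMA buffers and the SRB once GPN_ID processing has finished. */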
+void qla24xx_async_gpnid_done(scsi_qla_host_t *vha, srb_t *sp)
+{
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+}
+
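+/* GPN_ID event: tear down a relocated session or create a new one for an unknown WWPN. */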
+void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ /* cable moved; known WWPN just plugged in at a new port ID */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+
+ fcport->rscn_gen++;
+ fcport->d_id = ea->id;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ } else {
+ /* create new fcport */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post new sess\n",
+ __func__, __LINE__, ea->port_name);
+
+ qla24xx_post_newsess_work(vha, &ea->id, ea->port_name, NULL);
+ }
+}
+
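+/* GPN_ID completion callback: post FCME_GPNID_DONE, then defer buffer cleanup to a work item. */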
+static void qla2x00_async_gpnid_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct ct_sns_req *ct_req =
+ (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
+ struct ct_sns_rsp *ct_rsp =
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
+ struct event_arg ea;
+ struct qla_work_evt *e;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x ID %3phC. %8phC\n",
+ sp->name, res, ct_req->req.port_id.port_id,
+ ct_rsp->rsp.gpn_id.port_name);
+
+ memset(&ea, 0, sizeof(ea));
+ memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
+ ea.sp = sp;
+ ea.id.b.domain = ct_req->req.port_id.port_id[0];
+ ea.id.b.area = ct_req->req.port_id.port_id[1];
+ ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
+ ea.rc = res;
+ ea.event = FCME_GPNID_DONE;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPNID_DONE);
+ if (!e) {
+ /* Ignore the resulting kernel warning; freeing here avoids a memory leak. */
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+ return;
+ }
+
+ e->u.iosb.sp = sp;
+ qla2x00_post_work(vha, e);
+}
+
+/* Get WWPN with Nport ID. */
+int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ struct ct_sns_req *ct_req;
+ srb_t *sp;
+ struct ct_sns_pkt *ct_sns;
+
+ if (!vha->flags.online)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_CT_PTHRU_CMD;
+ sp->name = "gpnid";
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.req) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ goto done_free_sp;
+ }
+
+ sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
+ GFP_KERNEL);
+ if (!sp->u.iocb_cmd.u.ctarg.rsp) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ goto done_free_sp;
+ }
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
+ memset(ct_sns, 0, sizeof(*ct_sns));
+
+ ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
+ /* CT_IU preamble */
+ ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
+
+ /* GPN_ID req */
+ ct_req->req.port_id.port_id[0] = id->b.domain;
+ ct_req->req.port_id.port_id[1] = id->b.area;
+ ct_req->req.port_id.port_id[2] = id->b.al_pa;
+
+ sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
+ sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
+ sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
+
+ sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
+ sp->done = qla2x00_async_gpnid_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s hdl=%x ID %3phC.\n", sp->name,
+ sp->handle, ct_req->req.port_id.port_id);
+ return rval;
+
+done_free_sp:
+ if (sp->u.iocb_cmd.u.ctarg.req) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+
+ sp->free(sp);
+done:
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 265e1395bdb8..32fb9007f137 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -30,15 +30,15 @@ static int qla2x00_configure_hba(scsi_qla_host_t *);
static int qla2x00_configure_loop(scsi_qla_host_t *);
static int qla2x00_configure_local_loop(scsi_qla_host_t *);
static int qla2x00_configure_fabric(scsi_qla_host_t *);
-static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
-static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
- uint16_t *);
-
+static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
static int qla2x00_restart_isp(scsi_qla_host_t *);
static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
static int qla84xx_init_chip(scsi_qla_host_t *);
static int qla25xx_init_queues(struct qla_hw_data *);
+static int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *,
+ struct event_arg *);
/* SRB Extensions ---------------------------------------------------------- */
@@ -47,29 +47,27 @@ qla2x00_sp_timeout(unsigned long __data)
{
srb_t *sp = (srb_t *)__data;
struct srb_iocb *iocb;
- fc_port_t *fcport = sp->fcport;
- struct qla_hw_data *ha = fcport->vha->hw;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req;
unsigned long flags;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- req = ha->req_q_map[0];
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ req = vha->hw->req_q_map[0];
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- sp->free(fcport->vha, sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ sp->free(sp);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
void
-qla2x00_sp_free(void *data, void *ptr)
+qla2x00_sp_free(void *ptr)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *iocb = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -94,43 +92,72 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
return tmo;
}
-static void
+void
qla2x00_async_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
+ struct event_arg ea;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
- sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
+ sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
fcport->flags &= ~FCF_ASYNC_SENT;
- if (sp->type == SRB_LOGIN_CMD) {
- struct srb_iocb *lio = &sp->u.iocb_cmd;
- qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
+
+ switch (sp->type) {
+ case SRB_LOGIN_CMD:
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
- qla2x00_post_async_login_done_work(fcport->vha, fcport,
- lio->u.logio.data);
- } else if (sp->type == SRB_LOGOUT_CMD) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_PLOGI_DONE;
+ ea.fcport = sp->fcport;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.sp = sp;
+ qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+ break;
+ case SRB_LOGOUT_CMD:
qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
+ break;
+ case SRB_CT_PTHRU_CMD:
+ case SRB_MB_IOCB:
+ case SRB_NACK_PLOGI:
+ case SRB_NACK_PRLI:
+ case SRB_NACK_LOGO:
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ break;
}
}
static void
-qla2x00_async_login_sp_done(void *data, void *ptr, int res)
+qla2x00_async_login_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
+ struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct event_arg ea;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
- lio->u.logio.data);
- sp->free(sp->fcport->vha, sp);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ if (!test_bit(UNLOADING, &vha->dpc_flags)) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_PLOGI_DONE;
+ ea.fcport = sp->fcport;
+ ea.data[0] = lio->u.logio.data[0];
+ ea.data[1] = lio->u.logio.data[1];
+ ea.iop[0] = lio->u.logio.iop[0];
+ ea.iop[1] = lio->u.logio.iop[1];
+ ea.sp = sp;
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ sp->free(sp);
}
int
@@ -139,13 +166,23 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
{
srb_t *sp;
struct srb_iocb *lio;
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
+
+ if (!vha->flags.online)
+ goto done;
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+ goto done;
- rval = QLA_FUNCTION_FAILED;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->logout_completed = 0;
+
sp->type = SRB_LOGIN_CMD;
sp->name = "login";
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
@@ -165,29 +202,30 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
}
ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
- "retries=%d.\n", sp->handle, fcport->loop_id,
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
fcport->login_retry);
return rval;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(sp);
done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
static void
-qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
+qla2x00_async_logout_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
- if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
+ qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
- sp->free(sp->fcport->vha, sp);
+ sp->free(sp);
}
int
@@ -198,6 +236,7 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
int rval;
rval = QLA_FUNCTION_FAILED;
+ fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -214,28 +253,30 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->port_name);
return rval;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(sp);
done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
static void
-qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
+qla2x00_async_adisc_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
+ struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
if (!test_bit(UNLOADING, &vha->dpc_flags))
- qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
+ qla2x00_post_async_adisc_done_work(sp->vha, sp->fcport,
lio->u.logio.data);
- sp->free(sp->fcport->vha, sp);
+ sp->free(sp);
}
int
@@ -247,6 +288,7 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
int rval;
rval = QLA_FUNCTION_FAILED;
+ fcport->flags |= FCF_ASYNC_SENT;
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
@@ -271,15 +313,858 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
return rval;
done_free_sp:
- sp->free(fcport->vha, sp);
+ sp->free(sp);
done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
return rval;
}
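+/* Process a completed Get Name List: match this fcport against the firmware's port list and drive the next login step. */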
+static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ fc_port_t *fcport, *conflict_fcport;
+ struct get_name_list_extended *e;
+ u16 i, n, found = 0, loop_id;
+ port_id_t id;
+ u64 wwn;
+ u8 opt = 0;
+
+ fcport = ea->fcport;
+
+ if (ea->rc) { /* rval */
+ if (fcport->login_retry == 0) {
+ fcport->login_retry = vha->hw->login_retry_count;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "GNL failed Port login retry %8phN, retry cnt=%d.\n",
+ fcport->port_name, fcport->login_retry);
+ }
+ return;
+ }
+
+ if (fcport->last_rscn_gen != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC rscn gen changed rscn %d|%d \n",
+ __func__, fcport->port_name,
+ fcport->last_rscn_gen, fcport->rscn_gen);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ } else if (fcport->last_login_gen != fcport->login_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC login gen changed login %d|%d \n",
+ __func__, fcport->port_name,
+ fcport->last_login_gen, fcport->login_gen);
+ return;
+ }
+
+ n = ea->data[0] / sizeof(struct get_name_list_extended);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC n %d %02x%02x%02x lid %d \n",
+ __func__, __LINE__, fcport->port_name, n,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, fcport->loop_id);
+
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ wwn = wwn_to_u64(e->port_name);
+
+ if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
+ continue;
+
+ found = 1;
+ id.b.domain = e->port_id[2];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[0];
+ id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(e->nport_handle);
+ loop_id = (loop_id & 0x7fff);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
+ __func__, fcport->port_name,
+ e->current_login_state, fcport->fw_login_state,
+ id.b.domain, id.b.area, id.b.al_pa,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, loop_id, fcport->loop_id);
+
+ if ((id.b24 != fcport->d_id.b24) ||
+ ((fcport->loop_id != FC_NO_LOOP_ID) &&
+ (fcport->loop_id != loop_id))) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport, 1);
+ return;
+ }
+
+ fcport->loop_id = loop_id;
+
+ wwn = wwn_to_u64(fcport->port_name);
+ qlt_find_sess_invalidate_other(vha, wwn,
+ id, loop_id, &conflict_fcport);
+
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same loop_id &
+ * nport id. The conflicting fcport needs to finish
+ * cleanup before this fcport can proceed to login.
+ */
+ conflict_fcport->conflict = fcport;
+ fcport->login_pause = 1;
+ }
+
+ switch (e->current_login_state) {
+ case DSC_LS_PRLI_COMP:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
+ opt = PDO_FORCE_ADISC;
+ qla24xx_post_gpdb_work(vha, fcport, opt);
+ break;
+
+ case DSC_LS_PORT_UNAVAIL:
+ default:
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha, fcport);
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ }
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC \n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+ }
+ }
+
+ if (!found) {
+ /* fw has no record of this port */
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ qla2x00_find_new_loop_id(vha, fcport);
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ } else {
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ id.b.domain = e->port_id[0];
+ id.b.area = e->port_id[1];
+ id.b.al_pa = e->port_id[2];
+ id.b.rsvd_1 = 0;
+ loop_id = le16_to_cpu(e->nport_handle);
+
+ if (fcport->d_id.b24 == id.b24) {
+ conflict_fcport =
+ qla2x00_find_fcport_by_wwpn(vha,
+ e->port_name, 0);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport, 1);
+ }
+
+ if (fcport->loop_id == loop_id) {
+ /* FW already picked this loop id for another fcport */
+ qla2x00_find_new_loop_id(vha, fcport);
+ }
+ }
+ }
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
+} /* gnl_event */
+
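+/* GNL completion callback: mark reported loop IDs as in use and replay the event for every fcport waiting on this list. */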
+static void
+qla24xx_async_gnl_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ unsigned long flags;
+ struct fc_port *fcport = NULL, *tf;
+ u16 i, n = 0, loop_id;
+ struct event_arg ea;
+ struct get_name_list_extended *e;
+ u64 wwn;
+ struct list_head h;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
+ sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
+ sp->u.iocb_cmd.u.mbx.in_mb[2]);
+
+ memset(&ea, 0, sizeof(ea));
+ ea.sp = sp;
+ ea.rc = res;
+ ea.event = FCME_GNL_DONE;
+
+ if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
+ sizeof(struct get_name_list_extended)) {
+ n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
+ sizeof(struct get_name_list_extended);
+ ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amount transferred */
+ }
+
+ for (i = 0; i < n; i++) {
+ e = &vha->gnl.l[i];
+ loop_id = le16_to_cpu(e->nport_handle);
+ /* mask out reserved bit */
+ loop_id = (loop_id & 0x7fff);
+ set_bit(loop_id, vha->hw->loop_id_map);
+ wwn = wwn_to_u64(e->port_name);
+
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
+ "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
+ __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
+ e->port_id[0], e->current_login_state, e->last_login_state,
+ (loop_id & 0x7fff));
+ }
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ vha->gnl.sent = 0;
+
+ INIT_LIST_HEAD(&h);
+ fcport = tf = NULL;
+ if (!list_empty(&vha->gnl.fcports))
+ list_splice_init(&vha->gnl.fcports, &h);
+
+ list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
+ list_del_init(&fcport->gnl_entry);
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ ea.fcport = fcport;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
+
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ sp->free(sp);
+}
+
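+/* Issue an asynchronous Get Port/Node Name List mailbox command; requests made while one is outstanding just queue the fcport. */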
+int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ srb_t *sp;
+ struct srb_iocb *mbx;
+ int rval = QLA_FUNCTION_FAILED;
+ unsigned long flags;
+ u16 *mb;
+
+ if (!vha->flags.online)
+ goto done;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-gnlist WWPN %8phC \n", fcport->port_name);
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_GNL;
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->last_login_gen = fcport->login_gen;
+
+ list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
+ if (vha->gnl.sent) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ rval = QLA_SUCCESS;
+ goto done;
+ }
+ vha->gnl.sent = 1;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+ sp->type = SRB_MB_IOCB;
+ sp->name = "gnlist";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+
+ mb = sp->u.iocb_cmd.u.mbx.out_mb;
+ mb[0] = MBC_PORT_NODE_NAME_LIST;
+ mb[1] = BIT_2 | BIT_3;
+ mb[2] = MSW(vha->gnl.ldma);
+ mb[3] = LSW(vha->gnl.ldma);
+ mb[6] = MSW(MSD(vha->gnl.ldma));
+ mb[7] = LSW(MSD(vha->gnl.ldma));
+ mb[8] = vha->gnl.size;
+ mb[9] = vha->vp_idx;
+
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
+
+ sp->done = qla24xx_async_gnl_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s - OUT WWPN %8phC hndl %x\n",
+ sp->name, fcport->port_name, sp->handle);
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
+int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GNL);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
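+/* GPDB completion callback: copy the returned port database into the fcport and post FCME_GPDB_DONE. */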
+static
+void qla24xx_async_gpdb_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+ struct scsi_qla_host *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint64_t zero = 0;
+ struct port_database_24xx *pd;
+ fc_port_t *fcport = sp->fcport;
+ u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
+ int rval = QLA_SUCCESS;
+ struct event_arg ea;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
+ sp->name, res, fcport->port_name, mb[1], mb[2]);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ if (res) {
+ rval = res;
+ goto gpd_error_out;
+ }
+
+ pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
+
+ /* Check for logged in state. */
+ if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+ pd->last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd->current_login_state,
+ pd->last_login_state, fcport->loop_id);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device midway. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd->port_id[0];
+ fcport->d_id.b.area = pd->port_id[1];
+ fcport->d_id.b.al_pa = pd->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not target must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd->prli_svc_param_word_3[0] & BIT_7) {
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+ fcport->conf_compl_supported = 1;
+ }
+
+gpd_error_out:
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_GPDB_DONE;
+ ea.rc = rval;
+ ea.fcport = fcport;
+ ea.sp = sp;
+
+ qla2x00_fcport_event_handler(vha, &ea);
+
+ dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
+ sp->u.iocb_cmd.u.mbx.in_dma);
+
+ sp->free(sp);
+}
+
+static int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport,
+ u8 opt)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_GPDB);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ e->u.fcport.opt = opt;
+ return qla2x00_post_work(vha, e);
+}
+
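+/* Issue an asynchronous Get Port Database mailbox command for this fcport. */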
+int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+ srb_t *sp;
+ struct srb_iocb *mbx;
+ int rval = QLA_FUNCTION_FAILED;
+ u16 *mb;
+ dma_addr_t pd_dma;
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->flags.online)
+ goto done;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ fcport->disc_state = DSC_GPDB;
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ if (pd == NULL) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate port database structure.\n");
+ goto done_free_sp;
+ }
+ memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+ sp->type = SRB_MB_IOCB;
+ sp->name = "gpdb";
+ sp->gen1 = fcport->rscn_gen;
+ sp->gen2 = fcport->login_gen;
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ mb = sp->u.iocb_cmd.u.mbx.out_mb;
+ mb[0] = MBC_GET_PORT_DATABASE;
+ mb[1] = fcport->loop_id;
+ mb[2] = MSW(pd_dma);
+ mb[3] = LSW(pd_dma);
+ mb[6] = MSW(MSD(pd_dma));
+ mb[7] = LSW(MSD(pd_dma));
+ mb[9] = vha->vp_idx;
+ mb[10] = opt;
+
+ mbx = &sp->u.iocb_cmd;
+ mbx->timeout = qla2x00_async_iocb_timeout;
+ mbx->u.mbx.in = (void *)pd;
+ mbx->u.mbx.in_dma = pd_dma;
+
+ sp->done = qla24xx_async_gpdb_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s %8phC hndl %x opt %x\n",
+ sp->name, fcport->port_name, sp->handle, opt);
+
+ return rval;
+
+done_free_sp:
+ if (pd)
+ dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ qla24xx_post_gpdb_work(vha, fcport, opt);
+ return rval;
+}
+
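+/* GPDB event: revalidate generations, then mark the session logged in and post GPSC or fcport-update work; delete the session on failure. */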
+static
+void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ int rval = ea->rc;
+ fc_port_t *fcport = ea->fcport;
+ unsigned long flags;
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d rval %d\n", __func__, fcport->port_name,
+ fcport->disc_state, fcport->fw_login_state, rval);
+
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC generation changed rscn %d|%d login %d|%d \n",
+ __func__, fcport->port_name, fcport->last_rscn_gen,
+ fcport->rscn_gen, fcport->last_login_gen,
+ fcport->login_gen);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_post_gidpn_work(vha, fcport);
+ return;
+ }
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post del sess\n",
+ __func__, __LINE__, fcport->port_name);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ return;
+ }
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ ea->fcport->login_gen++;
+ ea->fcport->deleted = 0;
+ ea->fcport->logout_on_delete = 1;
+
+ if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
+ vha->fcport_count++;
+ ea->fcport->login_succ = 1;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw) ||
+ !vha->hw->flags.gpsc_supported) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_upd_fcport_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_gpsc_work(vha, fcport);
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+} /* gpdb event */
+
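+/* Drive the next login step for an fcport based on its discovery state. */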
+int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ if (fcport->login_retry == 0)
+ return 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ return 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d|%d retry %d lid %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->login_pause, fcport->flags,
+ fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
+ fcport->last_login_gen, fcport->login_gen, fcport->login_retry,
+ fcport->loop_id);
+
+ fcport->login_retry--;
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+ return 0;
+
+ /* For pure target mode, login will not be initiated. */
+ if (vha->host->active_mode == MODE_TARGET)
+ return 0;
+
+ if (fcport->flags & FCF_ASYNC_SENT) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return 0;
+ }
+
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gnl\n",
+ __func__, __LINE__, fcport->port_name);
+ qla24xx_async_gnl(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post login\n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+ break;
+
+ case DSC_GNL:
+ if (fcport->login_pause) {
+ fcport->last_rscn_gen = fcport->rscn_gen;
+ fcport->last_login_gen = fcport->login_gen;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
+ if (fcport->flags & FCF_FCP2_DEVICE) {
+ u8 opt = PDO_FORCE_ADISC;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, fcport->port_name);
+
+ fcport->disc_state = DSC_GPDB;
+ qla24xx_post_gpdb_work(vha, fcport, opt);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post login \n",
+ __func__, __LINE__, fcport->port_name);
+ fcport->disc_state = DSC_LOGIN_PEND;
+ qla2x00_post_async_login_work(vha, fcport, NULL);
+ }
+
+ break;
+
+ case DSC_LOGIN_FAILED:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gidpn \n",
+ __func__, __LINE__, fcport->port_name);
+
+ qla24xx_post_gidpn_work(vha, fcport);
+ break;
+
+ case DSC_LOGIN_COMPLETE:
+ /* recheck login state */
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb \n",
+ __func__, __LINE__, fcport->port_name);
+
+ qla24xx_post_gpdb_work(vha, fcport, PDO_FORCE_ADISC);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static
+void qla24xx_handle_rscn_event(fc_port_t *fcport, struct event_arg *ea)
+{
+ fcport->rscn_gen++;
+
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC DS %d LS %d\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state);
+
+ if (fcport->flags & FCF_ASYNC_SENT)
+ return;
+
+ switch (fcport->disc_state) {
+ case DSC_DELETED:
+ case DSC_LOGIN_COMPLETE:
+ qla24xx_post_gidpn_work(fcport->vha, fcport);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
+ u8 *port_name, void *pla)
+{
+ struct qla_work_evt *e;
+ e = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.new_sess.id = *id;
+ e->u.new_sess.pla = pla;
+ memcpy(e->u.new_sess.port_name, port_name, WWN_SIZE);
+
+ return qla2x00_post_work(vha, e);
+}
+
+static
+int qla24xx_handle_delete_done_event(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return 0;
+
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ break;
+
+ case MODE_TARGET:
+ default:
+ /* no-op */
+ break;
+ }
+
+ return 0;
+}
+
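+/* Relogin event: re-drive discovery/login for an fcport, deferring if a login is already in flight. */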
+static
+void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
+ struct event_arg *ea)
+{
+ fc_port_t *fcport = ea->fcport;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND) {
+ fcport->login_retry++;
+ return;
+ }
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
+ __func__, fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state, fcport->login_pause,
+ fcport->deleted, fcport->conflict,
+ fcport->last_rscn_gen, fcport->rscn_gen,
+ fcport->last_login_gen, fcport->login_gen,
+ fcport->flags);
+
+ if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
+ (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
+ (fcport->fw_login_state == DSC_LS_PRLI_PEND))
+ return;
+
+ if (fcport->flags & FCF_ASYNC_SENT) {
+ fcport->login_retry++;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND) {
+ fcport->login_retry++;
+ return;
+ }
+
+ if (fcport->last_rscn_gen != fcport->rscn_gen) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC post gidpn\n",
+ __func__, __LINE__, fcport->port_name);
+
+ qla24xx_async_gidpn(vha, fcport);
+ return;
+ }
+
+ qla24xx_fcport_handle_login(vha, fcport);
+}
+
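+/* Central dispatcher for fcport discovery events (RSCN, GID_PN, GNL, PLOGI, GPDB, GPN_ID, ...). */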
+void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
+{
+ fc_port_t *fcport, *f, *tf;
+ uint32_t id = 0, mask, rid;
+ int rc;
+
+ switch (ea->event) {
+ case FCME_RELOGIN:
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return;
+
+ qla24xx_handle_relogin_event(vha, ea);
+ break;
+ case FCME_RSCN:
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return;
+ switch (ea->id.b.rsvd_1) {
+ case RSCN_PORT_ADDR:
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (!fcport) {
+ /* cable moved */
+ rc = qla24xx_post_gpnid_work(vha, &ea->id);
+ if (rc) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "RSCN GPNID work failed %02x%02x%02x\n",
+ ea->id.b.domain, ea->id.b.area,
+ ea->id.b.al_pa);
+ }
+ } else {
+ ea->fcport = fcport;
+ qla24xx_handle_rscn_event(fcport, ea);
+ }
+ break;
+ case RSCN_AREA_ADDR:
+ case RSCN_DOM_ADDR:
+ if (ea->id.b.rsvd_1 == RSCN_AREA_ADDR) {
+ mask = 0xffff00;
+ ql_log(ql_dbg_async, vha, 0xffff,
+ "RSCN: Area 0x%06x was affected\n",
+ ea->id.b24);
+ } else {
+ mask = 0xff0000;
+ ql_log(ql_dbg_async, vha, 0xffff,
+ "RSCN: Domain 0x%06x was affected\n",
+ ea->id.b24);
+ }
+
+ rid = ea->id.b24 & mask;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports,
+ list) {
+ id = f->d_id.b24 & mask;
+ if (rid == id) {
+ ea->fcport = f;
+ qla24xx_handle_rscn_event(f, ea);
+ }
+ }
+ break;
+ case RSCN_FAB_ADDR:
+ default:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "RSCN: Fabric was affected. Addr format %d\n",
+ ea->id.b.rsvd_1);
+ qla2x00_mark_all_devices_lost(vha, 1);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ }
+ break;
+ case FCME_GIDPN_DONE:
+ qla24xx_handle_gidpn_event(vha, ea);
+ break;
+ case FCME_GNL_DONE:
+ qla24xx_handle_gnl_done_event(vha, ea);
+ break;
+ case FCME_GPSC_DONE:
+ qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ break;
+ case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */
+ qla24xx_handle_plogi_done_event(vha, ea);
+ break;
+ case FCME_GPDB_DONE:
+ qla24xx_handle_gpdb_event(vha, ea);
+ break;
+ case FCME_GPNID_DONE:
+ qla24xx_handle_gpnid_event(vha, ea);
+ break;
+ case FCME_DELETE_DONE:
+ qla24xx_handle_delete_done_event(vha, ea);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+}
+
static void
qla2x00_tmf_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *tmf = &sp->u.iocb_cmd;
tmf->u.tmf.comp_status = CS_TIMEOUT;
@@ -287,10 +1172,11 @@ qla2x00_tmf_iocb_timeout(void *data)
}
static void
-qla2x00_tmf_sp_done(void *data, void *ptr, int res)
+qla2x00_tmf_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *tmf = &sp->u.iocb_cmd;
+
complete(&tmf->u.tmf.comp);
}
@@ -348,7 +1234,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
}
done_free_sp:
- sp->free(vha, sp);
+ sp->free(sp);
done:
return rval;
}
@@ -356,7 +1242,7 @@ done:
static void
qla24xx_abort_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = CS_TIMEOUT;
@@ -364,9 +1250,9 @@ qla24xx_abort_iocb_timeout(void *data)
}
static void
-qla24xx_abort_sp_done(void *data, void *ptr, int res)
+qla24xx_abort_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *abt = &sp->u.iocb_cmd;
complete(&abt->u.abt.comp);
@@ -375,7 +1261,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
static int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
- scsi_qla_host_t *vha = cmd_sp->fcport->vha;
+ scsi_qla_host_t *vha = cmd_sp->vha;
fc_port_t *fcport = cmd_sp->fcport;
struct srb_iocb *abt_iocb;
srb_t *sp;
@@ -408,7 +1294,7 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp)
QLA_SUCCESS : QLA_FUNCTION_FAILED;
done_free_sp:
- sp->free(vha, sp);
+ sp->free(sp);
done:
return rval;
}
@@ -441,59 +1327,65 @@ qla24xx_async_abort_command(srb_t *sp)
return qla24xx_async_abort_cmd(sp);
}
-void
-qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- uint16_t *data)
+static void
+qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
- int rval;
+ port_id_t cid; /* conflict Nport id */
- switch (data[0]) {
+ switch (ea->data[0]) {
case MBS_COMMAND_COMPLETE:
/*
* Driver must validate login state - If PRLI not complete,
* force a relogin attempt via implicit LOGO, PLOGI, and PRLI
* requests.
*/
- rval = qla2x00_get_port_database(vha, fcport, 0);
- if (rval == QLA_NOT_LOGGED_IN) {
- fcport->flags &= ~FCF_ASYNC_SENT;
- fcport->flags |= FCF_LOGIN_NEEDED;
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- break;
- }
-
- if (rval != QLA_SUCCESS) {
- qla2x00_post_async_logout_work(vha, fcport, NULL);
- qla2x00_post_async_login_work(vha, fcport, NULL);
- break;
- }
- if (fcport->flags & FCF_FCP2_DEVICE) {
- qla2x00_post_async_adisc_work(vha, fcport, data);
- break;
- }
- qla2x00_update_fcport(vha, fcport);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpdb\n",
+ __func__, __LINE__, ea->fcport->port_name);
+ ea->fcport->chip_reset = vha->hw->chip_reset;
+ ea->fcport->logout_on_delete = 1;
+ qla24xx_post_gpdb_work(vha, ea->fcport, 0);
break;
case MBS_COMMAND_ERROR:
- fcport->flags &= ~FCF_ASYNC_SENT;
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC cmd error %x\n",
+ __func__, __LINE__, ea->fcport->port_name, ea->data[1]);
+
+ ea->fcport->flags &= ~FCF_ASYNC_SENT;
+ ea->fcport->disc_state = DSC_LOGIN_FAILED;
+ if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
else
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- break;
- case MBS_PORT_ID_USED:
- fcport->loop_id = data[1];
- qla2x00_post_async_logout_work(vha, fcport, NULL);
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
break;
case MBS_LOOP_ID_USED:
- fcport->loop_id++;
- rval = qla2x00_find_new_loop_id(vha, fcport);
- if (rval != QLA_SUCCESS) {
- fcport->flags &= ~FCF_ASYNC_SENT;
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- break;
+ /* data[1] = IO PARAM 1 = nport ID */
+ cid.b.domain = (ea->iop[1] >> 16) & 0xff;
+ cid.b.area = (ea->iop[1] >> 8) & 0xff;
+ cid.b.al_pa = ea->iop[1] & 0xff;
+ cid.b.rsvd_1 = 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC LoopID 0x%x in use post gnl\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->loop_id);
+
+ if (IS_SW_RESV_ADDR(cid)) {
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
+ } else {
+ qla2x00_clear_loop_id(ea->fcport);
}
- qla2x00_post_async_login_work(vha, fcport, NULL);
+ qla24xx_post_gnl_work(vha, ea->fcport);
+ break;
+ case MBS_PORT_ID_USED:
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
+ __func__, __LINE__, ea->fcport->port_name,
+ ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
+ ea->fcport->d_id.b.al_pa);
+
+ qla2x00_clear_loop_id(ea->fcport);
+ qla24xx_post_gidpn_work(vha, ea->fcport);
break;
}
return;
@@ -503,10 +1395,9 @@ void
qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
uint16_t *data)
{
- /* Don't re-login in target mode */
- if (!fcport->tgt_session)
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
+ qla2x00_mark_device_lost(vha, fcport, 1, 0);
qlt_logo_completion_handler(fcport, data[0]);
+ fcport->login_gen++;
return;
}
@@ -709,7 +1600,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
}
- if (qla_ini_mode_enabled(vha))
+ if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
@@ -2088,6 +2979,21 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
__func__, ha->fw_options[2]);
}
+ /* Move PUREX, ABTS RX & RIDA to ATIOQ */
+ if (ql2xmvasynctoatio) {
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ ha->fw_options[2] |= BIT_11;
+ else
+ ha->fw_options[2] &= ~BIT_11;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0xffff,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
+ qla2x00_set_fw_options(vha, ha->fw_options);
+
/* Update Serial Link options. */
if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
return;
@@ -2968,8 +3874,14 @@ qla2x00_rport_del(void *data)
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport)
+ if (rport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phN. rport %p roles %x \n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
+
fc_remote_port_delete(rport);
+ }
}
/**
@@ -2995,9 +3907,42 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
+ fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
+ flags);
+ fcport->disc_state = DSC_DELETED;
+ fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ fcport->deleted = QLA_SESS_DELETED;
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->login_retry = 5;
+ fcport->logout_on_delete = 1;
+
+ if (!fcport->ct_desc.ct_sns) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate ct_sns request.\n");
+ kfree(fcport);
+ fcport = NULL;
+ }
+ INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_LIST_HEAD(&fcport->gnl_entry);
+ INIT_LIST_HEAD(&fcport->list);
+
return fcport;
}
+void
+qla2x00_free_fcport(fc_port_t *fcport)
+{
+ if (fcport->ct_desc.ct_sns) {
+ dma_free_coherent(&fcport->vha->hw->pdev->dev,
+ sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
+ fcport->ct_desc.ct_sns_dma);
+
+ fcport->ct_desc.ct_sns = NULL;
+ }
+ kfree(fcport);
+}
+
/*
* qla2x00_configure_loop
* Updates Fibre Channel Device Database with what is actually on loop.
@@ -3055,10 +4000,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
} else if (ha->current_topology == ISP_CFG_N) {
clear_bit(RSCN_UPDATE, &flags);
-
+ } else if (ha->current_topology == ISP_CFG_NL) {
+ clear_bit(RSCN_UPDATE, &flags);
+ set_bit(LOCAL_LOOP_UPDATE, &flags);
} else if (!vha->flags.online ||
(test_bit(ABORT_ISP_ACTIVE, &flags))) {
-
set_bit(RSCN_UPDATE, &flags);
set_bit(LOCAL_LOOP_UPDATE, &flags);
}
@@ -3095,7 +4041,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
* Process any ATIO queue entries that came in
* while we weren't online.
*/
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
spin_lock_irqsave(&ha->tgt.atio_lock,
flags);
@@ -3159,6 +4106,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
uint16_t loop_id;
uint8_t domain, area, al_pa;
struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
found_devs = 0;
new_fcport = NULL;
@@ -3199,7 +4147,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
"Marking port lost loop_id=0x%04x.\n",
fcport->loop_id);
- qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
+ qla2x00_mark_device_lost(vha, fcport, 0, 0);
}
}
@@ -3230,13 +4178,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (loop_id > LAST_LOCAL_LOOP_ID)
continue;
- memset(new_fcport, 0, sizeof(fc_port_t));
+ memset(new_fcport->port_name, 0, WWN_SIZE);
/* Fill in member data. */
new_fcport->d_id.b.domain = domain;
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
+
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -3249,6 +4198,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
continue;
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* Check for matching device in port list. */
found = 0;
fcport = NULL;
@@ -3264,6 +4214,12 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
memcpy(fcport->node_name, new_fcport->node_name,
WWN_SIZE);
+ if (!fcport->login_succ) {
+ vha->fcport_count++;
+ fcport->login_succ = 1;
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ }
+
found++;
break;
}
@@ -3274,16 +4230,28 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
/* Allocate a new replacement fcport. */
fcport = new_fcport;
+ if (!fcport->login_succ) {
+ vha->fcport_count++;
+ fcport->login_succ = 1;
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ }
+
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+
if (new_fcport == NULL) {
ql_log(ql_log_warn, vha, 0x201c,
"Failed to allocate memory for fcport.\n");
rval = QLA_MEMORY_ALLOC_FAILED;
goto cleanup_allocation;
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
new_fcport->flags &= ~FCF_FABRIC_DEVICE;
}
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
/* Base iIDMA settings on HBA port speed. */
fcport->fp_speed = ha->link_data_rate;
@@ -3334,6 +4302,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
}
}
+/* qla2x00_reg_remote_port() is reserved for initiator mode only. */
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
@@ -3352,12 +4321,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port.\n");
return;
}
- /*
- * Create target mode FC NEXUS in qla_target.c if target mode is
- * enabled..
- */
-
- qlt_fc_port_added(vha, fcport);
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
@@ -3370,6 +4333,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (fcport->port_type == FCT_TARGET)
rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phN. rport %p is %s mode \n",
+ __func__, fcport->port_name, rport,
+ (fcport->port_type == FCT_TARGET) ? "tgt" : "ini");
+
fc_remote_port_rolechg(rport, rport_ids.roles);
}
@@ -3393,25 +4362,44 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
fcport->vha = vha;
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC \n",
+ __func__, fcport->port_name);
+
if (IS_QLAFX00(vha->hw)) {
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
goto reg_port;
}
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
reg_port:
- if (qla_ini_mode_enabled(vha))
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
qla2x00_reg_remote_port(vha, fcport);
- else {
- /*
- * Create target mode FC NEXUS in qla_target.c
- */
- qlt_fc_port_added(vha, fcport);
+ break;
+ case MODE_TARGET:
+ if (!vha->vha_tgt.qla_tgt->tgt_stop &&
+ !vha->vha_tgt.qla_tgt->tgt_stopped)
+ qlt_fc_port_added(vha, fcport);
+ break;
+ case MODE_DUAL:
+ qla2x00_reg_remote_port(vha, fcport);
+ if (!vha->vha_tgt.qla_tgt->tgt_stop &&
+ !vha->vha_tgt.qla_tgt->tgt_stopped)
+ qlt_fc_port_added(vha, fcport);
+ break;
+ default:
+ break;
}
}
@@ -3430,13 +4418,11 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport, *fcptemp;
- uint16_t next_loopid;
+ fc_port_t *fcport;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
LIST_HEAD(new_fcports);
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int discovery_gen;
/* If FL port exists, then SNS is present */
@@ -3454,7 +4440,19 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
vha->device_flags |= SWITCH_FOUND;
+
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
+ rval = qla2x00_send_change_request(vha, 0x3, 0);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x121,
+ "Failed to enable receiving of RSCN requests: 0x%x.\n",
+ rval);
+ }
+
+
do {
+ qla2x00_mgmt_svr_login(vha);
+
/* FDMI support. */
if (ql2xfdmienable &&
test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
@@ -3501,9 +4499,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
-#define QLA_FCPORT_SCAN 1
-#define QLA_FCPORT_FOUND 2
-
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = QLA_FCPORT_SCAN;
}
@@ -3516,174 +4511,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* will be newer than discovery_gen. */
qlt_do_generation_tick(vha, &discovery_gen);
- rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
+ rval = qla2x00_find_all_fabric_devs(vha);
if (rval != QLA_SUCCESS)
break;
-
- /*
- * Logout all previous fabric devices marked lost, except
- * FCP2 devices.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
- continue;
-
- if (fcport->scan_state == QLA_FCPORT_SCAN) {
- if (qla_ini_mode_enabled(base_vha) &&
- atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
- fcport->port_type != FCT_INITIATOR &&
- fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(vha,
- fcport->loop_id,
- fcport->d_id.b.domain,
- fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(fcport);
- }
- } else if (!qla_ini_mode_enabled(base_vha)) {
- /*
- * In target mode, explicitly kill
- * sessions and log out of devices
- * that are gone, so that we don't
- * end up with an initiator using the
- * wrong ACL (if the fabric recycles
- * an FC address and we have a stale
- * session around) and so that we don't
- * report initiators that are no longer
- * on the fabric.
- */
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
- "port gone, logging out/killing session: "
- "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
- "scan_state %d\n",
- fcport->port_name,
- atomic_read(&fcport->state),
- fcport->flags, fcport->fc4_type,
- fcport->scan_state);
- qlt_fc_port_deleted(vha, fcport,
- discovery_gen);
- }
- }
- }
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
- /*
- * Scan through our port list and login entries that need to be
- * logged in.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->flags & FCF_LOGIN_NEEDED) == 0)
- continue;
-
- /*
- * If we're not an initiator, skip looking for devices
- * and logging in. There's no reason for us to do it,
- * and it seems to actively cause problems in target
- * mode if we race with the initiator logging into us
- * (we might get the "port ID used" status back from
- * our login command and log out the initiator, which
- * seems to cause havoc).
- */
- if (!qla_ini_mode_enabled(base_vha)) {
- if (fcport->scan_state == QLA_FCPORT_FOUND) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
- "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
- "scan_state %d (initiator mode disabled; skipping "
- "login)\n", fcport->port_name,
- atomic_read(&fcport->state),
- fcport->flags, fcport->fc4_type,
- fcport->scan_state);
- }
- continue;
- }
-
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
- }
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
- }
-
- /* Exit if out of loop IDs. */
- if (rval != QLA_SUCCESS) {
- break;
- }
-
- /*
- * Login and add the new devices to our port list.
- */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- /*
- * If we're not an initiator, skip looking for devices
- * and logging in. There's no reason for us to do it,
- * and it seems to actively cause problems in target
- * mode if we race with the initiator logging into us
- * (we might get the "port ID used" status back from
- * our login command and log out the initiator, which
- * seems to cause havoc).
- */
- if (qla_ini_mode_enabled(base_vha)) {
- /* Find a new loop ID to use. */
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(base_vha,
- fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
-
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport,
- &next_loopid);
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
- "new port %8phC state 0x%x flags 0x%x fc4_type "
- "0x%x scan_state %d (initiator mode disabled; "
- "skipping login)\n",
- fcport->port_name,
- atomic_read(&fcport->state),
- fcport->flags, fcport->fc4_type,
- fcport->scan_state);
- }
-
- list_move_tail(&fcport->list, &vha->vp_fcports);
- }
} while (0);
- /* Free all new device structures not processed. */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- list_del(&fcport->list);
- kfree(fcport);
- }
-
- if (rval) {
+ if (rval)
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
- }
return (rval);
}
@@ -3702,12 +4537,11 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
* Kernel context.
*/
static int
-qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
- struct list_head *new_fcports)
+qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
{
int rval;
uint16_t loop_id;
- fc_port_t *fcport, *new_fcport, *fcptemp;
+ fc_port_t *fcport, *new_fcport;
int found;
sw_info_t *swl;
@@ -3716,6 +4550,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
+ unsigned long flags;
rval = QLA_SUCCESS;
@@ -3736,9 +4571,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
swl = NULL;
} else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
swl = NULL;
- } else if (ql2xiidmaenable &&
- qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
- qla2x00_gpsc(vha, swl);
+ } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
+ swl = NULL;
}
/* If other queries succeeded probe for FC-4 type */
@@ -3800,11 +4634,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
ql_log(ql_log_warn, vha, 0x2064,
"SNS scan failed -- assuming "
"zero-entry result.\n");
- list_for_each_entry_safe(fcport, fcptemp,
- new_fcports, list) {
- list_del(&fcport->list);
- kfree(fcport);
- }
rval = QLA_SUCCESS;
break;
}
@@ -3847,6 +4676,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
continue;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+
/* Locate matching device in database. */
found = 0;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -3869,7 +4700,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
*/
if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
(atomic_read(&fcport->state) == FCS_ONLINE ||
- !qla_ini_mode_enabled(base_vha))) {
+ (vha->host->active_mode == MODE_TARGET))) {
break;
}
@@ -3889,7 +4720,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
* Log it out if still logged in and mark it for
* relogin later.
*/
- if (!qla_ini_mode_enabled(base_vha)) {
+ if (qla_tgt_mode_enabled(base_vha)) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
"port changed FC ID, %8phC"
" old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
@@ -3907,25 +4738,19 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
fcport->d_id.b24 = new_fcport->d_id.b24;
fcport->flags |= FCF_LOGIN_NEEDED;
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
- (fcport->flags & FCF_ASYNC_SENT) == 0 &&
- fcport->port_type != FCT_INITIATOR &&
- fcport->port_type != FCT_BROADCAST) {
- ha->isp_ops->fabric_logout(vha, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_clear_loop_id(fcport);
- }
-
break;
}
- if (found)
+ if (found) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
continue;
+ }
/* If device was not in our fcports list, then add it. */
new_fcport->scan_state = QLA_FCPORT_FOUND;
- list_add_tail(&new_fcport->list, new_fcports);
+ list_add_tail(&new_fcport->list, &vha->vp_fcports);
+
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
/* Allocate a new replacement fcport. */
nxt_d_id.b24 = new_fcport->d_id.b24;
@@ -3939,8 +4764,44 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
new_fcport->d_id.b24 = nxt_d_id.b24;
}
- kfree(new_fcport);
+ qla2x00_free_fcport(new_fcport);
+ /*
+ * Logout all previous fabric devices marked lost, except FCP2 devices.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->scan_state == QLA_FCPORT_SCAN) {
+ if ((qla_dual_mode_enabled(vha) ||
+ qla_ini_mode_enabled(vha)) &&
+ atomic_read(&fcport->state) == FCS_ONLINE) {
+ qla2x00_mark_device_lost(vha, fcport,
+ ql2xplogiabsentdevice, 0);
+ if (fcport->loop_id != FC_NO_LOOP_ID &&
+ (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_BROADCAST) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ fcport->port_name);
+
+ qlt_schedule_sess_for_deletion_lock
+ (fcport);
+ continue;
+ }
+ }
+ }
+
+ if (fcport->scan_state == QLA_FCPORT_FOUND)
+ qla24xx_fcport_handle_login(vha, fcport);
+ }
return (rval);
}
@@ -3992,64 +4853,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
return (rval);
}
-/*
- * qla2x00_fabric_dev_login
- * Login fabric target device and update FC port database.
- *
- * Input:
- * ha: adapter state pointer.
- * fcport: port structure list pointer.
- * next_loopid: contains value of a new loop ID that can be used
- * by the next login attempt.
- *
- * Returns:
- * qla2x00 local function return status code.
- *
- * Context:
- * Kernel context.
- */
-static int
-qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
- uint16_t *next_loopid)
-{
- int rval;
- uint8_t opts;
- struct qla_hw_data *ha = vha->hw;
-
- rval = QLA_SUCCESS;
-
- if (IS_ALOGIO_CAPABLE(ha)) {
- if (fcport->flags & FCF_ASYNC_SENT)
- return rval;
- fcport->flags |= FCF_ASYNC_SENT;
- rval = qla2x00_post_async_login_work(vha, fcport, NULL);
- if (!rval)
- return rval;
- }
-
- fcport->flags &= ~FCF_ASYNC_SENT;
- rval = qla2x00_fabric_login(vha, fcport, next_loopid);
- if (rval == QLA_SUCCESS) {
- /* Send an ADISC to FCP2 devices.*/
- opts = 0;
- if (fcport->flags & FCF_FCP2_DEVICE)
- opts |= BIT_1;
- rval = qla2x00_get_port_database(vha, fcport, opts);
- if (rval != QLA_SUCCESS) {
- ha->isp_ops->fabric_logout(vha, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- } else {
- qla2x00_update_fcport(vha, fcport);
- }
- } else {
- /* Retry Login. */
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
- }
-
- return (rval);
-}
/*
* qla2x00_fabric_login
@@ -4341,13 +5144,6 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
qla2x00_rport_del(fcport);
- /*
- * Release the target mode FC NEXUS in
- * qla_target.c, if target mod is enabled.
- */
- qlt_fc_port_deleted(vha, fcport,
- base_vha->total_fcport_update_gen);
-
spin_lock_irqsave(&ha->vport_slock, flags);
}
}
@@ -4730,6 +5526,8 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->chip_reset++;
+
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -4784,8 +5582,6 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* Requeue all commands in outstanding command list. */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
}
-
- ha->chip_reset++;
/* memory barrier */
wmb();
}
@@ -4981,7 +5777,6 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
if (!status) {
/* Issue a marker after FW becomes ready. */
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
-
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
}
@@ -5209,7 +6004,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
- if (!qla_ini_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha)) {
/* Don't enable full login after initial LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Don't enable LIP full login for initiator */
@@ -5400,6 +6195,7 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
for (chksum = 0; cnt--; wptr++)
chksum += le32_to_cpu(*wptr);
+
if (chksum) {
ql_dbg(ql_dbg_init, vha, 0x018c,
"Checksum validation failed for primary image (0x%x)\n",
@@ -6412,6 +7208,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
vha->flags.process_response_queue = 1;
}
+ /* enable RIDA Format2 */
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
+ icb->firmware_options_3 |= BIT_0;
+
if (rval) {
ql_log(ql_log_warn, vha, 0x0076,
"NVRAM configuration failed.\n");
@@ -6536,13 +7336,26 @@ qla81xx_update_fw_options(scsi_qla_host_t *vha)
__func__, ha->fw_options[2]);
}
- if (!ql2xetsenable)
- goto out;
+ /* Move PUREX, ABTS RX & RIDA to ATIOQ */
+ if (ql2xmvasynctoatio) {
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ ha->fw_options[2] |= BIT_11;
+ else
+ ha->fw_options[2] &= ~BIT_11;
+ }
+
+ if (ql2xetsenable) {
+ /* Enable ETS Burst. */
+ memset(ha->fw_options, 0, sizeof(ha->fw_options));
+ ha->fw_options[2] |= BIT_9;
+ }
+
+ ql_dbg(ql_dbg_init, vha, 0xffff,
+ "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
+ __func__, ha->fw_options[1], ha->fw_options[2],
+ ha->fw_options[3], vha->host->active_mode);
- /* Enable ETS Burst. */
- memset(ha->fw_options, 0, sizeof(ha->fw_options));
- ha->fw_options[2] |= BIT_9;
-out:
qla2x00_set_fw_options(vha, ha->fw_options);
}
@@ -6748,6 +7561,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, int v
memset(qpair, 0, sizeof(struct qla_qpair));
qpair->hw = vha->hw;
+ qpair->vha = vha;
/* Assign available que pair id */
mutex_lock(&ha->mq_lock);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 44e404583c86..66df6cec59da 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -166,8 +166,8 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
/* Don't print state transitions during initial allocation of fcport */
if (old_state && old_state != state) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
- "FCPort state transitioned from %s to %s - "
- "portid=%02x%02x%02x.\n",
+ "FCPort %8phC state transitioned from %s to %s - "
+ "portid=%02x%02x%02x.\n", fcport->port_name,
port_state_str[old_state], port_state_str[state],
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
@@ -232,6 +232,7 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
+ sp->vha = qpair->vha;
done:
if (!sp)
QLA_QPAIR_MARK_NOT_BUSY(qpair);
@@ -249,20 +250,20 @@ static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
- struct qla_hw_data *ha = vha->hw;
uint8_t bail;
QLA_VHA_MARK_BUSY(vha, bail);
if (unlikely(bail))
return NULL;
- sp = mempool_alloc(ha->srb_mempool, flag);
+ sp = mempool_alloc(vha->hw->srb_mempool, flag);
if (!sp)
goto done;
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
+ sp->vha = vha;
done:
if (!sp)
QLA_VHA_MARK_NOT_BUSY(vha);
@@ -270,10 +271,10 @@ done:
}
static inline void
-qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+qla2x00_rel_sp(srb_t *sp)
{
- mempool_free(sp, vha->hw->srb_mempool);
- QLA_VHA_MARK_NOT_BUSY(vha);
+ QLA_VHA_MARK_NOT_BUSY(sp->vha);
+ mempool_free(sp, sp->vha->hw->srb_mempool);
}
static inline void
@@ -285,8 +286,7 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
add_timer(&sp->u.iocb_cmd.timer);
sp->free = qla2x00_sp_free;
- if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
- (sp->type == SRB_FXIOCB_DCMD))
+ if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD))
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
if (sp->type == SRB_ELS_DCMD)
init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 58e49a3e1de8..535079280288 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -23,7 +23,7 @@ qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
cflags = 0;
@@ -210,7 +210,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Three DSDs are available in the Command Type 2 IOCB */
@@ -267,7 +267,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
return;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
/* Two DSDs are available in the Command Type 3 IOCB */
@@ -324,7 +324,7 @@ qla2x00_start_scsi(srb_t *sp)
struct rsp_que *rsp;
/* Setup device pointers. */
- vha = sp->fcport->vha;
+ vha = sp->vha;
ha = vha->hw;
reg = &ha->iobase->isp;
cmd = GET_CMD_SP(sp);
@@ -601,7 +601,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
return 0;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
ha = vha->hw;
/* Set transfer direction */
@@ -716,7 +716,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
return;
}
- vha = sp->fcport->vha;
+ vha = sp->vha;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1108,7 +1108,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
if (sp) {
cmd = GET_CMD_SP(sp);
sgl = scsi_prot_sglist(cmd);
- vha = sp->fcport->vha;
+ vha = sp->vha;
} else if (tc) {
vha = tc->vha;
sgl = tc->prot_sg;
@@ -1215,7 +1215,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
/* Update entry type to indicate Command Type CRC_2 IOCB */
*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
- vha = sp->fcport->vha;
+ vha = sp->vha;
ha = vha->hw;
/* No data transfer */
@@ -1225,7 +1225,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
}
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1415,7 +1415,7 @@ qla24xx_start_scsi(srb_t *sp)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
/* Setup device pointers. */
@@ -1492,7 +1492,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1564,7 +1564,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct cmd_type_crc_2 *cmd_pkt;
uint32_t status = 0;
@@ -2214,13 +2214,13 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vha->vp_idx;
+ logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = sp->vha->hw;
struct srb_iocb *lio = &sp->u.iocb_cmd;
uint16_t opts;
@@ -2238,7 +2238,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
static void
@@ -2247,20 +2247,20 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->tgt_session ||
- !sp->fcport->tgt_session->keep_nport_handle)
+ if (!sp->fcport->se_sess ||
+ !sp->fcport->keep_nport_handle)
logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vha->vp_idx;
+ logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = sp->vha->hw;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
@@ -2271,7 +2271,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
@@ -2281,13 +2281,13 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- logio->vp_index = sp->fcport->vha->vp_idx;
+ logio->vp_index = sp->vha->vp_idx;
}
static void
qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = sp->vha->hw;
mbx->entry_type = MBX_IOCB_TYPE;
SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
@@ -2302,7 +2302,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
- mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
}
static void
@@ -2338,32 +2338,30 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
}
static void
-qla2x00_els_dcmd_sp_free(void *ptr, void *data)
+qla2x00_els_dcmd_sp_free(void *data)
{
- struct scsi_qla_host *vha = (scsi_qla_host_t *)ptr;
- struct qla_hw_data *ha = vha->hw;
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
kfree(sp->fcport);
if (elsio->u.els_logo.els_logo_pyld)
- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
elsio->u.els_logo.els_logo_pyld,
elsio->u.els_logo.els_logo_pyld_dma);
del_timer(&elsio->timer);
- qla2x00_rel_sp(vha, sp);
+ qla2x00_rel_sp(sp);
}
static void
qla2x00_els_dcmd_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
+ srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct srb_iocb *lio = &sp->u.iocb_cmd;
unsigned long flags = 0;
ql_dbg(ql_dbg_io, vha, 0x3069,
@@ -2386,12 +2384,12 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
}
static void
-qla2x00_els_dcmd_sp_done(void *data, void *ptr, int res)
+qla2x00_els_dcmd_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
ql_dbg(ql_dbg_io, vha, 0x3072,
"%s hdl=%x, portid=%02x%02x%02x done\n",
@@ -2449,7 +2447,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
GFP_KERNEL);
if (!elsio->u.els_logo.els_logo_pyld) {
- sp->free(vha, sp);
+ sp->free(sp);
return QLA_FUNCTION_FAILED;
}
@@ -2468,7 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- sp->free(vha, sp);
+ sp->free(sp);
return QLA_FUNCTION_FAILED;
}
@@ -2479,14 +2477,14 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
wait_for_completion(&elsio->u.els_logo.comp);
- sp->free(vha, sp);
+ sp->free(sp);
return rval;
}
static void
qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
{
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
els_iocb->entry_type = ELS_IOCB_TYPE;
@@ -2518,7 +2516,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_address[1] = 0;
els_iocb->rx_len = 0;
- sp->fcport->vha->qla_stats.control_requests++;
+ sp->vha->qla_stats.control_requests++;
}
static void
@@ -2534,7 +2532,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
- els_iocb->vp_index = sp->fcport->vha->vp_idx;
+ els_iocb->vp_index = sp->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
@@ -2565,7 +2563,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_len = cpu_to_le32(sg_dma_len
(bsg_job->reply_payload.sg_list));
- sp->fcport->vha->qla_stats.control_requests++;
+ sp->vha->qla_stats.control_requests++;
}
static void
@@ -2576,7 +2574,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
struct scatterlist *sg;
int index;
uint16_t tot_dsds;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
@@ -2642,7 +2640,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
}
ct_iocb->entry_count = entry_count;
- sp->fcport->vha->qla_stats.control_requests++;
+ sp->vha->qla_stats.control_requests++;
}
static void
@@ -2653,7 +2651,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
struct scatterlist *sg;
int index;
uint16_t tot_dsds;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct bsg_job *bsg_job = sp->u.bsg_job;
int loop_iterartion = 0;
@@ -2665,7 +2663,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->handle = sp->handle;
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- ct_iocb->vp_index = sp->fcport->vha->vp_idx;
+ ct_iocb->vp_index = sp->vha->vp_idx;
ct_iocb->comp_status = cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
@@ -2739,7 +2737,7 @@ qla82xx_start_scsi(srb_t *sp)
uint32_t *fcp_dl;
uint8_t additional_cdb_len;
struct ct6_dsd *ctx;
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
@@ -2901,7 +2899,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
/* Build IOCB segments */
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2974,7 +2972,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+ cmd_pkt->vp_index = sp->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
@@ -3060,7 +3058,7 @@ static void
qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
struct srb_iocb *aio = &sp->u.iocb_cmd;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req = vha->req;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
@@ -3079,19 +3077,69 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
wmb();
}
+static void
+qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
+{
+ int i, sz;
+
+ mbx->entry_type = MBX_IOCB_TYPE;
+ mbx->handle = sp->handle;
+ sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
+
+ for (i = 0; i < sz; i++)
+ mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
+}
+
+static void
+qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
+{
+ sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
+ qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
+ ct_pkt->handle = sp->handle;
+}
+
+static void qla2x00_send_notify_ack_iocb(srb_t *sp,
+ struct nack_to_isp *nack)
+{
+ struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
+
+ nack->entry_type = NOTIFY_ACK_TYPE;
+ nack->entry_count = 1;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.handle = sp->handle;
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.srr_flags = 0;
+ nack->u.isp24.srr_reject_code = 0;
+ nack->u.isp24.srr_reject_code_expl = 0;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
int rval;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ scsi_qla_host_t *vha = sp->vha;
+ struct qla_hw_data *ha = vha->hw;
void *pkt;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(&ha->hardware_lock, flags);
- pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
+ pkt = qla2x00_alloc_iocbs(vha, sp);
if (!pkt) {
- ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
+ ql_log(ql_log_warn, vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
goto done;
}
@@ -3139,12 +3187,23 @@ qla2x00_start_sp(srb_t *sp)
case SRB_ELS_DCMD:
qla24xx_els_logo_iocb(sp, pkt);
break;
+ case SRB_CT_PTHRU_CMD:
+ qla2x00_ctpthru_cmd_iocb(sp, pkt);
+ break;
+ case SRB_MB_IOCB:
+ qla2x00_mb_iocb(sp, pkt);
+ break;
+ case SRB_NACK_PLOGI:
+ case SRB_NACK_PRLI:
+ case SRB_NACK_LOGO:
+ qla2x00_send_notify_ack_iocb(sp, pkt);
+ break;
default:
break;
}
wmb();
- qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
+ qla2x00_start_iocbs(vha, ha->req_q_map[0]);
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index edc2264db45b..3c66ea29de27 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -561,14 +561,50 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
return ret;
}
-static inline fc_port_t *
+fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
- fc_port_t *fcport;
+ fc_port_t *f, *tf;
+
+ f = tf = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
+ if (f->loop_id == loop_id)
+ return f;
+ return NULL;
+}
+
+fc_port_t *
+qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
+{
+ fc_port_t *f, *tf;
+
+ f = tf = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
+ if (incl_deleted)
+ return f;
+ else if (f->deleted == 0)
+ return f;
+ }
+ }
+ return NULL;
+}
- list_for_each_entry(fcport, &vha->vp_fcports, list)
- if (fcport->loop_id == loop_id)
- return fcport;
+fc_port_t *
+qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
+ u8 incl_deleted)
+{
+ fc_port_t *f, *tf;
+
+ f = tf = NULL;
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if (f->d_id.b24 == id->b24) {
+ if (incl_deleted)
+ return f;
+ else if (f->deleted == 0)
+ return f;
+ }
+ }
return NULL;
}
@@ -934,7 +970,11 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x508a,
"Marking port lost loopid=%04x portid=%06x.\n",
fcport->loop_id, fcport->d_id.b24);
- qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ if (qla_ini_mode_enabled(vha)) {
+ qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ fcport->logout_on_delete = 0;
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ }
break;
global_port_update:
@@ -985,9 +1025,6 @@ global_port_update:
qla2x00_mark_all_devices_lost(vha, 1);
- if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
- set_bit(SCR_PENDING, &vha->dpc_flags);
-
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(VP_CONFIG_OK, &vha->vp_flags);
@@ -1024,27 +1061,19 @@ global_port_update:
if (qla2x00_is_a_vp_did(vha, rscn_entry))
break;
- /*
- * Search for the rport related to this RSCN entry and mark it
- * as lost.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&fcport->state) != FCS_ONLINE)
- continue;
- if (fcport->d_id.b24 == rscn_entry) {
- qla2x00_mark_device_lost(vha, fcport, 0, 0);
- break;
- }
- }
-
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
-
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- set_bit(RSCN_UPDATE, &vha->dpc_flags);
- qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
+ {
+ struct event_arg ea;
+
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RSCN;
+ ea.id.b24 = rscn_entry;
+ ea.id.b.rsvd_1 = rscn_entry >> 24;
+ qla2x00_fcport_event_handler(vha, &ea);
+ qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
+ }
break;
-
/* case MBA_RIO_RESPONSE: */
case MBA_ZIO_RESPONSE:
ql_dbg(ql_dbg_async, vha, 0x5015,
@@ -1212,7 +1241,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
req->outstanding_cmds[index] = NULL;
/* Save ISP completion status */
- sp->done(ha, sp, DID_OK << 16);
+ sp->done(sp, DID_OK << 16);
} else {
ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
@@ -1235,7 +1264,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
index = LSW(pkt->handle);
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
- "Invalid command index (%x).\n", index);
+ "Invalid command index (%x) type %8ph.\n",
+ index, iocb);
if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
@@ -1343,66 +1373,122 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(mbx->mb7));
logio_done:
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
-qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
- sts_entry_t *pkt, int iocb_type)
+qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct mbx_24xx_entry *pkt)
{
- const char func[] = "CT_IOCB";
- const char *type;
+ const char func[] = "MBX-IOCB2";
srb_t *sp;
- struct bsg_job *bsg_job;
- struct fc_bsg_reply *bsg_reply;
- uint16_t comp_status;
+ struct srb_iocb *si;
+ u16 sz, i;
int res;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (!sp)
return;
- bsg_job = sp->u.bsg_job;
- bsg_reply = bsg_job->reply;
+ si = &sp->u.iocb_cmd;
+ sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));
- type = "ct pass-through";
+ for (i = 0; i < sz; i++)
+ si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);
- comp_status = le16_to_cpu(pkt->comp_status);
+ res = (si->u.mbx.in_mb[0] & MBS_MASK);
- /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
- * fc payload to the caller
- */
- bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
- bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ sp->done(sp, res);
+}
- if (comp_status != CS_COMPLETE) {
- if (comp_status == CS_DATA_UNDERRUN) {
- res = DID_OK << 16;
- bsg_reply->reply_payload_rcv_len =
- le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+static void
+qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+ struct nack_to_isp *pkt)
+{
+ const char func[] = "nack";
+ srb_t *sp;
+ int res = 0;
- ql_log(ql_log_warn, vha, 0x5048,
- "CT pass-through-%s error "
- "comp_status-status=0x%x total_byte = 0x%x.\n",
- type, comp_status,
- bsg_reply->reply_payload_rcv_len);
- } else {
- ql_log(ql_log_warn, vha, 0x5049,
- "CT pass-through-%s error "
- "comp_status-status=0x%x.\n", type, comp_status);
- res = DID_ERROR << 16;
- bsg_reply->reply_payload_rcv_len = 0;
- }
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
- (uint8_t *)pkt, sizeof(*pkt));
- } else {
- res = DID_OK << 16;
- bsg_reply->reply_payload_rcv_len =
- bsg_job->reply_payload.payload_len;
- bsg_job->reply_len = 0;
- }
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
+
+ if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
+ res = QLA_FUNCTION_FAILED;
+
+ sp->done(sp, res);
+}
+
+static void
+qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
+ sts_entry_t *pkt, int iocb_type)
+{
+ const char func[] = "CT_IOCB";
+ const char *type;
+ srb_t *sp;
+ struct bsg_job *bsg_job;
+ struct fc_bsg_reply *bsg_reply;
+ uint16_t comp_status;
+ int res = 0;
+
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ if (!sp)
+ return;
- sp->done(vha, sp, res);
+ switch (sp->type) {
+ case SRB_CT_CMD:
+ bsg_job = sp->u.bsg_job;
+ bsg_reply = bsg_job->reply;
+
+ type = "ct pass-through";
+
+ comp_status = le16_to_cpu(pkt->comp_status);
+
+ /*
+ * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
+ * fc payload to the caller
+ */
+ bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ if (comp_status != CS_COMPLETE) {
+ if (comp_status == CS_DATA_UNDERRUN) {
+ res = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
+ le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
+
+ ql_log(ql_log_warn, vha, 0x5048,
+ "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
+ type, comp_status,
+ bsg_reply->reply_payload_rcv_len);
+ } else {
+ ql_log(ql_log_warn, vha, 0x5049,
+ "CT pass-through-%s error comp_status=0x%x.\n",
+ type, comp_status);
+ res = DID_ERROR << 16;
+ bsg_reply->reply_payload_rcv_len = 0;
+ }
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
+ (uint8_t *)pkt, sizeof(*pkt));
+ } else {
+ res = DID_OK << 16;
+ bsg_reply->reply_payload_rcv_len =
+ bsg_job->reply_payload.payload_len;
+ bsg_job->reply_len = 0;
+ }
+ break;
+ case SRB_CT_PTHRU_CMD:
+ /*
+ * borrowing sts_entry_24xx.comp_status.
+ * same location as ct_entry_24xx.comp_status
+ */
+ res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->name);
+ break;
+ }
+
+ sp->done(sp, res);
}
static void
@@ -1438,7 +1524,16 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
type = "Driver ELS logo";
ql_dbg(ql_dbg_user, vha, 0x5047,
"Completing %s: (%p) type=%d.\n", type, sp, sp->type);
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
+ return;
+ case SRB_CT_PTHRU_CMD:
+ /*
+ * borrowing sts_entry_24xx.comp_status.
+ * same location as ct_entry_24xx.comp_status
+ */
+ res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
+ (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->name);
+ sp->done(sp, res);
return;
default:
ql_dbg(ql_dbg_user, vha, 0x503e,
@@ -1496,7 +1591,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply_len = 0;
}
- sp->done(vha, sp, res);
+ sp->done(sp, res);
}
static void
@@ -1543,6 +1638,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
+ vha->hw->exch_starvation = 0;
data[0] = MBS_COMMAND_COMPLETE;
if (sp->type != SRB_LOGIN_CMD)
goto logio_done;
@@ -1568,6 +1664,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
iop[0] = le32_to_cpu(logio->io_parameter[0]);
iop[1] = le32_to_cpu(logio->io_parameter[1]);
+ lio->u.logio.iop[0] = iop[0];
+ lio->u.logio.iop[1] = iop[1];
switch (iop[0]) {
case LSC_SCODE_PORTID_USED:
data[0] = MBS_PORT_ID_USED;
@@ -1576,6 +1674,21 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
+ case LSC_SCODE_NOXCB:
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Exchange starvation. Resetting RISC\n");
+
+ vha->hw->exch_starvation = 0;
+
+ if (IS_P3P_TYPE(vha->hw))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ /* fall through */
default:
data[0] = MBS_COMMAND_ERROR;
break;
@@ -1590,7 +1703,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
le32_to_cpu(logio->io_parameter[1]));
logio_done:
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
@@ -1640,7 +1753,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
(uint8_t *)sts, sizeof(*sts));
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
/**
@@ -1728,7 +1841,7 @@ static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp, int res)
{
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct scsi_cmnd *cp = GET_CMD_SP(sp);
uint32_t track_sense_len;
@@ -1756,7 +1869,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
"Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
- sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ sp->vha->host_no, cp->device->id, cp->device->lun,
cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
cp->sense_buffer, sense_len);
@@ -1778,7 +1891,7 @@ struct scsi_dif_tuple {
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
uint8_t *ap = &sts24->data[12];
uint8_t *ep = &sts24->data[20];
@@ -2043,7 +2156,7 @@ done:
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
/* Always return DID_OK, bsg will send the vendor specific response
* in this case only */
- sp->done(vha, sp, (DID_OK << 6));
+ sp->done(sp, DID_OK << 6);
}
@@ -2076,6 +2189,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int res = 0;
uint16_t state_flags = 0;
uint16_t retry_delay = 0;
+ uint8_t no_logout = 0;
sts = (sts_entry_t *) pkt;
sts24 = (struct sts_entry_24xx *) pkt;
@@ -2336,6 +2450,7 @@ check_scsi_status:
break;
case CS_PORT_LOGGED_OUT:
+ no_logout = 1;
case CS_PORT_CONFIG_CHG:
case CS_PORT_BUSY:
case CS_INCOMPLETE:
@@ -2358,14 +2473,21 @@ check_scsi_status:
break;
}
- ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
- "Port to be marked lost on fcport=%02x%02x%02x, current "
- "port state= %s.\n", fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa,
- port_state_str[atomic_read(&fcport->state)]);
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
+ "Port to be marked lost on fcport=%02x%02x%02x, current "
+ "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ port_state_str[atomic_read(&fcport->state)],
+ comp_status);
+
+ if (no_logout)
+ fcport->logout_on_delete = 0;
- if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+ qlt_schedule_sess_for_deletion_lock(fcport);
+ }
+
break;
case CS_ABORTED:
@@ -2407,7 +2529,7 @@ out:
resid_len, fw_resid_len, sp, cp);
if (rsp->status_srb == NULL)
- sp->done(ha, sp, res);
+ sp->done(sp, res);
}
/**
@@ -2464,7 +2586,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
/* Place command on done queue. */
if (sense_len == 0) {
rsp->status_srb = NULL;
- sp->done(ha, sp, cp->result);
+ sp->done(sp, cp->result);
}
}
@@ -2500,7 +2622,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- sp->done(ha, sp, res);
+ sp->done(sp, res);
return;
}
fatal:
@@ -2558,7 +2680,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
/**
@@ -2629,10 +2751,16 @@ process_err:
}
case ABTS_RESP_24XX:
case CTIO_TYPE7:
- case NOTIFY_ACK_TYPE:
case CTIO_CRC2:
qlt_response_pkt_all_vps(vha, (response_t *)pkt);
break;
+ case NOTIFY_ACK_TYPE:
+ if (pkt->handle == QLA_TGT_SKIP_HANDLE)
+ qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ else
+ qla24xxx_nack_iocb_entry(vha, rsp->req,
+ (struct nack_to_isp *)pkt);
+ break;
case MARKER_TYPE:
/* Do nothing in this case, this check is to prevent it
* from falling into default case
@@ -2642,6 +2770,10 @@ process_err:
qla24xx_abort_iocb_entry(vha, rsp->req,
(struct abort_entry_24xx *)pkt);
break;
+ case MBX_IOCB_TYPE:
+ qla24xx_mbx_iocb_entry(vha, rsp->req,
+ (struct mbx_24xx_entry *)pkt);
+ break;
default:
/* Type Not Supported. */
ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2658,8 +2790,9 @@ process_err:
if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
- } else
+ } else {
WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
+ }
}
static void
@@ -3015,14 +3148,17 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
int i, ret;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ int min_vecs = QLA_BASE_VECTORS;
struct irq_affinity desc = {
.pre_vectors = QLA_BASE_VECTORS,
};
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
desc.pre_vectors++;
+ min_vecs++;
+ }
- ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
+ ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
&desc);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 67f64db390b0..35079f417417 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1637,94 +1637,6 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
return rval;
}
-/*
- * qla2x00_get_node_name_list
- * Issue get node name list mailbox command, kmalloc()
- * and return the resulting list. Caller must kfree() it!
- *
- * Input:
- * ha = adapter state pointer.
- * out_data = resulting list
- * out_len = length of the resulting list
- *
- * Returns:
- * qla2x00 local function return status code.
- *
- * Context:
- * Kernel context.
- */
-int
-qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
-{
- struct qla_hw_data *ha = vha->hw;
- struct qla_port_24xx_data *list = NULL;
- void *pmap;
- mbx_cmd_t mc;
- dma_addr_t pmap_dma;
- ulong dma_size;
- int rval, left;
-
- left = 1;
- while (left > 0) {
- dma_size = left * sizeof(*list);
- pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
- &pmap_dma, GFP_KERNEL);
- if (!pmap) {
- ql_log(ql_log_warn, vha, 0x113f,
- "%s(%ld): DMA Alloc failed of %ld\n",
- __func__, vha->host_no, dma_size);
- rval = QLA_MEMORY_ALLOC_FAILED;
- goto out;
- }
-
- mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
- mc.mb[1] = BIT_1 | BIT_3;
- mc.mb[2] = MSW(pmap_dma);
- mc.mb[3] = LSW(pmap_dma);
- mc.mb[6] = MSW(MSD(pmap_dma));
- mc.mb[7] = LSW(MSD(pmap_dma));
- mc.mb[8] = dma_size;
- mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
- mc.in_mb = MBX_0|MBX_1;
- mc.tov = 30;
- mc.flags = MBX_DMA_IN;
-
- rval = qla2x00_mailbox_command(vha, &mc);
- if (rval != QLA_SUCCESS) {
- if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
- (mc.mb[1] == 0xA)) {
- left += le16_to_cpu(mc.mb[2]) /
- sizeof(struct qla_port_24xx_data);
- goto restart;
- }
- goto out_free;
- }
-
- left = 0;
-
- list = kmemdup(pmap, dma_size, GFP_KERNEL);
- if (!list) {
- ql_log(ql_log_warn, vha, 0x1140,
- "%s(%ld): failed to allocate node names list "
- "structure.\n", __func__, vha->host_no);
- rval = QLA_MEMORY_ALLOC_FAILED;
- goto out_free;
- }
-
-restart:
- dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
- }
-
- *out_data = list;
- *out_len = dma_size;
-
-out:
- return rval;
-
-out_free:
- dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
- return rval;
-}
/*
* qla2x00_get_port_database
@@ -3687,10 +3599,8 @@ void
qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
struct vp_rpt_id_entry_24xx *rptid_entry)
{
- uint8_t vp_idx;
- uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
struct qla_hw_data *ha = vha->hw;
- scsi_qla_host_t *vp;
+ scsi_qla_host_t *vp = NULL;
unsigned long flags;
int found;
@@ -3701,80 +3611,124 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
if (rptid_entry->format == 0) {
+ /* loop */
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
- "VPs acquired %d.\n",
- MSB(le16_to_cpu(rptid_entry->vp_count)),
- LSB(le16_to_cpu(rptid_entry->vp_count)));
+ "VPs acquired %d.\n", rptid_entry->vp_setup,
+ rptid_entry->vp_acquired);
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
+
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
} else if (rptid_entry->format == 1) {
- vp_idx = LSB(stat);
+ /* fabric */
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
- "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
+ "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
+ rptid_entry->vp_status,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
/* buffer to buffer credit flag */
- vha->flags.bbcr_enable = (rptid_entry->bbcr & 0xf) != 0;
-
- /* FA-WWN is only for physical port */
- if (!vp_idx) {
- void *wwpn = ha->init_cb->port_name;
+ vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
+
+ if (rptid_entry->vp_idx == 0) {
+ if (rptid_entry->vp_status == VP_STAT_COMPL) {
+ /* FA-WWN is only for physical port */
+ if (qla_ini_mode_enabled(vha) &&
+ ha->flags.fawwpn_enabled &&
+ (rptid_entry->u.f1.flags &
+ VP_FLAGS_NAME_VALID)) {
+ memcpy(vha->port_name,
+ rptid_entry->u.f1.port_name,
+ WWN_SIZE);
+ }
- if (!MSB(stat)) {
- if (rptid_entry->vp_idx_map[1] & BIT_6)
- wwpn = rptid_entry->reserved_4 + 8;
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
- memcpy(vha->port_name, wwpn, WWN_SIZE);
+
fc_host_port_name(vha->host) =
wwn_to_u64(vha->port_name);
- ql_dbg(ql_dbg_mbx, vha, 0x1018,
- "FA-WWN portname %016llx (%x)\n",
- fc_host_port_name(vha->host), MSB(stat));
- }
-
- vp = vha;
- if (vp_idx == 0)
- goto reg_needed;
- if (MSB(stat) != 0 && MSB(stat) != 2) {
- ql_dbg(ql_dbg_mbx, vha, 0x10ba,
- "Could not acquire ID for VP[%d].\n", vp_idx);
- return;
- }
+ if (qla_ini_mode_enabled(vha))
+ ql_dbg(ql_dbg_mbx, vha, 0x1018,
+ "FA-WWN portname %016llx (%x)\n",
+ fc_host_port_name(vha->host),
+ rptid_entry->vp_status);
- found = 0;
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list) {
- if (vp_idx == vp->vp_idx) {
- found = 1;
- break;
+ set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+ } else {
+ if (rptid_entry->vp_status != VP_STAT_COMPL &&
+ rptid_entry->vp_status != VP_STAT_ID_CHG) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10ba,
+ "Could not acquire ID for VP[%d].\n",
+ rptid_entry->vp_idx);
+ return;
}
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!found)
- return;
+ found = 0;
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (rptid_entry->vp_idx == vp->vp_idx) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
- vp->d_id.b.domain = rptid_entry->port_id[2];
- vp->d_id.b.area = rptid_entry->port_id[1];
- vp->d_id.b.al_pa = rptid_entry->port_id[0];
+ if (!found)
+ return;
- /*
- * Cannot configure here as we are still sitting on the
- * response queue. Handle it in dpc context.
- */
- set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+ vp->d_id.b.domain = rptid_entry->port_id[2];
+ vp->d_id.b.area = rptid_entry->port_id[1];
+ vp->d_id.b.al_pa = rptid_entry->port_id[0];
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vp, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
-reg_needed:
- set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
- set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+ /*
+ * Cannot configure here as we are still sitting on the
+ * response queue. Handle it in dpc context.
+ */
+ set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
+ set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
+ set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+ }
set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
+ } else if (rptid_entry->format == 2) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
+ rptid_entry->port_id[2], rptid_entry->port_id[1],
+ rptid_entry->port_id[0]);
+
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "N2N: Remote WWPN %8phC.\n",
+ rptid_entry->u.f2.port_name);
+
+ /* N2N. direct connect */
+ vha->d_id.b.domain = rptid_entry->port_id[2];
+ vha->d_id.b.area = rptid_entry->port_id[1];
+ vha->d_id.b.al_pa = rptid_entry->port_id[0];
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
}
}
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 96c33e292eba..10b742d27e16 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -1789,16 +1789,16 @@ qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
static void
qla2x00_fxdisc_iocb_timeout(void *data)
{
- srb_t *sp = (srb_t *)data;
+ srb_t *sp = data;
struct srb_iocb *lio = &sp->u.iocb_cmd;
complete(&lio->u.fxiocb.fxiocb_comp);
}
static void
-qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
+qla2x00_fxdisc_sp_done(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct srb_iocb *lio = &sp->u.iocb_cmd;
complete(&lio->u.fxiocb.fxiocb_comp);
@@ -1999,7 +1999,7 @@ done_unmap_req:
dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
done_free_sp:
- sp->free(vha, sp);
+ sp->free(sp);
done:
return rval;
}
@@ -2127,7 +2127,7 @@ static inline void
qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp, int res)
{
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct scsi_cmnd *cp = GET_CMD_SP(sp);
uint32_t track_sense_len;
@@ -2162,7 +2162,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sense_len) {
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
"Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
- sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ sp->vha->host_no, cp->device->id, cp->device->lun,
cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
cp->sense_buffer, sense_len);
@@ -2181,7 +2181,7 @@ qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
(sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
tmf->u.tmf.comp_status = cpstatus;
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
@@ -2198,7 +2198,7 @@ qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
abt = &sp->u.iocb_cmd;
abt->u.abt.comp_status = pkt->tgt_id_sts;
- sp->done(vha, sp, 0);
+ sp->done(sp, 0);
}
static void
@@ -2264,7 +2264,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
}
- sp->done(vha, sp, res);
+ sp->done(sp, res);
}
/**
@@ -2537,7 +2537,7 @@ check_scsi_status:
par_sense_len, rsp_info_len);
if (rsp->status_srb == NULL)
- sp->done(ha, sp, res);
+ sp->done(sp, res);
}
/**
@@ -2614,7 +2614,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
/* Place command on done queue. */
if (sense_len == 0) {
rsp->status_srb = NULL;
- sp->done(ha, sp, cp->result);
+ sp->done(sp, cp->result);
}
}
@@ -2695,7 +2695,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- sp->done(ha, sp, res);
+ sp->done(sp, res);
return;
}
@@ -2997,7 +2997,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
cont_a64_entry_t lcont_pkt;
cont_a64_entry_t *cont_pkt;
- vha = sp->fcport->vha;
+ vha = sp->vha;
req = vha->req;
cmd = GET_CMD_SP(sp);
@@ -3081,7 +3081,7 @@ qlafx00_start_scsi(srb_t *sp)
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct scsi_qla_host *vha = sp->fcport->vha;
+ struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
struct cmd_type_7_fx00 *cmd_pkt;
struct cmd_type_7_fx00 lcmd_pkt;
@@ -3205,7 +3205,7 @@ void
qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req = vha->req;
struct tsk_mgmt_entry_fx00 tm_iocb;
struct scsi_lun llun;
@@ -3232,7 +3232,7 @@ void
qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
{
struct srb_iocb *fxio = &sp->u.iocb_cmd;
- scsi_qla_host_t *vha = sp->fcport->vha;
+ scsi_qla_host_t *vha = sp->vha;
struct req_que *req = vha->req;
struct abort_iocb_entry_fx00 abt_iocb;
@@ -3346,8 +3346,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
REQUEST_ENTRY_SIZE);
cont_pkt =
qlafx00_prep_cont_type1_iocb(
- sp->fcport->vha->req,
- &lcont_pkt);
+ sp->vha->req, &lcont_pkt);
cur_dsd = (__le32 *)
lcont_pkt.dseg_0_address;
avail_dsds = 5;
@@ -3368,7 +3367,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
&lcont_pkt, REQUEST_ENTRY_SIZE);
ql_dump_buffer(
ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3042,
+ sp->vha, 0x3042,
(uint8_t *)&lcont_pkt,
REQUEST_ENTRY_SIZE);
}
@@ -3377,7 +3376,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memcpy_toio((void __iomem *)cont_pkt,
&lcont_pkt, REQUEST_ENTRY_SIZE);
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3043,
+ sp->vha, 0x3043,
(uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
}
}
@@ -3409,8 +3408,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
REQUEST_ENTRY_SIZE);
cont_pkt =
qlafx00_prep_cont_type1_iocb(
- sp->fcport->vha->req,
- &lcont_pkt);
+ sp->vha->req, &lcont_pkt);
cur_dsd = (__le32 *)
lcont_pkt.dseg_0_address;
avail_dsds = 5;
@@ -3431,7 +3429,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
REQUEST_ENTRY_SIZE);
ql_dump_buffer(
ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3045,
+ sp->vha, 0x3045,
(uint8_t *)&lcont_pkt,
REQUEST_ENTRY_SIZE);
}
@@ -3440,7 +3438,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
memcpy_toio((void __iomem *)cont_pkt,
&lcont_pkt, REQUEST_ENTRY_SIZE);
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3046,
+ sp->vha, 0x3046,
(uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
}
}
@@ -3452,7 +3450,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
}
ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
- sp->fcport->vha, 0x3047,
+ sp->vha, 0x3047,
(uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d01c90c7dd04..1fed235a1b4a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -237,6 +237,13 @@ MODULE_PARM_DESC(ql2xfwholdabts,
"0 (Default) Do not set fw option. "
"1 - Set fw option to hold ABTS.");
+int ql2xmvasynctoatio = 1;
+module_param(ql2xmvasynctoatio, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xmvasynctoatio,
+		 "Move PUREX, ABTS RX and RIDA IOCBs to ATIOQ. "
+		 "0 (Default) - Do not move IOCBs. "
+		 "1 - Move IOCBs.");
+
/*
* SCSI host template entry points
*/
@@ -607,11 +614,11 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
}
void
-qla2x00_sp_free_dma(void *vha, void *ptr)
+qla2x00_sp_free_dma(void *ptr)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
+ struct qla_hw_data *ha = sp->vha->hw;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- struct qla_hw_data *ha = sp->fcport->vha->hw;
void *ctx = GET_CMD_CTX_SP(sp);
if (sp->flags & SRB_DMA_VALID) {
@@ -650,20 +657,19 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
}
CMD_SP(cmd) = NULL;
- qla2x00_rel_sp(sp->fcport->vha, sp);
+ qla2x00_rel_sp(sp);
}
void
-qla2x00_sp_compl(void *data, void *ptr, int res)
+qla2x00_sp_compl(void *ptr, int res)
{
- struct qla_hw_data *ha = (struct qla_hw_data *)data;
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cmd->result = res;
if (atomic_read(&sp->ref_count) == 0) {
- ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
+ ql_dbg(ql_dbg_io, sp->vha, 0x3015,
"SP reference-count to ZERO -- sp=%p cmd=%p.\n",
sp, GET_CMD_SP(sp));
if (ql2xextended_error_logging & ql_dbg_io)
@@ -673,12 +679,12 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
if (!atomic_dec_and_test(&sp->ref_count))
return;
- qla2x00_sp_free_dma(ha, sp);
+ qla2x00_sp_free_dma(sp);
cmd->scsi_done(cmd);
}
void
-qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
+qla2xxx_qpair_sp_free_dma(void *ptr)
{
srb_t *sp = (srb_t *)ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
@@ -724,9 +730,9 @@ qla2xxx_qpair_sp_free_dma(void *vha, void *ptr)
}
void
-qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
+qla2xxx_qpair_sp_compl(void *ptr, int res)
{
- srb_t *sp = (srb_t *)ptr;
+ srb_t *sp = ptr;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
cmd->result = res;
@@ -742,7 +748,7 @@ qla2xxx_qpair_sp_compl(void *data, void *ptr, int res)
if (!atomic_dec_and_test(&sp->ref_count))
return;
- qla2xxx_qpair_sp_free_dma(sp->fcport->vha, sp);
+ qla2xxx_qpair_sp_free_dma(sp);
cmd->scsi_done(cmd);
}
@@ -863,7 +869,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
return 0;
qc24_host_busy_free_sp:
- qla2x00_sp_free_dma(ha, sp);
+ qla2x00_sp_free_dma(sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -952,7 +958,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
return 0;
qc24_host_busy_free_sp:
- qla2xxx_qpair_sp_free_dma(vha, sp);
+ qla2xxx_qpair_sp_free_dma(sp);
qc24_host_busy:
return SCSI_MLQUEUE_HOST_BUSY;
@@ -1044,6 +1050,34 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
return (return_status);
}
+static inline int test_fcport_count(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+ int res;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ql_dbg(ql_dbg_init, vha, 0xffff,
+	    "vha %p, fcport_count=%d\n",
+ vha, vha->fcport_count);
+ res = (vha->fcport_count == 0);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ return res;
+}
+
+/*
+ * qla2x00_wait_for_sess_deletion can only be called from remove_one.
+ * It depends on the UNLOADING flag to stop device discovery.
+ */
+static void
+qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
+{
+ qla2x00_mark_all_devices_lost(vha, 0);
+
+ wait_event(vha->fcport_waitQ, test_fcport_count(vha));
+}
+
/*
* qla2x00_wait_for_hba_ready
* Wait till the HBA is ready before doing driver unload
@@ -1204,7 +1238,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
spin_lock_irqsave(&ha->hardware_lock, flags);
- sp->done(ha, sp, 0);
+ sp->done(sp, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Did the command return during mailbox execution? */
@@ -1249,7 +1283,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
continue;
if (sp->type != SRB_SCSI_CMD)
continue;
- if (vha->vp_idx != sp->fcport->vha->vp_idx)
+ if (vha->vp_idx != sp->vha->vp_idx)
continue;
match = 0;
cmd = GET_CMD_SP(sp);
@@ -1629,7 +1663,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
spin_lock_irqsave(&ha->hardware_lock, flags);
}
req->outstanding_cmds[cnt] = NULL;
- sp->done(vha, sp, res);
+ sp->done(sp, res);
}
}
}
@@ -1815,6 +1849,7 @@ skip_pio:
/* Determine queue resources */
ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->msix_count = QLA_BASE_VECTORS;
if (!ql2xmqsupport || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
goto mqiobase_exit;
@@ -1842,9 +1877,8 @@ skip_pio:
"BAR 3 not enabled.\n");
mqiobase_exit:
- ha->msix_count = ha->max_rsp_queues + 1;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
- "MSIX Count:%d.\n", ha->msix_count);
+ "MSIX Count: %d.\n", ha->msix_count);
return (0);
iospace_error_exit:
@@ -1892,6 +1926,7 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
/* 83XX 26XX always use MQ type access for queues
* - mbar 2, a.k.a region 4 */
ha->max_req_queues = ha->max_rsp_queues = 1;
+ ha->msix_count = QLA_BASE_VECTORS;
ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
pci_resource_len(ha->pdev, 4));
@@ -1915,12 +1950,13 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
if (ql2xmqsupport) {
/* MB interrupt uses 1 vector */
ha->max_req_queues = ha->msix_count - 1;
- ha->max_rsp_queues = ha->max_req_queues;
/* ATIOQ needs 1 vector. That's 1 less QPair */
if (QLA_TGT_MODE_ENABLED())
ha->max_req_queues--;
+ ha->max_rsp_queues = ha->max_req_queues;
+
/* Queue pairs is the max value minus
* the base queue pair */
ha->max_qpairs = ha->max_req_queues - 1;
@@ -1934,14 +1970,8 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
"BAR 1 not enabled.\n");
mqiobase_exit:
- ha->msix_count = ha->max_rsp_queues + 1;
- if (QLA_TGT_MODE_ENABLED())
- ha->msix_count++;
-
- qlt_83xx_iospace_config(ha);
-
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
- "MSIX Count:%d.\n", ha->msix_count);
+ "MSIX Count: %d.\n", ha->msix_count);
return 0;
iospace_error_exit:
@@ -3124,7 +3154,8 @@ skip_dpc:
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
"Init done and hba is online.\n");
- if (qla_ini_mode_enabled(base_vha))
+ if (qla_ini_mode_enabled(base_vha) ||
+ qla_dual_mode_enabled(base_vha))
scsi_scan_host(host);
else
ql_dbg(ql_dbg_init, base_vha, 0x0122,
@@ -3373,21 +3404,26 @@ qla2x00_remove_one(struct pci_dev *pdev)
* resources.
*/
if (!atomic_read(&pdev->enable_cnt)) {
+ dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
+ base_vha->gnl.l, base_vha->gnl.ldma);
+
scsi_host_put(base_vha->host);
kfree(ha);
pci_set_drvdata(pdev, NULL);
return;
}
-
qla2x00_wait_for_hba_ready(base_vha);
- /* if UNLOAD flag is already set, then continue unload,
+ /*
+	 * If the UNLOADING flag is already set, then continue unload,
* where it was set first.
*/
if (test_bit(UNLOADING, &base_vha->dpc_flags))
return;
set_bit(UNLOADING, &base_vha->dpc_flags);
+ dma_free_coherent(&ha->pdev->dev,
+ base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
if (IS_QLAFX00(ha))
qlafx00_driver_shutdown(base_vha, 20);
@@ -3536,10 +3572,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
qla2xxx_wake_dpc(base_vha);
} else {
int now;
- if (rport)
+ if (rport) {
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phN. rport %p roles %x \n",
+ __func__, fcport->port_name, rport,
+ rport->roles);
fc_remote_port_delete(rport);
+ }
qlt_do_generation_tick(vha, &now);
- qlt_fc_port_deleted(vha, fcport, now);
}
}
@@ -3582,7 +3622,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
fcport->login_retry = vha->hw->login_retry_count;
ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
+ "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
fcport->port_name, fcport->loop_id, fcport->login_retry);
}
}
@@ -3605,7 +3645,13 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
{
fc_port_t *fcport;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Mark all dev lost\n");
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = 0;
+ qlt_schedule_sess_for_deletion_lock(fcport);
+
if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
@@ -4195,10 +4241,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
struct scsi_qla_host *vha = NULL;
host = scsi_host_alloc(sht, sizeof(scsi_qla_host_t));
- if (host == NULL) {
+ if (!host) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x0107,
"Failed to allocate host from the scsi layer, aborting.\n");
- goto fail;
+ return NULL;
}
/* Clear our data area */
@@ -4217,9 +4263,22 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->logo_list);
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
+ INIT_LIST_HEAD(&vha->gnl.fcports);
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
+ init_waitqueue_head(&vha->fcport_waitQ);
+
+ vha->gnl.size = sizeof(struct get_name_list_extended) *
+ (ha->max_loop_id + 1);
+ vha->gnl.l = dma_alloc_coherent(&ha->pdev->dev,
+ vha->gnl.size, &vha->gnl.ldma, GFP_KERNEL);
+ if (!vha->gnl.l) {
+ ql_log(ql_log_fatal, vha, 0xffff,
+ "Alloc failed for name list.\n");
+ scsi_remove_host(vha->host);
+ return NULL;
+ }
sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
@@ -4228,12 +4287,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
dev_name(&(ha->pdev->dev)));
return vha;
-
-fail:
- return vha;
}
-static struct qla_work_evt *
+struct qla_work_evt *
qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
{
struct qla_work_evt *e;
@@ -4255,7 +4311,7 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
return e;
}
-static int
+int
qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
{
unsigned long flags;
@@ -4316,7 +4372,6 @@ int qla2x00_post_async_##name##_work( \
}
qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
-qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
@@ -4369,6 +4424,67 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
return qla2x00_post_work(vha, e);
}
+int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_work_evt *e;
+
+ e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.fcport.fcport = fcport;
+ return qla2x00_post_work(vha, e);
+}
+
+static
+void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ unsigned long flags;
+ fc_port_t *fcport = NULL;
+ struct qlt_plogi_ack_t *pla =
+ (struct qlt_plogi_ack_t *)e->u.new_sess.pla;
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport = qla2x00_find_fcport_by_wwpn(vha, e->u.new_sess.port_name, 1);
+ if (fcport) {
+ fcport->d_id = e->u.new_sess.id;
+ if (pla) {
+ fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ qlt_plogi_ack_link(vha, pla, fcport, QLT_PLOGI_LINK_SAME_WWN);
+			/* We took an extra ref_count to prevent the PLOGI ACK
+			 * from being sent before the fcport/sess is created.
+ */
+ pla->ref_count--;
+ }
+ } else {
+ fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ if (fcport) {
+ fcport->d_id = e->u.new_sess.id;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->flags |= FCF_FABRIC_DEVICE;
+ fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+
+ memcpy(fcport->port_name, e->u.new_sess.port_name,
+ WWN_SIZE);
+ list_add_tail(&fcport->list, &vha->vp_fcports);
+
+ if (pla) {
+ qlt_plogi_ack_link(vha, pla, fcport,
+ QLT_PLOGI_LINK_SAME_WWN);
+ pla->ref_count--;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ if (fcport) {
+ if (pla)
+ qlt_plogi_ack_unref(vha, pla);
+ else
+ qla24xx_async_gnl(vha, fcport);
+ }
+}
+
void
qla2x00_do_work(struct scsi_qla_host *vha)
{
@@ -4395,10 +4511,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_login(vha, e->u.logio.fcport,
e->u.logio.data);
break;
- case QLA_EVT_ASYNC_LOGIN_DONE:
- qla2x00_async_login_done(vha, e->u.logio.fcport,
- e->u.logio.data);
- break;
case QLA_EVT_ASYNC_LOGOUT:
qla2x00_async_logout(vha, e->u.logio.fcport);
break;
@@ -4420,6 +4532,34 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
+ case QLA_EVT_GIDPN:
+ qla24xx_async_gidpn(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GPNID:
+ qla24xx_async_gpnid(vha, &e->u.gpnid.id);
+ break;
+ case QLA_EVT_GPNID_DONE:
+ qla24xx_async_gpnid_done(vha, e->u.iosb.sp);
+ break;
+ case QLA_EVT_NEW_SESS:
+ qla24xx_create_new_sess(vha, e);
+ break;
+ case QLA_EVT_GPDB:
+ qla24xx_async_gpdb(vha, e->u.fcport.fcport,
+ e->u.fcport.opt);
+ break;
+ case QLA_EVT_GPSC:
+ qla24xx_async_gpsc(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_UPD_FCPORT:
+ qla2x00_update_fcport(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_GNL:
+ qla24xx_async_gnl(vha, e->u.fcport.fcport);
+ break;
+ case QLA_EVT_NACK:
+ qla24xx_do_nack_work(vha, e);
+ break;
}
if (e->flags & QLA_EVT_FLAG_FREE)
kfree(e);
@@ -4436,9 +4576,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
{
fc_port_t *fcport;
int status;
- uint16_t next_loopid = 0;
- struct qla_hw_data *ha = vha->hw;
- uint16_t data[2];
+ struct event_arg ea;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
/*
@@ -4449,77 +4587,38 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
fcport->login_retry && !(fcport->flags & FCF_ASYNC_SENT)) {
fcport->login_retry--;
if (fcport->flags & FCF_FABRIC_DEVICE) {
- if (fcport->flags & FCF_FCP2_DEVICE)
- ha->isp_ops->fabric_logout(vha,
- fcport->loop_id,
- fcport->d_id.b.domain,
- fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
-
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid =
- ha->min_external_loopid;
- status = qla2x00_find_new_loop_id(
- vha, fcport);
- if (status != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
- }
-
- if (IS_ALOGIO_CAPABLE(ha)) {
- fcport->flags |= FCF_ASYNC_SENT;
- data[0] = 0;
- data[1] = QLA_LOGIO_LOGIN_RETRIED;
- status = qla2x00_post_async_login_work(
- vha, fcport, data);
- if (status == QLA_SUCCESS)
- continue;
- /* Attempt a retry. */
- status = 1;
- } else {
- status = qla2x00_fabric_login(vha,
- fcport, &next_loopid);
- if (status == QLA_SUCCESS) {
- int status2;
- uint8_t opts;
-
- opts = 0;
- if (fcport->flags &
- FCF_FCP2_DEVICE)
- opts |= BIT_1;
- status2 =
- qla2x00_get_port_database(
- vha, fcport, opts);
- if (status2 != QLA_SUCCESS)
- status = 1;
- }
- }
- } else
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC DS %d LS %d\n", __func__,
+ fcport->port_name, fcport->disc_state,
+ fcport->fw_login_state);
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RELOGIN;
+ ea.fcport = fcport;
+ qla2x00_fcport_event_handler(vha, &ea);
+ } else {
status = qla2x00_local_device_login(vha,
fcport);
+ if (status == QLA_SUCCESS) {
+ fcport->old_loop_id = fcport->loop_id;
+ ql_dbg(ql_dbg_disc, vha, 0x2003,
+ "Port login OK: logged in ID 0x%x.\n",
+ fcport->loop_id);
+ qla2x00_update_fcport(vha, fcport);
+ } else if (status == 1) {
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ /* retry the login again */
+ ql_dbg(ql_dbg_disc, vha, 0x2007,
+ "Retrying %d login again loop_id 0x%x.\n",
+ fcport->login_retry,
+ fcport->loop_id);
+ } else {
+ fcport->login_retry = 0;
+ }
- if (status == QLA_SUCCESS) {
- fcport->old_loop_id = fcport->loop_id;
-
- ql_dbg(ql_dbg_disc, vha, 0x2003,
- "Port login OK: logged in ID 0x%x.\n",
- fcport->loop_id);
-
- qla2x00_update_fcport(vha, fcport);
-
- } else if (status == 1) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- /* retry the login again */
- ql_dbg(ql_dbg_disc, vha, 0x2007,
- "Retrying %d login again loop_id 0x%x.\n",
- fcport->login_retry, fcport->loop_id);
- } else {
- fcport->login_retry = 0;
+ if (fcport->login_retry == 0 &&
+ status != QLA_SUCCESS)
+ qla2x00_clear_loop_id(fcport);
}
-
- if (fcport->login_retry == 0 && status != QLA_SUCCESS)
- qla2x00_clear_loop_id(fcport);
}
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -5183,7 +5282,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
- /* if UNLOAD flag is already set, then continue unload,
+ /*
+	 * If the UNLOADING flag is already set, then continue unload,
* where it was set first.
*/
if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -5192,6 +5292,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");
+ qla2x00_wait_for_sess_deletion(base_vha);
+
set_bit(UNLOADING, &base_vha->dpc_flags);
qla2x00_delete_all_vps(ha, base_vha);
@@ -5410,16 +5512,6 @@ qla2x00_do_dpc(void *data)
qla2x00_update_fcports(base_vha);
}
- if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
- int ret;
- ret = qla2x00_send_change_request(base_vha, 0x3, 0);
- if (ret != QLA_SUCCESS)
- ql_log(ql_log_warn, base_vha, 0x121,
- "Failed to enable receiving of RSCN "
- "requests: 0x%x.\n", ret);
- clear_bit(SCR_PENDING, &base_vha->dpc_flags);
- }
-
if (IS_QLAFX00(ha))
goto loop_resync_check;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e4fda84b959e..45f5077684f0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -55,8 +55,17 @@ MODULE_PARM_DESC(qlini_mode,
"disabled on enabling target mode and then on disabling target mode "
"enabled back; "
"\"disabled\" - initiator mode will never be enabled; "
+	"\"dual\" - initiator mode will be enabled, and target mode can be "
+	"activated when ready; "
"\"enabled\" (default) - initiator mode will always stay enabled.");
+static int ql_dm_tgt_ex_pct = 50;
+module_param(ql_dm_tgt_ex_pct, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql_dm_tgt_ex_pct,
+ "For Dual Mode (qlini_mode=dual), this parameter determines "
+	"the percentage of exchanges/cmds for which the FW will "
+	"allocate resources for target mode.");
+
int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
static int temp_sam_status = SAM_STAT_BUSY;
@@ -102,12 +111,10 @@ enum fcp_resp_rsp_codes {
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
-static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
*cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
-static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
- struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
struct qla_tgt_cmd *cmd);
static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
@@ -120,6 +127,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *imm, int ha_locked);
+static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
+ fc_port_t *fcport, bool local);
+void qlt_unreg_sess(struct fc_port *sess);
/*
* Global Variables
*/
@@ -140,21 +150,6 @@ void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
wmb();
}
-/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
-static struct qla_tgt_sess *qlt_find_sess_by_port_name(
- struct qla_tgt *tgt,
- const uint8_t *port_name)
-{
- struct qla_tgt_sess *sess;
-
- list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
- if (!memcmp(sess->port_name, port_name, WWN_SIZE))
- return sess;
- }
-
- return NULL;
-}
-
 /* Might release hw lock, then reacquire!! */
static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
{
@@ -229,6 +224,105 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}
+
+static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
+ struct atio_from_isp *atio, uint8_t ha_locked)
+{
+ struct qla_tgt_sess_op *u;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "qla_target(%d): dropping unknown ATIO_TYPE7, "
+ "because tgt is being stopped", vha->vp_idx);
+ goto out_term;
+ }
+
+ u = kzalloc(sizeof(*u), GFP_ATOMIC);
+ if (u == NULL) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Alloc of struct unknown_atio (size %zd) failed", sizeof(*u));
+		/* It should be harmless; the next retry should work well */
+ goto out_term;
+ }
+
+ u->vha = vha;
+ memcpy(&u->atio, atio, sizeof(*atio));
+ INIT_LIST_HEAD(&u->cmd_list);
+
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+ schedule_delayed_work(&vha->unknown_atio_work, 1);
+
+out:
+ return;
+
+out_term:
+ qlt_send_term_exchange(vha, NULL, atio, ha_locked, 0);
+ goto out;
+}
+
+static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
+ uint8_t ha_locked)
+{
+ struct qla_tgt_sess_op *u, *t;
+ scsi_qla_host_t *host;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ unsigned long flags;
+ uint8_t queued = 0;
+
+ list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
+ if (u->aborted) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Freeing unknown %s %p, because of Abort",
+ "ATIO_TYPE7", u);
+ qlt_send_term_exchange(vha, NULL, &u->atio,
+ ha_locked, 0);
+ goto abort;
+ }
+
+ host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+ if (host != NULL) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Requeuing unknown ATIO_TYPE7 %p", u);
+ qlt_24xx_atio_pkt(host, &u->atio, ha_locked);
+ } else if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "Freeing unknown %s %p, because tgt is being stopped",
+ "ATIO_TYPE7", u);
+ qlt_send_term_exchange(vha, NULL, &u->atio,
+ ha_locked, 0);
+ } else {
+ ql_dbg(ql_dbg_async, vha, 0xffff,
+ "u %p, vha %p, host %p, sched again..", u,
+ vha, host);
+ if (!queued) {
+ queued = 1;
+ schedule_delayed_work(&vha->unknown_atio_work,
+ 1);
+ }
+ continue;
+ }
+
+abort:
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
+ list_del(&u->cmd_list);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ kfree(u);
+ }
+}
+
+void qlt_unknown_atio_work_fn(struct work_struct *work)
+{
+ struct scsi_qla_host *vha = container_of(to_delayed_work(work),
+ struct scsi_qla_host, unknown_atio_work);
+
+ qlt_try_to_dequeue_unknown_atios(vha, 0);
+}
+
static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
struct atio_from_isp *atio, uint8_t ha_locked)
{
@@ -249,8 +343,14 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
atio->u.isp24.fcp_hdr.d_id[0],
atio->u.isp24.fcp_hdr.d_id[1],
atio->u.isp24.fcp_hdr.d_id[2]);
+
+
+ qlt_queue_unknown_atio(vha, atio, ha_locked);
break;
}
+ if (unlikely(!list_empty(&vha->unknown_atio_list)))
+ qlt_try_to_dequeue_unknown_atios(vha, ha_locked);
+
qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}
@@ -278,6 +378,31 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
break;
}
+ case VP_RPT_ID_IOCB_TYPE:
+ qla24xx_report_id_acquisition(vha,
+ (struct vp_rpt_id_entry_24xx *)atio);
+ break;
+
+ case ABTS_RECV_24XX:
+ {
+ struct abts_recv_from_24xx *entry =
+ (struct abts_recv_from_24xx *)atio;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ "qla_target(%d): Response pkt (ABTS_RECV_24XX) "
+			    "received with unknown vp_index %d\n",
+ vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, (response_t *)atio);
+ break;
+
+ }
+
+ /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
+
default:
ql_dbg(ql_dbg_tgt, vha, 0xe040,
"qla_target(%d): Received unknown ATIO atio "
@@ -395,22 +520,263 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
/*
* All qlt_plogi_ack_t operations are protected by hardware_lock
*/
+static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct imm_ntfy_from_isp *ntfy, int type)
+{
+ struct qla_work_evt *e;
+ e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.nack.fcport = fcport;
+ e->u.nack.type = type;
+ memcpy(e->u.nack.iocb, ntfy, sizeof(struct imm_ntfy_from_isp));
+ return qla2x00_post_work(vha, e);
+}
+
+static
+void qla2x00_async_nack_sp_done(void *s, int res)
+{
+ struct srb *sp = (struct srb *)s;
+ struct scsi_qla_host *vha = sp->vha;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s res %x %8phC type %d\n",
+ sp->name, res, sp->fcport->port_name, sp->type);
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ sp->fcport->chip_reset = vha->hw->chip_reset;
+
+ switch (sp->type) {
+ case SRB_NACK_PLOGI:
+ sp->fcport->login_gen++;
+ sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
+ sp->fcport->logout_on_delete = 1;
+ break;
+
+ case SRB_NACK_PRLI:
+ sp->fcport->fw_login_state = DSC_LS_PRLI_COMP;
+ sp->fcport->deleted = 0;
+
+ if (!sp->fcport->login_succ &&
+ !IS_SW_RESV_ADDR(sp->fcport->d_id)) {
+ sp->fcport->login_succ = 1;
+
+ vha->fcport_count++;
+
+ if (!IS_IIDMA_CAPABLE(vha->hw) ||
+ !vha->hw->flags.gpsc_supported) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_upd_fcport_work(vha, sp->fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__,
+ sp->fcport->port_name,
+ vha->fcport_count);
+
+ qla24xx_post_gpsc_work(vha, sp->fcport);
+ }
+ }
+ break;
+
+ case SRB_NACK_LOGO:
+ sp->fcport->login_gen++;
+ sp->fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ qlt_logo_completion_handler(sp->fcport, MBS_COMMAND_COMPLETE);
+ break;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ sp->free(sp);
+}
+
+int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct imm_ntfy_from_isp *ntfy, int type)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ char *c = NULL;
+
+ fcport->flags |= FCF_ASYNC_SENT;
+ switch (type) {
+ case SRB_NACK_PLOGI:
+ fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ c = "PLOGI";
+ break;
+ case SRB_NACK_PRLI:
+ fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ c = "PRLI";
+ break;
+ case SRB_NACK_LOGO:
+ fcport->fw_login_state = DSC_LS_LOGO_PEND;
+ c = "LOGO";
+ break;
+ }
+
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp)
+ goto done;
+
+ sp->type = type;
+ sp->name = "nack";
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
+
+ sp->u.iocb_cmd.u.nack.ntfy = ntfy;
+
+ sp->done = qla2x00_async_nack_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async-%s %8phC hndl %x %s\n",
+ sp->name, fcport->port_name, sp->handle, c);
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ return rval;
+}
+
+void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+{
+ fc_port_t *t;
+ unsigned long flags;
+
+ switch (e->u.nack.type) {
+ case SRB_NACK_PRLI:
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ t = qlt_create_sess(vha, e->u.nack.fcport, 0);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ if (t) {
+ ql_log(ql_log_info, vha, 0xffff,
+ "%s create sess success %p", __func__, t);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ /* create sess has an extra kref */
+ vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ }
+ break;
+ }
+ qla24xx_async_notify_ack(vha, e->u.nack.fcport,
+	    (struct imm_ntfy_from_isp *)e->u.nack.iocb, e->u.nack.type);
+}
+
+void qla24xx_delete_sess_fn(struct work_struct *work)
+{
+ fc_port_t *fcport = container_of(work, struct fc_port, del_work);
+ struct qla_hw_data *ha = fcport->vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+
+ if (fcport->se_sess) {
+ ha->tgt.tgt_ops->shutdown_sess(fcport);
+ ha->tgt.tgt_ops->put_sess(fcport);
+ } else {
+ qlt_unreg_sess(fcport);
+ }
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
+
+/*
+ * Called from qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ struct fc_port *sess = fcport;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ if (fcport->disc_state == DSC_DELETE_PEND) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ if (!sess->se_sess) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ sess = qlt_create_sess(vha, fcport, false);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ } else {
+ if (fcport->fw_login_state == DSC_LS_PRLI_COMP) {
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: kref_get fail sess %8phC \n",
+ __func__, sess->port_name);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+ "qla_target(%u): %ssession for port %8phC "
+ "(loop ID %d) reappeared\n", vha->vp_idx,
+ sess->local ? "local " : "", sess->port_name, sess->loop_id);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+ "Reappeared sess %p\n", sess);
+
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id,
+ fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+ }
+
+ if (sess && sess->local) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+ "qla_target(%u): local session for "
+ "port %8phC (loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name, sess->loop_id);
+ sess->local = 0;
+ }
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+}
/*
* This is a zero-base ref-counting solution, since hardware_lock
* guarantees that ref_count is not modified concurrently.
* Upon successful return content of iocb is undefined
*/
-static qlt_plogi_ack_t *
+static struct qlt_plogi_ack_t *
qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
struct imm_ntfy_from_isp *iocb)
{
- qlt_plogi_ack_t *pla;
+ struct qlt_plogi_ack_t *pla;
list_for_each_entry(pla, &vha->plogi_ack_list, list) {
if (pla->id.b24 == id->b24) {
qlt_send_term_imm_notif(vha, &pla->iocb, 1);
- pla->iocb = *iocb;
+ memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
return pla;
}
}
@@ -423,50 +789,78 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
return NULL;
}
- pla->iocb = *iocb;
+ memcpy(&pla->iocb, iocb, sizeof(pla->iocb));
pla->id = *id;
list_add_tail(&pla->list, &vha->plogi_ack_list);
return pla;
}
-static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
+void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
+ struct qlt_plogi_ack_t *pla)
{
+ struct imm_ntfy_from_isp *iocb = &pla->iocb;
+ port_id_t port_id;
+ uint16_t loop_id;
+ fc_port_t *fcport = pla->fcport;
+
BUG_ON(!pla->ref_count);
pla->ref_count--;
if (pla->ref_count)
return;
- ql_dbg(ql_dbg_async, vha, 0x5089,
+ ql_dbg(ql_dbg_disc, vha, 0x5089,
"Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
- " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
- pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
- pla->iocb.u.isp24.port_id[0],
- le16_to_cpu(pla->iocb.u.isp24.nport_handle),
- pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
- qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);
+ " exch %#x ox_id %#x\n", iocb->u.isp24.port_name,
+ iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1],
+ iocb->u.isp24.port_id[0],
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.exchange_address, iocb->ox_id);
+
+ port_id.b.domain = iocb->u.isp24.port_id[2];
+ port_id.b.area = iocb->u.isp24.port_id[1];
+ port_id.b.al_pa = iocb->u.isp24.port_id[0];
+ port_id.b.rsvd_1 = 0;
+
+ loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
+ fcport->loop_id = loop_id;
+ fcport->d_id = port_id;
+ qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] == pla)
+ fcport->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
+ if (fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] == pla)
+ fcport->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
+ }
list_del(&pla->list);
kmem_cache_free(qla_tgt_plogi_cachep, pla);
}
-static void
-qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
- struct qla_tgt_sess *sess, qlt_plogi_link_t link)
+void
+qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
+ struct fc_port *sess, enum qlt_plogi_link_t link)
{
+ struct imm_ntfy_from_isp *iocb = &pla->iocb;
/* Inc ref_count first because link might already be pointing at pla */
pla->ref_count++;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
+ "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
+ " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n",
+ sess, link, sess->port_name,
+ iocb->u.isp24.port_name, iocb->u.isp24.port_id[2],
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
+ pla->ref_count, pla, link);
+
if (sess->plogi_link[link])
qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
- "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
- " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
- pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
- pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
- pla->ref_count);
+ if (link == QLT_PLOGI_LINK_SAME_WWN)
+ pla->fcport = sess;
sess->plogi_link[link] = pla;
}
@@ -519,49 +913,45 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
static void qlt_free_session_done(struct work_struct *work)
{
- struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+ struct fc_port *sess = container_of(work, struct fc_port,
free_work);
struct qla_tgt *tgt = sess->tgt;
struct scsi_qla_host *vha = sess->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
bool logout_started = false;
- fc_port_t fcport;
+ struct event_arg ea;
+ scsi_qla_host_t *base_vha;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
" s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa,
sess->logout_on_delete, sess->keep_nport_handle,
sess->send_els_logo);
- BUG_ON(!tgt);
- if (sess->send_els_logo) {
- qlt_port_logo_t logo;
- logo.id = sess->s_id;
- logo.cmd_count = 0;
- qlt_send_first_logo(vha, &logo);
- }
+ if (!IS_SW_RESV_ADDR(sess->d_id)) {
+ if (sess->send_els_logo) {
+ qlt_port_logo_t logo;
- if (sess->logout_on_delete) {
- int rc;
+ logo.id = sess->d_id;
+ logo.cmd_count = 0;
+ qlt_send_first_logo(vha, &logo);
+ }
- memset(&fcport, 0, sizeof(fcport));
- fcport.loop_id = sess->loop_id;
- fcport.d_id = sess->s_id;
- memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
- fcport.vha = vha;
- fcport.tgt_session = sess;
-
- rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
- if (rc != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0xf085,
- "Schedule logo failed sess %p rc %d\n",
- sess, rc);
- else
- logout_started = true;
+ if (sess->logout_on_delete) {
+ int rc;
+
+ rc = qla2x00_post_async_logout_work(vha, sess, NULL);
+ if (rc != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0xf085,
+ "Schedule logo failed sess %p rc %d\n",
+ sess, rc);
+ else
+ logout_started = true;
+ }
}
/*
@@ -583,29 +973,61 @@ static void qlt_free_session_done(struct work_struct *work)
msleep(100);
}
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
- "%s: sess %p logout completed\n",
- __func__, sess);
+ ql_dbg(ql_dbg_disc, vha, 0xf087,
+		    "%s: sess %p logout completed\n", __func__, sess);
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (sess->logo_ack_needed) {
+ sess->logo_ack_needed = 0;
+ qla24xx_async_notify_ack(vha, sess,
+ (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
+ }
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (sess->se_sess) {
+ sess->se_sess = NULL;
+ if (tgt && !IS_SW_RESV_ADDR(sess->d_id))
+ tgt->sess_count--;
+ }
+
+ sess->disc_state = DSC_DELETED;
+ sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ sess->deleted = QLA_SESS_DELETED;
+ sess->login_retry = vha->hw->login_retry_count;
+
+ if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
+ vha->fcport_count--;
+ sess->login_succ = 0;
+ }
+
+ if (sess->chip_reset != sess->vha->hw->chip_reset)
+ qla2x00_clear_loop_id(sess);
+
+ if (sess->conflict) {
+ sess->conflict->login_pause = 0;
+ sess->conflict = NULL;
+ if (!test_bit(UNLOADING, &vha->dpc_flags))
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
{
- qlt_plogi_ack_t *own =
+ struct qlt_plogi_ack_t *own =
sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
- qlt_plogi_ack_t *con =
+ struct qlt_plogi_ack_t *con =
sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
+ struct imm_ntfy_from_isp *iocb;
if (con) {
+ iocb = &con->iocb;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
- "se_sess %p / sess %p port %8phC is gone,"
- " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
- sess->se_sess, sess, sess->port_name,
- own ? "releasing own PLOGI" :
- "no own PLOGI pending",
- own ? own->ref_count : -1,
- con->iocb.u.isp24.port_name, con->ref_count);
+ "se_sess %p / sess %p port %8phC is gone,"
+ " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
+ sess->se_sess, sess, sess->port_name,
+ own ? "releasing own PLOGI" : "no own PLOGI pending",
+ own ? own->ref_count : -1,
+ iocb->u.isp24.port_name, con->ref_count);
qlt_plogi_ack_unref(vha, con);
+ sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
} else {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
"se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
@@ -615,59 +1037,64 @@ static void qlt_free_session_done(struct work_struct *work)
own ? own->ref_count : -1);
}
- if (own)
+ if (own) {
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
qlt_plogi_ack_unref(vha, own);
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] = NULL;
+ }
}
-
- list_del(&sess->sess_list_entry);
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
- "Unregistration of sess %p finished\n", sess);
+ "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
+ sess, sess->port_name, vha->fcport_count);
- kfree(sess);
- /*
- * We need to protect against race, when tgt is freed before or
- * inside wake_up()
- */
- tgt->sess_count--;
- if (tgt->sess_count == 0)
+ if (tgt && (tgt->sess_count == 0))
wake_up_all(&tgt->waitQ);
+
+ if (vha->fcport_count == 0)
+ wake_up_all(&vha->fcport_waitQ);
+
+ base_vha = pci_get_drvdata(ha->pdev);
+ if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
+ return;
+
+ if (!tgt || !tgt->tgt_stop) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_DELETE_DONE;
+ ea.fcport = sess;
+ qla2x00_fcport_event_handler(vha, &ea);
+ }
}
/* ha->tgt.sess_lock supposed to be held on entry */
-static void qlt_release_session(struct kref *kref)
+void qlt_unreg_sess(struct fc_port *sess)
{
- struct qla_tgt_sess *sess =
- container_of(kref, struct qla_tgt_sess, sess_kref);
struct scsi_qla_host *vha = sess->vha;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s sess %p for deletion %8phC\n",
+ __func__, sess, sess->port_name);
+
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- if (!list_empty(&sess->del_list_entry))
- list_del_init(&sess->del_list_entry);
+ qla2x00_mark_device_lost(vha, sess, 1, 1);
+
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ sess->disc_state = DSC_DELETE_PEND;
+ sess->last_rscn_gen = sess->rscn_gen;
+ sess->last_login_gen = sess->login_gen;
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
}
-
-void qlt_put_sess(struct qla_tgt_sess *sess)
-{
- if (!sess)
- return;
-
- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
- kref_put(&sess->sess_kref, qlt_release_session);
-}
-EXPORT_SYMBOL(qlt_put_sess);
+EXPORT_SYMBOL(qlt_unreg_sess);
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
uint16_t loop_id;
int res = 0;
struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
@@ -680,31 +1107,6 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-#if 0 /* FIXME: do we need to choose a session here? */
- if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
- sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
- typeof(*sess), sess_list_entry);
- switch (mcmd) {
- case QLA_TGT_NEXUS_LOSS_SESS:
- mcmd = QLA_TGT_NEXUS_LOSS;
- break;
- case QLA_TGT_ABORT_ALL_SESS:
- mcmd = QLA_TGT_ABORT_ALL;
- break;
- case QLA_TGT_NEXUS_LOSS:
- case QLA_TGT_ABORT_ALL:
- break;
- default:
- ql_dbg(ql_dbg_tgt, vha, 0xe046,
- "qla_target(%d): Not allowed "
- "command %x in %s", vha->vp_idx,
- mcmd, __func__);
- sess = NULL;
- break;
- }
- } else
- sess = NULL;
-#endif
} else {
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
@@ -726,57 +1128,69 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
+static void qla24xx_chk_fcp_state(struct fc_port *sess)
+{
+ if (sess->chip_reset != sess->vha->hw->chip_reset) {
+ sess->logout_on_delete = 0;
+ sess->logo_ack_needed = 0;
+ sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
+ sess->scan_state = 0;
+ }
+}
+
/* ha->tgt.sess_lock supposed to be held on entry */
-static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+void qlt_schedule_sess_for_deletion(struct fc_port *sess,
bool immediate)
{
struct qla_tgt *tgt = sess->tgt;
- uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
- if (sess->deleted) {
- /* Upgrade to unconditional deletion in case it was temporary */
- if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
- list_del(&sess->del_list_entry);
- else
+ if (sess->disc_state == DSC_DELETE_PEND)
+ return;
+
+ if (sess->disc_state == DSC_DELETED) {
+ if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
+ wake_up_all(&tgt->waitQ);
+ if (sess->vha->fcport_count == 0)
+ wake_up_all(&sess->vha->fcport_waitQ);
+
+ if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
+ !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
return;
}
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
- "Scheduling sess %p for deletion\n", sess);
+ sess->disc_state = DSC_DELETE_PEND;
- if (immediate) {
- dev_loss_tmo = 0;
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
- list_add(&sess->del_list_entry, &tgt->del_sess_list);
- } else {
- sess->deleted = QLA_SESS_DELETION_PENDING;
- list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
- }
+ if (sess->deleted == QLA_SESS_DELETED)
+ sess->logout_on_delete = 0;
- sess->expires = jiffies + dev_loss_tmo * HZ;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ qla24xx_chk_fcp_state(sess);
- ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
- "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
- " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
- sess->vha->vp_idx, sess->port_name, sess->loop_id,
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
- dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
- sess->generation);
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
- if (immediate)
- mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
- else
- schedule_delayed_work(&tgt->sess_del_work,
- sess->expires - jiffies);
+ schedule_work(&sess->del_work);
+}
+
+void qlt_schedule_sess_for_deletion_lock(struct fc_port *sess)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = sess->vha->hw;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ qlt_schedule_sess_for_deletion(sess, 1);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
+ scsi_qla_host_t *vha = tgt->vha;
- list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
- qlt_schedule_sess_for_deletion(sess, true);
+ list_for_each_entry(sess, &vha->vp_fcports, list) {
+ if (sess->se_sess)
+ qlt_schedule_sess_for_deletion(sess, 1);
+ }
/* At this point tgt could be already dead */
}
@@ -830,240 +1244,84 @@ out_free_id_list:
return res;
}
-/* ha->tgt.sess_lock supposed to be held on entry */
-static void qlt_undelete_sess(struct qla_tgt_sess *sess)
-{
- BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
-
- list_del_init(&sess->del_list_entry);
- sess->deleted = 0;
-}
-
-static void qlt_del_sess_work_fn(struct delayed_work *work)
-{
- struct qla_tgt *tgt = container_of(work, struct qla_tgt,
- sess_del_work);
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
- unsigned long flags, elapsed;
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- while (!list_empty(&tgt->del_sess_list)) {
- sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
- del_list_entry);
- elapsed = jiffies;
- if (time_after_eq(elapsed, sess->expires)) {
- /* No turning back */
- list_del_init(&sess->del_list_entry);
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
- "Timeout: sess %p about to be deleted\n",
- sess);
- if (sess->se_sess)
- ha->tgt.tgt_ops->shutdown_sess(sess);
- qlt_put_sess(sess);
- } else {
- schedule_delayed_work(&tgt->sess_del_work,
- sess->expires - elapsed);
- break;
- }
- }
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-}
-
/*
* Adds an extra ref to allow to drop hw lock after adding sess to the list.
* Caller must put it.
*/
-static struct qla_tgt_sess *qlt_create_sess(
+static struct fc_port *qlt_create_sess(
struct scsi_qla_host *vha,
fc_port_t *fcport,
bool local)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess = fcport;
unsigned long flags;
- /* Check to avoid double sessions */
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
- sess_list_entry) {
- if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
- "Double sess %p found (s_id %x:%x:%x, "
- "loop_id %d), updating to d_id %x:%x:%x, "
- "loop_id %d", sess, sess->s_id.b.domain,
- sess->s_id.b.al_pa, sess->s_id.b.area,
- sess->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.al_pa, fcport->d_id.b.area,
- fcport->loop_id);
-
- /* Cannot undelete at this point */
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock,
- flags);
- return NULL;
- }
-
- if (sess->deleted)
- qlt_undelete_sess(sess);
-
- if (!sess->se_sess) {
- if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
- &sess->port_name[0], sess) < 0) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return NULL;
- }
- }
-
- kref_get(&sess->sess_kref);
- ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
- (fcport->flags & FCF_CONF_COMP_SUPPORTED));
-
- if (sess->local && !local)
- sess->local = 0;
-
- qlt_do_generation_tick(vha, &sess->generation);
-
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ if (vha->vha_tgt.qla_tgt->tgt_stop)
+ return NULL;
- return sess;
+ if (fcport->se_sess) {
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: kref_get_unless_zero failed for %8phC\n",
+ __func__, sess->port_name);
+ return NULL;
}
- }
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
- "qla_target(%u): session allocation failed, all commands "
- "from port %8phC will be refused", vha->vp_idx,
- fcport->port_name);
-
- return NULL;
+ return fcport;
}
sess->tgt = vha->vha_tgt.qla_tgt;
- sess->vha = vha;
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
sess->local = local;
- kref_init(&sess->sess_kref);
- INIT_LIST_HEAD(&sess->del_list_entry);
- /* Under normal circumstances we want to logout from firmware when
+ /*
+ * Under normal circumstances we want to logout from firmware when
* session eventually ends and release corresponding nport handle.
* In the exception cases (e.g. when new PLOGI is waiting) corresponding
- * code will adjust these flags as necessary. */
+ * code will adjust these flags as necessary.
+ */
sess->logout_on_delete = 1;
sess->keep_nport_handle = 0;
+ sess->logout_completed = 0;
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
- "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
- sess, vha->vha_tgt.qla_tgt);
-
- sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
- BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
- memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
-
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
- vha->vha_tgt.qla_tgt->sess_count++;
- qlt_do_generation_tick(vha, &sess->generation);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
- "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
- "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
- vha->vp_idx, local ? "local " : "", fcport->port_name,
- fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
- sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
-
- /*
- * Determine if this fc_port->port_name is allowed to access
- * target mode using explict NodeACLs+MappedLUNs, or using
- * TPG demo mode. If this is successful a target mode FC nexus
- * is created.
- */
if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
&fcport->port_name[0], sess) < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "(%d) %8phC check_initiator_node_acl failed\n",
+ vha->vp_idx, fcport->port_name);
return NULL;
} else {
+ kref_init(&fcport->sess_kref);
/*
- * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
- * access across ->tgt.sess_lock reaquire.
+ * Take an extra reference to ->sess_kref here to handle
+		 * fc_port access across ->tgt.sess_lock reacquire.
*/
- kref_get(&sess->sess_kref);
- }
-
- return sess;
-}
-
-/*
- * Called from qla2x00_reg_remote_port()
- */
-void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess;
- unsigned long flags;
-
- if (!vha->hw->tgt.tgt_ops)
- return;
-
- if (!tgt || (fcport->port_type != FCT_INITIATOR))
- return;
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: kref_get_unless_zero failed for %8phC\n",
+ __func__, sess->port_name);
+ return NULL;
+ }
- if (qla_ini_mode_enabled(vha))
- return;
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (!IS_SW_RESV_ADDR(sess->d_id))
+ vha->vha_tgt.qla_tgt->sess_count++;
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- if (tgt->tgt_stop) {
+ qlt_do_generation_tick(vha, &sess->generation);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return;
}
- sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
- if (!sess) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
-
- mutex_lock(&vha->vha_tgt.tgt_mutex);
- sess = qlt_create_sess(vha, fcport, false);
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- /* Point of no return */
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- return;
- } else {
- kref_get(&sess->sess_kref);
-
- if (sess->deleted) {
- qlt_undelete_sess(sess);
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
- "qla_target(%u): %ssession for port %8phC "
- "(loop ID %d) reappeared\n", vha->vp_idx,
- sess->local ? "local " : "", sess->port_name,
- sess->loop_id);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ "Adding sess %p se_sess %p to tgt %p sess_count %d\n",
+ sess, sess->se_sess, vha->vha_tgt.qla_tgt,
+ vha->vha_tgt.qla_tgt->sess_count);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
- "Reappeared sess %p\n", sess);
- }
- ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
- (fcport->flags & FCF_CONF_COMP_SUPPORTED));
- }
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+ "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
+ "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name,
+ fcport->loop_id, sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
- if (sess && sess->local) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
- "qla_target(%u): local session for "
- "port %8phC (loop ID %d) became global\n", vha->vp_idx,
- fcport->port_name, sess->loop_id);
- sess->local = 0;
- }
- qlt_put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return sess;
}
/*
@@ -1074,7 +1332,7 @@ void
qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess = fcport;
unsigned long flags;
if (!vha->hw->tgt.tgt_ops)
@@ -1088,8 +1346,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}
- sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
- if (!sess) {
+ if (!sess->se_sess) {
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}
@@ -1120,12 +1377,12 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
* We need to protect against race, when tgt is freed before or
* inside wake_up()
*/
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
- "tgt %p, empty(sess_list)=%d sess_count=%d\n",
- tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+ "tgt %p, sess_count=%d\n",
+ tgt, tgt->sess_count);
res = (tgt->sess_count == 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return res;
}
@@ -1173,8 +1430,6 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
mutex_unlock(&vha->vha_tgt.tgt_mutex);
mutex_unlock(&qla_tgt_mutex);
- flush_delayed_work(&tgt->sess_del_work);
-
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
"Waiting for sess works (tgt %p)", tgt);
spin_lock_irqsave(&tgt->sess_work_lock, flags);
@@ -1186,14 +1441,13 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
- "Waiting for tgt %p: list_empty(sess_list)=%d "
- "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
- tgt->sess_count);
+ "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count);
wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
/* Big hammer */
- if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+ if (!ha->flags.host_shutting_down &&
+ (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
qlt_disable_vha(vha);
/* Wait for sessions to clear out (just in case) */
@@ -1320,6 +1574,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
+ nack->u.isp24.handle = QLA_TGT_SKIP_HANDLE;
nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
nack->u.isp24.flags = ntfy->u.isp24.flags &
@@ -1489,6 +1744,14 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
}
}
+ list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+ if (tag == op->atio.u.isp24.exchange_addr) {
+ op->aborted = true;
+ spin_unlock(&vha->cmd_list_lock);
+ return 1;
+ }
+ }
+
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
if (tag == cmd->atio.u.isp24.exchange_addr) {
cmd->aborted = 1;
@@ -1525,6 +1788,18 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
if (op_key == key && op_lun == lun)
op->aborted = true;
}
+
+ list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+ uint32_t op_key;
+ u64 op_lun;
+
+ op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ op_lun = scsilun_to_int(
+ (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+ if (op_key == key && op_lun == lun)
+ op->aborted = true;
+ }
+
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key;
uint32_t cmd_lun;
@@ -1540,7 +1815,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
- struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+ struct abts_recv_from_24xx *abts, struct fc_port *sess)
{
struct qla_hw_data *ha = vha->hw;
struct se_session *se_sess = sess->se_sess;
@@ -1549,8 +1824,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
u32 lun = 0;
int rc;
bool found_lun = false;
+ unsigned long flags;
- spin_lock(&se_sess->sess_cmd_lock);
+ spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
struct qla_tgt_cmd *cmd =
container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
@@ -1560,7 +1836,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
break;
}
}
- spin_unlock(&se_sess->sess_cmd_lock);
+ spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
/* cmd not in LIO lists, look in qla list */
if (!found_lun) {
@@ -1592,8 +1868,9 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->tmr_func = QLA_TGT_ABTS;
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -1613,7 +1890,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
uint32_t tag = abts->exchange_addr_to_abort;
uint8_t s_id[3];
int rc;
@@ -1665,7 +1942,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
return;
}
@@ -1763,10 +2040,23 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
return;
}
- if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
- qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
- 0, 0, 0, 0, 0, 0);
- else {
+ if (mcmd->flags == QLA24XX_MGMT_SEND_NACK) {
+ if (mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+ ELS_LOGO ||
+ mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+ ELS_PRLO ||
+ mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+ ELS_TPRLO) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "TM response logo %phC status %#x state %#x",
+ mcmd->sess->port_name, mcmd->fc_tm_rsp,
+ mcmd->flags);
+ qlt_schedule_sess_for_deletion_lock(mcmd->sess);
+ } else {
+ qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+ 0, 0, 0, 0, 0, 0);
+ }
+ } else {
if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
@@ -2182,95 +2472,6 @@ static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
cmd->conf_compl_supported;
}
-#ifdef CONFIG_QLA_TGT_DEBUG_SRR
-/*
- * Original taken from the XFS code
- */
-static unsigned long qlt_srr_random(void)
-{
- static int Inited;
- static unsigned long RandomValue;
- static DEFINE_SPINLOCK(lock);
- /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
- register long rv;
- register long lo;
- register long hi;
- unsigned long flags;
-
- spin_lock_irqsave(&lock, flags);
- if (!Inited) {
- RandomValue = jiffies;
- Inited = 1;
- }
- rv = RandomValue;
- hi = rv / 127773;
- lo = rv % 127773;
- rv = 16807 * lo - 2836 * hi;
- if (rv <= 0)
- rv += 2147483647;
- RandomValue = rv;
- spin_unlock_irqrestore(&lock, flags);
- return rv;
-}
-
-static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
-{
-#if 0 /* This is not a real status packets lost, so it won't lead to SRR */
- if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
- == 50) {
- *xmit_type &= ~QLA_TGT_XMIT_STATUS;
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
- "Dropping cmd %p (tag %d) status", cmd, se_cmd->tag);
- }
-#endif
- /*
- * It's currently not possible to simulate SRRs for FCP_WRITE without
- * a physical link layer failure, so don't even try here..
- */
- if (cmd->dma_data_direction != DMA_FROM_DEVICE)
- return;
-
- if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
- ((qlt_srr_random() % 100) == 20)) {
- int i, leave = 0;
- unsigned int tot_len = 0;
-
- while (leave == 0)
- leave = qlt_srr_random() % cmd->sg_cnt;
-
- for (i = 0; i < leave; i++)
- tot_len += cmd->sg[i].length;
-
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
- "Cutting cmd %p (tag %d) buffer"
- " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
- " cmd->sg_cnt %d)", cmd, se_cmd->tag, tot_len, leave,
- cmd->bufflen, cmd->sg_cnt);
-
- cmd->bufflen = tot_len;
- cmd->sg_cnt = leave;
- }
-
- if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
- unsigned int offset = qlt_srr_random() % cmd->bufflen;
-
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
- "Cutting cmd %p (tag %d) buffer head "
- "to offset %d (cmd->bufflen %d)", cmd, se_cmd->tag, offset,
- cmd->bufflen);
- if (offset == 0)
- *xmit_type &= ~QLA_TGT_XMIT_DATA;
- else if (qlt_set_data_offset(cmd, offset)) {
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
- "qlt_set_data_offset() failed (tag %d)", se_cmd->tag);
- }
- }
-}
-#else
-static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
-{}
-#endif
-
static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
struct qla_tgt_prm *prm)
{
@@ -2288,7 +2489,7 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
int i;
if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
- if (prm->cmd->se_cmd.scsi_status != 0) {
+ if ((prm->rq_result & SS_SCSI_STATUS_BYTE) != 0) {
ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
"Skipping EXPLICIT_CONFORM and "
"CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
@@ -2672,7 +2873,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
int res;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (cmd->sess && cmd->sess->deleted) {
cmd->state = QLA_TGT_STATE_PROCESSED;
if (cmd->sess->logout_completed)
/* no need to terminate. FW already freed exchange. */
@@ -2685,7 +2886,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
spin_unlock_irqrestore(&ha->hardware_lock, flags);
memset(&prm, 0, sizeof(prm));
- qlt_check_srr_debug(cmd, &xmit_type);
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
"is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
@@ -2848,7 +3048,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
- (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
+ (cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -3296,7 +3496,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
return EIO;
}
cmd->aborted = 1;
- cmd->cmd_flags |= BIT_6;
+ cmd->trc_flags |= TRC_ABORT;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
@@ -3306,7 +3506,7 @@ EXPORT_SYMBOL(qlt_abort_cmd);
void qlt_free_cmd(struct qla_tgt_cmd *cmd)
{
- struct qla_tgt_sess *sess = cmd->sess;
+ struct fc_port *sess = cmd->sess;
ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
"%s: se_cmd[%p] ox_id %04x\n",
@@ -3335,90 +3535,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
}
EXPORT_SYMBOL(qlt_free_cmd);
-/* ha->hardware_lock supposed to be held on entry */
-static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
- struct qla_tgt_cmd *cmd, void *ctio)
-{
- struct qla_tgt_srr_ctio *sc;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_srr_imm *imm;
-
- tgt->ctio_srr_id++;
- cmd->cmd_flags |= BIT_15;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
- "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
-
- if (!ctio) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
- "qla_target(%d): SRR CTIO, but ctio is NULL\n",
- vha->vp_idx);
- return -EINVAL;
- }
-
- sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
- if (sc != NULL) {
- sc->cmd = cmd;
- /* IRQ is already OFF */
- spin_lock(&tgt->srr_lock);
- sc->srr_id = tgt->ctio_srr_id;
- list_add_tail(&sc->srr_list_entry,
- &tgt->srr_ctio_list);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
- "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
- if (tgt->imm_srr_id == tgt->ctio_srr_id) {
- int found = 0;
- list_for_each_entry(imm, &tgt->srr_imm_list,
- srr_list_entry) {
- if (imm->srr_id == sc->srr_id) {
- found = 1;
- break;
- }
- }
- if (found) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
- "Scheduling srr work\n");
- schedule_work(&tgt->srr_work);
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
- "qla_target(%d): imm_srr_id "
- "== ctio_srr_id (%d), but there is no "
- "corresponding SRR IMM, deleting CTIO "
- "SRR %p\n", vha->vp_idx,
- tgt->ctio_srr_id, sc);
- list_del(&sc->srr_list_entry);
- spin_unlock(&tgt->srr_lock);
-
- kfree(sc);
- return -EINVAL;
- }
- }
- spin_unlock(&tgt->srr_lock);
- } else {
- struct qla_tgt_srr_imm *ti;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
- "qla_target(%d): Unable to allocate SRR CTIO entry\n",
- vha->vp_idx);
- spin_lock(&tgt->srr_lock);
- list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
- srr_list_entry) {
- if (imm->srr_id == tgt->ctio_srr_id) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
- "IMM SRR %p deleted (id %d)\n",
- imm, imm->srr_id);
- list_del(&imm->srr_list_entry);
- qlt_reject_free_srr_imm(vha, imm, 1);
- }
- }
- spin_unlock(&tgt->srr_lock);
-
- return -ENOMEM;
- }
-
- return 0;
-}
-
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
@@ -3527,7 +3643,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
dump_stack();
}
- cmd->cmd_flags |= BIT_17;
+ cmd->trc_flags |= TRC_FLUSH;
ha->tgt.tgt_ops->free_cmd(cmd);
}
@@ -3632,20 +3748,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*/
cmd->sess->logout_on_delete = 0;
cmd->sess->send_els_logo = 1;
- qlt_schedule_sess_for_deletion(cmd->sess, true);
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, cmd->sess->port_name);
+
+ qlt_schedule_sess_for_deletion_lock(cmd->sess);
}
break;
}
- case CTIO_SRR_RECEIVED:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
- "qla_target(%d): CTIO with SRR_RECEIVED"
- " status %x received (state %x, se_cmd %p)\n",
- vha->vp_idx, status, cmd->state, se_cmd);
- if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
- break;
- else
- return;
-
case CTIO_DIF_ERROR: {
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
@@ -3693,7 +3803,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
*/
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
(!cmd->aborted)) {
- cmd->cmd_flags |= BIT_13;
+ cmd->trc_flags |= TRC_CTIO_ERR;
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
}
@@ -3701,7 +3811,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
- cmd->cmd_flags |= BIT_12;
+ cmd->trc_flags |= TRC_CTIO_DONE;
} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
cmd->state = QLA_TGT_STATE_DATA_IN;
@@ -3711,11 +3821,11 @@ skip_term:
ha->tgt.tgt_ops->handle_data(cmd);
return;
} else if (cmd->aborted) {
- cmd->cmd_flags |= BIT_18;
+ cmd->trc_flags |= TRC_CTIO_ABORTED;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
"Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
} else {
- cmd->cmd_flags |= BIT_19;
+ cmd->trc_flags |= TRC_CTIO_STRANGE;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
"qla_target(%d): A command in state (%d) should "
"not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3762,7 +3872,7 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
return fcp_task_attr;
}
-static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *,
uint8_t *);
/*
* Process context for I/O path into tcm_qla2xxx code
@@ -3772,7 +3882,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
scsi_qla_host_t *vha = cmd->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess = cmd->sess;
+ struct fc_port *sess = cmd->sess;
struct atio_from_isp *atio = &cmd->atio;
unsigned char *cdb;
unsigned long flags;
@@ -3780,7 +3890,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
int ret, fcp_task_attr, data_dir, bidi = 0;
cmd->cmd_in_wq = 0;
- cmd->cmd_flags |= BIT_1;
+ cmd->trc_flags |= TRC_DO_WORK;
if (tgt->tgt_stop)
goto out_term;
@@ -3822,7 +3932,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
* Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
*/
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
@@ -3832,7 +3942,7 @@ out_term:
* cmd has not sent to target yet, so pass NULL as the second
* argument to qlt_send_term_exchange() and free the memory here.
*/
- cmd->cmd_flags |= BIT_2;
+ cmd->trc_flags |= TRC_DO_WORK_ERR;
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
@@ -3841,7 +3951,7 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -3859,7 +3969,7 @@ static void qlt_do_work(struct work_struct *work)
}
static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
- struct qla_tgt_sess *sess,
+ struct fc_port *sess,
struct atio_from_isp *atio)
{
struct se_session *se_sess = sess->se_sess;
@@ -3883,7 +3993,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
cmd->loop_id = sess->loop_id;
cmd->conf_compl_supported = sess->conf_compl_supported;
- cmd->cmd_flags = 0;
+ cmd->trc_flags = 0;
cmd->jiffies_at_alloc = get_jiffies_64();
cmd->reset_count = vha->hw->chip_reset;
@@ -3900,7 +4010,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
struct qla_tgt_sess_op, work);
scsi_qla_host_t *vha = op->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct qla_tgt_cmd *cmd;
unsigned long flags;
uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
@@ -3941,11 +4051,12 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
if (!cmd) {
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
return;
}
+
/*
* __qlt_do_work() will call qlt_put_sess() to release
* the extra reference taken above by qlt_make_local_sess()
@@ -3953,13 +4064,11 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
__qlt_do_work(cmd);
kfree(op);
return;
-
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
-
}
/* ha->hardware_lock supposed to be held on entry */
@@ -3968,8 +4077,9 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct qla_tgt_cmd *cmd;
+ unsigned long flags;
if (unlikely(tgt->tgt_stop)) {
ql_dbg(ql_dbg_io, vha, 0x3061,
@@ -3998,7 +4108,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/* Another WWN used to have our s_id. Our PLOGI scheduled its
* session deletion, but it's still in sess_del_work wq */
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
ql_dbg(ql_dbg_io, vha, 0x3061,
"New command while old session %p is being deleted\n",
sess);
@@ -4008,24 +4118,32 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
- kref_get(&sess->sess_kref);
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xffff,
+ "%s: kref_get fail, %8phC oxid %x \n",
+ __func__, sess->port_name,
+ be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+ return -EFAULT;
+ }
cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3062,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
- qlt_put_sess(sess);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return -ENOMEM;
}
cmd->cmd_in_wq = 1;
- cmd->cmd_flags |= BIT_0;
+ cmd->trc_flags |= TRC_NEW_CMD;
cmd->se_cmd.cpuid = ha->msix_count ?
ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
- spin_lock(&vha->cmd_list_lock);
+ spin_lock_irqsave(&vha->cmd_list_lock, flags);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
- spin_unlock(&vha->cmd_list_lock);
+ spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
INIT_WORK(&cmd->work, qlt_do_work);
if (ha->msix_count) {
@@ -4043,7 +4161,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
}
/* ha->hardware_lock supposed to be held on entry */
-static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
int fn, void *iocb, int flags)
{
struct scsi_qla_host *vha = sess->vha;
@@ -4051,7 +4169,6 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
struct qla_tgt_mgmt_cmd *mcmd;
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
int res;
- uint8_t tmr_func;
mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
if (!mcmd) {
@@ -4073,74 +4190,12 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
mcmd->reset_count = vha->hw->chip_reset;
switch (fn) {
- case QLA_TGT_CLEAR_ACA:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
- "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
- tmr_func = TMR_CLEAR_ACA;
- break;
-
- case QLA_TGT_TARGET_RESET:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
- "qla_target(%d): TARGET_RESET received\n",
- sess->vha->vp_idx);
- tmr_func = TMR_TARGET_WARM_RESET;
- break;
-
case QLA_TGT_LUN_RESET:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
- "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
- tmr_func = TMR_LUN_RESET;
- abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
- break;
-
- case QLA_TGT_CLEAR_TS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
- "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
- tmr_func = TMR_CLEAR_TASK_SET;
- break;
-
- case QLA_TGT_ABORT_TS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
- "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
- tmr_func = TMR_ABORT_TASK_SET;
- break;
-#if 0
- case QLA_TGT_ABORT_ALL:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
- "qla_target(%d): Doing ABORT_ALL_TASKS\n",
- sess->vha->vp_idx);
- tmr_func = 0;
- break;
-
- case QLA_TGT_ABORT_ALL_SESS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
- "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
- sess->vha->vp_idx);
- tmr_func = 0;
- break;
-
- case QLA_TGT_NEXUS_LOSS_SESS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
- "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
- sess->vha->vp_idx);
- tmr_func = 0;
- break;
-
- case QLA_TGT_NEXUS_LOSS:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
- "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
- tmr_func = 0;
- break;
-#endif
- default:
- ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
- "qla_target(%d): Unknown task mgmt fn 0x%x\n",
- sess->vha->vp_idx, fn);
- mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
- return -ENOSYS;
+ abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
+ break;
}
- res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+ res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, mcmd->tmr_func, 0);
if (res != 0) {
ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
"qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
@@ -4158,7 +4213,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
uint32_t lun, unpacked_lun;
int fn;
unsigned long flags;
@@ -4183,7 +4238,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
sizeof(struct atio_from_isp));
}
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+ if (sess->deleted)
return -EFAULT;
return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
@@ -4191,7 +4246,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
/* ha->hardware_lock supposed to be held on entry */
static int __qlt_abort_task(struct scsi_qla_host *vha,
- struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+ struct imm_ntfy_from_isp *iocb, struct fc_port *sess)
{
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
struct qla_hw_data *ha = vha->hw;
@@ -4215,8 +4270,9 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
lun = a->u.isp24.fcp_cmnd.lun;
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
mcmd->reset_count = vha->hw->chip_reset;
+ mcmd->tmr_func = QLA_TGT_2G_ABORT_TASK;
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, mcmd->tmr_func,
le16_to_cpu(iocb->u.isp2x.seq_id));
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
@@ -4234,7 +4290,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *iocb)
{
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
int loop_id;
unsigned long flags;
@@ -4257,22 +4313,20 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
{
- if (fcport->tgt_session) {
- if (rc != MBS_COMMAND_COMPLETE) {
- ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
- "%s: se_sess %p / sess %p from"
- " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
- " LOGO failed: %#x\n",
- __func__,
- fcport->tgt_session->se_sess,
- fcport->tgt_session,
- fcport->port_name, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, rc);
- }
-
- fcport->tgt_session->logout_completed = 1;
+ if (rc != MBS_COMMAND_COMPLETE) {
+ ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+ "%s: se_sess %p / sess %p from"
+ " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+ " LOGO failed: %#x\n",
+ __func__,
+ fcport->se_sess,
+ fcport,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, rc);
}
+
+ fcport->logout_completed = 1;
}
/*
@@ -4282,16 +4336,16 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
* deletion. Returns existing session with matching wwn if present.
* Null otherwise.
*/
-static struct qla_tgt_sess *
-qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
- port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
+struct fc_port *
+qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
+ port_id_t port_id, uint16_t loop_id, struct fc_port **conflict_sess)
{
- struct qla_tgt_sess *sess = NULL, *other_sess;
+ struct fc_port *sess = NULL, *other_sess;
uint64_t other_wwn;
*conflict_sess = NULL;
- list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+ list_for_each_entry(other_sess, &vha->vp_fcports, list) {
other_wwn = wwn_to_u64(other_sess->port_name);
@@ -4302,9 +4356,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
}
/* find other sess with nport_id collision */
- if (port_id.b24 == other_sess->s_id.b24) {
+ if (port_id.b24 == other_sess->d_id.b24) {
if (loop_id != other_sess->loop_id) {
- ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4320,6 +4374,11 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
* Another wwn used to have our s_id/loop_id
* kill the session, but don't free the loop_id
*/
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ "Invalidating sess %p loop_id %d wwn %llx.\n",
+ other_sess, other_sess->loop_id, other_wwn);
+
+
other_sess->keep_nport_handle = 1;
*conflict_sess = other_sess;
qlt_schedule_sess_for_deletion(other_sess,
@@ -4329,8 +4388,9 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
}
/* find other sess with nport handle collision */
- if (loop_id == other_sess->loop_id) {
- ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+ if ((loop_id == other_sess->loop_id) &&
+ (loop_id != FC_NO_LOOP_ID)) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d,
"Invalidating sess %p loop_id %d wwn %llx.\n",
other_sess, other_sess->loop_id, other_wwn);
@@ -4358,11 +4418,21 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
spin_lock(&vha->cmd_list_lock);
list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+
if (op_key == key) {
op->aborted = true;
count++;
}
}
+
+ list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
+ uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+ if (op_key == key) {
+ op->aborted = true;
+ count++;
+ }
+ }
+
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
if (cmd_key == key) {
@@ -4383,13 +4453,13 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
+ struct fc_port *sess = NULL, *conflict_sess = NULL;
uint64_t wwn;
port_id_t port_id;
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
- qlt_plogi_ack_t *pla;
+ struct qlt_plogi_ack_t *pla;
unsigned long flags;
wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4401,9 +4471,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
- "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
- vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+ ql_dbg(ql_dbg_disc, vha, 0xf026,
+ "qla_target(%d): Port ID: %02x:%02x:%02x ELS opcode: 0x%02x lid %d %8phC\n",
+ vha->vp_idx, iocb->u.isp24.port_id[2],
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0],
+ iocb->u.isp24.status_subcode, loop_id,
+ iocb->u.isp24.port_name);
/* res = 1 means ack at the end of thread
* res = 0 means ack async/later.
@@ -4416,12 +4489,12 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (wwn) {
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(tgt, wwn,
- port_id, loop_id, &conflict_sess);
+ sess = qlt_find_sess_invalidate_other(vha, wwn,
+ port_id, loop_id, &conflict_sess);
spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
- if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
+ if (IS_SW_RESV_ADDR(port_id)) {
res = 1;
break;
}
@@ -4429,42 +4502,66 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
if (!pla) {
qlt_send_term_imm_notif(vha, iocb, 1);
-
- res = 0;
break;
}
res = 0;
- if (conflict_sess)
+ if (conflict_sess) {
+ conflict_sess->login_gen++;
qlt_plogi_ack_link(vha, pla, conflict_sess,
- QLT_PLOGI_LINK_CONFLICT);
+ QLT_PLOGI_LINK_CONFLICT);
+ }
- if (!sess)
+ if (!sess) {
+ pla->ref_count++;
+ qla24xx_post_newsess_work(vha, &port_id,
+ iocb->u.isp24.port_name, pla);
+ res = 0;
break;
+ }
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
- /*
- * Under normal circumstances we want to release nport handle
- * during LOGO process to avoid nport handle leaks inside FW.
- * The exception is when LOGO is done while another PLOGI with
- * the same nport handle is waiting as might be the case here.
- * Note: there is always a possibily of a race where session
- * deletion has already started for other reasons (e.g. ACL
- * removal) and now PLOGI arrives:
- * 1. if PLOGI arrived in FW after nport handle has been freed,
- * FW must have assigned this PLOGI a new/same handle and we
- * can proceed ACK'ing it as usual when session deletion
- * completes.
- * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
- * bit reached it, the handle has now been released. We'll
- * get an error when we ACK this PLOGI. Nothing will be sent
- * back to initiator. Initiator should eventually retry
- * PLOGI and situation will correct itself.
- */
- sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
- (sess->s_id.b24 == port_id.b24));
- qlt_schedule_sess_for_deletion(sess, true);
+ sess->fw_login_state = DSC_LS_PLOGI_PEND;
+ sess->d_id = port_id;
+ sess->login_gen++;
+
+ switch (sess->disc_state) {
+ case DSC_DELETED:
+ qlt_plogi_ack_unref(vha, pla);
+ break;
+
+ default:
+ /*
+ * Under normal circumstances we want to release nport handle
+ * during LOGO process to avoid nport handle leaks inside FW.
+ * The exception is when LOGO is done while another PLOGI with
+ * the same nport handle is waiting as might be the case here.
+ * Note: there is always a possibility of a race where session
+ * deletion has already started for other reasons (e.g. ACL
+ * removal) and now PLOGI arrives:
+ * 1. if PLOGI arrived in FW after nport handle has been freed,
+ * FW must have assigned this PLOGI a new/same handle and we
+ * can proceed ACK'ing it as usual when session deletion
+ * completes.
+ * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+ * bit reached it, the handle has now been released. We'll
+ * get an error when we ACK this PLOGI. Nothing will be sent
+ * back to initiator. Initiator should eventually retry
+ * PLOGI and situation will correct itself.
+ */
+ sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+ (sess->d_id.b24 == port_id.b24));
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__, sess->port_name);
+
+
+ qlt_schedule_sess_for_deletion_lock(sess);
+ break;
+ }
+
break;
case ELS_PRLI:
@@ -4472,8 +4569,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (wwn) {
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
- sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
- loop_id, &conflict_sess);
+ sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
+ loop_id, &conflict_sess);
spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
}
@@ -4487,7 +4584,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (sess != NULL) {
- if (sess->deleted) {
+ if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4511,11 +4608,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess->local = 0;
sess->loop_id = loop_id;
- sess->s_id = port_id;
+ sess->d_id = port_id;
+ sess->fw_login_state = DSC_LS_PRLI_PEND;
if (wd3_lo & BIT_7)
sess->conf_compl_supported = 1;
+ if ((wd3_lo & BIT_4) == 0)
+ sess->port_type = FCT_INITIATOR;
+ else
+ sess->port_type = FCT_TARGET;
}
res = 1; /* send notify ack */
@@ -4525,15 +4627,61 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else {
- /* todo: else - create sess here. */
- res = 1; /* send notify ack */
- }
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
+ qla24xx_post_nack_work(vha, sess, iocb,
+ SRB_NACK_PRLI);
+ res = 0;
+ }
+ }
break;
+
+ case ELS_TPRLO:
+ if (le16_to_cpu(iocb->u.isp24.flags) &
+ NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
+ loop_id = 0xFFFF;
+ qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
+ res = 1;
+ break;
+ }
+ /* drop through */
case ELS_LOGO:
case ELS_PRLO:
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ if (sess) {
+ sess->login_gen++;
+ sess->fw_login_state = DSC_LS_LOGO_PEND;
+ sess->logo_ack_needed = 1;
+ memcpy(sess->iocb, iocb, IOCB_SIZE);
+ }
+
res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s: logo %llx res %d sess %p ",
+ __func__, wwn, res, sess);
+ if (res == 0) {
+ /*
+ * cmd went to upper layer, look for qlt_xmit_tm_rsp()
+ * for LOGO_ACK & sess delete
+ */
+ BUG_ON(!sess);
+ res = 0;
+ } else {
+ /* cmd did not go to upper layer. */
+ if (sess) {
+ qlt_schedule_sess_for_deletion_lock(sess);
+ res = 0;
+ }
+ /* else logo will be ack */
+ }
break;
case ELS_PDISC:
case ELS_ADISC:
@@ -4544,6 +4692,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
0, 0, 0, 0, 0, 0);
tgt->link_reinit_iocb_pending = 0;
}
+
+ sess = qla2x00_find_fcport_by_wwpn(vha,
+ iocb->u.isp24.port_name, 1);
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "sess %p lid %d|%d DS %d LS %d\n",
+ sess, sess->loop_id, loop_id,
+ sess->disc_state, sess->fw_login_state);
+ }
+
res = 1; /* send notify ack */
break;
}
@@ -4560,451 +4718,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
return res;
}
-static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
-{
-#if 1
- /*
- * FIXME: Reject non zero SRR relative offset until we can test
- * this code properly.
- */
- pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
- return -1;
-#else
- struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
- size_t first_offset = 0, rem_offset = offset, tmp = 0;
- int i, sg_srr_cnt, bufflen = 0;
-
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
- "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
- "cmd->sg_cnt: %u, direction: %d\n",
- cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
-
- if (!cmd->sg || !cmd->sg_cnt) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
- "Missing cmd->sg or zero cmd->sg_cnt in"
- " qla_tgt_set_data_offset\n");
- return -EINVAL;
- }
- /*
- * Walk the current cmd->sg list until we locate the new sg_srr_start
- */
- for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
- "sg[%d]: %p page: %p, length: %d, offset: %d\n",
- i, sg, sg_page(sg), sg->length, sg->offset);
-
- if ((sg->length + tmp) > offset) {
- first_offset = rem_offset;
- sg_srr_start = sg;
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
- "Found matching sg[%d], using %p as sg_srr_start, "
- "and using first_offset: %zu\n", i, sg,
- first_offset);
- break;
- }
- tmp += sg->length;
- rem_offset -= sg->length;
- }
-
- if (!sg_srr_start) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
- "Unable to locate sg_srr_start for offset: %u\n", offset);
- return -EINVAL;
- }
- sg_srr_cnt = (cmd->sg_cnt - i);
-
- sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
- if (!sg_srr) {
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
- "Unable to allocate sgp\n");
- return -ENOMEM;
- }
- sg_init_table(sg_srr, sg_srr_cnt);
- sgp = &sg_srr[0];
- /*
- * Walk the remaining list for sg_srr_start, mapping to the newly
- * allocated sg_srr taking first_offset into account.
- */
- for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
- if (first_offset) {
- sg_set_page(sgp, sg_page(sg),
- (sg->length - first_offset), first_offset);
- first_offset = 0;
- } else {
- sg_set_page(sgp, sg_page(sg), sg->length, 0);
- }
- bufflen += sgp->length;
-
- sgp = sg_next(sgp);
- if (!sgp)
- break;
- }
-
- cmd->sg = sg_srr;
- cmd->sg_cnt = sg_srr_cnt;
- cmd->bufflen = bufflen;
- cmd->offset += offset;
- cmd->free_sg = 1;
-
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
- cmd->sg_cnt);
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
- cmd->bufflen);
- ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
- cmd->offset);
-
- if (cmd->sg_cnt < 0)
- BUG();
-
- if (cmd->bufflen < 0)
- BUG();
-
- return 0;
-#endif
-}
-
-static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
- uint32_t srr_rel_offs, int *xmit_type)
-{
- int res = 0, rel_offs;
-
- rel_offs = srr_rel_offs - cmd->offset;
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
- srr_rel_offs, rel_offs);
-
- *xmit_type = QLA_TGT_XMIT_ALL;
-
- if (rel_offs < 0) {
- ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
- "qla_target(%d): SRR rel_offs (%d) < 0",
- cmd->vha->vp_idx, rel_offs);
- res = -1;
- } else if (rel_offs == cmd->bufflen)
- *xmit_type = QLA_TGT_XMIT_STATUS;
- else if (rel_offs > 0)
- res = qlt_set_data_offset(cmd, rel_offs);
-
- return res;
-}
-
-/* No locks, thread context */
-static void qlt_handle_srr(struct scsi_qla_host *vha,
- struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
-{
- struct imm_ntfy_from_isp *ntfy =
- (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
- struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_cmd *cmd = sctio->cmd;
- struct se_cmd *se_cmd = &cmd->se_cmd;
- unsigned long flags;
- int xmit_type = 0, resp = 0;
- uint32_t offset;
- uint16_t srr_ui;
-
- offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
- srr_ui = ntfy->u.isp24.srr_ui;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
- cmd, srr_ui);
-
- switch (srr_ui) {
- case SRR_IU_STATUS:
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy,
- 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- xmit_type = QLA_TGT_XMIT_STATUS;
- resp = 1;
- break;
- case SRR_IU_DATA_IN:
- if (!cmd->sg || !cmd->sg_cnt) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
- "Unable to process SRR_IU_DATA_IN due to"
- " missing cmd->sg, state: %d\n", cmd->state);
- dump_stack();
- goto out_reject;
- }
- if (se_cmd->scsi_status != 0) {
- ql_dbg(ql_dbg_tgt, vha, 0xe02a,
- "Rejecting SRR_IU_DATA_IN with non GOOD "
- "scsi_status\n");
- goto out_reject;
- }
- cmd->bufflen = se_cmd->data_length;
-
- if (qlt_has_data(cmd)) {
- if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
- goto out_reject;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy,
- 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- resp = 1;
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
- "qla_target(%d): SRR for in data for cmd without them (tag %lld, SCSI status %d), reject",
- vha->vp_idx, se_cmd->tag,
- cmd->se_cmd.scsi_status);
- goto out_reject;
- }
- break;
- case SRR_IU_DATA_OUT:
- if (!cmd->sg || !cmd->sg_cnt) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
- "Unable to process SRR_IU_DATA_OUT due to"
- " missing cmd->sg\n");
- dump_stack();
- goto out_reject;
- }
- if (se_cmd->scsi_status != 0) {
- ql_dbg(ql_dbg_tgt, vha, 0xe02b,
- "Rejecting SRR_IU_DATA_OUT"
- " with non GOOD scsi_status\n");
- goto out_reject;
- }
- cmd->bufflen = se_cmd->data_length;
-
- if (qlt_has_data(cmd)) {
- if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
- goto out_reject;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy,
- 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (xmit_type & QLA_TGT_XMIT_DATA) {
- cmd->cmd_flags |= BIT_8;
- qlt_rdy_to_xfer(cmd);
- }
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
- "qla_target(%d): SRR for out data for cmd without them (tag %lld, SCSI status %d), reject",
- vha->vp_idx, se_cmd->tag, cmd->se_cmd.scsi_status);
- goto out_reject;
- }
- break;
- default:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
- "qla_target(%d): Unknown srr_ui value %x",
- vha->vp_idx, srr_ui);
- goto out_reject;
- }
-
- /* Transmit response in case of status and data-in cases */
- if (resp) {
- cmd->cmd_flags |= BIT_7;
- qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
- }
-
- return;
-
-out_reject:
- spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
- NOTIFY_ACK_SRR_FLAGS_REJECT,
- NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
- NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
- if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- cmd->state = QLA_TGT_STATE_DATA_IN;
- dump_stack();
- } else {
- cmd->cmd_flags |= BIT_9;
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-}
-
-static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
- struct qla_tgt_srr_imm *imm, int ha_locked)
-{
- struct qla_hw_data *ha = vha->hw;
- unsigned long flags = 0;
-
-#ifndef __CHECKER__
- if (!ha_locked)
- spin_lock_irqsave(&ha->hardware_lock, flags);
-#endif
-
- qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
- NOTIFY_ACK_SRR_FLAGS_REJECT,
- NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
- NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-
-#ifndef __CHECKER__
- if (!ha_locked)
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-#endif
-
- kfree(imm);
-}
-
-static void qlt_handle_srr_work(struct work_struct *work)
-{
- struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
- struct scsi_qla_host *vha = tgt->vha;
- struct qla_tgt_srr_ctio *sctio;
- unsigned long flags;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
- tgt);
-
-restart:
- spin_lock_irqsave(&tgt->srr_lock, flags);
- list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
- struct qla_tgt_srr_imm *imm, *i, *ti;
- struct qla_tgt_cmd *cmd;
- struct se_cmd *se_cmd;
-
- imm = NULL;
- list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
- srr_list_entry) {
- if (i->srr_id == sctio->srr_id) {
- list_del(&i->srr_list_entry);
- if (imm) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
- "qla_target(%d): There must be "
- "only one IMM SRR per CTIO SRR "
- "(IMM SRR %p, id %d, CTIO %p\n",
- vha->vp_idx, i, i->srr_id, sctio);
- qlt_reject_free_srr_imm(tgt->vha, i, 0);
- } else
- imm = i;
- }
- }
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
- "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
- sctio->srr_id);
-
- if (imm == NULL) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
- "Not found matching IMM for SRR CTIO (id %d)\n",
- sctio->srr_id);
- continue;
- } else
- list_del(&sctio->srr_list_entry);
-
- spin_unlock_irqrestore(&tgt->srr_lock, flags);
-
- cmd = sctio->cmd;
- /*
- * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
- * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
- * logic..
- */
- cmd->offset = 0;
- if (cmd->free_sg) {
- kfree(cmd->sg);
- cmd->sg = NULL;
- cmd->free_sg = 0;
- }
- se_cmd = &cmd->se_cmd;
-
- cmd->sg_cnt = se_cmd->t_data_nents;
- cmd->sg = se_cmd->t_data_sg;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
- "SRR cmd %p (se_cmd %p, tag %lld, op %x), sg_cnt=%d, offset=%d",
- cmd, &cmd->se_cmd, se_cmd->tag, se_cmd->t_task_cdb ?
- se_cmd->t_task_cdb[0] : 0, cmd->sg_cnt, cmd->offset);
-
- qlt_handle_srr(vha, sctio, imm);
-
- kfree(imm);
- kfree(sctio);
- goto restart;
- }
- spin_unlock_irqrestore(&tgt->srr_lock, flags);
-}
-
-/* ha->hardware_lock supposed to be held on entry */
-static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
- struct imm_ntfy_from_isp *iocb)
-{
- struct qla_tgt_srr_imm *imm;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- struct qla_tgt_srr_ctio *sctio;
-
- tgt->imm_srr_id++;
-
- ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
- vha->vp_idx);
-
- imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
- if (imm != NULL) {
- memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
-
- /* IRQ is already OFF */
- spin_lock(&tgt->srr_lock);
- imm->srr_id = tgt->imm_srr_id;
- list_add_tail(&imm->srr_list_entry,
- &tgt->srr_imm_list);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
- "IMM NTFY SRR %p added (id %d, ui %x)\n",
- imm, imm->srr_id, iocb->u.isp24.srr_ui);
- if (tgt->imm_srr_id == tgt->ctio_srr_id) {
- int found = 0;
- list_for_each_entry(sctio, &tgt->srr_ctio_list,
- srr_list_entry) {
- if (sctio->srr_id == imm->srr_id) {
- found = 1;
- break;
- }
- }
- if (found) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
- "Scheduling srr work\n");
- schedule_work(&tgt->srr_work);
- } else {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
- "qla_target(%d): imm_srr_id "
- "== ctio_srr_id (%d), but there is no "
- "corresponding SRR CTIO, deleting IMM "
- "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
- imm);
- list_del(&imm->srr_list_entry);
-
- kfree(imm);
-
- spin_unlock(&tgt->srr_lock);
- goto out_reject;
- }
- }
- spin_unlock(&tgt->srr_lock);
- } else {
- struct qla_tgt_srr_ctio *ts;
-
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
- "qla_target(%d): Unable to allocate SRR IMM "
- "entry, SRR request will be rejected\n", vha->vp_idx);
-
- /* IRQ is already OFF */
- spin_lock(&tgt->srr_lock);
- list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
- srr_list_entry) {
- if (sctio->srr_id == tgt->imm_srr_id) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
- "CTIO SRR %p deleted (id %d)\n",
- sctio, sctio->srr_id);
- list_del(&sctio->srr_list_entry);
- qlt_send_term_exchange(vha, sctio->cmd,
- &sctio->cmd->atio, 1, 0);
- kfree(sctio);
- }
- }
- spin_unlock(&tgt->srr_lock);
- goto out_reject;
- }
-
- return;
-
-out_reject:
- qlt_send_notify_ack(vha, iocb, 0, 0, 0,
- NOTIFY_ACK_SRR_FLAGS_REJECT,
- NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
- NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
-}
-
/*
* ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
*/
@@ -5126,12 +4839,6 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
if (qlt_24xx_handle_els(vha, iocb) == 0)
send_notify_ack = 0;
break;
-
- case IMM_NTFY_SRR:
- qlt_prepare_srr_imm(vha, iocb);
- send_notify_ack = 0;
- break;
-
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
"qla_target(%d): Received unknown immediate "
@@ -5153,7 +4860,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
struct ctio7_to_24xx *ctio24;
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
unsigned long flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
@@ -5214,7 +4921,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct se_session *se_sess;
struct qla_tgt_cmd *cmd;
int tag;
@@ -5756,6 +5463,32 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
break;
+ case MBA_REJECTED_FCP_CMD:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "qla_target(%d): Async event LS_REJECT occurred "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+ if (le16_to_cpu(mailbox[3]) == 1) {
+ /* exchange starvation. */
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Exchange starvation-. Resetting RISC\n");
+
+ vha->hw->exch_starvation = 0;
+ if (IS_P3P_TYPE(vha->hw))
+ set_bit(FCOE_CTX_RESET_NEEDED,
+ &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+ break;
+
case MBA_PORT_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
"qla_target(%d): Port update async event %#x "
@@ -5765,14 +5498,14 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
login_code = le16_to_cpu(mailbox[2]);
- if (login_code == 0x4)
+ if (login_code == 0x4) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
"Async MB 2: Got PLOGI Complete\n");
- else if (login_code == 0x7)
+ vha->hw->exch_starvation = 0;
+ } else if (login_code == 0x7)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
"Async MB 2: Port Logged Out\n");
break;
-
default:
break;
}
@@ -5783,8 +5516,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
uint16_t loop_id)
{
- fc_port_t *fcport;
+ fc_port_t *fcport, *tfcp, *del;
int rc;
+ unsigned long flags;
+ u8 newfcport = 0;
fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
if (!fcport) {
@@ -5806,18 +5541,82 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
return NULL;
}
+ del = NULL;
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
+
+ if (tfcp) {
+ tfcp->d_id = fcport->d_id;
+ tfcp->port_type = fcport->port_type;
+ tfcp->supported_classes = fcport->supported_classes;
+ tfcp->flags |= fcport->flags;
+
+ del = fcport;
+ fcport = tfcp;
+ } else {
+ if (vha->hw->current_topology == ISP_CFG_F)
+ fcport->flags |= FCF_FABRIC_DEVICE;
+
+ list_add_tail(&fcport->list, &vha->vp_fcports);
+ if (!IS_SW_RESV_ADDR(fcport->d_id))
+ vha->fcport_count++;
+ fcport->login_gen++;
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+ fcport->login_succ = 1;
+ newfcport = 1;
+ }
+
+ fcport->deleted = 0;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
+ switch (vha->host->active_mode) {
+ case MODE_INITIATOR:
+ case MODE_DUAL:
+ if (newfcport) {
+ if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post upd_fcport fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name, vha->fcport_count);
+ qla24xx_post_upd_fcport_work(vha, fcport);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post gpsc fcp_cnt %d\n",
+ __func__, __LINE__, fcport->port_name, vha->fcport_count);
+ qla24xx_post_gpsc_work(vha, fcport);
+ }
+ }
+ break;
+
+ case MODE_TARGET:
+ default:
+ break;
+ }
+ if (del)
+ qla2x00_free_fcport(del);
+
return fcport;
}
/* Must be called under tgt_mutex */
-static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
uint8_t *s_id)
{
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
fc_port_t *fcport = NULL;
int rc, global_resets;
uint16_t loop_id = 0;
+ if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) {
+ /*
+ * This is Domain Controller, so it should be
+ * OK to drop SCSI commands from it.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+ "Unable to find initiator with S_ID %x:%x:%x",
+ s_id[0], s_id[1], s_id[2]);
+ return NULL;
+ }
+
mutex_lock(&vha->vha_tgt.tgt_mutex);
retry:
@@ -5828,21 +5627,11 @@ retry:
if (rc != 0) {
mutex_unlock(&vha->vha_tgt.tgt_mutex);
- if ((s_id[0] == 0xFF) &&
- (s_id[1] == 0xFC)) {
- /*
- * This is Domain Controller, so it should be
- * OK to drop SCSI commands from it.
- */
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
- "Unable to find initiator with S_ID %x:%x:%x",
- s_id[0], s_id[1], s_id[2]);
- } else
- ql_log(ql_log_info, vha, 0xf071,
- "qla_target(%d): Unable to find "
- "initiator with S_ID %x:%x:%x",
- vha->vp_idx, s_id[0], s_id[1],
- s_id[2]);
+ ql_log(ql_log_info, vha, 0xf071,
+ "qla_target(%d): Unable to find "
+ "initiator with S_ID %x:%x:%x",
+ vha->vp_idx, s_id[0], s_id[1],
+ s_id[2]);
if (rc == -ENOENT) {
qlt_port_logo_t logo;
@@ -5875,7 +5664,6 @@ retry:
mutex_unlock(&vha->vha_tgt.tgt_mutex);
- kfree(fcport);
return sess;
}
@@ -5884,7 +5672,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
{
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
unsigned long flags = 0, flags2 = 0;
uint32_t be_s_id;
uint8_t s_id[3];
@@ -5911,12 +5699,18 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (!sess)
goto out_term2;
} else {
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
sess = NULL;
goto out_term2;
}
- kref_get(&sess->sess_kref);
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ "%s: kref_get fail %8phC \n",
+ __func__, sess->port_name);
+ sess = NULL;
+ goto out_term2;
+ }
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5928,8 +5722,8 @@ static void qlt_abort_work(struct qla_tgt *tgt,
if (rc != 0)
goto out_term;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- qlt_put_sess(sess);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
@@ -5940,7 +5734,8 @@ out_term:
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- qlt_put_sess(sess);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
@@ -5950,7 +5745,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
struct atio_from_isp *a = &prm->tm_iocb2;
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct fc_port *sess = NULL;
unsigned long flags;
uint8_t *s_id = NULL; /* to hide compiler warnings */
int rc;
@@ -5975,12 +5770,18 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (!sess)
goto out_term;
} else {
- if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+ if (sess->deleted) {
sess = NULL;
goto out_term;
}
- kref_get(&sess->sess_kref);
+ if (!kref_get_unless_zero(&sess->sess_kref)) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0xffff,
+ "%s: kref_get fail %8phC\n",
+ __func__, sess->port_name);
+ sess = NULL;
+ goto out_term;
+ }
}
iocb = a;
@@ -5992,13 +5793,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (rc != 0)
goto out_term;
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- qlt_put_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -6075,17 +5876,10 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->ha = ha;
tgt->vha = base_vha;
init_waitqueue_head(&tgt->waitQ);
- INIT_LIST_HEAD(&tgt->sess_list);
INIT_LIST_HEAD(&tgt->del_sess_list);
- INIT_DELAYED_WORK(&tgt->sess_del_work,
- (void (*)(struct work_struct *))qlt_del_sess_work_fn);
spin_lock_init(&tgt->sess_work_lock);
INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
INIT_LIST_HEAD(&tgt->sess_works_list);
- spin_lock_init(&tgt->srr_lock);
- INIT_LIST_HEAD(&tgt->srr_ctio_list);
- INIT_LIST_HEAD(&tgt->srr_imm_list);
- INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
atomic_set(&tgt->tgt_global_resets_count, 0);
base_vha->vha_tgt.qla_tgt = tgt;
@@ -6251,29 +6045,25 @@ EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
static void qlt_set_mode(struct scsi_qla_host *vha)
{
- struct qla_hw_data *ha = vha->hw;
-
switch (ql2x_ini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
case QLA2XXX_INI_MODE_EXCLUSIVE:
vha->host->active_mode = MODE_TARGET;
break;
case QLA2XXX_INI_MODE_ENABLED:
- vha->host->active_mode |= MODE_TARGET;
+ vha->host->active_mode = MODE_UNKNOWN;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ vha->host->active_mode = MODE_DUAL;
break;
default:
break;
}
-
- if (ha->tgt.ini_mode_force_reverse)
- qla_reverse_ini_mode(vha);
}
/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
- struct qla_hw_data *ha = vha->hw;
-
switch (ql2x_ini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
vha->host->active_mode = MODE_UNKNOWN;
@@ -6282,14 +6072,12 @@ static void qlt_clear_mode(struct scsi_qla_host *vha)
vha->host->active_mode = MODE_INITIATOR;
break;
case QLA2XXX_INI_MODE_ENABLED:
- vha->host->active_mode &= ~MODE_TARGET;
+ case QLA2XXX_INI_MODE_DUAL:
+ vha->host->active_mode = MODE_INITIATOR;
break;
default:
break;
}
-
- if (ha->tgt.ini_mode_force_reverse)
- qla_reverse_ini_mode(vha);
}
/*
@@ -6377,9 +6165,6 @@ static void qlt_disable_vha(struct scsi_qla_host *vha)
void
qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
{
- if (!qla_tgt_mode_enabled(vha))
- return;
-
vha->vha_tgt.qla_tgt = NULL;
mutex_init(&vha->vha_tgt.tgt_mutex);
@@ -6405,13 +6190,11 @@ qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
* FC-4 Feature bit 0 indicates target functionality to the name server.
*/
if (qla_tgt_mode_enabled(vha)) {
- if (qla_ini_mode_enabled(vha))
- ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
- else
- ct_req->req.rff_id.fc4_feature = BIT_0;
+ ct_req->req.rff_id.fc4_feature = BIT_0;
} else if (qla_ini_mode_enabled(vha)) {
ct_req->req.rff_id.fc4_feature = BIT_1;
- }
+ } else if (qla_dual_mode_enabled(vha))
+ ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
}
/*
@@ -6430,7 +6213,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
uint16_t cnt;
struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
- if (!qla_tgt_mode_enabled(vha))
+ if (qla_ini_mode_enabled(vha))
return;
for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
@@ -6523,8 +6306,10 @@ void
qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
+ u16 t;
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
if (!ha->tgt.saved_set) {
/* We save only once */
ha->tgt.saved_exchange_count = nv->exchange_count;
@@ -6537,13 +6322,30 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = cpu_to_le16(0xFFFF);
+ if (qla_tgt_mode_enabled(vha)) {
+ nv->exchange_count = cpu_to_le16(0xFFFF);
+ } else { /* dual */
+ if (ql_dm_tgt_ex_pct > 100) {
+ ql_dm_tgt_ex_pct = 50;
+ } else if (ql_dm_tgt_ex_pct == 100) {
+ /* leave some for FW */
+ ql_dm_tgt_ex_pct = 95;
+ }
+
+ tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
+ tmp = tmp/100;
+ if (tmp > 0xffff)
+ tmp = 0xffff;
+
+ t = tmp & 0xffff;
+ nv->exchange_count = cpu_to_le16(t);
+ }
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
- if (!qla_ini_mode_enabled(vha))
+ if (qla_tgt_mode_enabled(vha))
nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
@@ -6622,11 +6424,13 @@ void
qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
{
struct qla_hw_data *ha = vha->hw;
+ u32 tmp;
+ u16 t;
if (!QLA_TGT_MODE_ENABLED())
return;
- if (qla_tgt_mode_enabled(vha)) {
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
if (!ha->tgt.saved_set) {
/* We save only once */
ha->tgt.saved_exchange_count = nv->exchange_count;
@@ -6639,13 +6443,29 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
ha->tgt.saved_set = 1;
}
- nv->exchange_count = cpu_to_le16(0xFFFF);
+ if (qla_tgt_mode_enabled(vha)) {
+ nv->exchange_count = cpu_to_le16(0xFFFF);
+ } else { /* dual */
+ if (ql_dm_tgt_ex_pct > 100) {
+ ql_dm_tgt_ex_pct = 50;
+ } else if (ql_dm_tgt_ex_pct == 100) {
+ /* leave some for FW */
+ ql_dm_tgt_ex_pct = 95;
+ }
+
+ tmp = ha->orig_fw_xcb_count * ql_dm_tgt_ex_pct;
+ tmp = tmp/100;
+ if (tmp > 0xffff)
+ tmp = 0xffff;
+ t = tmp & 0xffff;
+ nv->exchange_count = cpu_to_le16(t);
+ }
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
/* Disable ini mode, if requested */
- if (!qla_ini_mode_enabled(vha))
+ if (qla_tgt_mode_enabled(vha))
nv->firmware_options_1 |= cpu_to_le32(BIT_5);
/* Disable Full Login after LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
@@ -6749,10 +6569,12 @@ void
qlt_modify_vp_config(struct scsi_qla_host *vha,
struct vp_config_entry_24xx *vpmod)
{
- if (qla_tgt_mode_enabled(vha))
+ /* enable target mode. Bit5 = 1 => disable */
+ if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
vpmod->options_idx1 &= ~BIT_5;
- /* Disable ini mode, if requested */
- if (!qla_ini_mode_enabled(vha))
+
+ /* Disable ini mode, if requested. bit4 = 1 => disable */
+ if (qla_tgt_mode_enabled(vha))
vpmod->options_idx1 &= ~BIT_4;
}
@@ -6772,6 +6594,11 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
mutex_init(&base_vha->vha_tgt.tgt_mutex);
mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
+
+ INIT_LIST_HEAD(&base_vha->unknown_atio_list);
+ INIT_DELAYED_WORK(&base_vha->unknown_atio_work,
+ qlt_unknown_atio_work_fn);
+
qlt_clear_mode(base_vha);
}
@@ -6906,6 +6733,8 @@ static int __init qlt_parse_ini_mode(void)
ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DUAL) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_DUAL;
else
return false;
@@ -6935,9 +6764,8 @@ int __init qlt_init(void)
}
qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
- sizeof(qlt_plogi_ack_t),
- __alignof__(qlt_plogi_ack_t),
- 0, NULL);
+ sizeof(struct qlt_plogi_ack_t), __alignof__(struct qlt_plogi_ack_t),
+ 0, NULL);
if (!qla_tgt_plogi_cachep) {
ql_log(ql_log_fatal, NULL, 0xe06d,
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 0824a8164a24..a7f90dcaae37 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -45,10 +45,12 @@
#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
+#define QLA2XXX_INI_MODE_STR_DUAL "dual"
#define QLA2XXX_INI_MODE_EXCLUSIVE 0
#define QLA2XXX_INI_MODE_DISABLED 1
#define QLA2XXX_INI_MODE_ENABLED 2
+#define QLA2XXX_INI_MODE_DUAL 3
#define QLA2XXX_COMMAND_COUNT_INIT 250
#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
@@ -118,84 +120,6 @@
? le16_to_cpu((iocb)->u.isp2x.target.extended) \
: (uint16_t)(iocb)->u.isp2x.target.id.standard)
-#ifndef IMMED_NOTIFY_TYPE
-#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
-/*
- * ISP queue - immediate notify entry structure definition.
- * This is sent by the ISP to the Target driver.
- * This IOCB would have report of events sent by the
- * initiator, that needs to be handled by the target
- * driver immediately.
- */
-struct imm_ntfy_from_isp {
- uint8_t entry_type; /* Entry type. */
- uint8_t entry_count; /* Entry count. */
- uint8_t sys_define; /* System defined. */
- uint8_t entry_status; /* Entry Status. */
- union {
- struct {
- uint32_t sys_define_2; /* System defined. */
- target_id_t target;
- uint16_t lun;
- uint8_t target_id;
- uint8_t reserved_1;
- uint16_t status_modifier;
- uint16_t status;
- uint16_t task_flags;
- uint16_t seq_id;
- uint16_t srr_rx_id;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
-#define SRR_IU_DATA_IN 0x1
-#define SRR_IU_DATA_OUT 0x5
-#define SRR_IU_STATUS 0x7
- uint16_t srr_ox_id;
- uint8_t reserved_2[28];
- } isp2x;
- struct {
- uint32_t reserved;
- uint16_t nport_handle;
- uint16_t reserved_2;
- uint16_t flags;
-#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
-#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
- uint16_t srr_rx_id;
- uint16_t status;
- uint8_t status_subcode;
- uint8_t fw_handle;
- uint32_t exchange_address;
- uint32_t srr_rel_offs;
- uint16_t srr_ui;
- uint16_t srr_ox_id;
- union {
- struct {
- uint8_t node_name[8];
- } plogi; /* PLOGI/ADISC/PDISC */
- struct {
- /* PRLI word 3 bit 0-15 */
- uint16_t wd3_lo;
- uint8_t resv0[6];
- } prli;
- struct {
- uint8_t port_id[3];
- uint8_t resv1;
- uint16_t nport_handle;
- uint16_t resv2;
- } req_els;
- } u;
- uint8_t port_name[8];
- uint8_t resv3[3];
- uint8_t vp_index;
- uint32_t reserved_5;
- uint8_t port_id[3];
- uint8_t reserved_6;
- } isp24;
- } u;
- uint16_t reserved_7;
- uint16_t ox_id;
-} __packed;
-#endif
-
#ifndef NOTIFY_ACK_TYPE
#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
/*
@@ -731,7 +655,7 @@ struct abts_resp_from_24xx_fw {
\********************************************************************/
struct qla_tgt_mgmt_cmd;
-struct qla_tgt_sess;
+struct fc_port;
/*
* This structure provides a template of function calls that the
@@ -744,21 +668,22 @@ struct qla_tgt_func_tmpl {
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
void (*handle_dif_err)(struct qla_tgt_cmd *);
- int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
- void (*free_session)(struct qla_tgt_sess *);
+ void (*free_session)(struct fc_port *);
int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
- struct qla_tgt_sess *);
- void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
- struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+ struct fc_port *);
+ void (*update_sess)(struct fc_port *, port_id_t, uint16_t, bool);
+ struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *,
const uint16_t);
- struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+ struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *,
const uint8_t *);
- void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
- void (*shutdown_sess)(struct qla_tgt_sess *);
+ void (*clear_nacl_from_fcport_map)(struct fc_port *);
+ void (*put_sess)(struct fc_port *);
+ void (*shutdown_sess)(struct fc_port *);
};
int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -795,6 +720,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_ABORT_ALL 0xFFFE
#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
#define QLA_TGT_NEXUS_LOSS 0xFFFC
+#define QLA_TGT_ABTS 0xFFFB
+#define QLA_TGT_2G_ABORT_TASK 0xFFFA
/* Notify Acknowledge flags */
#define NOTIFY_ACK_RES_COUNT BIT_8
@@ -872,12 +799,8 @@ struct qla_tgt {
/* Count of sessions refering qla_tgt. Protected by hardware_lock. */
int sess_count;
- /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
- struct list_head sess_list;
-
/* Protected by hardware_lock */
struct list_head del_sess_list;
- struct delayed_work sess_del_work;
spinlock_t sess_work_lock;
struct list_head sess_works_list;
@@ -888,16 +811,7 @@ struct qla_tgt {
int notify_ack_expected;
int abts_resp_expected;
int modify_lun_expected;
-
- int ctio_srr_id;
- int imm_srr_id;
- spinlock_t srr_lock;
- struct list_head srr_ctio_list;
- struct list_head srr_imm_list;
- struct work_struct srr_work;
-
atomic_t tgt_global_resets_count;
-
struct list_head tgt_list_entry;
};
@@ -910,92 +824,32 @@ struct qla_tgt_sess_op {
bool aborted;
};
-enum qla_sess_deletion {
- QLA_SESS_DELETION_NONE = 0,
- QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
- * this one */
- QLA_SESS_DELETION_IN_PROGRESS = 2,
-};
-
-typedef enum {
- QLT_PLOGI_LINK_SAME_WWN,
- QLT_PLOGI_LINK_CONFLICT,
- QLT_PLOGI_LINK_MAX
-} qlt_plogi_link_t;
-
-typedef struct {
- struct list_head list;
- struct imm_ntfy_from_isp iocb;
- port_id_t id;
- int ref_count;
-} qlt_plogi_ack_t;
-
-/*
- * Equivilant to IT Nexus (Initiator-Target)
- */
-struct qla_tgt_sess {
- uint16_t loop_id;
- port_id_t s_id;
-
- unsigned int conf_compl_supported:1;
- unsigned int deleted:2;
- unsigned int local:1;
- unsigned int logout_on_delete:1;
- unsigned int keep_nport_handle:1;
- unsigned int send_els_logo:1;
-
- unsigned char logout_completed;
-
- int generation;
-
- struct se_session *se_sess;
- struct kref sess_kref;
- struct scsi_qla_host *vha;
- struct qla_tgt *tgt;
-
- struct list_head sess_list_entry;
- unsigned long expires;
- struct list_head del_list_entry;
-
- uint8_t port_name[WWN_SIZE];
- struct work_struct free_work;
-
- qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
+enum trace_flags {
+ TRC_NEW_CMD = BIT_0,
+ TRC_DO_WORK = BIT_1,
+ TRC_DO_WORK_ERR = BIT_2,
+ TRC_XFR_RDY = BIT_3,
+ TRC_XMIT_DATA = BIT_4,
+ TRC_XMIT_STATUS = BIT_5,
+ TRC_SRR_RSP = BIT_6,
+ TRC_SRR_XRDY = BIT_7,
+ TRC_SRR_TERM = BIT_8,
+ TRC_SRR_CTIO = BIT_9,
+ TRC_FLUSH = BIT_10,
+ TRC_CTIO_ERR = BIT_11,
+ TRC_CTIO_DONE = BIT_12,
+ TRC_CTIO_ABORTED = BIT_13,
+ TRC_CTIO_STRANGE = BIT_14,
+ TRC_CMD_DONE = BIT_15,
+ TRC_CMD_CHK_STOP = BIT_16,
+ TRC_CMD_FREE = BIT_17,
+ TRC_DATA_IN = BIT_18,
+ TRC_ABORT = BIT_19,
};
-typedef enum {
- /*
- * BIT_0 - Atio Arrival / schedule to work
- * BIT_1 - qlt_do_work
- * BIT_2 - qlt_do work failed
- * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
- * BIT_4 - read respond/tcm_qla2xx_queue_data_in
- * BIT_5 - status respond / tcm_qla2xx_queue_status
- * BIT_6 - tcm request to abort/Term exchange.
- * pre_xmit_response->qlt_send_term_exchange
- * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
- * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
- * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
- * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
-
- * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
- * BIT_13 - Bad completion -
- * qlt_ctio_do_completion --> qlt_term_ctio_exchange
- * BIT_14 - Back end data received/sent.
- * BIT_15 - SRR prepare ctio
- * BIT_16 - complete free
- * BIT_17 - flush - qlt_abort_cmd_on_host_reset
- * BIT_18 - completion w/abort status
- * BIT_19 - completion w/unknown status
- * BIT_20 - tcm_qla2xxx_free_cmd
- */
- CMD_FLAG_DATA_WORK = BIT_11,
- CMD_FLAG_DATA_WORK_FREE = BIT_21,
-} cmd_flags_t;
-
struct qla_tgt_cmd {
struct se_cmd se_cmd;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
int state;
struct work_struct free_work;
struct work_struct work;
@@ -1014,6 +868,8 @@ struct qla_tgt_cmd {
unsigned int cmd_sent_to_fw:1;
unsigned int cmd_in_wq:1;
unsigned int aborted:1;
+ unsigned int data_work:1;
+ unsigned int data_work_free:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -1038,7 +894,7 @@ struct qla_tgt_cmd {
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
- cmd_flags_t cmd_flags;
+ enum trace_flags trc_flags;
};
struct qla_tgt_sess_work_param {
@@ -1056,9 +912,9 @@ struct qla_tgt_sess_work_param {
};
struct qla_tgt_mgmt_cmd {
- uint8_t tmr_func;
+ uint16_t tmr_func;
uint8_t fc_tm_rsp;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
@@ -1090,18 +946,6 @@ struct qla_tgt_prm {
uint16_t tot_dsds;
};
-struct qla_tgt_srr_imm {
- struct list_head srr_list_entry;
- int srr_id;
- struct imm_ntfy_from_isp imm_ntfy;
-};
-
-struct qla_tgt_srr_ctio {
- struct list_head srr_list_entry;
- int srr_id;
- struct qla_tgt_cmd *cmd;
-};
-
/* Check for Switch reserved address */
#define IS_SW_RESV_ADDR(_s_id) \
((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
@@ -1121,7 +965,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
extern int qlt_lport_register(void *, u64, u64, u64,
int (*callback)(struct scsi_qla_host *, void *, u64, u64));
extern void qlt_lport_deregister(struct scsi_qla_host *);
-void qlt_put_sess(struct qla_tgt_sess *sess);
+extern void qlt_unreg_sess(struct fc_port *);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
@@ -1133,24 +977,22 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
* is not set. Right now, ha value is ignored.
*/
#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
extern int ql2x_ini_mode;
static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
{
- return ha->host->active_mode & MODE_TARGET;
+ return ha->host->active_mode == MODE_TARGET;
}
static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
{
- return ha->host->active_mode & MODE_INITIATOR;
+ return ha->host->active_mode == MODE_INITIATOR;
}
-static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha)
{
- if (ha->host->active_mode & MODE_INITIATOR)
- ha->host->active_mode &= ~MODE_INITIATOR;
- else
- ha->host->active_mode |= MODE_INITIATOR;
+ return (ha->host->active_mode == MODE_DUAL);
}
static inline uint32_t sid_to_key(const uint8_t *s_id)
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3084983c1287..8e8ab0fa9672 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -282,10 +282,10 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
cmd->cmd_in_wq = 0;
- WARN_ON(cmd->cmd_flags & BIT_16);
+ WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
cmd->vha->tgt_counters.qla_core_ret_sta_ctio++;
- cmd->cmd_flags |= BIT_16;
+ cmd->trc_flags |= TRC_CMD_FREE;
transport_generic_free_cmd(&cmd->se_cmd, 0);
}
@@ -299,8 +299,8 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
cmd->vha->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
- BUG_ON(cmd->cmd_flags & BIT_20);
- cmd->cmd_flags |= BIT_20;
+ WARN_ON(cmd->trc_flags & TRC_CMD_DONE);
+ cmd->trc_flags |= TRC_CMD_DONE;
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
@@ -315,7 +315,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
- cmd->cmd_flags |= BIT_14;
+ cmd->trc_flags |= TRC_CMD_CHK_STOP;
}
return target_put_sess_cmd(se_cmd);
@@ -339,9 +339,26 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
qlt_free_cmd(cmd);
}
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+ struct fc_port *sess = container_of(kref,
+ struct fc_port, sess_kref);
+
+ qlt_unreg_sess(sess);
+}
+
+static void tcm_qla2xxx_put_sess(struct fc_port *sess)
+{
+ if (!sess)
+ return;
+
+ assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+ kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
+}
+
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
- struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct fc_port *sess = se_sess->fabric_sess_ptr;
struct scsi_qla_host *vha;
unsigned long flags;
@@ -350,7 +367,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
target_sess_cmd_list_set_waiting(se_sess);
- qlt_put_sess(sess);
+ tcm_qla2xxx_put_sess(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -377,7 +394,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
cmd->se_cmd.se_cmd_flags);
return 0;
}
- cmd->cmd_flags |= BIT_3;
+ cmd->trc_flags |= TRC_XFR_RDY;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -441,7 +458,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct se_session *se_sess;
- struct qla_tgt_sess *sess;
+ struct fc_port *sess;
#ifdef CONFIG_TCM_QLA2XXX_DEBUG
struct se_portal_group *se_tpg;
struct tcm_qla2xxx_tpg *tpg;
@@ -456,7 +473,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
sess = cmd->sess;
if (!sess) {
- pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+ pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n");
return -EINVAL;
}
@@ -493,9 +510,9 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
cmd->cmd_in_wq = 0;
spin_lock_irqsave(&cmd->cmd_lock, flags);
- cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
+ cmd->data_work = 1;
if (cmd->aborted) {
- cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ cmd->data_work_free = 1;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
tcm_qla2xxx_free_cmd(cmd);
@@ -532,7 +549,7 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
- cmd->cmd_flags |= BIT_10;
+ cmd->trc_flags |= TRC_DATA_IN;
cmd->cmd_in_wq = 1;
INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
@@ -563,13 +580,49 @@ static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
* Called from qla_target.c:qlt_issue_task_mgmt()
*/
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
- uint8_t tmr_func, uint32_t tag)
+ uint16_t tmr_func, uint32_t tag)
{
- struct qla_tgt_sess *sess = mcmd->sess;
+ struct fc_port *sess = mcmd->sess;
struct se_cmd *se_cmd = &mcmd->se_cmd;
+ int transl_tmr_func = 0;
+
+ switch (tmr_func) {
+ case QLA_TGT_ABTS:
+ pr_debug("%ld: ABTS received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_ABORT_TASK;
+ break;
+ case QLA_TGT_2G_ABORT_TASK:
+ pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_ABORT_TASK;
+ break;
+ case QLA_TGT_CLEAR_ACA:
+ pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_CLEAR_ACA;
+ break;
+ case QLA_TGT_TARGET_RESET:
+ pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+ case QLA_TGT_LUN_RESET:
+ pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_LUN_RESET;
+ break;
+ case QLA_TGT_CLEAR_TS:
+ pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+ case QLA_TGT_ABORT_TS:
+ pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no);
+ transl_tmr_func = TMR_ABORT_TASK_SET;
+ break;
+ default:
+ pr_debug("%ld: Unknown task mgmt fn 0x%x\n",
+ sess->vha->host_no, tmr_func);
+ return -ENOSYS;
+ }
return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
- tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+ transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
@@ -591,7 +644,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
return 0;
}
- cmd->cmd_flags |= BIT_4;
+ cmd->trc_flags |= TRC_XMIT_DATA;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -622,11 +675,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- if (cmd->cmd_flags & BIT_5) {
- pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+ if (cmd->trc_flags & TRC_XMIT_STATUS) {
+ pr_crit("Multiple calls for status = %p.\n", cmd);
dump_stack();
}
- cmd->cmd_flags |= BIT_5;
+ cmd->trc_flags |= TRC_XMIT_STATUS;
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
/*
@@ -682,10 +735,7 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
qlt_xmit_tm_rsp(mcmd);
}
-
-#define DATA_WORK_NOT_FREE(_flags) \
- (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
- CMD_FLAG_DATA_WORK)
+#define DATA_WORK_NOT_FREE(_cmd) ((_cmd)->data_work && !(_cmd)->data_work_free)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
@@ -697,13 +747,13 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
spin_lock_irqsave(&cmd->cmd_lock, flags);
if ((cmd->state == QLA_TGT_STATE_NEW)||
- ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
- DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) {
-
- cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
+ DATA_WORK_NOT_FREE(cmd))) {
+ cmd->data_work_free = 1;
spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- /* Cmd have not reached firmware.
- * Use this trigger to free it. */
+ /*
+ * cmd has not reached fw; use this trigger to free it.
+ */
tcm_qla2xxx_free_cmd(cmd);
return;
}
@@ -713,11 +763,11 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
- struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
+ struct tcm_qla2xxx_nacl *, struct fc_port *);
/*
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
-static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
{
struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
@@ -756,7 +806,7 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
-static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
{
assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
target_sess_cmd_list_set_waiting(sess->se_sess);
@@ -1141,7 +1191,7 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
/*
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
-static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(
scsi_qla_host_t *vha,
const uint8_t *s_id)
{
@@ -1169,12 +1219,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
se_nacl, se_nacl->initiatorname);
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
- if (!nacl->qla_tgt_sess) {
- pr_err("Unable to locate struct qla_tgt_sess\n");
+ if (!nacl->fc_port) {
+ pr_err("Unable to locate struct fc_port\n");
return NULL;
}
- return nacl->qla_tgt_sess;
+ return nacl->fc_port;
}
/*
@@ -1185,7 +1235,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
struct se_node_acl *new_se_nacl,
struct tcm_qla2xxx_nacl *nacl,
struct se_session *se_sess,
- struct qla_tgt_sess *qla_tgt_sess,
+ struct fc_port *fc_port,
uint8_t *s_id)
{
u32 key;
@@ -1209,22 +1259,22 @@ static void tcm_qla2xxx_set_sess_by_s_id(
pr_debug("Wiping nonexisting fc_port entry\n");
}
- qla_tgt_sess->se_sess = se_sess;
- nacl->qla_tgt_sess = qla_tgt_sess;
+ fc_port->se_sess = se_sess;
+ nacl->fc_port = fc_port;
return;
}
- if (nacl->qla_tgt_sess) {
+ if (nacl->fc_port) {
if (new_se_nacl == NULL) {
- pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+ pr_debug("Clearing existing nacl->fc_port and fc_port entry\n");
btree_remove32(&lport->lport_fcport_map, key);
- nacl->qla_tgt_sess = NULL;
+ nacl->fc_port = NULL;
return;
}
- pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+ pr_debug("Replacing existing nacl->fc_port and fc_port entry\n");
btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
- qla_tgt_sess->se_sess = se_sess;
- nacl->qla_tgt_sess = qla_tgt_sess;
+ fc_port->se_sess = se_sess;
+ nacl->fc_port = fc_port;
return;
}
@@ -1234,19 +1284,19 @@ static void tcm_qla2xxx_set_sess_by_s_id(
return;
}
- pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+ pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n");
btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
- qla_tgt_sess->se_sess = se_sess;
- nacl->qla_tgt_sess = qla_tgt_sess;
+ fc_port->se_sess = se_sess;
+ nacl->fc_port = fc_port;
- pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
- nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+ pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}
/*
* Expected to be called with struct qla_hw_data->tgt.sess_lock held
*/
-static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id(
scsi_qla_host_t *vha,
const uint16_t loop_id)
{
@@ -1274,12 +1324,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
- if (!nacl->qla_tgt_sess) {
- pr_err("Unable to locate struct qla_tgt_sess\n");
+ if (!nacl->fc_port) {
+ pr_err("Unable to locate struct fc_port\n");
return NULL;
}
- return nacl->qla_tgt_sess;
+ return nacl->fc_port;
}
/*
@@ -1290,7 +1340,7 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
struct se_node_acl *new_se_nacl,
struct tcm_qla2xxx_nacl *nacl,
struct se_session *se_sess,
- struct qla_tgt_sess *qla_tgt_sess,
+ struct fc_port *fc_port,
uint16_t loop_id)
{
struct se_node_acl *saved_nacl;
@@ -1305,27 +1355,27 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
if (!saved_nacl) {
pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
fc_loopid->se_nacl = new_se_nacl;
- if (qla_tgt_sess->se_sess != se_sess)
- qla_tgt_sess->se_sess = se_sess;
- if (nacl->qla_tgt_sess != qla_tgt_sess)
- nacl->qla_tgt_sess = qla_tgt_sess;
+ if (fc_port->se_sess != se_sess)
+ fc_port->se_sess = se_sess;
+ if (nacl->fc_port != fc_port)
+ nacl->fc_port = fc_port;
return;
}
- if (nacl->qla_tgt_sess) {
+ if (nacl->fc_port) {
if (new_se_nacl == NULL) {
- pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n");
fc_loopid->se_nacl = NULL;
- nacl->qla_tgt_sess = NULL;
+ nacl->fc_port = NULL;
return;
}
- pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n");
fc_loopid->se_nacl = new_se_nacl;
- if (qla_tgt_sess->se_sess != se_sess)
- qla_tgt_sess->se_sess = se_sess;
- if (nacl->qla_tgt_sess != qla_tgt_sess)
- nacl->qla_tgt_sess = qla_tgt_sess;
+ if (fc_port->se_sess != se_sess)
+ fc_port->se_sess = se_sess;
+ if (nacl->fc_port != fc_port)
+ nacl->fc_port = fc_port;
return;
}
@@ -1335,29 +1385,29 @@ static void tcm_qla2xxx_set_sess_by_loop_id(
return;
}
- pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+ pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n");
fc_loopid->se_nacl = new_se_nacl;
- if (qla_tgt_sess->se_sess != se_sess)
- qla_tgt_sess->se_sess = se_sess;
- if (nacl->qla_tgt_sess != qla_tgt_sess)
- nacl->qla_tgt_sess = qla_tgt_sess;
+ if (fc_port->se_sess != se_sess)
+ fc_port->se_sess = se_sess;
+ if (nacl->fc_port != fc_port)
+ nacl->fc_port = fc_port;
- pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
- nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+ pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname);
}
/*
* Should always be called with qla_hw_data->tgt.sess_lock held.
*/
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
- struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+ struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess)
{
struct se_session *se_sess = sess->se_sess;
unsigned char be_sid[3];
- be_sid[0] = sess->s_id.b.domain;
- be_sid[1] = sess->s_id.b.area;
- be_sid[2] = sess->s_id.b.al_pa;
+ be_sid[0] = sess->d_id.b.domain;
+ be_sid[1] = sess->d_id.b.area;
+ be_sid[2] = sess->d_id.b.al_pa;
tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
sess, be_sid);
@@ -1365,7 +1415,7 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
sess, sess->loop_id);
}
-static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+static void tcm_qla2xxx_free_session(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
struct qla_hw_data *ha = tgt->ha;
@@ -1377,7 +1427,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
se_sess = sess->se_sess;
if (!se_sess) {
- pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+ pr_err("struct fc_port->se_sess is NULL\n");
dump_stack();
return;
}
@@ -1404,14 +1454,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl = se_sess->se_node_acl;
struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
struct tcm_qla2xxx_nacl, se_node_acl);
- struct qla_tgt_sess *qlat_sess = p;
+ struct fc_port *qlat_sess = p;
uint16_t loop_id = qlat_sess->loop_id;
unsigned long flags;
unsigned char be_sid[3];
- be_sid[0] = qlat_sess->s_id.b.domain;
- be_sid[1] = qlat_sess->s_id.b.area;
- be_sid[2] = qlat_sess->s_id.b.al_pa;
+ be_sid[0] = qlat_sess->d_id.b.domain;
+ be_sid[1] = qlat_sess->d_id.b.area;
+ be_sid[2] = qlat_sess->d_id.b.al_pa;
/*
* And now setup se_nacl and session pointers into HW lport internal
@@ -1434,7 +1484,7 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg,
static int tcm_qla2xxx_check_initiator_node_acl(
scsi_qla_host_t *vha,
unsigned char *fc_wwpn,
- struct qla_tgt_sess *qlat_sess)
+ struct fc_port *qlat_sess)
{
struct qla_hw_data *ha = vha->hw;
struct tcm_qla2xxx_lport *lport;
@@ -1478,7 +1528,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
return 0;
}
-static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
+static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
uint16_t loop_id, bool conf_compl_supported)
{
struct qla_tgt *tgt = sess->tgt;
@@ -1491,11 +1541,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
u32 key;
- if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
+ if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24)
pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
sess, sess->port_name,
- sess->loop_id, loop_id, sess->s_id.b.domain,
- sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
+ sess->loop_id, loop_id, sess->d_id.b.domain,
+ sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain,
s_id.b.area, s_id.b.al_pa);
if (sess->loop_id != loop_id) {
@@ -1515,18 +1565,20 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
sess->loop_id = loop_id;
}
- if (sess->s_id.b24 != s_id.b24) {
- key = (((u32) sess->s_id.b.domain << 16) |
- ((u32) sess->s_id.b.area << 8) |
- ((u32) sess->s_id.b.al_pa));
+ if (sess->d_id.b24 != s_id.b24) {
+ key = (((u32) sess->d_id.b.domain << 16) |
+ ((u32) sess->d_id.b.area << 8) |
+ ((u32) sess->d_id.b.al_pa));
if (btree_lookup32(&lport->lport_fcport_map, key))
- WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
- "Found wrong se_nacl when updating s_id %x:%x:%x\n",
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+ WARN(btree_remove32(&lport->lport_fcport_map, key) !=
+ se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n",
+ sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa);
else
WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+ sess->d_id.b.domain, sess->d_id.b.area,
+ sess->d_id.b.al_pa);
key = (((u32) s_id.b.domain << 16) |
((u32) s_id.b.area << 8) |
@@ -1537,10 +1589,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
s_id.b.domain, s_id.b.area, s_id.b.al_pa);
btree_update32(&lport->lport_fcport_map, key, se_nacl);
} else {
- btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
+ btree_insert32(&lport->lport_fcport_map, key, se_nacl,
+ GFP_ATOMIC);
}
- sess->s_id = s_id;
+ sess->d_id = s_id;
nacl->nport_id = key;
}
@@ -1567,6 +1620,7 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+ .put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};
@@ -1690,7 +1744,7 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
struct fc_vport_identifiers vport_id;
- if (!qla_tgt_mode_enabled(base_vha)) {
+ if (qla_ini_mode_enabled(base_vha)) {
pr_err("qla2xxx base_vha not enabled for target mode\n");
return -EPERM;
}
@@ -1738,7 +1792,7 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
p = strchr(tmp, '@');
if (!p) {
- pr_err("Unable to locate NPIV '@' seperator\n");
+ pr_err("Unable to locate NPIV '@' separator\n");
return ERR_PTR(-EINVAL);
}
*p++ = '\0';
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index cf8430be183b..071035dfa99a 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -20,8 +20,8 @@ struct tcm_qla2xxx_nacl {
u64 nport_wwnn;
/* ASCII formatted WWPN for FC Initiator Nport */
char nport_name[TCM_QLA2XXX_NAMELEN];
- /* Pointer to qla_tgt_sess */
- struct qla_tgt_sess *qla_tgt_sess;
+ /* Pointer to fc_port */
+ struct fc_port *fc_port;
/* Pointer to TCM FC nexus */
struct se_session *nport_nexus;
};
diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c
index b1383a71400e..a75673bb82b3 100644
--- a/drivers/scsi/scsi_common.c
+++ b/drivers/scsi/scsi_common.c
@@ -137,11 +137,11 @@ EXPORT_SYMBOL(int_to_scsilun);
bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len,
struct scsi_sense_hdr *sshdr)
{
+ memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
+
if (!sense_buffer || !sb_len)
return false;
- memset(sshdr, 0, sizeof(struct scsi_sense_hdr));
-
sshdr->response_code = (sense_buffer[0] & 0x7f);
if (!scsi_sense_valid(sshdr))
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index b8d3b97b217a..84addee05be6 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -219,20 +219,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
}
EXPORT_SYMBOL_GPL(scsi_unregister_device_handler);
-static struct scsi_device *get_sdev_from_queue(struct request_queue *q)
-{
- struct scsi_device *sdev;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- sdev = q->queuedata;
- if (!sdev || !get_device(&sdev->sdev_gendev))
- sdev = NULL;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return sdev;
-}
-
/*
* scsi_dh_activate - activate the path associated with the scsi_device
* corresponding to the given request queue.
@@ -251,7 +237,7 @@ int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data)
struct scsi_device *sdev;
int err = SCSI_DH_NOSYS;
- sdev = get_sdev_from_queue(q);
+ sdev = scsi_device_from_queue(q);
if (!sdev) {
if (fn)
fn(data, err);
@@ -298,7 +284,7 @@ int scsi_dh_set_params(struct request_queue *q, const char *params)
struct scsi_device *sdev;
int err = -SCSI_DH_NOSYS;
- sdev = get_sdev_from_queue(q);
+ sdev = scsi_device_from_queue(q);
if (!sdev)
return err;
@@ -321,7 +307,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name)
struct scsi_device_handler *scsi_dh;
int err = 0;
- sdev = get_sdev_from_queue(q);
+ sdev = scsi_device_from_queue(q);
if (!sdev)
return -ENODEV;
@@ -359,7 +345,7 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
struct scsi_device *sdev;
const char *handler_name = NULL;
- sdev = get_sdev_from_queue(q);
+ sdev = scsi_device_from_queue(q);
if (!sdev)
return NULL;
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 8b8c814df5c7..b6bf3f29a12a 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -199,6 +199,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg)
int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
char scsi_cmd[MAX_COMMAND_SIZE];
+ struct scsi_sense_hdr sense_hdr;
/* Check for deprecated ioctls ... all the ioctls which don't
* follow the new unique numbering scheme are deprecated */
@@ -243,7 +244,7 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
return scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
case SCSI_IOCTL_TEST_UNIT_READY:
return scsi_test_unit_ready(sdev, IOCTL_NORMAL_TIMEOUT,
- NORMAL_RETRIES, NULL);
+ NORMAL_RETRIES, &sense_hdr);
case SCSI_IOCTL_START_UNIT:
scsi_cmd[0] = START_STOP;
scsi_cmd[1] = 0;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 3e32dc954c3c..ba2286652ff6 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -213,10 +213,30 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, 1);
}
-static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+
+/**
+ * scsi_execute - insert request and wait for the result
+ * @sdev: scsi device
+ * @cmd: scsi command
+ * @data_direction: data direction
+ * @buffer: data buffer
+ * @bufflen: len of buffer
+ * @sense: optional sense buffer
+ * @sshdr: optional decoded sense header
+ * @timeout: request timeout in seconds
+ * @retries: number of times to retry request
+ * @flags: flags for ->cmd_flags
+ * @rq_flags: flags for ->rq_flags
+ * @resid: optional residual length
+ *
+ * returns the req->errors value which is the scsi_cmnd result
+ * field.
+ */
+int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, int timeout, int retries, u64 flags,
- req_flags_t rq_flags, int *resid)
+ unsigned char *sense, struct scsi_sense_hdr *sshdr,
+ int timeout, int retries, u64 flags, req_flags_t rq_flags,
+ int *resid)
{
struct request *req;
struct scsi_request *rq;
@@ -259,62 +279,16 @@ static int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
*resid = rq->resid_len;
if (sense && rq->sense_len)
memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
+ if (sshdr)
+ scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
ret = req->errors;
out:
blk_put_request(req);
return ret;
}
-
-/**
- * scsi_execute - insert request and wait for the result
- * @sdev: scsi device
- * @cmd: scsi command
- * @data_direction: data direction
- * @buffer: data buffer
- * @bufflen: len of buffer
- * @sense: optional sense buffer
- * @timeout: request timeout in seconds
- * @retries: number of times to retry request
- * @flags: or into request flags;
- * @resid: optional residual length
- *
- * returns the req->errors value which is the scsi_cmnd result
- * field.
- */
-int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
- int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, int timeout, int retries, u64 flags,
- int *resid)
-{
- return __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense,
- timeout, retries, flags, 0, resid);
-}
EXPORT_SYMBOL(scsi_execute);
-int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
- int data_direction, void *buffer, unsigned bufflen,
- struct scsi_sense_hdr *sshdr, int timeout, int retries,
- int *resid, u64 flags, req_flags_t rq_flags)
-{
- char *sense = NULL;
- int result;
-
- if (sshdr) {
- sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
- if (!sense)
- return DRIVER_ERROR << 24;
- }
- result = __scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
- sense, timeout, retries, flags, rq_flags, resid);
- if (sshdr)
- scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
-
- kfree(sense);
- return result;
-}
-EXPORT_SYMBOL(scsi_execute_req_flags);
-
/*
* Function: scsi_init_cmd_errh()
*
@@ -2231,6 +2205,29 @@ void scsi_mq_destroy_tags(struct Scsi_Host *shost)
blk_mq_free_tag_set(&shost->tag_set);
}
+/**
+ * scsi_device_from_queue - return sdev associated with a request_queue
+ * @q: The request queue to return the sdev from
+ *
+ * Return the sdev associated with a request queue or NULL if the
+ * request_queue does not reference a SCSI device.
+ */
+struct scsi_device *scsi_device_from_queue(struct request_queue *q)
+{
+ struct scsi_device *sdev = NULL;
+
+ if (q->mq_ops) {
+ if (q->mq_ops == &scsi_mq_ops)
+ sdev = q->queuedata;
+ } else if (q->request_fn == scsi_request_fn)
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ sdev = NULL;
+
+ return sdev;
+}
+EXPORT_SYMBOL_GPL(scsi_device_from_queue);
+
/*
* Function: scsi_block_requests()
*
@@ -2497,28 +2494,20 @@ EXPORT_SYMBOL(scsi_mode_sense);
* @sdev: scsi device to change the state of.
* @timeout: command timeout
* @retries: number of retries before failing
- * @sshdr_external: Optional pointer to struct scsi_sense_hdr for
- * returning sense. Make sure that this is cleared before passing
- * in.
+ * @sshdr: output pointer for decoded sense information.
*
* Returns zero if unsuccessful or an error if TUR failed. For
* removable media, UNIT_ATTENTION sets ->changed flag.
**/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
- struct scsi_sense_hdr *sshdr_external)
+ struct scsi_sense_hdr *sshdr)
{
char cmd[] = {
TEST_UNIT_READY, 0, 0, 0, 0, 0,
};
- struct scsi_sense_hdr *sshdr;
int result;
- if (!sshdr_external)
- sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
- else
- sshdr = sshdr_external;
-
/* try to eat the UNIT_ATTENTION if there are enough retries */
do {
result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
@@ -2529,8 +2518,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
} while (scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION && --retries);
- if (!sshdr_external)
- kfree(sshdr);
return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 319868f3f674..d0219e36080c 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -123,25 +123,21 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
{
int i, result;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
+ struct scsi_sense_hdr sshdr_tmp;
+
+ if (!sshdr)
+ sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
- result = scsi_execute(sdev, cmd, dir, buffer, bufflen,
- sense, DV_TIMEOUT, /* retries */ 1,
+ result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
+ sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
- NULL);
- if (driver_byte(result) & DRIVER_SENSE) {
- struct scsi_sense_hdr sshdr_tmp;
- if (!sshdr)
- sshdr = &sshdr_tmp;
-
- if (scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE,
- sshdr)
- && sshdr->sense_key == UNIT_ATTENTION)
- continue;
- }
- break;
+ 0, NULL);
+ if (!(driver_byte(result) & DRIVER_SENSE) ||
+ sshdr->sense_key != UNIT_ATTENTION)
+ break;
}
return result;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cb6e68dd6df0..d277e8620e3e 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1425,7 +1425,6 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
struct scsi_disk *sdkp = scsi_disk_get(disk);
struct scsi_device *sdp;
- struct scsi_sense_hdr *sshdr = NULL;
int retval;
if (!sdkp)
@@ -1454,22 +1453,21 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
* by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
* sd_revalidate() is called.
*/
- retval = -ENODEV;
-
if (scsi_block_when_processing_errors(sdp)) {
- sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
+ struct scsi_sense_hdr sshdr = { 0, };
+
retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
- sshdr);
- }
+ &sshdr);
- /* failed to execute TUR, assume media not present */
- if (host_byte(retval)) {
- set_media_not_present(sdkp);
- goto out;
- }
+ /* failed to execute TUR, assume media not present */
+ if (host_byte(retval)) {
+ set_media_not_present(sdkp);
+ goto out;
+ }
- if (media_not_present(sdkp, sshdr))
- goto out;
+ if (media_not_present(sdkp, &sshdr))
+ goto out;
+ }
/*
* For removable scsi disk we have to recognise the presence
@@ -1485,7 +1483,6 @@ out:
* Medium present state has changed in either direction.
* Device has indicated UNIT_ATTENTION.
*/
- kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
scsi_disk_put(sdkp);
@@ -1511,9 +1508,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
* Leave the rest of the command zero to indicate
* flush everything.
*/
- res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0,
- &sshdr, timeout, SD_MAX_RETRIES,
- NULL, 0, RQF_PM);
+ res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
if (res == 0)
break;
}
@@ -3079,23 +3075,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
put_device(&sdkp->dev);
}
-struct sd_devt {
- int idx;
- struct disk_devt disk_devt;
-};
-
-void sd_devt_release(struct disk_devt *disk_devt)
-{
- struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
- disk_devt);
-
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, sd_devt->idx);
- spin_unlock(&sd_index_lock);
-
- kfree(sd_devt);
-}
-
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@@ -3117,7 +3096,6 @@ void sd_devt_release(struct disk_devt *disk_devt)
static int sd_probe(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
- struct sd_devt *sd_devt;
struct scsi_disk *sdkp;
struct gendisk *gd;
int index;
@@ -3143,13 +3121,9 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
- if (!sd_devt)
- goto out_free;
-
gd = alloc_disk(SD_MINORS);
if (!gd)
- goto out_free_devt;
+ goto out_free;
do {
if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3165,11 +3139,6 @@ static int sd_probe(struct device *dev)
goto out_put;
}
- atomic_set(&sd_devt->disk_devt.count, 1);
- sd_devt->disk_devt.release = sd_devt_release;
- sd_devt->idx = index;
- gd->disk_devt = &sd_devt->disk_devt;
-
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error) {
sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3209,14 +3178,13 @@ static int sd_probe(struct device *dev)
return 0;
out_free_index:
- put_disk_devt(&sd_devt->disk_devt);
- sd_devt = NULL;
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, index);
+ spin_unlock(&sd_index_lock);
out_put:
put_disk(gd);
out_free:
kfree(sdkp);
- out_free_devt:
- kfree(sd_devt);
out:
scsi_autopm_put_device(sdp);
return error;
@@ -3275,7 +3243,10 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- put_disk_devt(disk->disk_devt);
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sdkp->index);
+ spin_unlock(&sd_index_lock);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
@@ -3299,8 +3270,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
- res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL, 0, RQF_PM);
+ res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (driver_byte(res) & DRIVER_SENSE)
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 11c0dfb3dfa3..657ad15682a3 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -534,8 +534,7 @@ static int pqi_write_current_time_to_host_wellness(
size_t buffer_length;
time64_t local_time;
unsigned int year;
- struct timeval time;
- struct rtc_time tm;
+ struct tm tm;
buffer_length = sizeof(*buffer);
@@ -552,9 +551,8 @@ static int pqi_write_current_time_to_host_wellness(
put_unaligned_le16(sizeof(buffer->time),
&buffer->time_length);
- do_gettimeofday(&time);
- local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
- rtc_time64_to_tm(local_time, &tm);
+ local_time = ktime_get_real_seconds();
+ time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
year = tm.tm_year + 1900;
buffer->time[0] = bin2bcd(tm.tm_hour);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index dfffdf63e44c..4610c8c5693f 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -187,30 +187,19 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
struct scsi_device *SDev;
struct scsi_sense_hdr sshdr;
int result, err = 0, retries = 0;
- struct request_sense *sense = cgc->sense;
SDev = cd->device;
- if (!sense) {
- sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (!sense) {
- err = -ENOMEM;
- goto out;
- }
- }
-
retry:
if (!scsi_block_when_processing_errors(SDev)) {
err = -ENODEV;
goto out;
}
- memset(sense, 0, sizeof(*sense));
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
- cgc->buffer, cgc->buflen, (char *)sense,
- cgc->timeout, IOCTL_RETRIES, 0, NULL);
-
- scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
+ cgc->buffer, cgc->buflen,
+ (unsigned char *)cgc->sense, &sshdr,
+ cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
@@ -261,8 +250,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
/* Wake up a process waiting for device */
out:
- if (!cgc->sense)
- kfree(sense);
cgc->stat = err;
return err;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 81212d4bd9bf..e5ef78a6848e 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -23,7 +23,7 @@ static const char *verstr = "20160209";
#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index ce5d023c1c91..c87d770b519a 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1523,18 +1523,6 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
return false;
}
- /*
- * Not performing check for each individual select_major
- * mappings of select_minor, since there is no harm in
- * configuring a non-existent select_minor
- */
- if (host->testbus.select_minor > 0xFF) {
- dev_err(host->hba->dev,
- "%s: 0x%05X is not a legal testbus option\n",
- __func__, host->testbus.select_minor);
- return false;
- }
-
return true;
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8b721f431dd0..dc6efbd1be8e 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6915,9 +6915,9 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
goto out;
}
- ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
- UFSHCD_REQ_SENSE_SIZE, NULL,
- msecs_to_jiffies(1000), 3, NULL, 0, RQF_PM);
+ ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
+ UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
+ msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -6982,8 +6982,8 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* callbacks hence set the RQF_PM flag so that it doesn't resume the
* already suspended childs.
*/
- ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
- START_STOP_TIMEOUT, 0, NULL, 0, RQF_PM);
+ ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
+ START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c680d7641311..939c47df73fa 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
+#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -29,6 +30,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <linux/seqlock.h>
+#include <linux/blk-mq-virtio.h>
#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
@@ -108,7 +110,6 @@ struct virtio_scsi {
bool affinity_hint_set;
struct hlist_node node;
- struct hlist_node node_dead;
/* Protected by event_vq lock */
bool stop_events;
@@ -118,7 +119,6 @@ struct virtio_scsi {
struct virtio_scsi_vq req_vqs[];
};
-static enum cpuhp_state virtioscsi_online;
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
@@ -766,6 +766,13 @@ static void virtscsi_target_destroy(struct scsi_target *starget)
kfree(tgt);
}
+static int virtscsi_map_queues(struct Scsi_Host *shost)
+{
+ struct virtio_scsi *vscsi = shost_priv(shost);
+
+ return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+}
+
static struct scsi_host_template virtscsi_host_template_single = {
.module = THIS_MODULE,
.name = "Virtio SCSI HBA",
@@ -801,6 +808,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
.use_clustering = ENABLE_CLUSTERING,
.target_alloc = virtscsi_target_alloc,
.target_destroy = virtscsi_target_destroy,
+ .map_queues = virtscsi_map_queues,
.track_queue_depth = 1,
};
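The new .map_queues hook lets blk-mq build its hardware-context-to-CPU mapping from the interrupt affinity that the virtio transport assigns to each request virtqueue, instead of the driver's own affinity-hint bookkeeping that is removed further below. The helper comes from the blk-mq/virtio glue added in the same release; a sketch of its declaration and meaning, hedged since only the call site is visible here:

/*
 * From <linux/blk-mq-virtio.h>: map each blk-mq hardware queue to the CPUs
 * in the affinity mask of the virtqueue at index (first_vec + queue number).
 * The value 2 used above corresponds to VIRTIO_SCSI_VQ_BASE, i.e. the
 * control and event virtqueues that sit in front of the request virtqueues.
 */
int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
			     struct virtio_device *vdev, int first_vec);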
@@ -817,80 +825,6 @@ static struct scsi_host_template virtscsi_host_template_multi = {
virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
} while(0)
-static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
-{
- int i;
- int cpu;
-
- /* In multiqueue mode, when the number of cpu is equal
- * to the number of request queues, we let the qeueues
- * to be private to one cpu by setting the affinity hint
- * to eliminate the contention.
- */
- if ((vscsi->num_queues == 1 ||
- vscsi->num_queues != num_online_cpus()) && affinity) {
- if (vscsi->affinity_hint_set)
- affinity = false;
- else
- return;
- }
-
- if (affinity) {
- i = 0;
- for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
- i++;
- }
-
- vscsi->affinity_hint_set = true;
- } else {
- for (i = 0; i < vscsi->num_queues; i++) {
- if (!vscsi->req_vqs[i].vq)
- continue;
-
- virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
- }
-
- vscsi->affinity_hint_set = false;
- }
-}
-
-static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
-{
- get_online_cpus();
- __virtscsi_set_affinity(vscsi, affinity);
- put_online_cpus();
-}
-
-static int virtscsi_cpu_online(unsigned int cpu, struct hlist_node *node)
-{
- struct virtio_scsi *vscsi = hlist_entry_safe(node, struct virtio_scsi,
- node);
- __virtscsi_set_affinity(vscsi, true);
- return 0;
-}
-
-static int virtscsi_cpu_notif_add(struct virtio_scsi *vi)
-{
- int ret;
-
- ret = cpuhp_state_add_instance(virtioscsi_online, &vi->node);
- if (ret)
- return ret;
-
- ret = cpuhp_state_add_instance(CPUHP_VIRT_SCSI_DEAD, &vi->node_dead);
- if (ret)
- cpuhp_state_remove_instance(virtioscsi_online, &vi->node);
- return ret;
-}
-
-static void virtscsi_cpu_notif_remove(struct virtio_scsi *vi)
-{
- cpuhp_state_remove_instance_nocalls(virtioscsi_online, &vi->node);
- cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_SCSI_DEAD,
- &vi->node_dead);
-}
-
static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
struct virtqueue *vq)
{
@@ -900,14 +834,8 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
- struct Scsi_Host *sh = virtio_scsi_host(vdev);
- struct virtio_scsi *vscsi = shost_priv(sh);
-
- virtscsi_set_affinity(vscsi, false);
-
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
-
vdev->config->del_vqs(vdev);
}
@@ -920,6 +848,7 @@ static int virtscsi_init(struct virtio_device *vdev,
vq_callback_t **callbacks;
const char **names;
struct virtqueue **vqs;
+ struct irq_affinity desc = { .pre_vectors = 2 };
num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
@@ -941,7 +870,8 @@ static int virtscsi_init(struct virtio_device *vdev,
}
/* Discover virtqueues and write information to configuration. */
- err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
+ err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names,
+ &desc);
if (err)
goto out;
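Passing a struct irq_affinity to find_vqs() is what makes the removals above possible: the transport (virtio-pci with per-virtqueue MSI-X vectors, in the common case) spreads the request-queue vectors across the online CPUs itself and keeps that assignment valid across CPU hotplug, so the driver-side affinity hints and cpuhp notifier instances are no longer needed. Setting .pre_vectors = 2 excludes the first two vectors, belonging to the control and event virtqueues, from the spreading. A condensed sketch of how the pieces fit together, assuming the 4.11-era six-argument find_vqs() that this hunk introduces:

/* Probe path: ask the transport to spread the request-vq vectors. */
struct irq_affinity desc = { .pre_vectors = 2 };	/* control + event vqs */
err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);

/* blk-mq path: read the resulting per-vq affinity back into the CPU map. */
blk_mq_virtio_map_queues(&shost->tag_set, vdev, 2 /* VIRTIO_SCSI_VQ_BASE */);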
@@ -1007,10 +937,6 @@ static int virtscsi_probe(struct virtio_device *vdev)
if (err)
goto virtscsi_init_failed;
- err = virtscsi_cpu_notif_add(vscsi);
- if (err)
- goto scsi_add_host_failed;
-
cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
@@ -1065,9 +991,6 @@ static void virtscsi_remove(struct virtio_device *vdev)
virtscsi_cancel_event_work(vscsi);
scsi_remove_host(shost);
-
- virtscsi_cpu_notif_remove(vscsi);
-
virtscsi_remove_vqs(vdev);
scsi_host_put(shost);
}
@@ -1075,10 +998,6 @@ static void virtscsi_remove(struct virtio_device *vdev)
#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
- struct Scsi_Host *sh = virtio_scsi_host(vdev);
- struct virtio_scsi *vscsi = shost_priv(sh);
-
- virtscsi_cpu_notif_remove(vscsi);
virtscsi_remove_vqs(vdev);
return 0;
}
@@ -1093,11 +1012,6 @@ static int virtscsi_restore(struct virtio_device *vdev)
if (err)
return err;
- err = virtscsi_cpu_notif_add(vscsi);
- if (err) {
- vdev->config->del_vqs(vdev);
- return err;
- }
virtio_device_ready(vdev);
if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
@@ -1152,16 +1066,6 @@ static int __init init(void)
pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
goto error;
}
- ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
- "scsi/virtio:online",
- virtscsi_cpu_online, NULL);
- if (ret < 0)
- goto error;
- virtioscsi_online = ret;
- ret = cpuhp_setup_state_multi(CPUHP_VIRT_SCSI_DEAD, "scsi/virtio:dead",
- NULL, virtscsi_cpu_online);
- if (ret)
- goto error;
ret = register_virtio_driver(&virtio_scsi_driver);
if (ret < 0)
goto error;
@@ -1177,17 +1081,12 @@ error:
kmem_cache_destroy(virtscsi_cmd_cache);
virtscsi_cmd_cache = NULL;
}
- if (virtioscsi_online)
- cpuhp_remove_multi_state(virtioscsi_online);
- cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
return ret;
}
static void __exit fini(void)
{
unregister_virtio_driver(&virtio_scsi_driver);
- cpuhp_remove_multi_state(virtioscsi_online);
- cpuhp_remove_multi_state(CPUHP_VIRT_SCSI_DEAD);
mempool_destroy(virtscsi_cmd_pool);
kmem_cache_destroy(virtscsi_cmd_cache);
}