Diffstat (limited to 'drivers/scsi/megaraid')
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h        |   64
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c   | 1025
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c     |   20
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c |  676
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h |   29
5 files changed, 1207 insertions, 607 deletions
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index a6722c93a295..f5a36ccb8606 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -35,8 +35,8 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "07.702.06.00-rc1"
-#define MEGASAS_RELDATE "June 21, 2017"
+#define MEGASAS_VERSION "07.703.05.00-rc1"
+#define MEGASAS_RELDATE "October 5, 2017"
/*
* Device IDs
@@ -57,6 +57,7 @@
#define PCI_DEVICE_ID_LSI_CUTLASS_52 0x0052
#define PCI_DEVICE_ID_LSI_CUTLASS_53 0x0053
#define PCI_DEVICE_ID_LSI_VENTURA 0x0014
+#define PCI_DEVICE_ID_LSI_CRUSADER 0x0015
#define PCI_DEVICE_ID_LSI_HARPOON 0x0016
#define PCI_DEVICE_ID_LSI_TOMCAT 0x0017
#define PCI_DEVICE_ID_LSI_VENTURA_4PORT 0x001B
@@ -186,16 +187,19 @@
/*
* MFI command opcodes
*/
-#define MFI_CMD_INIT 0x00
-#define MFI_CMD_LD_READ 0x01
-#define MFI_CMD_LD_WRITE 0x02
-#define MFI_CMD_LD_SCSI_IO 0x03
-#define MFI_CMD_PD_SCSI_IO 0x04
-#define MFI_CMD_DCMD 0x05
-#define MFI_CMD_ABORT 0x06
-#define MFI_CMD_SMP 0x07
-#define MFI_CMD_STP 0x08
-#define MFI_CMD_INVALID 0xff
+enum MFI_CMD_OP {
+ MFI_CMD_INIT = 0x0,
+ MFI_CMD_LD_READ = 0x1,
+ MFI_CMD_LD_WRITE = 0x2,
+ MFI_CMD_LD_SCSI_IO = 0x3,
+ MFI_CMD_PD_SCSI_IO = 0x4,
+ MFI_CMD_DCMD = 0x5,
+ MFI_CMD_ABORT = 0x6,
+ MFI_CMD_SMP = 0x7,
+ MFI_CMD_STP = 0x8,
+ MFI_CMD_OP_COUNT,
+ MFI_CMD_INVALID = 0xff
+};
#define MR_DCMD_CTRL_GET_INFO 0x01010000
#define MR_DCMD_LD_GET_LIST 0x03010000
@@ -1504,6 +1508,15 @@ enum FW_BOOT_CONTEXT {
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
+#define MR_CAN_HANDLE_64_BIT_DMA_OFFSET (1 << 25)
+
+enum MR_ADAPTER_TYPE {
+ MFI_SERIES = 1,
+ THUNDERBOLT_SERIES = 2,
+ INVADER_SERIES = 3,
+ VENTURA_SERIES = 4,
+};
+
/*
* register set for both 1068 and 1078 controllers
* structure extended for 1078 registers
@@ -1617,7 +1630,8 @@ union megasas_sgl_frame {
typedef union _MFI_CAPABILITIES {
struct {
#if defined(__BIG_ENDIAN_BITFIELD)
- u32 reserved:19;
+ u32 reserved:18;
+ u32 support_64bit_mode:1;
u32 support_pd_map_target_id:1;
u32 support_qd_throttling:1;
u32 support_fp_rlbypass:1;
@@ -1645,7 +1659,8 @@ typedef union _MFI_CAPABILITIES {
u32 support_fp_rlbypass:1;
u32 support_qd_throttling:1;
u32 support_pd_map_target_id:1;
- u32 reserved:19;
+ u32 support_64bit_mode:1;
+ u32 reserved:18;
#endif
} mfi_capabilities;
__le32 reg;
@@ -2114,6 +2129,19 @@ struct megasas_instance {
u32 *crash_dump_buf;
dma_addr_t crash_dump_h;
+
+ struct MR_PD_LIST *pd_list_buf;
+ dma_addr_t pd_list_buf_h;
+
+ struct megasas_ctrl_info *ctrl_info_buf;
+ dma_addr_t ctrl_info_buf_h;
+
+ struct MR_LD_LIST *ld_list_buf;
+ dma_addr_t ld_list_buf_h;
+
+ struct MR_LD_TARGETID_LIST *ld_targetid_list_buf;
+ dma_addr_t ld_targetid_list_buf_h;
+
void *crash_buf[MAX_CRASH_DUMP_SIZE];
unsigned int fw_crash_buffer_size;
unsigned int fw_crash_state;
@@ -2210,8 +2238,6 @@ struct megasas_instance {
/* Ptr to hba specific information */
void *ctrl_context;
- u32 ctrl_context_pages;
- struct megasas_ctrl_info *ctrl_info;
unsigned int msix_vectors;
struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
u64 map_id;
@@ -2236,12 +2262,13 @@ struct megasas_instance {
bool dev_handle;
bool fw_sync_cache_support;
u32 mfi_frame_size;
- bool is_ventura;
bool msix_combined;
u16 max_raid_mapsize;
/* preferred count to send as LDIO irrespective of FP capable.*/
u8 r1_ldio_hint_default;
u32 nvme_page_size;
+ u8 adapter_type;
+ bool consistent_mask_64bit;
};
struct MR_LD_VF_MAP {
u32 size;
@@ -2488,4 +2515,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd);
u32 mega_mod64(u64 dividend, u32 divisor);
int megasas_alloc_fusion_context(struct megasas_instance *instance);
void megasas_free_fusion_context(struct megasas_instance *instance);
+void megasas_set_dma_settings(struct megasas_instance *instance,
+ struct megasas_dcmd_frame *dcmd,
+ dma_addr_t dma_addr, u32 dma_len);
#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index a36e18156e49..cc54bdb5c712 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -161,6 +161,7 @@ static struct pci_device_id megasas_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
/* VENTURA */
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
@@ -205,6 +206,43 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
int initial);
+static int
+megasas_set_dma_mask(struct megasas_instance *instance);
+static int
+megasas_alloc_ctrl_mem(struct megasas_instance *instance);
+static inline void
+megasas_free_ctrl_mem(struct megasas_instance *instance);
+static inline int
+megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
+static inline void
+megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
+static inline void
+megasas_init_ctrl_params(struct megasas_instance *instance);
+
+/**
+ * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
+ * @instance: Adapter soft state
+ * @dcmd: DCMD frame inside MFI command
+ * @dma_addr: DMA address of buffer to be passed to FW
+ * @dma_len: Length of DMA buffer to be passed to FW
+ * @return: void
+ */
+void megasas_set_dma_settings(struct megasas_instance *instance,
+ struct megasas_dcmd_frame *dcmd,
+ dma_addr_t dma_addr, u32 dma_len)
+{
+ if (instance->consistent_mask_64bit) {
+ dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
+ dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
+
+ } else {
+ dcmd->sgl.sge32[0].phys_addr =
+ cpu_to_le32(lower_32_bits(dma_addr));
+ dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
+ dcmd->flags = cpu_to_le16(dcmd->flags);
+ }
+}
void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -2023,7 +2061,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
msleep(1000);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
/* Flush */
readl(&instance->reg_set->doorbell);
@@ -2485,13 +2523,15 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
+
+ megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
+ sizeof(struct MR_CTRL_HB_HOST_MEM));
dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
instance->host->host_no);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
retval = megasas_issue_blocked_cmd(instance, cmd,
MEGASAS_ROUTINE_WAIT_TIME_VF);
else
@@ -2787,7 +2827,9 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
/*
* First wait for all commands to complete
*/
- if (instance->ctrl_context) {
+ if (instance->adapter_type == MFI_SERIES) {
+ ret = megasas_generic_reset(scmd);
+ } else {
struct megasas_cmd_fusion *cmd;
cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
if (cmd)
@@ -2795,8 +2837,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
ret = megasas_reset_fusion(scmd->device->host,
SCSIIO_TIMEOUT_OCR);
- } else
- ret = megasas_generic_reset(scmd);
+ }
return ret;
}
@@ -2813,7 +2854,7 @@ static int megasas_task_abort(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_task_abort_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
@@ -2835,7 +2876,7 @@ static int megasas_reset_target(struct scsi_cmnd *scmd)
instance = (struct megasas_instance *)scmd->device->host->hostdata;
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
ret = megasas_reset_target_fusion(scmd);
else {
sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
@@ -3280,6 +3321,9 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
case MFI_CMD_SMP:
case MFI_CMD_STP:
+ megasas_complete_int_cmd(instance, cmd);
+ break;
+
case MFI_CMD_DCMD:
opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
/* Check for LD map update */
@@ -3366,6 +3410,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
default:
dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
hdr->cmd);
+ megasas_complete_int_cmd(instance, cmd);
break;
}
}
@@ -3712,7 +3757,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(
MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
@@ -3730,7 +3775,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context))
+ (instance->adapter_type != MFI_SERIES))
writel(MFI_INIT_HOTPLUG,
&instance->reg_set->doorbell);
else
@@ -3750,11 +3795,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
(instance->pdev->device ==
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
- (instance->ctrl_context)) {
+ (instance->adapter_type != MFI_SERIES)) {
writel(MFI_RESET_FLAGS,
&instance->reg_set->doorbell);
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
for (i = 0; i < (10 * 1000); i += 20) {
if (readl(
&instance->
@@ -3921,7 +3966,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
* max_sge_sz = 12 byte (sizeof megasas_sge64)
* Total 192 byte (3 MFI frame of 64 byte)
*/
- frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
+ frame_count = (instance->adapter_type == MFI_SERIES) ?
+ (15 + 1) : (3 + 1);
instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
/*
* Use DMA pool facility provided by PCI layer
@@ -3976,7 +4022,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
memset(cmd->frame, 0, instance->mfi_frame_size);
cmd->frame->io.context = cpu_to_le32(cmd->index);
cmd->frame->io.pad_0 = 0;
- if (!instance->ctrl_context && reset_devices)
+ if ((instance->adapter_type == MFI_SERIES) && reset_devices)
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
}
@@ -4030,9 +4076,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
int j;
u16 max_cmd;
struct megasas_cmd *cmd;
- struct fusion_context *fusion;
- fusion = instance->ctrl_context;
max_cmd = instance->max_mfi_cmds;
/*
@@ -4096,7 +4140,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
inline int
dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
- if (!instance->ctrl_context)
+ if (instance->adapter_type == MFI_SERIES)
return KILL_ADAPTER;
else if (instance->unload ||
test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
@@ -4132,15 +4176,17 @@ megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
+ sizeof(struct MR_PD_INFO));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -4203,6 +4249,9 @@ megasas_get_pd_list(struct megasas_instance *instance)
return ret;
}
+ ci = instance->pd_list_buf;
+ ci_h = instance->pd_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4212,15 +4261,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4229,15 +4269,17 @@ megasas_get_pd_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
+ (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -4248,7 +4290,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
"failed/not supported by firmware\n");
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megaraid_sas_kill_hba(instance);
else
instance->pd_list_not_supported = 1;
@@ -4305,10 +4347,6 @@ megasas_get_pd_list(struct megasas_instance *instance)
}
- pci_free_consistent(instance->pdev,
- MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4334,6 +4372,9 @@ megasas_get_ld_list(struct megasas_instance *instance)
dma_addr_t ci_h = 0;
u32 ld_count;
+ ci = instance->ld_list_buf;
+ ci_h = instance->ld_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4343,16 +4384,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_LIST),
- &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4361,15 +4392,17 @@ megasas_get_ld_list(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -4423,8 +4456,6 @@ megasas_get_ld_list(struct megasas_instance *instance)
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4450,6 +4481,9 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dma_addr_t ci_h = 0;
u32 tgtid_count;
+ ci = instance->ld_targetid_list_buf;
+ ci_h = instance->ld_targetid_list_buf_h;
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
@@ -4460,16 +4494,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
-
- if (!ci) {
- dev_warn(&instance->pdev->dev,
- "Failed to alloc mem for ld_list_query\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -4480,15 +4504,17 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
dcmd->pad_0 = 0;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct MR_LD_TARGETID_LIST));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -4539,9 +4565,6 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
break;
}
- pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
- ci, ci_h);
-
if (ret != DCMD_TIMEOUT)
megasas_return_cmd(instance, cmd);
@@ -4563,9 +4586,9 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
return;
instance->supportmax256vd =
- instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+ instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
/* Below is additional check to address future FW enhancement */
- if (instance->ctrl_info->max_lds > 64)
+ if (instance->ctrl_info_buf->max_lds > 64)
instance->supportmax256vd = 1;
instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
@@ -4623,10 +4646,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
struct megasas_cmd *cmd;
struct megasas_dcmd_frame *dcmd;
struct megasas_ctrl_info *ci;
- struct megasas_ctrl_info *ctrl_info;
dma_addr_t ci_h = 0;
- ctrl_info = instance->ctrl_info;
+ ci = instance->ctrl_info_buf;
+ ci_h = instance->ctrl_info_buf_h;
cmd = megasas_get_cmd(instance);
@@ -4637,45 +4660,37 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
dcmd = &cmd->frame->dcmd;
- ci = pci_alloc_consistent(instance->pdev,
- sizeof(struct megasas_ctrl_info), &ci_h);
-
- if (!ci) {
- dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
- megasas_return_cmd(instance, cmd);
- return -ENOMEM;
- }
-
memset(ci, 0, sizeof(*ci));
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
dcmd->mbox.b[0] = 1;
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h,
+ sizeof(struct megasas_ctrl_info));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
switch (ret) {
case DCMD_SUCCESS:
- memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
/* Save required controller information in
* CPU endianness format.
*/
- le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
- le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
- le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
+ le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
+ le32_to_cpus((u32 *)&ci->adapterOperations2);
+ le32_to_cpus((u32 *)&ci->adapterOperations3);
+ le16_to_cpus((u16 *)&ci->adapter_operations4);
/* Update the latest Ext VD info.
* From Init path, store current firmware details.
@@ -4684,21 +4699,21 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
*/
megasas_update_ext_vd_details(instance);
instance->use_seqnum_jbod_fp =
- ctrl_info->adapterOperations3.useSeqNumJbodFP;
+ ci->adapterOperations3.useSeqNumJbodFP;
instance->support_morethan256jbod =
- ctrl_info->adapter_operations4.support_pd_map_target_id;
+ ci->adapter_operations4.support_pd_map_target_id;
/*Check whether controller is iMR or MR */
- instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
+ instance->is_imr = (ci->memory_size ? 0 : 1);
dev_info(&instance->pdev->dev,
"controller type\t: %s(%dMB)\n",
instance->is_imr ? "iMR" : "MR",
- le16_to_cpu(ctrl_info->memory_size));
+ le16_to_cpu(ci->memory_size));
instance->disableOnlineCtrlReset =
- ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+ ci->properties.OnOffProperties.disableOnlineCtrlReset;
instance->secure_jbod_support =
- ctrl_info->adapterOperations3.supportSecurityonJBOD;
+ ci->adapterOperations3.supportSecurityonJBOD;
dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
@@ -4726,9 +4741,6 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
}
- pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
- ci, ci_h);
-
megasas_return_cmd(instance, cmd);
@@ -4772,15 +4784,17 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+ dcmd->flags = MFI_FRAME_DIR_NONE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
+ CRASH_DMA_BUF_SIZE);
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
@@ -5088,7 +5102,7 @@ megasas_setup_jbod_map(struct megasas_instance *instance)
(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
if (reset_devices || !fusion ||
- !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
+ !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
dev_info(&instance->pdev->dev,
"Jbod map is not supported %s %d\n",
__func__, __LINE__);
@@ -5167,7 +5181,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
reg_set = instance->reg_set;
- if (fusion)
+ if (instance->adapter_type != MFI_SERIES)
instance->instancet = &megasas_instance_template_fusion;
else {
switch (instance->pdev->device) {
@@ -5208,7 +5222,20 @@ static int megasas_init_fw(struct megasas_instance *instance)
goto fail_ready_state;
}
- if (instance->is_ventura) {
+ megasas_init_ctrl_params(instance);
+
+ if (megasas_set_dma_mask(instance))
+ goto fail_ready_state;
+
+ if (megasas_alloc_ctrl_mem(instance))
+ goto fail_alloc_dma_buf;
+
+ if (megasas_alloc_ctrl_dma_buffers(instance))
+ goto fail_alloc_dma_buf;
+
+ fusion = instance->ctrl_context;
+
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_3 =
readl(&instance->reg_set->outbound_scratch_pad_3);
instance->max_raid_mapsize = ((scratch_pad_3 >>
@@ -5226,7 +5253,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
(&instance->reg_set->outbound_scratch_pad_2);
/* Check max MSI-X vectors */
if (fusion) {
- if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
+ if (instance->adapter_type == THUNDERBOLT_SERIES) {
+ /* Thunderbolt Series*/
instance->msix_vectors = (scratch_pad_2
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
fw_msix_count = instance->msix_vectors;
@@ -5301,11 +5329,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
- GFP_KERNEL);
- if (instance->ctrl_info == NULL)
- goto fail_init_adapter;
-
/*
* Below are default value for legacy Firmware.
* non-fusion based controllers
@@ -5316,7 +5339,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
if (instance->instancet->init_adapter(instance))
goto fail_init_adapter;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
scratch_pad_4 =
readl(&instance->reg_set->outbound_scratch_pad_4);
if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
@@ -5352,7 +5375,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
/* stream detection initialization */
- if (instance->is_ventura && fusion) {
+ if (instance->adapter_type == VENTURA_SERIES) {
fusion->stream_detect_by_ld =
kzalloc(sizeof(struct LD_STREAM_DETECT *)
* MAX_LOGICAL_DRIVES_EXT,
@@ -5394,7 +5417,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
* to calculate max_sectors_1. So the number ended up as zero always.
*/
tmp_sectors = 0;
- ctrl_info = instance->ctrl_info;
+ ctrl_info = instance->ctrl_info_buf;
max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
le16_to_cpu(ctrl_info->max_strips_per_io);
@@ -5505,9 +5528,10 @@ fail_setup_irqs:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
instance->msix_vectors = 0;
+fail_alloc_dma_buf:
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
fail_ready_state:
- kfree(instance->ctrl_info);
- instance->ctrl_info = NULL;
iounmap(instance->reg_set);
fail_ioremap:
@@ -5580,13 +5604,14 @@ megasas_get_seq_num(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
+
+ megasas_set_dma_settings(instance, dcmd, el_info_h,
+ sizeof(struct megasas_evt_log_info));
if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
DCMD_SUCCESS) {
@@ -5711,7 +5736,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0x0;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
@@ -5719,8 +5744,9 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
dcmd->mbox.w[0] = cpu_to_le32(seq_num);
instance->last_seq_num = seq_num;
dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
+
+ megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
+ sizeof(struct megasas_evt_detail));
if (instance->aen_cmd != NULL) {
megasas_return_cmd(instance, cmd);
@@ -5787,18 +5813,18 @@ megasas_get_target_prop(struct megasas_instance *instance,
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len =
cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
- dcmd->sgl.sge32[0].phys_addr =
- cpu_to_le32(instance->tgt_prop_h);
- dcmd->sgl.sge32[0].length =
- cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
+ sizeof(struct MR_TARGET_PROPERTIES));
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ !instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance,
cmd, MFI_IO_TIMEOUT_SECS);
else
@@ -5923,234 +5949,408 @@ static int megasas_io_attach(struct megasas_instance *instance)
return 0;
}
+/**
+ * megasas_set_dma_mask - Set DMA mask for supported controllers
+ *
+ * @instance: Adapter soft state
+ * Description:
+ *
+ * For Ventura, driver/FW will operate in 64bit DMA addresses.
+ *
+ * For Invader:
+ * By default, driver/FW will operate in 32bit DMA addresses
+ * for consistent DMA mapping, but if the 32 bit consistent
+ * DMA mask fails, driver will try with a 64 bit consistent
+ * mask, provided FW is truly 64bit DMA capable.
+ *
+ * For older controllers (Thunderbolt and MFI based adapters),
+ * driver/FW will operate in 32 bit consistent DMA addresses.
+ */
static int
-megasas_set_dma_mask(struct pci_dev *pdev)
+megasas_set_dma_mask(struct megasas_instance *instance)
{
- /*
- * All our controllers are capable of performing 64-bit DMA
- */
+ u64 consistent_mask;
+ struct pci_dev *pdev;
+ u32 scratch_pad_2;
+
+ pdev = instance->pdev;
+ consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
+ DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
+
if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
+ (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
+ /*
+ * If 32 bit DMA mask fails, then try for 64 bit mask
+ * for FW capable of handling 64 bit DMA.
+ */
+ scratch_pad_2 = readl
+ (&instance->reg_set->outbound_scratch_pad_2);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+ if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
+ goto fail_set_dma_mask;
+ else if (dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(64)))
goto fail_set_dma_mask;
}
- } else {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
- goto fail_set_dma_mask;
- }
- /*
- * Ensure that all data structures are allocated in 32-bit
- * memory.
- */
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- /* Try 32bit DMA mask and 32 bit Consistent dma mask */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
- dev_info(&pdev->dev, "set 32bit DMA mask"
- "and 32 bit consistent mask\n");
- else
- goto fail_set_dma_mask;
- }
+ } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ goto fail_set_dma_mask;
+
+ if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
+ instance->consistent_mask_64bit = false;
+ else
+ instance->consistent_mask_64bit = true;
+
+ dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
+ ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
+ (instance->consistent_mask_64bit ? "64" : "32"));
return 0;
fail_set_dma_mask:
- return 1;
+ dev_err(&pdev->dev, "Failed to set DMA mask\n");
+ return -1;
+
}
-/**
- * megasas_probe_one - PCI hotplug entry point
- * @pdev: PCI device structure
- * @id: PCI ids of supported hotplugged adapter
+/*
+ * megasas_set_adapter_type - Set adapter type.
+ * Supported controllers can be divided into
+ * 4 categories: enum MR_ADAPTER_TYPE {
+ * MFI_SERIES = 1,
+ * THUNDERBOLT_SERIES = 2,
+ * INVADER_SERIES = 3,
+ * VENTURA_SERIES = 4,
+ * };
+ * @instance: Adapter soft state
+ * return: void
*/
-static int megasas_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static inline void megasas_set_adapter_type(struct megasas_instance *instance)
{
- int rval, pos;
- struct Scsi_Host *host;
- struct megasas_instance *instance;
- u16 control = 0;
- struct fusion_context *fusion = NULL;
-
- /* Reset MSI-X in the kdump kernel */
- if (reset_devices) {
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
- &control);
- if (control & PCI_MSIX_FLAGS_ENABLE) {
- dev_info(&pdev->dev, "resetting MSI-X\n");
- pci_write_config_word(pdev,
- pos + PCI_MSIX_FLAGS,
- control &
- ~PCI_MSIX_FLAGS_ENABLE);
- }
+ if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
+ (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
+ instance->adapter_type = MFI_SERIES;
+ } else {
+ switch (instance->pdev->device) {
+ case PCI_DEVICE_ID_LSI_VENTURA:
+ case PCI_DEVICE_ID_LSI_CRUSADER:
+ case PCI_DEVICE_ID_LSI_HARPOON:
+ case PCI_DEVICE_ID_LSI_TOMCAT:
+ case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
+ case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
+ instance->adapter_type = VENTURA_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_FUSION:
+ case PCI_DEVICE_ID_LSI_PLASMA:
+ instance->adapter_type = THUNDERBOLT_SERIES;
+ break;
+ case PCI_DEVICE_ID_LSI_INVADER:
+ case PCI_DEVICE_ID_LSI_INTRUDER:
+ case PCI_DEVICE_ID_LSI_INTRUDER_24:
+ case PCI_DEVICE_ID_LSI_CUTLASS_52:
+ case PCI_DEVICE_ID_LSI_CUTLASS_53:
+ case PCI_DEVICE_ID_LSI_FURY:
+ instance->adapter_type = INVADER_SERIES;
+ break;
+ default: /* For all other supported controllers */
+ instance->adapter_type = MFI_SERIES;
+ break;
}
}
+}
- /*
- * PCI prepping: enable device set bus mastering and dma mask
- */
- rval = pci_enable_device_mem(pdev);
+static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
+{
+ instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->producer_h);
+ instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
+ &instance->consumer_h);
- if (rval) {
- return rval;
+ if (!instance->producer || !instance->consumer) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate memory for producer, consumer\n");
+ return -1;
}
- pci_set_master(pdev);
+ *instance->producer = 0;
+ *instance->consumer = 0;
+ return 0;
+}
- if (megasas_set_dma_mask(pdev))
- goto fail_set_dma_mask;
+/**
+ * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
+ * structures which are not common across MFI
+ * adapters and fusion adapters.
+ * For MFI based adapters, allocate producer and
+ * consumer buffers. For fusion adapters, allocate
+ * memory for fusion context.
+ * @instance: Adapter soft state
+ * return: 0 for SUCCESS
+ */
+static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
+{
+ switch (instance->adapter_type) {
+ case MFI_SERIES:
+ if (megasas_alloc_mfi_ctrl_mem(instance))
+ return -ENOMEM;
+ break;
+ case VENTURA_SERIES:
+ case THUNDERBOLT_SERIES:
+ case INVADER_SERIES:
+ if (megasas_alloc_fusion_context(instance))
+ return -ENOMEM;
+ break;
+ }
- host = scsi_host_alloc(&megasas_template,
- sizeof(struct megasas_instance));
+ return 0;
+}
- if (!host) {
- dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
- goto fail_alloc_instance;
+/*
+ * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
+ * producer, consumer buffers for MFI adapters
+ *
+ * @instance - Adapter soft instance
+ *
+ */
+static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
+{
+ if (instance->adapter_type == MFI_SERIES) {
+ if (instance->producer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->producer,
+ instance->producer_h);
+ if (instance->consumer)
+ pci_free_consistent(instance->pdev, sizeof(u32),
+ instance->consumer,
+ instance->consumer_h);
+ } else {
+ megasas_free_fusion_context(instance);
}
+}
- instance = (struct megasas_instance *)host->hostdata;
- memset(instance, 0, sizeof(*instance));
- atomic_set(&instance->fw_reset_no_pci_access, 0);
- instance->pdev = pdev;
+/**
+ * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
+ * driver load time
+ *
+ * @instance- Adapter soft instance
+ * @return- 0 for SUCCESS
+ */
+static inline
+int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
+{
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
- switch (instance->pdev->device) {
- case PCI_DEVICE_ID_LSI_VENTURA:
- case PCI_DEVICE_ID_LSI_HARPOON:
- case PCI_DEVICE_ID_LSI_TOMCAT:
- case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
- case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
- instance->is_ventura = true;
- case PCI_DEVICE_ID_LSI_FUSION:
- case PCI_DEVICE_ID_LSI_PLASMA:
- case PCI_DEVICE_ID_LSI_INVADER:
- case PCI_DEVICE_ID_LSI_FURY:
- case PCI_DEVICE_ID_LSI_INTRUDER:
- case PCI_DEVICE_ID_LSI_INTRUDER_24:
- case PCI_DEVICE_ID_LSI_CUTLASS_52:
- case PCI_DEVICE_ID_LSI_CUTLASS_53:
- {
- if (megasas_alloc_fusion_context(instance)) {
- megasas_free_fusion_context(instance);
- goto fail_alloc_dma_buf;
- }
- fusion = instance->ctrl_context;
+ instance->evt_detail =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h);
- if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
- fusion->adapter_type = THUNDERBOLT_SERIES;
- else if (instance->is_ventura)
- fusion->adapter_type = VENTURA_SERIES;
- else
- fusion->adapter_type = INVADER_SERIES;
- }
- break;
- default: /* For all other supported controllers */
-
- instance->producer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer =
- pci_alloc_consistent(pdev, sizeof(u32),
- &instance->consumer_h);
-
- if (!instance->producer || !instance->consumer) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
- "memory for producer, consumer\n");
- goto fail_alloc_dma_buf;
+ if (!instance->evt_detail) {
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate event detail buffer\n");
+ return -ENOMEM;
+ }
+
+ if (fusion) {
+ fusion->ioc_init_request =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ &fusion->ioc_init_request_phys,
+ GFP_KERNEL);
+
+ if (!fusion->ioc_init_request) {
+ dev_err(&pdev->dev,
+ "Failed to allocate PD list buffer\n");
+ return -ENOMEM;
}
+ }
- *instance->producer = 0;
- *instance->consumer = 0;
- break;
+ instance->pd_list_buf =
+ pci_alloc_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ &instance->pd_list_buf_h);
+
+ if (!instance->pd_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
+ return -ENOMEM;
}
- /* Crash dump feature related initialisation*/
- instance->drv_buf_index = 0;
- instance->drv_buf_alloc = 0;
- instance->crash_dump_fw_support = 0;
- instance->crash_dump_app_support = 0;
- instance->fw_crash_state = UNAVAILABLE;
- spin_lock_init(&instance->crashdump_lock);
- instance->crash_dump_buf = NULL;
+ instance->ctrl_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct megasas_ctrl_info),
+ &instance->ctrl_info_buf_h);
- megasas_poll_wait_aen = 0;
- instance->flag_ieee = 0;
- instance->ev = NULL;
- instance->issuepend_done = 1;
- atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
- instance->is_imr = 0;
+ if (!instance->ctrl_info_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate controller info buffer\n");
+ return -ENOMEM;
+ }
- instance->evt_detail = pci_alloc_consistent(pdev,
- sizeof(struct
- megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->ld_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_LIST),
+ &instance->ld_list_buf_h);
- if (!instance->evt_detail) {
- dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
- "event detail structure\n");
- goto fail_alloc_dma_buf;
+ if (!instance->ld_list_buf) {
+ dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
+ return -ENOMEM;
+ }
+
+ instance->ld_targetid_list_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h);
+
+ if (!instance->ld_targetid_list_buf) {
+ dev_err(&pdev->dev,
+ "Failed to allocate LD targetid list buffer\n");
+ return -ENOMEM;
}
if (!reset_devices) {
- instance->system_info_buf = pci_zalloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
- if (!instance->system_info_buf)
- dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
+ instance->system_info_buf =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h);
+ instance->pd_info =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h);
+ instance->tgt_prop =
+ pci_alloc_consistent(pdev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h);
+ instance->crash_dump_buf =
+ pci_alloc_consistent(pdev,
+ CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h);
- instance->pd_info = pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO), &instance->pd_info_h);
+ if (!instance->system_info_buf)
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate system info buffer\n");
if (!instance->pd_info)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
-
- instance->tgt_prop = pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate pd_info buffer\n");
if (!instance->tgt_prop)
- dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate tgt_prop buffer\n");
- instance->crash_dump_buf = pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
if (!instance->crash_dump_buf)
- dev_err(&pdev->dev, "Can't allocate Firmware "
- "crash dump DMA buffer\n");
+ dev_err(&instance->pdev->dev,
+ "Failed to allocate crash dump buffer\n");
}
+ return 0;
+}
+
+/*
+ * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
+ * during driver load time
+ *
+ * @instance- Adapter soft instance
+ *
+ */
+static inline
+void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
+{
+ struct pci_dev *pdev = instance->pdev;
+ struct fusion_context *fusion = instance->ctrl_context;
+
+ if (instance->evt_detail)
+ pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ instance->evt_detail,
+ instance->evt_detail_h);
+
+ if (fusion && fusion->ioc_init_request)
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct MPI2_IOC_INIT_REQUEST),
+ fusion->ioc_init_request,
+ fusion->ioc_init_request_phys);
+
+ if (instance->pd_list_buf)
+ pci_free_consistent(pdev,
+ MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
+ instance->pd_list_buf,
+ instance->pd_list_buf_h);
+
+ if (instance->ld_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ instance->ld_list_buf,
+ instance->ld_list_buf_h);
+
+ if (instance->ld_targetid_list_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ instance->ld_targetid_list_buf,
+ instance->ld_targetid_list_buf_h);
+
+ if (instance->ctrl_info_buf)
+ pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ instance->ctrl_info_buf,
+ instance->ctrl_info_buf_h);
+
+ if (instance->system_info_buf)
+ pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ instance->system_info_buf,
+ instance->system_info_h);
+
+ if (instance->pd_info)
+ pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ instance->pd_info, instance->pd_info_h);
+
+ if (instance->tgt_prop)
+ pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ instance->tgt_prop, instance->tgt_prop_h);
+
+ if (instance->crash_dump_buf)
+ pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ instance->crash_dump_buf,
+ instance->crash_dump_h);
+}
+
+/*
+ * megasas_init_ctrl_params - Initialize controller's instance
+ * parameters before FW init
+ * @instance - Adapter soft instance
+ * @return - void
+ */
+static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
+{
+ instance->fw_crash_state = UNAVAILABLE;
+
+ megasas_poll_wait_aen = 0;
+ instance->issuepend_done = 1;
+ atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
+
/*
* Initialize locks and queues
*/
INIT_LIST_HEAD(&instance->cmd_pool);
INIT_LIST_HEAD(&instance->internal_reset_pending_q);
- atomic_set(&instance->fw_outstanding,0);
+ atomic_set(&instance->fw_outstanding, 0);
init_waitqueue_head(&instance->int_cmd_wait_q);
init_waitqueue_head(&instance->abort_cmd_wait_q);
+ spin_lock_init(&instance->crashdump_lock);
spin_lock_init(&instance->mfi_pool_lock);
spin_lock_init(&instance->hba_lock);
spin_lock_init(&instance->stream_lock);
spin_lock_init(&instance->completion_lock);
- mutex_init(&instance->reset_mutex);
mutex_init(&instance->hba_mutex);
-
- /*
- * Initialize PCI related and misc parameters
- */
- instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
- instance->init_id = MEGASAS_DEFAULT_INIT_ID;
- instance->ctrl_info = NULL;
-
+ mutex_init(&instance->reset_mutex);
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
- (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
instance->flag_ieee = 1;
megasas_dbg_lvl = 0;
@@ -6160,11 +6360,75 @@ static int megasas_probe_one(struct pci_dev *pdev,
instance->disableOnlineCtrlReset = 1;
instance->UnevenSpanSupport = 0;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
- } else
+ } else {
INIT_WORK(&instance->work_init, process_fw_state_change_wq);
+ }
+}
+
+/**
+ * megasas_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int rval, pos;
+ struct Scsi_Host *host;
+ struct megasas_instance *instance;
+ u16 control = 0;
+
+ /* Reset MSI-X in the kdump kernel */
+ if (reset_devices) {
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
+ &control);
+ if (control & PCI_MSIX_FLAGS_ENABLE) {
+ dev_info(&pdev->dev, "resetting MSI-X\n");
+ pci_write_config_word(pdev,
+ pos + PCI_MSIX_FLAGS,
+ control &
+ ~PCI_MSIX_FLAGS_ENABLE);
+ }
+ }
+ }
+
+ /*
+ * PCI prepping: enable device set bus mastering and dma mask
+ */
+ rval = pci_enable_device_mem(pdev);
+
+ if (rval) {
+ return rval;
+ }
+
+ pci_set_master(pdev);
+
+ host = scsi_host_alloc(&megasas_template,
+ sizeof(struct megasas_instance));
+
+ if (!host) {
+ dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
+ goto fail_alloc_instance;
+ }
+
+ instance = (struct megasas_instance *)host->hostdata;
+ memset(instance, 0, sizeof(*instance));
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+
+ /*
+ * Initialize PCI related and misc parameters
+ */
+ instance->pdev = pdev;
+ instance->host = host;
+ instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+
+ megasas_set_adapter_type(instance);
/*
* Initialize MFI Firmware
@@ -6240,37 +6504,16 @@ fail_io_attach:
instance->instancet->disable_intr(instance);
megasas_destroy_irqs(instance);
- if (instance->ctrl_context)
+ if (instance->adapter_type != MFI_SERIES)
megasas_release_fusion(instance);
else
megasas_release_mfi(instance);
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
fail_init_mfi:
-fail_alloc_dma_buf:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
scsi_host_put(host);
fail_alloc_instance:
-fail_set_dma_mask:
pci_disable_device(pdev);
return -ENODEV;
@@ -6447,7 +6690,13 @@ megasas_resume(struct pci_dev *pdev)
pci_set_master(pdev);
- if (megasas_set_dma_mask(pdev))
+ /*
+ * We expect the FW state to be READY
+ */
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+
+ if (megasas_set_dma_mask(instance))
goto fail_set_dma_mask;
/*
@@ -6456,12 +6705,6 @@ megasas_resume(struct pci_dev *pdev)
atomic_set(&instance->fw_outstanding, 0);
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
-
/* Now re-enable MSI-X */
if (instance->msix_vectors) {
irq_flags = PCI_IRQ_MSIX;
@@ -6474,7 +6717,7 @@ megasas_resume(struct pci_dev *pdev)
if (rval < 0)
goto fail_reenable_msix;
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_reset_reply_desc(instance);
if (megasas_ioc_init_fusion(instance)) {
megasas_free_cmds(instance);
@@ -6521,30 +6764,13 @@ megasas_resume(struct pci_dev *pdev)
return 0;
fail_init_mfi:
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail,
- instance->evt_detail_h);
-
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
- if (instance->producer)
- pci_free_consistent(pdev, sizeof(u32), instance->producer,
- instance->producer_h);
- if (instance->consumer)
- pci_free_consistent(pdev, sizeof(u32), instance->consumer,
- instance->consumer_h);
+ megasas_free_ctrl_dma_buffers(instance);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
+fail_reenable_msix:
fail_set_dma_mask:
fail_ready_state:
-fail_reenable_msix:
pci_disable_device(pdev);
@@ -6647,7 +6873,7 @@ skip_firing_dcmds:
if (instance->msix_vectors)
pci_free_irq_vectors(instance->pdev);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
kfree(fusion->stream_detect_by_ld[i]);
kfree(fusion->stream_detect_by_ld);
@@ -6655,7 +6881,7 @@ skip_firing_dcmds:
}
- if (instance->ctrl_context) {
+ if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) *
@@ -6680,30 +6906,10 @@ skip_firing_dcmds:
fusion->pd_seq_sync[i],
fusion->pd_seq_phys[i]);
}
- megasas_free_fusion_context(instance);
} else {
megasas_release_mfi(instance);
- pci_free_consistent(pdev, sizeof(u32),
- instance->producer,
- instance->producer_h);
- pci_free_consistent(pdev, sizeof(u32),
- instance->consumer,
- instance->consumer_h);
}
- kfree(instance->ctrl_info);
-
- if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
- instance->evt_detail, instance->evt_detail_h);
- if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
- instance->pd_info,
- instance->pd_info_h);
- if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
- instance->tgt_prop,
- instance->tgt_prop_h);
if (instance->vf_affiliation)
pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
@@ -6721,13 +6927,9 @@ skip_firing_dcmds:
instance->hb_host_mem,
instance->hb_host_mem_h);
- if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
- instance->crash_dump_buf, instance->crash_dump_h);
+ megasas_free_ctrl_dma_buffers(instance);
- if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
- instance->system_info_buf, instance->system_info_h);
+ megasas_free_ctrl_mem(instance);
scsi_host_put(host);
@@ -6866,7 +7068,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
struct megasas_iocpacket __user * user_ioc,
struct megasas_iocpacket *ioc)
{
- struct megasas_sge32 *kern_sge32;
+ struct megasas_sge64 *kern_sge64 = NULL;
+ struct megasas_sge32 *kern_sge32 = NULL;
struct megasas_cmd *cmd;
void *kbuff_arr[MAX_IOCTL_SGE];
dma_addr_t buf_handle = 0;
@@ -6874,7 +7077,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
void *sense = NULL;
dma_addr_t sense_handle;
unsigned long *sense_ptr;
- u32 opcode;
+ u32 opcode = 0;
memset(kbuff_arr, 0, sizeof(kbuff_arr));
@@ -6884,6 +7087,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
return -EINVAL;
}
+ if (ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) {
+ dev_err(&instance->pdev->dev,
+ "Received invalid ioctl command 0x%x\n",
+ ioc->frame.hdr.cmd);
+ return -ENOTSUPP;
+ }
+
cmd = megasas_get_cmd(instance);
if (!cmd) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
@@ -6899,10 +7109,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cpu_to_le32(cmd->index);
cmd->frame->hdr.pad_0 = 0;
- cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
- MFI_FRAME_SGL64 |
+
+ cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
+
+ if (instance->consistent_mask_64bit)
+ cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64));
+ else
+ cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
MFI_FRAME_SENSE64));
- opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+
+ if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
@@ -6925,8 +7143,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* kernel buffers in SGLs. The location of SGL is embedded in the
* struct iocpacket itself.
*/
- kern_sge32 = (struct megasas_sge32 *)
- ((unsigned long)cmd->frame + ioc->sgl_off);
+ if (instance->consistent_mask_64bit)
+ kern_sge64 = (struct megasas_sge64 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
+ else
+ kern_sge32 = (struct megasas_sge32 *)
+ ((unsigned long)cmd->frame + ioc->sgl_off);
/*
* For each user buffer, create a mirror buffer and copy in
@@ -6949,8 +7171,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
* We don't change the dma_coherent_mask, so
* pci_alloc_consistent only returns 32bit addresses
*/
- kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
- kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ if (instance->consistent_mask_64bit) {
+ kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
+ kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ } else {
+ kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
+ kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
+ }
/*
* We created a kernel buffer corresponding to the
@@ -6973,7 +7200,10 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
sense_ptr =
(unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
- *sense_ptr = cpu_to_le32(sense_handle);
+ if (instance->consistent_mask_64bit)
+ *sense_ptr = cpu_to_le64(sense_handle);
+ else
+ *sense_ptr = cpu_to_le32(sense_handle);
}
/*
@@ -6984,8 +7214,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
cmd->sync_cmd = 0;
dev_err(&instance->pdev->dev,
- "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
- __func__, __LINE__, opcode, cmd->cmd_status_drv);
+ "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
+ __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
+ cmd->cmd_status_drv);
return -EBUSY;
}
@@ -7045,10 +7276,16 @@ out:
for (i = 0; i < ioc->sge_count; i++) {
if (kbuff_arr[i]) {
- dma_free_coherent(&instance->pdev->dev,
- le32_to_cpu(kern_sge32[i].length),
- kbuff_arr[i],
- le32_to_cpu(kern_sge32[i].phys_addr));
+ if (instance->consistent_mask_64bit)
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge64[i].length),
+ kbuff_arr[i],
+ le64_to_cpu(kern_sge64[i].phys_addr));
+ else
+ dma_free_coherent(&instance->pdev->dev,
+ le32_to_cpu(kern_sge32[i].length),
+ kbuff_arr[i],
+ le32_to_cpu(kern_sge32[i].phys_addr));
kbuff_arr[i] = NULL;
}
}
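
A minimal user-space sketch (with simplified stand-in types, not the driver's real structures) of the 32-bit vs 64-bit SGE selection that the ioctl path above now performs based on the coherent DMA mask:

#include <stdbool.h>
#include <stdint.h>

struct sge32 { uint32_t phys_addr; uint32_t length; };
struct sge64 { uint64_t phys_addr; uint32_t length; };

/* frame + sgl_off points at either an sge32[] or an sge64[] array,
 * depending on whether a 64-bit coherent DMA mask is in use. */
static void fill_sge(void *frame, uint32_t sgl_off, int i,
		     uint64_t buf_handle, uint32_t len, bool dma64)
{
	if (dma64) {
		struct sge64 *sge = (struct sge64 *)((char *)frame + sgl_off);

		sge[i].phys_addr = buf_handle;
		sge[i].length = len;
	} else {
		struct sge32 *sge = (struct sge32 *)((char *)frame + sgl_off);

		sge[i].phys_addr = (uint32_t)buf_handle;
		sge[i].length = len;
	}
}
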
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index ecc699a65bac..bfad9bfc313f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -737,7 +737,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if (instance->is_ventura &&
+ if ((instance->adapter_type == VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -747,8 +747,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
} else {
if ((raid->level >= 5) &&
- ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
- ((fusion->adapter_type == INVADER_SERIES) &&
+ ((instance->adapter_type == THUNDERBOLT_SERIES) ||
+ ((instance->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
@@ -762,7 +762,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -853,7 +853,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
*pDevHandle = MR_PdDevHandleGet(pd, map);
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
/* get second pd also for raid 1/10 fast path writes*/
- if (instance->is_ventura &&
+ if ((instance->adapter_type == VENTURA_SERIES) &&
(raid->level == 1) &&
!io_info->isRead) {
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -863,8 +863,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
} else {
if ((raid->level >= 5) &&
- ((fusion->adapter_type == THUNDERBOLT_SERIES) ||
- ((fusion->adapter_type == INVADER_SERIES) &&
+ ((instance->adapter_type == THUNDERBOLT_SERIES) ||
+ ((instance->adapter_type == INVADER_SERIES) &&
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
@@ -880,7 +880,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
}
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
io_info->span_arm =
@@ -1088,10 +1088,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
cpu_to_le16(raid->fpIoTimeoutForLd ?
raid->fpIoTimeoutForLd :
map->raidMap.fpPdIoTimeoutSec);
- if (fusion->adapter_type == INVADER_SERIES)
+ if (instance->adapter_type == INVADER_SERIES)
pRAID_Context->reg_lock_flags = (isRead) ?
raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
- else if (!instance->is_ventura)
+ else if (instance->adapter_type == THUNDERBOLT_SERIES)
pRAID_Context->reg_lock_flags = (isRead) ?
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
pRAID_Context->virtual_disk_tgt_id = raid->targetId;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 3c399e7b3fe1..65dc4fea6352 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -93,8 +93,37 @@ extern unsigned int resetwaittime;
extern unsigned int dual_qdepth_disable;
static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
static void megasas_free_reply_fusion(struct megasas_instance *instance);
+static inline
+void megasas_configure_queue_sizes(struct megasas_instance *instance);
+/**
+ * megasas_check_same_4gb_region - check whether a DMA allocation stays
+ *				    within a single 4GB region
+ * @instance: adapter's soft instance
+ * @start_addr: start address of the DMA allocation
+ * @size: size of the allocation in bytes
+ *
+ * Return: true if the allocation does not cross a 4GB boundary,
+ *	   false if it does
+ */
+static inline bool megasas_check_same_4gb_region
+ (struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
+{
+ dma_addr_t end_addr;
+
+ end_addr = start_addr + size;
+ if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
+ dev_err(&instance->pdev->dev,
+ "Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
+ (unsigned long long)start_addr,
+ (unsigned long long)end_addr);
+ return false;
+ }
+
+ return true;
+}
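
A self-contained sketch of the same upper-32-bit comparison, runnable outside the driver; the example address and size are made up:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* true when [start, start + size) does not cross a 4GB boundary */
static bool same_4gb_region(uint64_t start, size_t size)
{
	return (uint32_t)(start >> 32) == (uint32_t)((start + size) >> 32);
}

int main(void)
{
	/* 0xFFFF0000 + 0x20000 wraps into the next 4GB region, so the
	 * driver would free this buffer and retry from an aligned pool. */
	printf("crosses 4GB boundary: %s\n",
	       same_4gb_region(0xFFFF0000ULL, 0x20000) ? "no" : "yes");
	return 0;
}
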
/**
* megasas_enable_intr_fusion - Enables interrupts
@@ -197,7 +226,7 @@ static void
megasas_fire_cmd_fusion(struct megasas_instance *instance,
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
writel(le32_to_cpu(req_desc->u.low),
&instance->reg_set->inbound_single_queue_port);
else {
@@ -240,7 +269,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
reg_set = instance->reg_set;
/* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
- if (!instance->is_ventura)
+ if (instance->adapter_type < VENTURA_SERIES)
cur_max_fw_cmds =
readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
@@ -251,8 +280,8 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
(instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
dev_info(&instance->pdev->dev,
- "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
- cur_max_fw_cmds, ldio_threshold);
+ "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
+ cur_max_fw_cmds, ldio_threshold);
if (fw_boot_context == OCR_CONTEXT) {
cur_max_fw_cmds = cur_max_fw_cmds - 1;
@@ -267,10 +296,6 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
instance->max_fw_cmds = cur_max_fw_cmds;
instance->ldio_threshold = ldio_threshold;
- if (!instance->is_rdpq)
- instance->max_fw_cmds =
- min_t(u16, instance->max_fw_cmds, 1024);
-
if (reset_devices)
instance->max_fw_cmds = min(instance->max_fw_cmds,
(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
@@ -280,19 +305,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
* does not exceed max cmds that the FW can support
*/
instance->max_fw_cmds = instance->max_fw_cmds-1;
-
- instance->max_scsi_cmds = instance->max_fw_cmds -
- (MEGASAS_FUSION_INTERNAL_CMDS +
- MEGASAS_FUSION_IOCTL_CMDS);
- instance->cur_can_queue = instance->max_scsi_cmds;
- instance->host->can_queue = instance->cur_can_queue;
}
-
- if (instance->is_ventura)
- instance->max_mpt_cmds =
- instance->max_fw_cmds * RAID_1_PEER_CMDS;
- else
- instance->max_mpt_cmds = instance->max_fw_cmds;
}
/**
* megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
@@ -305,17 +318,23 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
struct megasas_cmd_fusion *cmd;
- /* SG, Sense */
- for (i = 0; i < instance->max_mpt_cmds; i++) {
- cmd = fusion->cmd_list[i];
- if (cmd) {
- if (cmd->sg_frame)
- dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
- cmd->sg_frame_phys_addr);
- if (cmd->sense)
- dma_pool_free(fusion->sense_dma_pool, cmd->sense,
- cmd->sense_phys_addr);
+ if (fusion->sense)
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+
+ /* SG */
+ if (fusion->cmd_list) {
+ for (i = 0; i < instance->max_mpt_cmds; i++) {
+ cmd = fusion->cmd_list[i];
+ if (cmd) {
+ if (cmd->sg_frame)
+ dma_pool_free(fusion->sg_dma_pool,
+ cmd->sg_frame,
+ cmd->sg_frame_phys_addr);
+ }
+ kfree(cmd);
}
+ kfree(fusion->cmd_list);
}
if (fusion->sg_dma_pool) {
@@ -347,13 +366,6 @@ megasas_free_cmds_fusion(struct megasas_instance *instance)
dma_pool_destroy(fusion->io_request_frames_pool);
fusion->io_request_frames_pool = NULL;
}
-
-
- /* cmd_list */
- for (i = 0; i < instance->max_mpt_cmds; i++)
- kfree(fusion->cmd_list[i]);
-
- kfree(fusion->cmd_list);
}
/**
@@ -367,10 +379,12 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
u16 max_cmd;
struct fusion_context *fusion;
struct megasas_cmd_fusion *cmd;
+ int sense_sz;
+ u32 offset;
fusion = instance->ctrl_context;
max_cmd = instance->max_fw_cmds;
-
+ sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
fusion->sg_dma_pool =
dma_pool_create("mr_sg", &instance->pdev->dev,
@@ -379,7 +393,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* SCSI_SENSE_BUFFERSIZE = 96 bytes */
fusion->sense_dma_pool =
dma_pool_create("mr_sense", &instance->pdev->dev,
- SCSI_SENSE_BUFFERSIZE, 64, 0);
+ sense_sz, 64, 0);
if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
dev_err(&instance->pdev->dev,
@@ -387,6 +401,51 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
return -ENOMEM;
}
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL, &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+	/* The sense buffer, request frames and reply descriptor pool must
+	 * each stay within a single 4GB region; the check below verifies
+	 * this. On failure, the old allocation and pool are destroyed and a
+	 * new pci pool is created with an updated alignment, chosen so that
+	 * the next allocation, if it succeeds, is guaranteed to stay within
+	 * one 4GB region. The real requirement is not alignment as such, but
+	 * that the start and end DMA addresses share the same upper 32 bits.
+	 */
+
+ if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
+ sense_sz)) {
+ dma_pool_free(fusion->sense_dma_pool, fusion->sense,
+ fusion->sense_phys_addr);
+ fusion->sense = NULL;
+ dma_pool_destroy(fusion->sense_dma_pool);
+
+ fusion->sense_dma_pool =
+ dma_pool_create("mr_sense_align", &instance->pdev->dev,
+ sense_sz, roundup_pow_of_two(sense_sz),
+ 0);
+ if (!fusion->sense_dma_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
+ GFP_KERNEL,
+ &fusion->sense_phys_addr);
+ if (!fusion->sense) {
+ dev_err(&instance->pdev->dev,
+ "failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
/*
* Allocate and attach a frame to each of the commands in cmd_list
*/
@@ -395,9 +454,11 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
GFP_KERNEL, &cmd->sg_frame_phys_addr);
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sg_frame || !cmd->sense) {
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
+ if (!cmd->sg_frame) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
@@ -407,13 +468,10 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
/* create sense buffer for the raid 1/10 fp */
for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
cmd = fusion->cmd_list[i];
- cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
- GFP_KERNEL, &cmd->sense_phys_addr);
- if (!cmd->sense) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ offset = SCSI_SENSE_BUFFERSIZE * i;
+ cmd->sense = (u8 *)fusion->sense + offset;
+ cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
+
}
return 0;
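
The fallback pools above rely on a simple property: a power-of-two alignment that is at least the allocation size (and at most 4GB) makes it impossible for the buffer to straddle a 4GB boundary, because the start address is then a multiple of the alignment and the whole buffer fits inside one aligned window. A small stand-alone check of that argument, with made-up sizes:

#include <stdint.h>
#include <stdio.h>

/* round v up to the next power of two (illustrative; v > 0) */
static uint64_t roundup_pow2(uint64_t v)
{
	uint64_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	uint64_t size = 96 * 1024;		/* made-up sense buffer size */
	uint64_t align = roundup_pow2(size);	/* 128KB */
	uint64_t start;

	/* every start that is a multiple of 'align' keeps the buffer
	 * within one 4GB region, since align divides 4GB evenly */
	for (start = 0; start < (1ULL << 33); start += align)
		if ((start >> 32) != ((start + size) >> 32))
			printf("crossing at 0x%llx\n",
			       (unsigned long long)start);
	printf("done: no crossings reported\n");
	return 0;
}
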
@@ -465,16 +523,7 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- fusion->req_frames_desc =
- dma_alloc_coherent(&instance->pdev->dev,
- fusion->request_alloc_sz,
- &fusion->req_frames_desc_phys, GFP_KERNEL);
- if (!fusion->req_frames_desc) {
- dev_err(&instance->pdev->dev,
- "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
-
+retry_alloc:
fusion->io_request_frames_pool =
dma_pool_create("mr_ioreq", &instance->pdev->dev,
fusion->io_frames_alloc_sz, 16, 0);
@@ -489,10 +538,62 @@ megasas_alloc_request_fusion(struct megasas_instance *instance)
dma_pool_alloc(fusion->io_request_frames_pool,
GFP_KERNEL, &fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
+ if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
+ instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+ megasas_configure_queue_sizes(instance);
+ goto retry_alloc;
+ } else {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->io_request_frames_phys,
+ fusion->io_frames_alloc_sz)) {
+ dma_pool_free(fusion->io_request_frames_pool,
+ fusion->io_request_frames,
+ fusion->io_request_frames_phys);
+ fusion->io_request_frames = NULL;
+ dma_pool_destroy(fusion->io_request_frames_pool);
+
+ fusion->io_request_frames_pool =
+ dma_pool_create("mr_ioreq_align",
+ &instance->pdev->dev,
+ fusion->io_frames_alloc_sz,
+ roundup_pow_of_two(fusion->io_frames_alloc_sz),
+ 0);
+
+ if (!fusion->io_request_frames_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->io_request_frames =
+ dma_pool_alloc(fusion->io_request_frames_pool,
+ GFP_KERNEL,
+ &fusion->io_request_frames_phys);
+
+ if (!fusion->io_request_frames) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
+ fusion->req_frames_desc =
+ dma_alloc_coherent(&instance->pdev->dev,
+ fusion->request_alloc_sz,
+ &fusion->req_frames_desc_phys, GFP_KERNEL);
+ if (!fusion->req_frames_desc) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
return 0;
}
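
The retry_alloc path above shrinks the firmware queue depth in steps of MEGASAS_REDUCE_QD_COUNT and re-derives the queue sizes before trying again. A condensed user-space sketch of that back-off pattern; the frame size, floor and malloc() stand-in are hypothetical:

#include <stddef.h>
#include <stdlib.h>

#define REDUCE_STEP 64	/* mirrors MEGASAS_REDUCE_QD_COUNT */
#define FRAME_SZ    256	/* made-up per-command frame size */

/* Try to allocate frames for *max_cmds commands; on failure, step the
 * command count down and retry until it would drop below a useful floor. */
static void *alloc_frames_with_backoff(unsigned int *max_cmds)
{
	void *buf;

	for (;;) {
		buf = malloc((size_t)*max_cmds * FRAME_SZ);
		if (buf)
			return buf;
		if (*max_cmds < REDUCE_STEP * 2)
			return NULL;
		*max_cmds -= REDUCE_STEP;
	}
}
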
@@ -523,6 +624,41 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+
+ if (!megasas_check_same_4gb_region(instance,
+ fusion->reply_frames_desc_phys[0],
+ (fusion->reply_alloc_sz * count))) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ fusion->reply_frames_desc[0],
+ fusion->reply_frames_desc_phys[0]);
+ fusion->reply_frames_desc[0] = NULL;
+ dma_pool_destroy(fusion->reply_frames_desc_pool);
+
+ fusion->reply_frames_desc_pool =
+ dma_pool_create("mr_reply_align",
+ &instance->pdev->dev,
+ fusion->reply_alloc_sz * count,
+ roundup_pow_of_two(fusion->reply_alloc_sz * count),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ fusion->reply_frames_desc[0] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL,
+ &fusion->reply_frames_desc_phys[0]);
+
+ if (!fusion->reply_frames_desc[0]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
reply_desc = fusion->reply_frames_desc[0];
for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
reply_desc->Words = cpu_to_le64(ULLONG_MAX);
@@ -541,52 +677,124 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
- int i, j, count;
+ int i, j, k, msix_count;
struct fusion_context *fusion;
union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
+ union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
+ dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
+ u8 dma_alloc_count, abs_index;
+ u32 chunk_size, array_size, offset;
fusion = instance->ctrl_context;
+ chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
+ array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
+ MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = pci_alloc_consistent(instance->pdev, array_size,
+ &fusion->rdpq_phys);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- memset(fusion->rdpq_virt, 0,
- sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
- count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+ memset(fusion->rdpq_virt, 0, array_size);
+ msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
+
fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
&instance->pdev->dev,
- fusion->reply_alloc_sz,
- 16, 0);
-
- if (!fusion->reply_frames_desc_pool) {
+ chunk_size, 16, 0);
+ fusion->reply_frames_desc_pool_align =
+ dma_pool_create("mr_rdpq_align",
+ &instance->pdev->dev,
+ chunk_size,
+ roundup_pow_of_two(chunk_size),
+ 0);
+
+ if (!fusion->reply_frames_desc_pool ||
+ !fusion->reply_frames_desc_pool_align) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
- for (i = 0; i < count; i++) {
- fusion->reply_frames_desc[i] =
- dma_pool_alloc(fusion->reply_frames_desc_pool,
- GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
- if (!fusion->reply_frames_desc[i]) {
+/*
+ * For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ...) and for
+ * VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ...) must stay
+ * within a 4GB boundary, and all reply queues in a set must share the same
+ * upper 32 bits of their memory address. The driver therefore allocates the
+ * DMA'able memory for the reply queues accordingly, and applies the stricter
+ * VENTURA_SERIES limitation to INVADER_SERIES as well.
+ */
+ dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
+
+ for (i = 0; i < dma_alloc_count; i++) {
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
return -ENOMEM;
}
+		/* Each reply descriptor chunk must stay within a single 4GB
+		 * region; the check below verifies this. On failure, the
+		 * chunk is re-allocated from a second pci pool with stricter
+		 * alignment (the driver always creates two separate pools
+		 * for RDPQ buffers), chosen so that a successful allocation
+		 * always stays within one 4GB region. rdpq_tracker records
+		 * each chunk's physical address, virtual address and owning
+		 * pool descriptor so the resources can be freed later.
+		 */
+ if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
+ chunk_size)) {
+ dma_pool_free(fusion->reply_frames_desc_pool,
+ rdpq_chunk_virt[i],
+ rdpq_chunk_phys[i]);
- fusion->rdpq_virt[i].RDPQBaseAddress =
- cpu_to_le64(fusion->reply_frames_desc_phys[i]);
+ rdpq_chunk_virt[i] =
+ dma_pool_alloc(fusion->reply_frames_desc_pool_align,
+ GFP_KERNEL, &rdpq_chunk_phys[i]);
+ if (!rdpq_chunk_virt[i]) {
+ dev_err(&instance->pdev->dev,
+ "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool_align;
+ } else {
+ fusion->rdpq_tracker[i].dma_pool_ptr =
+ fusion->reply_frames_desc_pool;
+ }
- reply_desc = fusion->reply_frames_desc[i];
- for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
- reply_desc->Words = cpu_to_le64(ULLONG_MAX);
+ fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
+ fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
}
+
+ for (k = 0; k < dma_alloc_count; k++) {
+ for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
+ abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
+
+ if (abs_index == msix_count)
+ break;
+ offset = fusion->reply_alloc_sz * i;
+ fusion->rdpq_virt[abs_index].RDPQBaseAddress =
+ cpu_to_le64(rdpq_chunk_phys[k] + offset);
+ fusion->reply_frames_desc_phys[abs_index] =
+ rdpq_chunk_phys[k] + offset;
+ fusion->reply_frames_desc[abs_index] =
+ (union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
+
+ reply_desc = fusion->reply_frames_desc[abs_index];
+ for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
+ reply_desc->Words = ULLONG_MAX;
+ }
+ }
+
return 0;
}
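
The nested loop above spreads the reply queues over chunks of RDPQ_MAX_INDEX_IN_ONE_CHUNK entries. A small stand-alone illustration of the index arithmetic, with made-up vector count, per-queue size and chunk base addresses:

#include <stdint.h>
#include <stdio.h>

#define CHUNK_QUEUES 16	/* RDPQ_MAX_INDEX_IN_ONE_CHUNK */

int main(void)
{
	uint32_t msix_count = 24;		/* made-up vector count */
	uint32_t reply_alloc_sz = 0x1000;	/* made-up per-queue size */
	uint64_t chunk_base[2] = { 0x100000000ULL, 0x180000000ULL };
	uint32_t q, k, i;

	/* 24 queues -> DIV_ROUND_UP(24, 16) = 2 chunk allocations */
	for (q = 0; q < msix_count; q++) {
		k = q / CHUNK_QUEUES;	/* which chunk */
		i = q % CHUNK_QUEUES;	/* index within the chunk */
		printf("queue %2u -> chunk %u, addr 0x%llx\n", q, k,
		       (unsigned long long)(chunk_base[k] +
					    (uint64_t)i * reply_alloc_sz));
	}
	return 0;
}
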
@@ -598,15 +806,18 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
fusion = instance->ctrl_context;
- for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
- if (fusion->reply_frames_desc[i])
- dma_pool_free(fusion->reply_frames_desc_pool,
- fusion->reply_frames_desc[i],
- fusion->reply_frames_desc_phys[i]);
+ for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
+ if (fusion->rdpq_tracker[i].pool_entry_virt)
+ dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
+ fusion->rdpq_tracker[i].pool_entry_virt,
+ fusion->rdpq_tracker[i].pool_entry_phys);
+
}
if (fusion->reply_frames_desc_pool)
dma_pool_destroy(fusion->reply_frames_desc_pool);
+ if (fusion->reply_frames_desc_pool_align)
+ dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
pci_free_consistent(instance->pdev,
@@ -661,9 +872,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
fusion = instance->ctrl_context;
- if (megasas_alloc_cmdlist_fusion(instance))
- goto fail_exit;
-
if (megasas_alloc_request_fusion(instance))
goto fail_exit;
@@ -674,6 +882,11 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_reply_fusion(instance))
goto fail_exit;
+ if (megasas_alloc_cmdlist_fusion(instance))
+ goto fail_exit;
+
+ dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
+ instance->max_fw_cmds);
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -770,22 +983,34 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MFI_CAPABILITIES *drv_ops;
u32 scratch_pad_2;
unsigned long flags;
+ struct timeval tv;
+ bool cur_fw_64bit_dma_capable;
fusion = instance->ctrl_context;
- cmd = megasas_get_cmd(instance);
+ ioc_init_handle = fusion->ioc_init_request_phys;
+ IOCInitMessage = fusion->ioc_init_request;
- if (!cmd) {
- dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
- ret = 1;
- goto fail_get_cmd;
- }
+ cmd = fusion->ioc_init_cmd;
scratch_pad_2 = readl
(&instance->reg_set->outbound_scratch_pad_2);
cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
+ if (instance->adapter_type == INVADER_SERIES) {
+ cur_fw_64bit_dma_capable =
+ (scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
+
+ if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
+ dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
+ "DMA mask, but upcoming FW does not support 64bit DMA mask\n");
+ megaraid_sas_kill_hba(instance);
+ ret = 1;
+ goto fail_fw_init;
+ }
+ }
+
if (instance->is_rdpq && !cur_rdpq_mode) {
dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
" from RDPQ mode to non RDPQ mode\n");
@@ -798,18 +1023,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
instance->fw_sync_cache_support ? "Yes" : "No");
- IOCInitMessage =
- dma_alloc_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- &ioc_init_handle, GFP_KERNEL);
-
- if (!IOCInitMessage) {
- dev_err(&instance->pdev->dev, "Could not allocate memory for "
- "IOCInitMessage\n");
- ret = 1;
- goto fail_fw_init;
- }
-
memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
@@ -825,8 +1038,15 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
IOCInitMessage->MsgFlags = instance->is_rdpq ?
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
+ IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
+
+ do_gettimeofday(&tv);
+ /* Convert to milliseconds as per FW requirement */
+ IOCInitMessage->TimeStamp = cpu_to_le64((tv.tv_sec * 1000) +
+ (tv.tv_usec / 1000));
+
init_frame = (struct megasas_init_frame *)cmd->frame;
memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
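
The TimeStamp field set above is the wall-clock time in milliseconds, as the comment in the hunk notes. A stand-alone version of the same conversion (gettimeofday() is used here only so the sketch runs in user space; the driver uses do_gettimeofday()):

#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timeval tv;
	uint64_t ms;

	gettimeofday(&tv, NULL);
	ms = (uint64_t)tv.tv_sec * 1000 + (uint64_t)tv.tv_usec / 1000;
	printf("IOC INIT timestamp: %llu ms\n", (unsigned long long)ms);
	return 0;
}
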
@@ -842,7 +1062,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
/* driver support Extended MSIX */
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
drv_ops->mfi_capabilities.support_additional_msix = 1;
/* driver supports HA / Remote LUN over Fast Path interface */
drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
@@ -860,6 +1080,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
drv_ops->mfi_capabilities.support_qd_throttling = 1;
drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
+
+ if (instance->consistent_mask_64bit)
+ drv_ops->mfi_capabilities.support_64bit_mode = 1;
+
/* Convert capability to LE32 */
cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
@@ -869,8 +1093,8 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
strlen(sys_info) > 64 ? 64 : strlen(sys_info));
instance->system_info_buf->systemIdLength =
strlen(sys_info) > 64 ? 64 : strlen(sys_info);
- init_frame->system_info_lo = instance->system_info_h;
- init_frame->system_info_hi = 0;
+ init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
+ init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
}
init_frame->queue_info_new_phys_addr_hi =
@@ -917,12 +1141,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
ret = 0;
fail_fw_init:
- megasas_return_cmd(instance, cmd);
- if (IOCInitMessage)
- dma_free_coherent(&instance->pdev->dev,
- sizeof(struct MPI2_IOC_INIT_REQUEST),
- IOCInitMessage, ioc_init_handle);
-fail_get_cmd:
dev_err(&instance->pdev->dev,
"Init cmd return status %s for SCSI host %d\n",
ret ? "FAILED" : "SUCCESS", instance->host->host_no);
@@ -967,6 +1185,15 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
memset(pd_sync, 0, pd_seq_map_sz);
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+ if (pend) {
+ dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
+ instance->jbod_seq_cmd = cmd;
+ } else {
+ dcmd->flags = MFI_FRAME_DIR_READ;
+ }
+
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
@@ -974,21 +1201,16 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
+
+ megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
if (pend) {
- dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
- instance->jbod_seq_cmd = cmd;
instance->instancet->issue_dcmd(instance, cmd);
return 0;
}
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
-
/* Below code is only for non pended DCMD */
- if (instance->ctrl_context && !instance->mask_interrupts)
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
@@ -1001,7 +1223,7 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
ret = -EINVAL;
}
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
if (ret == DCMD_SUCCESS)
@@ -1069,21 +1291,21 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
+ dcmd->flags = MFI_FRAME_DIR_READ;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
- if (instance->ctrl_context && !instance->mask_interrupts)
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
+
+ if (!instance->mask_interrupts)
ret = megasas_issue_blocked_cmd(instance, cmd,
MFI_IO_TIMEOUT_SECS);
else
ret = megasas_issue_polled(instance, cmd);
- if (ret == DCMD_TIMEOUT && instance->ctrl_context)
+ if (ret == DCMD_TIMEOUT)
megaraid_sas_kill_hba(instance);
megasas_return_cmd(instance, cmd);
@@ -1173,15 +1395,15 @@ megasas_sync_map_info(struct megasas_instance *instance)
dcmd->cmd = MFI_CMD_DCMD;
dcmd->cmd_status = 0xFF;
dcmd->sge_count = 1;
- dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
+ dcmd->flags = MFI_FRAME_DIR_WRITE;
dcmd->timeout = 0;
dcmd->pad_0 = 0;
dcmd->data_xfer_len = cpu_to_le32(size_map_info);
dcmd->mbox.b[0] = num_lds;
dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
- dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
- dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
+
+ megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
instance->map_update_cmd = cmd;
@@ -1337,6 +1559,94 @@ ld_drv_map_alloc_fail:
}
/**
+ * megasas_configure_queue_sizes - Calculate size of request desc queue,
+ * reply desc queue,
+ * IO request frame queue, set can_queue.
+ * @instance: Adapter soft state
+ * @return: void
+ */
+static inline
+void megasas_configure_queue_sizes(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ u16 max_cmd;
+
+ fusion = instance->ctrl_context;
+ max_cmd = instance->max_fw_cmds;
+
+ if (instance->adapter_type == VENTURA_SERIES)
+ instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
+ else
+ instance->max_mpt_cmds = instance->max_fw_cmds;
+
+ instance->max_scsi_cmds = instance->max_fw_cmds -
+ (MEGASAS_FUSION_INTERNAL_CMDS +
+ MEGASAS_FUSION_IOCTL_CMDS);
+ instance->cur_can_queue = instance->max_scsi_cmds;
+ instance->host->can_queue = instance->cur_can_queue;
+
+ fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
+
+ fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
+ instance->max_mpt_cmds;
+ fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
+ (fusion->reply_q_depth);
+ fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
+ (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+ * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+}
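
A worked example of the sizing above, with hypothetical structure sizes and a hypothetical firmware command count (the real sizes come from the MPI2 headers, so treat these numbers as placeholders):

#include <stdio.h>

#define REQ_DESC_SZ	8	/* placeholder for the request descriptor union size */
#define REPLY_DESC_SZ	8	/* placeholder for the reply descriptor union size */
#define IO_FRAME_SZ	256	/* placeholder for MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE */

int main(void)
{
	unsigned int max_fw_cmds = 928;			/* made-up firmware limit */
	unsigned int max_mpt_cmds = max_fw_cmds * 2;	/* VENTURA: x RAID_1_PEER_CMDS */
	unsigned int reply_q_depth = 2 * ((max_fw_cmds + 1 + 15) / 16) * 16;

	printf("reply_q_depth      = %u\n", reply_q_depth);
	printf("request_alloc_sz   = %u\n", REQ_DESC_SZ * max_mpt_cmds);
	printf("reply_alloc_sz     = %u\n", REPLY_DESC_SZ * reply_q_depth);
	printf("io_frames_alloc_sz = %u\n",
	       IO_FRAME_SZ + IO_FRAME_SZ * (max_mpt_cmds + 1));
	return 0;
}
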
+
+static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+ struct megasas_cmd *cmd;
+
+ fusion = instance->ctrl_context;
+
+ cmd = kmalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
+
+ if (!cmd) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ &cmd->frame_phys_addr, GFP_KERNEL);
+
+ if (!cmd->frame) {
+ dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
+ __func__, __LINE__);
+ kfree(cmd);
+ return -ENOMEM;
+ }
+
+ fusion->ioc_init_cmd = cmd;
+ return 0;
+}
+
+/**
+ * megasas_free_ioc_init_cmd - Free IOC INIT command frame
+ * @instance: Adapter soft state
+ */
+static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
+{
+ struct fusion_context *fusion;
+
+ fusion = instance->ctrl_context;
+
+ if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
+ dma_free_coherent(&instance->pdev->dev,
+ IOC_INIT_FRAME_SIZE,
+ fusion->ioc_init_cmd->frame,
+ fusion->ioc_init_cmd->frame_phys_addr);
+
+	kfree(fusion->ioc_init_cmd);
+}
+
+/**
* megasas_init_adapter_fusion - Initializes the FW
* @instance: Adapter soft state
*
@@ -1347,7 +1657,6 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *reg_set;
struct fusion_context *fusion;
- u16 max_cmd;
u32 scratch_pad_2;
int i = 0, count;
@@ -1363,17 +1672,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
instance->max_mfi_cmds =
MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
- max_cmd = instance->max_fw_cmds;
-
- fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
-
- fusion->request_alloc_sz =
- sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
- fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
- *(fusion->reply_q_depth);
- fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
- (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
- * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
+ megasas_configure_queue_sizes(instance);
scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
/* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
@@ -1431,6 +1730,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
MEGASAS_FUSION_IOCTL_CMDS);
sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
+ if (megasas_alloc_ioc_init_frame(instance))
+ return 1;
+
/*
* Allocate memory for descriptors
* Create a pool of commands
@@ -1468,6 +1770,7 @@ fail_ioc_init:
fail_alloc_cmds:
megasas_free_cmds(instance);
fail_alloc_mfi_cmds:
+ megasas_free_ioc_init_cmd(instance);
return 1;
}
@@ -1800,7 +2103,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
fusion = instance->ctrl_context;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
sgl_ptr_end->Flags = 0;
@@ -1810,7 +2113,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
sgl_ptr->Flags = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
if (i == sge_count - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
sgl_ptr++;
@@ -1820,7 +2123,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
(sge_count > fusion->max_sge_in_main_msg)) {
struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
if ((le16_to_cpu(cmd->io_request->IoFlags) &
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
@@ -1836,7 +2139,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
sg_chain = sgl_ptr;
/* Prepare chain element */
sg_chain->NextChainOffset = 0;
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags =
@@ -2360,7 +2663,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
praid_context = &io_request->RaidContext;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
megasas_stream_detect(instance, cmd, &io_info);
spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
@@ -2413,7 +2716,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_request->RaidContext.raid_context.reg_lock_flags ==
REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
@@ -2426,7 +2729,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
io_request->RaidContext.raid_context.reg_lock_flags |=
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.nseg_type |=
(1 << RAID_CONTEXT_NSEG_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2445,7 +2748,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
&io_info, local_map_ptr);
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
cmd->pd_r1_lb = io_info.pd_after_lb;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
io_request->RaidContext.raid_context_g35.span_arm
= io_info.span_arm;
else
@@ -2455,7 +2758,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
} else
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
- if (instance->is_ventura)
+ if (instance->adapter_type == VENTURA_SERIES)
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
else
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
@@ -2478,7 +2781,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
cmd->request_desc->SCSIIO.RequestFlags =
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if (fusion->adapter_type == INVADER_SERIES) {
+ if (instance->adapter_type == INVADER_SERIES) {
if (io_info.do_fp_rlbypass ||
(io_request->RaidContext.raid_context.reg_lock_flags
== REGION_TYPE_UNUSED))
@@ -2491,7 +2794,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
MR_RL_FLAGS_SEQ_NUM_ENABLE);
io_request->RaidContext.raid_context.nseg = 0x1;
- } else if (instance->is_ventura) {
+ } else if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2566,7 +2869,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
/* set RAID context values */
pRAID_Context->config_seq_num = raid->seqNum;
- if (!instance->is_ventura)
+ if (instance->adapter_type != VENTURA_SERIES)
pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
pRAID_Context->timeout_value =
cpu_to_le16(raid->fpIoTimeoutForLd);
@@ -2651,7 +2954,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
io_request->RaidContext.raid_context_g35.routing_flags |=
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
io_request->RaidContext.raid_context_g35.nseg_type |=
@@ -2699,7 +3002,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeout_value =
cpu_to_le16((os_timeout_value > timeout_limit) ?
timeout_limit : os_timeout_value);
- if (fusion->adapter_type >= INVADER_SERIES)
+ if (instance->adapter_type >= INVADER_SERIES)
io_request->IoFlags |=
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
@@ -2782,7 +3085,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
return 1;
}
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
@@ -2805,7 +3108,8 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SGLOffset0 =
offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
- io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
+ io_request->SenseBufferLowAddress =
+ cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
cmd->scmd = scp;
@@ -2846,7 +3150,7 @@ void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =
- cpu_to_le32(r1_cmd->sense_phys_addr);
+ cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
r1_cmd->scmd = cmd->scmd;
req_desc2 = megasas_get_request_descriptor(instance,
(r1_cmd->index - 1));
@@ -3312,7 +3616,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
io_req = cmd->io_request;
- if (fusion->adapter_type >= INVADER_SERIES) {
+ if (instance->adapter_type >= INVADER_SERIES) {
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
@@ -3386,6 +3690,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
void
megasas_release_fusion(struct megasas_instance *instance)
{
+ megasas_free_ioc_init_cmd(instance);
megasas_free_cmds(instance);
megasas_free_cmds_fusion(instance);
@@ -4244,7 +4549,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
cmd_fusion = fusion->cmd_list[i];
/*check for extra commands issued by driver*/
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
megasas_return_cmd_fusion(instance, r1_cmd);
}
@@ -4345,7 +4650,7 @@ transition_to_ready:
megasas_set_dynamic_target_properties(sdev);
/* reset stream detection array */
- if (instance->is_ventura) {
+ if (instance->adapter_type == VENTURA_SERIES) {
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
memset(fusion->stream_detect_by_ld[j],
0, sizeof(struct LD_STREAM_DETECT));
@@ -4493,20 +4798,31 @@ megasas_alloc_fusion_context(struct megasas_instance *instance)
{
struct fusion_context *fusion;
- instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
- instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- instance->ctrl_context_pages);
+ instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
+ GFP_KERNEL);
if (!instance->ctrl_context) {
- /* fall back to using vmalloc for fusion_context */
- instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
- if (!instance->ctrl_context) {
- dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
- return -ENOMEM;
- }
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
}
fusion = instance->ctrl_context;
+ fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ fusion->log_to_span =
+ (PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ fusion->log_to_span_pages);
+ if (!fusion->log_to_span) {
+ fusion->log_to_span = vzalloc(MAX_LOGICAL_DRIVES_EXT *
+ sizeof(LD_SPAN_INFO));
+ if (!fusion->log_to_span) {
+ dev_err(&instance->pdev->dev, "Failed from %s %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+
fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
sizeof(struct LD_LOAD_BALANCE_INFO));
fusion->load_balance_info =
@@ -4537,11 +4853,15 @@ megasas_free_fusion_context(struct megasas_instance *instance)
fusion->load_balance_info_pages);
}
- if (is_vmalloc_addr(fusion))
- vfree(fusion);
- else
- free_pages((ulong)fusion,
- instance->ctrl_context_pages);
+ if (fusion->log_to_span) {
+ if (is_vmalloc_addr(fusion->log_to_span))
+ vfree(fusion->log_to_span);
+ else
+ free_pages((ulong)fusion->log_to_span,
+ fusion->log_to_span_pages);
+ }
+
+ kfree(fusion);
}
}
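
A condensed sketch of the allocation/free pattern used for log_to_span and load_balance_info above: prefer physically contiguous pages, fall back to vmalloc, and let is_vmalloc_addr() choose the matching free path (kernel-style, using the same APIs that appear in the hunks):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Prefer contiguous pages; fall back to vmalloc when they are scarce. */
static void *alloc_big_table(size_t sz, int *order)
{
	void *p;

	*order = get_order(sz);
	p = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, *order);
	if (!p)
		p = vzalloc(sz);
	return p;
}

static void free_big_table(void *p, int order)
{
	if (!p)
		return;
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long)p, order);
}
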
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index d78d76112501..1814d79cb98d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -51,6 +51,8 @@
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
#define MAX_MSIX_QUEUES_FUSION 128
+#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
+#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)
/* Invader defines */
#define MPI2_TYPE_CUDA 0x2
@@ -103,12 +105,8 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
#define THRESHOLD_REPLY_COUNT 50
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
-
-enum MR_FUSION_ADAPTER_TYPE {
- THUNDERBOLT_SERIES = 0,
- INVADER_SERIES = 1,
- VENTURA_SERIES = 2,
-};
+#define MEGASAS_REDUCE_QD_COUNT 64
+#define IOC_INIT_FRAME_SIZE 4096
/*
* Raid Context structure which describes MegaRAID specific IO Parameters
@@ -1270,6 +1268,12 @@ struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
u32 Reserved2;
};
+struct rdpq_alloc_detail {
+ struct dma_pool *dma_pool_ptr;
+ dma_addr_t pool_entry_phys;
+ union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
+};
+
struct fusion_context {
struct megasas_cmd_fusion **cmd_list;
dma_addr_t req_frames_desc_phys;
@@ -1282,9 +1286,14 @@ struct fusion_context {
struct dma_pool *sg_dma_pool;
struct dma_pool *sense_dma_pool;
+ u8 *sense;
+ dma_addr_t sense_phys_addr;
+
dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
+ struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
struct dma_pool *reply_frames_desc_pool;
+ struct dma_pool *reply_frames_desc_pool_align;
u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
@@ -1318,9 +1327,13 @@ struct fusion_context {
u8 fast_path_io;
struct LD_LOAD_BALANCE_INFO *load_balance_info;
u32 load_balance_info_pages;
- LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
- u8 adapter_type;
+ LD_SPAN_INFO *log_to_span;
+ u32 log_to_span_pages;
struct LD_STREAM_DETECT **stream_detect_by_ld;
+ dma_addr_t ioc_init_request_phys;
+ struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
+ struct megasas_cmd *ioc_init_cmd;
+
};
union desc_value {