Diffstat (limited to 'drivers/scsi/mpt3sas')
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h      |   2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c       | 514
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h       |   5
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c     |  10
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c        |  45
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h        |  12
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c      |  65
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c  |   7
8 files changed, 413 insertions, 247 deletions
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 43a3bf8ff428..d00431f553e1 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -992,7 +992,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { *one and check the value returned for GPIOCount at runtime. */ #ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX -#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) +#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (36) #endif typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index ac0eef975f17..5779f313f6f8 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2905,23 +2905,22 @@ static int _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) { struct sysinfo s; - int dma_mask; if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 || ioc->use_32bit_dma || dma_get_required_mask(&pdev->dev) <= 32) - dma_mask = 32; + ioc->dma_mask = 32; /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) - dma_mask = 63; + ioc->dma_mask = 63; else - dma_mask = 64; + ioc->dma_mask = 64; - if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) || - dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(dma_mask))) + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(ioc->dma_mask))) return -ENODEV; - if (dma_mask > 32) { + if (ioc->dma_mask > 32) { ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); } else { @@ -2931,7 +2930,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) si_meminfo(&s); ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", - dma_mask, convert_to_kb(s.totalram)); + ioc->dma_mask, convert_to_kb(s.totalram)); return 0; } @@ -3678,8 +3677,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, * IOs on the target device is >=8. */ - if (atomic_read(&scmd->device->device_busy) > - MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) + if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) return base_mod64(( atomic64_add_return(1, &ioc->high_iops_outstanding) / MPT3SAS_HIGH_IOPS_BATCH_COUNT), @@ -4173,7 +4171,7 @@ _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid, } /** - * _base_put_smid_default - Default, primarily used for config pages + * _base_put_smid_default_atomic - Default, primarily used for config pages * use Atomic Request Descriptor * @ioc: per adapter object * @smid: system request message index @@ -5232,7 +5230,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) * mpt3sas_free_enclosure_list - release memory * @ioc: per adapter object * - * Free memory allocated during encloure add. + * Free memory allocated during enclosure add. 
*/ void mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc) @@ -5338,10 +5336,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) dma_pool_free(ioc->pcie_sgl_dma_pool, ioc->pcie_sg_lookup[i].pcie_sgl, ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->pcie_sg_lookup[i].pcie_sgl = NULL; } dma_pool_destroy(ioc->pcie_sgl_dma_pool); } - if (ioc->config_page) { dexitprintk(ioc, ioc_info(ioc, "config_page(0x%p): free\n", @@ -5400,6 +5398,271 @@ mpt3sas_check_same_4gb_region(long reply_pool_start_address, u32 pool_sz) } /** + * _base_reduce_hba_queue_depth- Retry with reduced queue depth + * @ioc: Adapter object + * + * Return: 0 for success, non-zero for failure. + **/ +static inline int +_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc) +{ + int reduce_sz = 64; + + if ((ioc->hba_queue_depth - reduce_sz) > + (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) { + ioc->hba_queue_depth -= reduce_sz; + return 0; + } else + return -ENOMEM; +} + +/** + * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory + * for pcie sgl pools. + * @ioc: Adapter object + * @sz: DMA Pool size + * + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ct; + + ioc->pcie_sgl_dma_pool = + dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, + ioc->page_size, 0); + if (!ioc->pcie_sgl_dma_pool) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n"); + return -ENOMEM; + } + + ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; + ioc->chains_per_prp_buffer = + min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io); + for (i = 0; i < ioc->scsiio_depth; i++) { + ioc->pcie_sg_lookup[i].pcie_sgl = + dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL, + &ioc->pcie_sg_lookup[i].pcie_sgl_dma); + if (!ioc->pcie_sg_lookup[i].pcie_sgl) { + ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); + return -EAGAIN; + } + + if (!mpt3sas_check_same_4gb_region( + (long)ioc->pcie_sg_lookup[i].pcie_sgl, sz)) { + ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n", + ioc->pcie_sg_lookup[i].pcie_sgl, + (unsigned long long) + ioc->pcie_sg_lookup[i].pcie_sgl_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + + for (j = 0; j < ioc->chains_per_prp_buffer; j++) { + ct = &ioc->chain_lookup[i].chains_per_smid[j]; + ct->chain_buffer = + ioc->pcie_sg_lookup[i].pcie_sgl + + (j * ioc->chain_segment_sz); + ct->chain_buffer_dma = + ioc->pcie_sg_lookup[i].pcie_sgl_dma + + (j * ioc->chain_segment_sz); + } + } + dinitprintk(ioc, ioc_info(ioc, + "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024)); + dinitprintk(ioc, ioc_info(ioc, + "Number of chains can fit in a PRP page(%d)\n", + ioc->chains_per_prp_buffer)); + return 0; +} + +/** + * _base_allocate_chain_dma_pool - Allocating DMA'able memory + * for chain dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * + * Return: 0 for success, non-zero for failure. 
+ */ +static int +_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + int i = 0, j = 0; + struct chain_tracker *ctr; + + ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, + ioc->chain_segment_sz, 16, 0); + if (!ioc->chain_dma_pool) + return -ENOMEM; + + for (i = 0; i < ioc->scsiio_depth; i++) { + for (j = ioc->chains_per_prp_buffer; + j < ioc->chains_needed_per_io; j++) { + ctr = &ioc->chain_lookup[i].chains_per_smid[j]; + ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool, + GFP_KERNEL, &ctr->chain_buffer_dma); + if (!ctr->chain_buffer) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region((long) + ctr->chain_buffer, ioc->chain_segment_sz)) { + ioc_err(ioc, + "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n", + ctr->chain_buffer, + (unsigned long long)ctr->chain_buffer_dma); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + } + } + dinitprintk(ioc, ioc_info(ioc, + "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n", + ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth * + (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) * + ioc->chain_segment_sz))/1024)); + return 0; +} + +/** + * _base_allocate_sense_dma_pool - Allocating DMA'able memory + * for sense dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + ioc->sense_dma_pool = + dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0); + if (!ioc->sense_dma_pool) + return -ENOMEM; + ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, + GFP_KERNEL, &ioc->sense_dma); + if (!ioc->sense) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) { + dinitprintk(ioc, pr_err( + "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n", + ioc->sense, (unsigned long long) ioc->sense_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + ioc_info(ioc, + "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n", + ioc->sense, (unsigned long long)ioc->sense_dma, + ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024); + return 0; +} + +/** + * _base_allocate_reply_pool - Allocating DMA'able memory + * for reply pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ +static int +_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + /* reply pool, 4 byte align */ + ioc->reply_dma_pool = dma_pool_create("reply pool", + &ioc->pdev->dev, sz, 4, 0); + if (!ioc->reply_dma_pool) + return -ENOMEM; + ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n", + ioc->reply, (unsigned long long) ioc->reply_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + ioc->reply_dma_min_address = (u32)(ioc->reply_dma); + ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; + ioc_info(ioc, + "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->reply, (unsigned long long)ioc->reply_dma, + ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024); + return 0; +} + +/** + * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory + * for reply free dma pool. + * @ioc: Adapter object + * @sz: DMA Pool size + * Return: 0 for success, non-zero for failure. 
+ */ +static int +_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz) +{ + /* reply free queue, 16 byte align */ + ioc->reply_free_dma_pool = dma_pool_create( + "reply_free pool", &ioc->pdev->dev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) + return -ENOMEM; + ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, + GFP_KERNEL, &ioc->reply_free_dma); + if (!ioc->reply_free) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region((long)ioc->reply_free, sz)) { + dinitprintk(ioc, + pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n", + ioc->reply_free, (unsigned long long) ioc->reply_free_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, ioc_info(ioc, + "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); + dinitprintk(ioc, ioc_info(ioc, + "reply_free_dma (0x%llx)\n", + (unsigned long long)ioc->reply_free_dma)); + return 0; +} + +/** + * _base_allocate_reply_post_free_array - Allocating DMA'able memory + * for reply post free array. + * @ioc: Adapter object + * @reply_post_free_array_sz: DMA Pool size + * Return: 0 for success, non-zero for failure. + */ + +static int +_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc, + u32 reply_post_free_array_sz) +{ + ioc->reply_post_free_array_dma_pool = + dma_pool_create("reply_post_free_array pool", + &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); + if (!ioc->reply_post_free_array_dma_pool) + return -ENOMEM; + ioc->reply_post_free_array = + dma_pool_alloc(ioc->reply_post_free_array_dma_pool, + GFP_KERNEL, &ioc->reply_post_free_array_dma); + if (!ioc->reply_post_free_array) + return -EAGAIN; + if (!mpt3sas_check_same_4gb_region((long)ioc->reply_post_free_array, + reply_post_free_array_sz)) { + dinitprintk(ioc, pr_err( + "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n", + ioc->reply_free, + (unsigned long long) ioc->reply_free_dma)); + ioc->use_32bit_dma = true; + return -EAGAIN; + } + return 0; +} +/** * base_alloc_rdpq_dma_pool - Allocating DMA'able memory * for reply queues. * @ioc: per adapter object @@ -5492,13 +5755,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) u16 chains_needed_per_io; u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz; u32 retry_sz; - u32 rdpq_sz = 0; + u32 rdpq_sz = 0, sense_sz = 0; u16 max_request_credit, nvme_blocks_needed; unsigned short sg_tablesize; u16 sge_size; - int i, j; - int ret = 0; - struct chain_tracker *ct; + int i; + int ret = 0, rc = 0; dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -5802,6 +6064,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) * be required for NVMe PRP's, only each set of NVMe blocks will be * contiguous, so a new set is allocated for each possible I/O. 
*/ + ioc->chains_per_prp_buffer = 0; if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) { nvme_blocks_needed = @@ -5816,190 +6079,67 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) goto out; } sz = nvme_blocks_needed * ioc->page_size; - ioc->pcie_sgl_dma_pool = - dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0); - if (!ioc->pcie_sgl_dma_pool) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n"); - goto out; - } - - ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz; - ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer, - ioc->chains_needed_per_io); - - for (i = 0; i < ioc->scsiio_depth; i++) { - ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc( - ioc->pcie_sgl_dma_pool, GFP_KERNEL, - &ioc->pcie_sg_lookup[i].pcie_sgl_dma); - if (!ioc->pcie_sg_lookup[i].pcie_sgl) { - ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n"); - goto out; - } - for (j = 0; j < ioc->chains_per_prp_buffer; j++) { - ct = &ioc->chain_lookup[i].chains_per_smid[j]; - ct->chain_buffer = - ioc->pcie_sg_lookup[i].pcie_sgl + - (j * ioc->chain_segment_sz); - ct->chain_buffer_dma = - ioc->pcie_sg_lookup[i].pcie_sgl_dma + - (j * ioc->chain_segment_sz); - } - } - - dinitprintk(ioc, - ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->scsiio_depth, sz, - (sz * ioc->scsiio_depth) / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n", - ioc->chains_per_prp_buffer)); + rc = _base_allocate_pcie_sgl_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; total_sz += sz * ioc->scsiio_depth; } - ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev, - ioc->chain_segment_sz, 16, 0); - if (!ioc->chain_dma_pool) { - ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n"); - goto out; - } - for (i = 0; i < ioc->scsiio_depth; i++) { - for (j = ioc->chains_per_prp_buffer; - j < ioc->chains_needed_per_io; j++) { - ct = &ioc->chain_lookup[i].chains_per_smid[j]; - ct->chain_buffer = dma_pool_alloc( - ioc->chain_dma_pool, GFP_KERNEL, - &ct->chain_buffer_dma); - if (!ct->chain_buffer) { - ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n"); - goto out; - } - } - total_sz += ioc->chain_segment_sz; - } - + rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io - + ioc->chains_per_prp_buffer) * ioc->scsiio_depth); dinitprintk(ioc, - ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", - ioc->chain_depth, ioc->chain_segment_sz, - (ioc->chain_depth * ioc->chain_segment_sz) / 1024)); - + ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->chain_depth, ioc->chain_segment_sz, + (ioc->chain_depth * ioc->chain_segment_sz) / 1024)); /* sense buffers, 4 byte align */ - sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; - ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz, - 4, 0); - if (!ioc->sense_dma_pool) { - ioc_err(ioc, "sense pool: dma_pool_create failed\n"); - goto out; - } - ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL, - &ioc->sense_dma); - if (!ioc->sense) { - ioc_err(ioc, "sense pool: dma_pool_alloc failed\n"); - goto out; - } - /* sense buffer requires to be in same 4 gb region. - * Below function will check the same. - * In case of failure, new pci pool will be created with updated - * alignment. 
Older allocation and pool will be destroyed. - * Alignment will be used such a way that next allocation if - * success, will always meet same 4gb region requirement. - * Actual requirement is not alignment, but we need start and end of - * DMA address must have same upper 32 bit address. - */ - if (!mpt3sas_check_same_4gb_region((long)ioc->sense, sz)) { - //Release Sense pool & Reallocate - dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); - dma_pool_destroy(ioc->sense_dma_pool); - ioc->sense = NULL; - - ioc->sense_dma_pool = - dma_pool_create("sense pool", &ioc->pdev->dev, sz, - roundup_pow_of_two(sz), 0); - if (!ioc->sense_dma_pool) { - ioc_err(ioc, "sense pool: pci_pool_create failed\n"); - goto out; - } - ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL, - &ioc->sense_dma); - if (!ioc->sense) { - ioc_err(ioc, "sense pool: pci_pool_alloc failed\n"); - goto out; - } - } + sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; + rc = _base_allocate_sense_dma_pool(ioc, sense_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; + total_sz += sense_sz; ioc_info(ioc, "sense pool(0x%p)- dma(0x%llx): depth(%d)," "element_size(%d), pool_size(%d kB)\n", ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz / 1024); - - total_sz += sz; - /* reply pool, 4 byte align */ sz = ioc->reply_free_queue_depth * ioc->reply_sz; - ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz, - 4, 0); - if (!ioc->reply_dma_pool) { - ioc_err(ioc, "reply pool: dma_pool_create failed\n"); - goto out; - } - ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL, - &ioc->reply_dma); - if (!ioc->reply) { - ioc_err(ioc, "reply pool: dma_pool_alloc failed\n"); - goto out; - } - ioc->reply_dma_min_address = (u32)(ioc->reply_dma); - ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; - dinitprintk(ioc, - ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", - ioc->reply, ioc->reply_free_queue_depth, - ioc->reply_sz, sz / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "reply_dma(0x%llx)\n", - (unsigned long long)ioc->reply_dma)); + rc = _base_allocate_reply_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; total_sz += sz; /* reply free queue, 16 byte align */ sz = ioc->reply_free_queue_depth * 4; - ioc->reply_free_dma_pool = dma_pool_create("reply_free pool", - &ioc->pdev->dev, sz, 16, 0); - if (!ioc->reply_free_dma_pool) { - ioc_err(ioc, "reply_free pool: dma_pool_create failed\n"); - goto out; - } - ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL, - &ioc->reply_free_dma); - if (!ioc->reply_free) { - ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n"); - goto out; - } - dinitprintk(ioc, - ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->reply_free, ioc->reply_free_queue_depth, - 4, sz / 1024)); + rc = _base_allocate_reply_free_dma_pool(ioc, sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; dinitprintk(ioc, ioc_info(ioc, "reply_free_dma (0x%llx)\n", (unsigned long long)ioc->reply_free_dma)); total_sz += sz; - if (ioc->rdpq_array_enable) { reply_post_free_array_sz = ioc->reply_queue_count * sizeof(Mpi2IOCInitRDPQArrayEntry); - ioc->reply_post_free_array_dma_pool = - dma_pool_create("reply_post_free_array pool", - &ioc->pdev->dev, reply_post_free_array_sz, 16, 0); - if (!ioc->reply_post_free_array_dma_pool) { - 
dinitprintk(ioc, - ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n")); - goto out; - } - ioc->reply_post_free_array = - dma_pool_alloc(ioc->reply_post_free_array_dma_pool, - GFP_KERNEL, &ioc->reply_post_free_array_dma); - if (!ioc->reply_post_free_array) { - dinitprintk(ioc, - ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n")); - goto out; - } + rc = _base_allocate_reply_post_free_array(ioc, + reply_post_free_array_sz); + if (rc == -ENOMEM) + return -ENOMEM; + else if (rc == -EAGAIN) + goto try_32bit_dma; } ioc->config_page_sz = 512; ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev, @@ -6022,6 +6162,19 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->shost->sg_tablesize); return 0; +try_32bit_dma: + _base_release_memory_pools(ioc); + if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) { + /* Change dma coherent mask to 32 bit and reallocate */ + if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) { + pr_err("Setting 32 bit coherent DMA mask Failed %s\n", + pci_name(ioc->pdev)); + return -ENODEV; + } + } else if (_base_reduce_hba_queue_depth(ioc) != 0) + return -ENOMEM; + goto retry_allocation; + out: return -ENOMEM; } @@ -7252,6 +7405,8 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "sending diag reset !!\n"); + pci_cfg_access_lock(ioc->pdev); + drsprintk(ioc, ioc_info(ioc, "clear interrupts\n")); count = 0; @@ -7342,10 +7497,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) goto out; } + pci_cfg_access_unlock(ioc->pdev); ioc_info(ioc, "diag reset: SUCCESS\n"); return 0; out: + pci_cfg_access_unlock(ioc->pdev); ioc_err(ioc, "diag reset: FAILED\n"); return -EFAULT; } @@ -7682,6 +7839,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->rdpq_array_enable_assigned = 0; ioc->use_32bit_dma = false; + ioc->dma_mask = 64; if (ioc->is_aero_ioc) ioc->base_readl = &_base_readl_aero; else diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 315aee6ef86f..98558d9c8c2d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -77,9 +77,9 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "37.100.00.00" +#define MPT3SAS_DRIVER_VERSION "37.101.00.00" #define MPT3SAS_MAJOR_VERSION 37 -#define MPT3SAS_MINOR_VERSION 100 +#define MPT3SAS_MINOR_VERSION 101 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -1371,6 +1371,7 @@ struct MPT3SAS_ADAPTER { u16 thresh_hold; u8 high_iops_queues; u32 drv_support_bitmap; + u32 dma_mask; bool enable_sdev_max_qd; bool use_32bit_dma; diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 8238843523b5..55cd32908924 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -1781,7 +1781,7 @@ mpt3sas_config_get_driver_trigger_pg0(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_config_set_driver_trigger_pg0 - write driver trigger page 0 + * _config_set_driver_trigger_pg0 - write driver trigger page 0 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page @@ -1915,7 +1915,7 @@ mpt3sas_config_get_driver_trigger_pg1(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_config_set_driver_trigger_pg1 - write driver trigger page 1 + * _config_set_driver_trigger_pg1 - write driver trigger page 1 * @ioc: 
per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page @@ -2066,7 +2066,7 @@ mpt3sas_config_get_driver_trigger_pg2(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_config_set_driver_trigger_pg2 - write driver trigger page 2 + * _config_set_driver_trigger_pg2 - write driver trigger page 2 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page @@ -2226,7 +2226,7 @@ mpt3sas_config_get_driver_trigger_pg3(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_config_set_driver_trigger_pg3 - write driver trigger page 3 + * _config_set_driver_trigger_pg3 - write driver trigger page 3 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page @@ -2383,7 +2383,7 @@ mpt3sas_config_get_driver_trigger_pg4(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_config_set_driver_trigger_pg4 - write driver trigger page 4 + * _config_set_driver_trigger_pg4 - write driver trigger page 4 * @ioc: per adapter object * @mpi_reply: reply mf payload returned from firmware * @config_page: contents of the config page diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 44f9a05db94e..b66140e4c370 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -454,7 +454,7 @@ out: } /** - * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) + * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl) * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. @@ -486,7 +486,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** - * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd. + * mpt3sas_ctl_clear_outstanding_ioctls - clears outstanding ioctl cmd. * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. @@ -503,7 +503,7 @@ void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc) } /** - * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) + * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl) * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. @@ -2507,7 +2507,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) __func__, karg.unique_id); return -EPERM; } - memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query)); + memset(&karg.rel_query, 0, sizeof(karg.rel_query)); if ((ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n", @@ -2520,8 +2520,7 @@ _ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) __func__, buffer_type); return -EPERM; } - memcpy(&karg.buffer_rel_condition, &ioc->htb_rel, - sizeof(struct htb_rel_query)); + memcpy(&karg.rel_query, &ioc->htb_rel, sizeof(karg.rel_query)); out: if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) { ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n", @@ -2759,7 +2758,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } #ifdef CONFIG_COMPAT /** - *_ ctl_ioctl_compat - main ioctl entry point (compat) + * _ctl_ioctl_compat - main ioctl entry point (compat) * @file: ? * @cmd: ? * @arg: ? 
@@ -2777,7 +2776,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) } /** - *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) + * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat) * @file: ? * @cmd: ? * @arg: ? @@ -3045,7 +3044,7 @@ fw_queue_depth_show(struct device *cdev, struct device_attribute *attr, static DEVICE_ATTR_RO(fw_queue_depth); /** - * sas_address_show - sas address + * host_sas_address_show - sas address * @cdev: pointer to embedded class device * @attr: ? * @buf: the buffer returned @@ -3203,7 +3202,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, { struct Scsi_Host *shost = class_to_shost(cdev); struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); - Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; + Mpi2IOUnitPage3_t io_unit_pg3; Mpi2ConfigReply_t mpi_reply; u16 backup_rail_monitor_status = 0; u16 ioc_status; @@ -3220,17 +3219,10 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, if (ioc->pci_error_recovery || ioc->remove_host) goto out; - /* allocate upto GPIOVal 36 entries */ - sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); - io_unit_pg3 = kzalloc(sz, GFP_KERNEL); - if (!io_unit_pg3) { - rc = -ENOMEM; - ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n", - __func__, sz); - goto out; - } + sz = sizeof(io_unit_pg3); + memset(&io_unit_pg3, 0, sz); - if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != + if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, &io_unit_pg3, sz) != 0) { ioc_err(ioc, "%s: failed reading iounit_pg3\n", __func__); @@ -3246,19 +3238,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr, goto out; } - if (io_unit_pg3->GPIOCount < 25) { - ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n", - __func__, io_unit_pg3->GPIOCount); + if (io_unit_pg3.GPIOCount < 25) { + ioc_err(ioc, "%s: iounit_pg3.GPIOCount less than 25 entries, detected (%d) entries\n", + __func__, io_unit_pg3.GPIOCount); rc = -EINVAL; goto out; } /* BRM status is in bit zero of GPIOVal[24] */ - backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); + backup_rail_monitor_status = le16_to_cpu(io_unit_pg3.GPIOVal[24]); rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1)); out: - kfree(io_unit_pg3); mutex_unlock(&ioc->pci_access_mutex); return rc; } @@ -3669,7 +3660,7 @@ static DEVICE_ATTR_RW(diag_trigger_scsi); /** - * diag_trigger_scsi_show - show the diag_trigger_mpi attribute + * diag_trigger_mpi_show - show the diag_trigger_mpi attribute * @cdev: pointer to embedded class device * @attr: ? * @buf: the buffer returned @@ -3928,7 +3919,7 @@ sas_device_handle_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(sas_device_handle); /** - * sas_ncq_io_prio_show - send prioritized io commands to device + * sas_ncq_prio_enable_show - send prioritized io commands to device * @dev: pointer to embedded device * @attr: ? 
* @buf: the buffer returned diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index d2ccdafb8df2..8f6ffb40261c 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -50,6 +50,8 @@ #include <linux/miscdevice.h> #endif +#include "mpt3sas_base.h" + #ifndef MPT2SAS_MINOR #define MPT2SAS_MINOR (MPT_MINOR + 1) #endif @@ -436,19 +438,13 @@ struct mpt3_diag_read_buffer { * struct mpt3_addnl_diag_query - diagnostic buffer release reason * @hdr - generic header * @unique_id - unique id associated with this buffer. - * @buffer_rel_condition - Release condition ioctl/sysfs/reset - * @reserved1 - * @trigger_type - Master/Event/scsi/MPI - * @trigger_info_dwords - Data Correspondig to trigger type + * @rel_query - release query. * @reserved2 */ struct mpt3_addnl_diag_query { struct mpt3_ioctl_header hdr; uint32_t unique_id; - uint16_t buffer_rel_condition; - uint16_t reserved1; - uint32_t trigger_type; - uint32_t trigger_info_dwords[2]; + struct htb_rel_query rel_query; uint32_t reserved2[2]; }; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6aa6de729187..d00aca3c77ce 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -749,9 +749,10 @@ __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc, } /** - * mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided + * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided * sas address from sas_device_list list * @ioc: per adapter object + * @sas_address: device sas address * @port: port number * * Search for _sas_device object corresponding to provided sas address, @@ -3423,7 +3424,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd) MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, tr_timeout, tr_method); /* Check for busy commands after reset */ - if (r == SUCCESS && atomic_read(&scmd->device->device_busy)) + if (r == SUCCESS && scsi_device_busy(scmd->device)) r = FAILED; out: sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", @@ -4518,7 +4519,7 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, } /** - * _scsih_check_for_pending_internal_cmds - check for pending internal messages + * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages * @ioc: per adapter object * @smid: system request message index * @@ -6174,10 +6175,10 @@ enum hba_port_matched_codes { * _scsih_look_and_get_matched_port_entry - Get matched hba port entry * from HBA port table * @ioc: per adapter object - * @port_entry - hba port entry from temporary port table which needs to be + * @port_entry: hba port entry from temporary port table which needs to be * searched for matched entry in the HBA port table - * @matched_port_entry - save matched hba port entry here - * @count - count of matched entries + * @matched_port_entry: save matched hba port entry here + * @count: count of matched entries * * return type of matched entry found. */ @@ -6483,6 +6484,9 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) if (!vphy) return NULL; + if (!port->vphys_mask) + INIT_LIST_HEAD(&port->vphys_list); + /* * Enable bit corresponding to HBA phy number on its * parent hba_port object's vphys_mask field. 
@@ -6490,7 +6494,6 @@ _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num) port->vphys_mask |= (1 << phy_num); vphy->phy_mask |= (1 << phy_num); - INIT_LIST_HEAD(&port->vphys_list); list_add_tail(&vphy->list, &port->vphys_list); ioc_info(ioc, @@ -6952,6 +6955,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) * mpt3sas_expander_remove - removing expander object * @ioc: per adapter object * @sas_address: expander sas_address + * @port: hba port entry */ void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, @@ -10219,8 +10223,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) Mpi2ExpanderPage0_t expander_pg0; Mpi2SasDevicePage0_t sas_device_pg0; Mpi26PCIeDevicePage0_t pcie_device_pg0; - Mpi2RaidVolPage1_t volume_pg1; - Mpi2RaidVolPage0_t volume_pg0; + Mpi2RaidVolPage1_t *volume_pg1; + Mpi2RaidVolPage0_t *volume_pg0; Mpi2RaidPhysDiskPage0_t pd_pg0; Mpi2EventIrConfigElement_t element; Mpi2ConfigReply_t mpi_reply; @@ -10235,6 +10239,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) u8 retry_count; unsigned long flags; + volume_pg0 = kzalloc(sizeof(*volume_pg0), GFP_KERNEL); + if (!volume_pg0) + return; + + volume_pg1 = kzalloc(sizeof(*volume_pg1), GFP_KERNEL); + if (!volume_pg1) { + kfree(volume_pg0); + return; + } + ioc_info(ioc, "scan devices: start\n"); _scsih_sas_host_refresh(ioc); @@ -10344,7 +10358,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) /* volumes */ handle = 0xFFFF; while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, - &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { @@ -10352,15 +10366,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); break; } - handle = le16_to_cpu(volume_pg1.DevHandle); + handle = le16_to_cpu(volume_pg1->DevHandle); spin_lock_irqsave(&ioc->raid_device_lock, flags); raid_device = _scsih_raid_device_find_by_wwid(ioc, - le64_to_cpu(volume_pg1.WWID)); + le64_to_cpu(volume_pg1->WWID)); spin_unlock_irqrestore(&ioc->raid_device_lock, flags); if (raid_device) continue; if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, - &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sizeof(Mpi2RaidVolPage0_t))) continue; ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & @@ -10370,17 +10384,17 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo)); break; } - if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || - volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || - volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { + if (volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; - element.VolDevHandle = volume_pg1.DevHandle; + element.VolDevHandle = volume_pg1->DevHandle; ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n", - volume_pg1.DevHandle); + volume_pg1->DevHandle); _scsih_sas_volume_add(ioc, &element); ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n", - volume_pg1.DevHandle); + volume_pg1->DevHandle); 
} } @@ -10468,12 +10482,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n", handle, (u64)le64_to_cpu(pcie_device_pg0.WWID)); } + + kfree(volume_pg0); + kfree(volume_pg1); + ioc_info(ioc, "\tpcie devices: pcie end devices complete\n"); ioc_info(ioc, "scan devices: complete\n"); } /** - * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) + * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih) * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. @@ -10514,7 +10532,7 @@ mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) } /** - * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) + * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih) * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. @@ -10802,7 +10820,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, pr_notice("cannot be powered and devices connected\n"); pr_notice("to this active cable will not be seen\n"); pr_notice("This active cable requires %d mW of power\n", - ActiveCableEventData->ActiveCablePowerRequirement); + le32_to_cpu( + ActiveCableEventData->ActiveCablePowerRequirement)); break; case MPI26_EVENT_ACTIVE_CABLE_DEGRADED: @@ -12281,7 +12300,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev) } /** - * scsih__ncq_prio_supp - Check for NCQ command priority support + * scsih_ncq_prio_supp - Check for NCQ command priority support * @sdev: scsi device struct * * This is called when a user indicates they would like to enable diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 6f4708224755..0681daee6c14 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -62,7 +62,7 @@ /** * _transport_get_port_id_by_sas_phy - get zone's port id that Phy belong to - * @phy - sas_phy object + * @phy: sas_phy object * * Return Port number */ @@ -339,10 +339,11 @@ struct rep_manu_reply { }; /** - * transport_expander_report_manufacture - obtain SMP report_manufacture + * _transport_expander_report_manufacture - obtain SMP report_manufacture * @ioc: per adapter object * @sas_address: expander sas address * @edev: the sas_expander_device object + * @port_id: Port ID number * * Fills in the sas_expander_device object when SMP port is created. * @@ -671,7 +672,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, * @ioc: per adapter object * @handle: handle of attached device * @sas_address: sas address of parent expander or sas host - * @port: hba port entry + * @hba_port: hba port entry * Context: This function will acquire ioc->sas_node_lock. * * Adding new port object to the sas_node->sas_port_list. |
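Note on the allocation changes above: the new _base_allocate_*_pool() helpers all call mpt3sas_check_same_4gb_region() and return -EAGAIN when a pool straddles a 4 GB bus-address boundary, after which the caller releases the pools and retries with a 32-bit DMA mask (try_32bit_dma) or a reduced HBA queue depth. The snippet below is only a minimal user-space C sketch of that boundary test, with invented names (in_same_4gb_region and the sample addresses); it operates on the bus (DMA) address to illustrate the upper-32-bit comparison the patch relies on, and is not a copy of the driver's helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A pool is acceptable only if its first and last byte share the same
 * upper 32 bits of the bus address, i.e. it does not cross a 4 GB line. */
static bool in_same_4gb_region(uint64_t dma_addr, uint32_t pool_sz)
{
	return (dma_addr >> 32) == ((dma_addr + pool_sz - 1) >> 32);
}

int main(void)
{
	uint32_t sz = 64 * 1024;             /* 64 KiB pool */
	uint64_t crossing  = 0xFFFFF000ULL;  /* straddles the 4 GB boundary */
	uint64_t contained = 0x100000000ULL; /* fully inside one 4 GB window */

	printf("crossing:  %s\n", in_same_4gb_region(crossing, sz)  ? "ok" : "rejected");
	printf("contained: %s\n", in_same_4gb_region(contained, sz) ? "ok" : "rejected");
	return 0;
}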