author:    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-04 21:19:44 +0200
committer: Linus Torvalds <torvalds@linux-foundation.org>  2017-05-04 21:19:44 +0200
commit:    8d5e72dfdf0fa29a21143fd72746c6f43295ce9f (patch)
tree:      cd51765801a1ad27a6db13809e00085b2677d351 /drivers
parent:    Merge tag 'gpio-v4.12-1' of git://git.kernel.org/pub/scm/linux/kernel/git/lin... (diff)
parent:    scsi: qla4xxx: fix spelling mistake: "Tempalate" -> "Template" (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"This update includes the usual round of major driver updates
(hisi_sas, ufs, fnic, cxlflash, be2iscsi, ipr, stex). There's also the
usual amount of cosmetic and spelling stuff"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (155 commits)
scsi: qla4xxx: fix spelling mistake: "Tempalate" -> "Template"
scsi: stex: make S6flag static
scsi: mac_esp: fix to pass correct device identity to free_irq()
scsi: aacraid: pci_alloc_consistent() failures on ARM64
scsi: ufs: make ufshcd_get_lists_status() register operation obvious
scsi: ufs: use MASK_EE_STATUS
scsi: mac_esp: Replace bogus memory barrier with spinlock
scsi: fcoe: make fcoe_e_d_tov and fcoe_r_a_tov static
scsi: sd_zbc: Do not write lock zones for reset
scsi: sd_zbc: Remove superfluous assignments
scsi: sd: sd_zbc: Rename sd_zbc_setup_write_cmnd
scsi: Improve scsi_get_sense_info_fld
scsi: sd: Cleanup sd_done sense data handling
scsi: sd: Improve sd_completed_bytes
scsi: sd: Fix function descriptions
scsi: mpt3sas: remove redundant wmb
scsi: mpt: Move scsi_remove_host() out of mptscsih_remove_host()
scsi: sg: reset 'res_in_use' after unlinking reserved array
scsi: mvumi: remove code handling zero scsi_sg_count(scmd) case
scsi: fusion: fix spelling mistake: "Persistancy" -> "Persistency"
...
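Several of the aacraid entries above (for example "pci_alloc_consistent() failures on ARM64") are instances of one mechanical conversion that recurs throughout the diff below: replacing the deprecated pci_alloc_consistent()/pci_free_consistent() wrappers with dma_alloc_coherent()/dma_free_coherent(). The old wrapper always allocated with GFP_ATOMIC; calling the DMA API directly lets these non-atomic paths pass GFP_KERNEL, which avoids the spurious allocation failures seen on ARM64. A minimal sketch of the pattern, with invented names (my_alloc_buf, my_free_buf) rather than anything taken from the patch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *my_alloc_buf(struct pci_dev *pdev, size_t size, dma_addr_t *handle)
{
	/*
	 * Old style was pci_alloc_consistent(pdev, size, handle), which
	 * took the pci_dev directly and implied GFP_ATOMIC. The generic
	 * DMA API takes the underlying struct device and an explicit
	 * gfp_t, so sleeping contexts can use GFP_KERNEL.
	 */
	return dma_alloc_coherent(&pdev->dev, size, handle, GFP_KERNEL);
}

static void my_free_buf(struct pci_dev *pdev, size_t size,
			void *cpu_addr, dma_addr_t handle)
{
	/* Counterpart of pci_free_consistent(). */
	dma_free_coherent(&pdev->dev, size, cpu_addr, handle);
}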
Diffstat (limited to 'drivers')
107 files changed, 4272 insertions, 2717 deletions
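One more recurring pattern worth noting before the diff itself: the be2iscsi portion drops the driver-managed msix_entries[] table, pci_enable_msix_range() and the private msix_enabled flag in favour of pci_alloc_irq_vectors_affinity(), pci_irq_vector() and pci_dev->msix_enabled. A hedged sketch of that idiom with invented names (my_setup_irqs, my_isr), not the driver's actual code; like be2iscsi, it reserves one non-affinity post-vector (be2iscsi uses it for the MCC queue):

#include <linux/interrupt.h>
#include <linux/pci.h>

static int my_setup_irqs(struct pci_dev *pdev, void *priv,
			 irq_handler_t my_isr, int max_vecs)
{
	const struct irq_affinity desc = { .post_vectors = 1 };
	int nvec, i, err;

	/* Allocate MSI-X vectors spread across CPUs, keeping one aside. */
	nvec = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &desc);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* Vectors are looked up on demand; no msix_entries[] table. */
		err = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
				  "my_dev", priv);
		if (err) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), priv);
			pci_free_irq_vectors(pdev);
			return err;
		}
	}
	return nvec;
}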
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 1e73064b0fb2..62cff5afc6bd 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7396,7 +7396,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 		break;
 	case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
 		snprintf(evStr, EVENT_DESCR_STR_SZ,
-		    "SAS Device Status Change: No Persistancy: "
+		    "SAS Device Status Change: No Persistency: "
 		    "id=%d channel=%d", id, channel);
 		break;
 	case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 98eafae78576..d065062240bc 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1329,7 +1329,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 					    WQ_MEM_RECLAIM);
 	if (!ioc->fc_rescan_work_q) {
 		error = -ENOMEM;
-		goto out_mptfc_probe;
+		goto out_mptfc_host;
 	}
 
 	/*
@@ -1351,6 +1351,9 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	return 0;
 
+out_mptfc_host:
+	scsi_remove_host(sh);
+
 out_mptfc_probe:
 
 	mptscsih_remove(pdev);
@@ -1530,6 +1533,8 @@ static void mptfc_remove(struct pci_dev *pdev)
 		}
 	}
 
+	scsi_remove_host(ioc->sh);
+
 	mptscsih_remove(pdev);
 }
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 08a807d6a44f..6ba07c7feb92 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1176,8 +1176,6 @@ mptscsih_remove(struct pci_dev *pdev)
 	MPT_SCSI_HOST *hd;
 	int sz1;
 
-	scsi_remove_host(host);
-
 	if((hd = shost_priv(host)) == NULL)
 		return;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 031e088edb5e..9a336a161d9f 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1548,11 +1548,19 @@ out_mptspi_probe:
 	return error;
 }
 
+static void mptspi_remove(struct pci_dev *pdev)
+{
+	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+	scsi_remove_host(ioc->sh);
+	mptscsih_remove(pdev);
+}
+
 static struct pci_driver mptspi_driver = {
 	.name		= "mptspi",
 	.id_table	= mptspi_pci_table,
 	.probe		= mptspi_probe,
-	.remove		= mptscsih_remove,
+	.remove		= mptspi_remove,
 	.shutdown	= mptscsih_shutdown,
 #ifdef CONFIG_PM
 	.suspend	= mptscsih_suspend,
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 65fed7146e9b..d3fe3ea902d4 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components,
 	for (i = 0; i < components; i++) {
 		edev->component[i].number = -1;
 		edev->component[i].slot = -1;
-		edev->component[i].power_status = 1;
+		edev->component[i].power_status = -1;
 	}
 
 	mutex_lock(&container_list_lock);
@@ -594,6 +594,11 @@ static ssize_t get_component_power_status(struct device *cdev,
 
 	if (edev->cb->get_power_status)
 		edev->cb->get_power_status(edev, ecomp);
+
+	/* If still uninitialized, the callback failed or does not exist. */
+	if (ecomp->power_status == -1)
+		return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
+
 	return snprintf(buf, 40, "%s\n", ecomp->power_status ?
 			"on" : "off");
 }
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index c7be7bb37209..35380a58d3f0 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3009,7 +3009,7 @@ static int blogic_hostreset(struct scsi_cmnd *SCpnt)
 
 	spin_lock_irq(SCpnt->device->host->host_lock);
 
-	blogic_inc_count(&stats->adatper_reset_req);
+	blogic_inc_count(&stats->adapter_reset_req);
 	rc = blogic_resetadapter(adapter, false);
 	spin_unlock_irq(SCpnt->device->host->host_lock);
@@ -3560,8 +3560,16 @@ Target  Requested Completed  Requested Completed  Requested Completed\n\
 		struct blogic_tgt_flags *tgt_flags = &adapter->tgt_flags[tgt];
 		if (!tgt_flags->tgt_exists)
 			continue;
-		seq_printf(m, "\
-  %2d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d\n", tgt, tgt_stats[tgt].aborts_request, tgt_stats[tgt].aborts_tried, tgt_stats[tgt].aborts_done, tgt_stats[tgt].bdr_request, tgt_stats[tgt].bdr_tried, tgt_stats[tgt].bdr_done, tgt_stats[tgt].adatper_reset_req, tgt_stats[tgt].adapter_reset_attempt, tgt_stats[tgt].adapter_reset_done);
+		seq_printf(m, "  %2d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d	 %5d\n",
+			   tgt, tgt_stats[tgt].aborts_request,
+			   tgt_stats[tgt].aborts_tried,
+			   tgt_stats[tgt].aborts_done,
+			   tgt_stats[tgt].bdr_request,
+			   tgt_stats[tgt].bdr_tried,
+			   tgt_stats[tgt].bdr_done,
+			   tgt_stats[tgt].adapter_reset_req,
+			   tgt_stats[tgt].adapter_reset_attempt,
+			   tgt_stats[tgt].adapter_reset_done);
 	}
 	seq_printf(m, "\nExternal Host Adapter Resets: %d\n", adapter->ext_resets);
 	seq_printf(m, "Host Adapter Internal Errors: %d\n", adapter->adapter_intern_errors);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index b53ec2f1e8cd..8d47e2c88d24 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -935,7 +935,7 @@ struct blogic_tgt_stats {
 	unsigned short bdr_request;
 	unsigned short bdr_tried;
 	unsigned short bdr_done;
-	unsigned short adatper_reset_req;
+	unsigned short adapter_reset_req;
 	unsigned short adapter_reset_attempt;
 	unsigned short adapter_reset_done;
 };
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index e3e93def722b..43d88389e899 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1678,8 +1678,8 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
 		sizeof(struct sgentry) + sizeof(struct sgentry64);
 	datasize = sizeof(struct aac_ciss_identify_pd);
 
-	identify_resp = pci_alloc_consistent(dev->pdev, datasize, &addr);
-
+	identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
+					   GFP_KERNEL);
 	if (!identify_resp)
 		goto fib_free_ptr;
 
@@ -1720,7 +1720,7 @@ int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
 	dev->hba_map[bus][target].qd_limit =
 		identify_resp->current_queue_depth_limit;
 
-	pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr);
+	dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);
 
 	aac_fib_complete(fibptr);
 
@@ -1814,9 +1814,8 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
 	datasize = sizeof(struct aac_ciss_phys_luns_resp) +
 		(AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
 
-	phys_luns = (struct aac_ciss_phys_luns_resp *) pci_alloc_consistent(
-			dev->pdev, datasize, &addr);
-
+	phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
+				       GFP_KERNEL);
 	if (phys_luns == NULL) {
 		rcode = -ENOMEM;
 		goto err_out;
@@ -1861,7 +1860,7 @@ int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan)
 		aac_update_hba_map(dev, phys_luns, rescan);
 	}
 
-	pci_free_consistent(dev->pdev, datasize, (void *) phys_luns, addr);
+	dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr);
 
 err_out:
 	return rcode;
 }
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index f6afd50579c0..d2f8d5954840 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -100,7 +100,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 			goto cleanup;
 		}
 
-		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
+		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
+					  GFP_KERNEL);
 		if (!kfib) {
 			retval = -ENOMEM;
 			goto cleanup;
@@ -160,7 +161,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
 		retval = -EFAULT;
 cleanup:
 	if (hw_fib) {
-		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
+		dma_free_coherent(&dev->pdev->dev, size, kfib,
+				  fibptr->hw_fib_pa);
 		fibptr->hw_fib_pa = hw_fib_pa;
 		fibptr->hw_fib_va = hw_fib;
 	}
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 35607005f7e1..1151505853cf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -99,8 +99,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
 	size = fibsize + aac_init_size + commsize + commalign +
 			printfbufsiz + host_rrq_size;
 
-	base = pci_alloc_consistent(dev->pdev, size, &phys);
-
+	base = dma_alloc_coherent(&dev->pdev->dev, size, &phys, GFP_KERNEL);
 	if (base == NULL) {
 		printk(KERN_ERR "aacraid: unable to create mapping.\n");
 		return 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 1f4918355fdb..7a1b8a2ce658 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -73,13 +73,13 @@ static int fib_map_alloc(struct aac_dev *dev)
 	}
 
 	dprintk((KERN_INFO
-	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
-	  dev->pdev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
+	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
+	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
 	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
-	dev->hw_fib_va = pci_alloc_consistent(dev->pdev,
+	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
 		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
 		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
-		&dev->hw_fib_pa);
+		&dev->hw_fib_pa, GFP_KERNEL);
 	if (dev->hw_fib_va == NULL)
 		return -ENOMEM;
 	return 0;
@@ -106,8 +106,8 @@ void aac_fib_map_free(struct aac_dev *dev)
 	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
 	alloc_size = fib_size * num_fibs + ALIGN32 - 1;
 
-	pci_free_consistent(dev->pdev, alloc_size, dev->hw_fib_va,
-			    dev->hw_fib_pa);
+	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
+			  dev->hw_fib_pa);
 
 	dev->hw_fib_va = NULL;
 	dev->hw_fib_pa = 0;
@@ -1571,7 +1571,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	 * case.
 	 */
 	aac_fib_map_free(aac);
-	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
+	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
+			  aac->comm_phys);
 	aac->comm_addr = NULL;
 	aac->comm_phys = 0;
 	kfree(aac->queues);
@@ -2319,7 +2320,8 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
 	if (!fibptr)
 		goto out;
 
-	dma_buf = pci_alloc_consistent(dev->pdev, datasize, &addr);
+	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
+				     GFP_KERNEL);
 	if (!dma_buf)
 		goto fib_free_out;
 
@@ -2354,7 +2356,7 @@ static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
 	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
 			   FsaNormal, 1, 1, NULL, NULL);
 
-	pci_free_consistent(dev->pdev, datasize, (void *)dma_buf, addr);
+	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
 
 	/*
 	 * Do not set XferState to zero unless
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 520ada8266af..372a07533026 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1592,8 +1592,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 out_unmap:
 	aac_fib_map_free(aac);
 	if (aac->comm_addr)
-		pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
-				    aac->comm_phys);
+		dma_free_coherent(&aac->pdev->dev, aac->comm_size,
+				  aac->comm_addr, aac->comm_phys);
 	kfree(aac->queues);
 	aac_adapter_ioremap(aac, 0);
 	kfree(aac->fibs);
@@ -1729,8 +1729,8 @@ static void aac_remove_one(struct pci_dev *pdev)
 	__aac_shutdown(aac);
 
 	aac_fib_map_free(aac);
-	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr,
-			    aac->comm_phys);
+	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
+			  aac->comm_phys);
 	kfree(aac->queues);
 
 	aac_adapter_ioremap(aac, 0);
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index 5d19c31e3bba..93ef7c37e568 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -355,14 +355,16 @@ static int aac_rx_check_health(struct aac_dev *dev)
 		if (likely((status & 0xFF000000L) == 0xBC000000L))
 			return (status >> 16) & 0xFF;
-		buffer = pci_alloc_consistent(dev->pdev, 512, &baddr);
+		buffer = dma_alloc_coherent(&dev->pdev->dev, 512, &baddr,
+					    GFP_KERNEL);
 		ret = -2;
 		if (unlikely(buffer == NULL))
 			return ret;
-		post = pci_alloc_consistent(dev->pdev,
-		  sizeof(struct POSTSTATUS), &paddr);
+		post = dma_alloc_coherent(&dev->pdev->dev,
+					  sizeof(struct POSTSTATUS), &paddr,
+					  GFP_KERNEL);
 		if (unlikely(post == NULL)) {
-			pci_free_consistent(dev->pdev, 512, buffer, baddr);
+			dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
 			return ret;
 		}
 		memset(buffer, 0, 512);
@@ -371,13 +373,13 @@ static int aac_rx_check_health(struct aac_dev *dev)
 		rx_writel(dev, MUnit.IMRx[0], paddr);
 		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
 		  NULL, NULL, NULL, NULL, NULL);
-		pci_free_consistent(dev->pdev, sizeof(struct POSTSTATUS),
-		  post, paddr);
+		dma_free_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS),
+				  post, paddr);
 		if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
 			ret = (hex_to_bin(buffer[2]) << 4) +
 				hex_to_bin(buffer[3]);
 		}
-		pci_free_consistent(dev->pdev, 512, buffer, baddr);
+		dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
 		return ret;
 	}
 	/*
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 81dd0927246b..24e57e770432 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -6291,18 +6291,17 @@ static uchar
 AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
 
 static uchar
 AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
 {
-	EXT_MSG sdtr_buf;
-	uchar sdtr_period_index;
-	PortAddr iop_base;
-
-	iop_base = asc_dvc->iop_base;
-	sdtr_buf.msg_type = EXTENDED_MESSAGE;
-	sdtr_buf.msg_len = MS_SDTR_LEN;
-	sdtr_buf.msg_req = EXTENDED_SDTR;
-	sdtr_buf.xfer_period = sdtr_period;
+	PortAddr iop_base = asc_dvc->iop_base;
+	uchar sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+	EXT_MSG sdtr_buf = {
+		.msg_type = EXTENDED_MESSAGE,
+		.msg_len = MS_SDTR_LEN,
+		.msg_req = EXTENDED_SDTR,
+		.xfer_period = sdtr_period,
+		.req_ack_offset = sdtr_offset,
+	};
 	sdtr_offset &= ASC_SYN_MAX_OFFSET;
-	sdtr_buf.req_ack_offset = sdtr_offset;
-	sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
+
 	if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
 		AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
 					(uchar *)&sdtr_buf,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 22d5a949ec83..673e826d7adb 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -601,8 +601,8 @@ static const u_int ahc_num_pci_devs = ARRAY_SIZE(ahc_pci_ident_table);
 #define		STA	0x08
 #define		DPR	0x01
 
-static int ahc_9005_subdevinfo_valid(uint16_t vendor, uint16_t device,
-				     uint16_t subvendor, uint16_t subdevice);
+static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
+				     uint16_t subdevice, uint16_t subvendor);
 static int ahc_ext_scbram_present(struct ahc_softc *ahc);
 static void ahc_scbram_config(struct ahc_softc *ahc, int enable,
 			      int pcheck, int fast, int large);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 662b2321d1b0..a14ba7a6b81e 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -703,7 +703,6 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
 {
 	int err;
 
-	scsi_remove_host(asd_ha->sas_ha.core.shost);
 	err = sas_unregister_ha(&asd_ha->sas_ha);
 
 	sas_remove_host(asd_ha->sas_ha.core.shost);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index ca9440fb2325..55e3f8b40eb3 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,18 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #ifndef BEISCSI_H
@@ -154,7 +151,6 @@ struct be_ctrl_info {
 #define PAGE_SHIFT_4K 12
 #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
 #define mcc_timeout		120000 /* 12s timeout */
-#define BEISCSI_LOGOUT_SYNC_DELAY	250
 
 /* Returns number of pages spanned by the data starting at the given addr */
 #define PAGES_4K_SPANNED(_address, size) \
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 5d59e2630ce6..a79a5e72c777 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,18 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #include <scsi/iscsi_proto.h>
@@ -246,6 +243,12 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
 {
 	int rc = 0;
 
+	if (!tag || tag > MAX_MCC_CMD) {
+		__beiscsi_log(phba, KERN_ERR,
+			      "BC_%d : invalid tag %u\n", tag);
+		return -EINVAL;
+	}
+
 	if (beiscsi_hba_in_error(phba)) {
 		clear_bit(MCC_TAG_STATE_RUNNING,
 			  &phba->ctrl.ptag_state[tag].tag_state);
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 1d40e83b0790..d9b6773facdb 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,18 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #ifndef BEISCSI_CMDS_H
@@ -1145,24 +1142,49 @@ struct tcp_connect_and_offload_out {
 #define DB_DEF_PDU_EVENT_SHIFT		15
 #define DB_DEF_PDU_CQPROC_SHIFT		16
 
-struct dmsg_cqe {
-	u32 dw[4];
+struct be_invalidate_connection_params_in {
+	struct be_cmd_req_hdr hdr;
+	u32 session_handle;
+	u16 cid;
+	u16 unused;
+#define BE_CLEANUP_TYPE_INVALIDATE	0x8001
+#define BE_CLEANUP_TYPE_ISSUE_TCP_RST	0x8002
+	u16 cleanup_type;
+	u16 save_cfg;
+} __packed;
+
+struct be_invalidate_connection_params_out {
+	u32 session_handle;
+	u16 cid;
+	u16 unused;
+} __packed;
+
+union be_invalidate_connection_params {
+	struct be_invalidate_connection_params_in req;
+	struct be_invalidate_connection_params_out resp;
 } __packed;
 
-struct tcp_upload_params_in {
+struct be_tcp_upload_params_in {
 	struct be_cmd_req_hdr hdr;
 	u16 id;
+#define BE_UPLOAD_TYPE_GRACEFUL		1
+/* abortive upload with reset */
+#define BE_UPLOAD_TYPE_ABORT_RESET	2
+/* abortive upload without reset */
+#define BE_UPLOAD_TYPE_ABORT		3
+/* abortive upload with reset, sequence number by driver */
+#define BE_UPLOAD_TYPE_ABORT_WITH_SEQ	4
 	u16 upload_type;
 	u32 reset_seq;
 } __packed;
 
-struct tcp_upload_params_out {
+struct be_tcp_upload_params_out {
 	u32 dw[32];
 } __packed;
 
-union tcp_upload_params {
-	struct tcp_upload_params_in request;
-	struct tcp_upload_params_out response;
+union be_tcp_upload_params {
+	struct be_tcp_upload_params_in request;
+	struct be_tcp_upload_params_out response;
 } __packed;
 
 struct be_ulp_fw_cfg {
@@ -1243,10 +1265,7 @@ struct be_cmd_get_port_name {
 #define OPCODE_COMMON_WRITE_FLASH		96
 #define OPCODE_COMMON_READ_FLASH		97
 
-/* --- CMD_ISCSI_INVALIDATE_CONNECTION_TYPE --- */
 #define CMD_ISCSI_COMMAND_INVALIDATE		1
-#define CMD_ISCSI_CONNECTION_INVALIDATE		0x8001
-#define CMD_ISCSI_CONNECTION_ISSUE_TCP_RST	0x8002
 
 #define INI_WR_CMD			1	/* Initiator write command */
 #define INI_TMF_CMD			2	/* Initiator TMF command */
@@ -1269,27 +1288,6 @@ struct be_cmd_get_port_name {
 					 * preparedby
 					 * driver should not be touched
 					 */
-/* --- CMD_CHUTE_TYPE --- */
-#define CMD_CONNECTION_CHUTE_0		1
-#define CMD_CONNECTION_CHUTE_1		2
-#define CMD_CONNECTION_CHUTE_2		3
-
-#define EQ_MAJOR_CODE_COMPLETION	0
-
-#define CMD_ISCSI_SESSION_DEL_CFG_FROM_FLASH	0
-#define CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH	1
-
-/* --- CONNECTION_UPLOAD_PARAMS --- */
-/* These parameters are used to define the type of upload desired. */
-#define CONNECTION_UPLOAD_GRACEFUL	1	/* Graceful upload */
-#define CONNECTION_UPLOAD_ABORT_RESET	2	/* Abortive upload with
-						 * reset
-						 */
-#define CONNECTION_UPLOAD_ABORT		3	/* Abortive upload without
-						 * reset
-						 */
-#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4	/* Abortive upload with reset,
-						 * sequence number by driver */
 
 /* Returns the number of items in the field array. */
 #define BE_NUMBER_OF_FIELD(_type_, _field_)	\
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index a4844578e357..97dca4681784 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #include <scsi/libiscsi.h>
@@ -1263,31 +1258,58 @@ static void beiscsi_flush_cq(struct beiscsi_hba *phba)
 }
 
 /**
- * beiscsi_close_conn - Upload the connection
+ * beiscsi_conn_close - Invalidate and upload connection
  * @ep: The iscsi endpoint
- * @flag: The type of connection closure
+ *
+ * Returns 0 on success, -1 on failure.
  */
-static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
+static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep)
 {
-	int ret = 0;
-	unsigned int tag;
 	struct beiscsi_hba *phba = beiscsi_ep->phba;
+	unsigned int tag, attempts;
+	int ret;
 
-	tag = mgmt_upload_connection(phba, beiscsi_ep->ep_cid, flag);
-	if (!tag) {
-		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
-			    "BS_%d : upload failed for cid 0x%x\n",
-			    beiscsi_ep->ep_cid);
-
-		ret = -EAGAIN;
+	/**
+	 * Without successfully invalidating and uploading connection
+	 * driver can't reuse the CID so attempt more than once.
	 */
+	attempts = 0;
+	while (attempts++ < 3) {
+		tag = beiscsi_invalidate_cxn(phba, beiscsi_ep);
+		if (tag) {
+			ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+			if (!ret)
+				break;
+			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+				    "BS_%d : invalidate conn failed cid %d\n",
+				    beiscsi_ep->ep_cid);
+		}
 	}
 
-	ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
-
-	/* Flush the CQ entries */
+	/* wait for all completions to arrive, then process them */
+	msleep(250);
+	/* flush CQ entries */
 	beiscsi_flush_cq(phba);
 
-	return ret;
+	if (attempts > 3)
+		return -1;
+
+	attempts = 0;
+	while (attempts++ < 3) {
+		tag = beiscsi_upload_cxn(phba, beiscsi_ep);
+		if (tag) {
+			ret = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
+			if (!ret)
+				break;
+			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+				    "BS_%d : upload conn failed cid %d\n",
+				    beiscsi_ep->ep_cid);
+		}
+	}
+	if (attempts > 3)
+		return -1;
+
+	return 0;
 }
 
 /**
@@ -1298,12 +1320,9 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
  */
 void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 {
-	struct beiscsi_conn *beiscsi_conn;
 	struct beiscsi_endpoint *beiscsi_ep;
+	struct beiscsi_conn *beiscsi_conn;
 	struct beiscsi_hba *phba;
-	unsigned int tag;
-	uint8_t mgmt_invalidate_flag, tcp_upload_flag;
-	unsigned short savecfg_flag = CMD_ISCSI_SESSION_SAVE_CFG_ON_FLASH;
 	uint16_t cri_index;
 
 	beiscsi_ep = ep->dd_data;
@@ -1324,39 +1343,27 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
 	if (beiscsi_ep->conn) {
 		beiscsi_conn = beiscsi_ep->conn;
 		iscsi_suspend_queue(beiscsi_conn->conn);
-		mgmt_invalidate_flag = ~BEISCSI_NO_RST_ISSUE;
-		tcp_upload_flag = CONNECTION_UPLOAD_GRACEFUL;
-	} else {
-		mgmt_invalidate_flag = BEISCSI_NO_RST_ISSUE;
-		tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
 	}
 
 	if (!beiscsi_hba_is_online(phba)) {
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
 			    "BS_%d : HBA in error 0x%lx\n", phba->state);
-		goto free_ep;
-	}
-
-	tag = mgmt_invalidate_connection(phba, beiscsi_ep,
-					  beiscsi_ep->ep_cid,
-					  mgmt_invalidate_flag,
-					  savecfg_flag);
-	if (!tag) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
-			    "BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
-			    beiscsi_ep->ep_cid);
+	} else {
+		/**
+		 * Make CID available even if close fails.
+		 * If not freed, FW might fail open using the CID.
+		 */
+		if (beiscsi_conn_close(beiscsi_ep) < 0)
+			__beiscsi_log(phba, KERN_ERR,
+				      "BS_%d : close conn failed cid %d\n",
+				      beiscsi_ep->ep_cid);
 	}
 
-	beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
-	beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
-free_ep:
-	msleep(BEISCSI_LOGOUT_SYNC_DELAY);
 	beiscsi_free_ep(beiscsi_ep);
 	if (!phba->conn_table[cri_index])
 		__beiscsi_log(phba, KERN_ERR,
-			      "BS_%d : conn_table empty at %u: cid %u\n",
-			      cri_index,
-			      beiscsi_ep->ep_cid);
+			      "BS_%d : conn_table empty at %u: cid %u\n",
+			      cri_index, beiscsi_ep->ep_cid);
 	phba->conn_table[cri_index] = NULL;
 	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
 }
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index e4d67dfea4cb..b9d459a21f25 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Avago Technologies
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #ifndef _BE_ISCSI_
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 32b2713cec93..f862332261f8 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #include <linux/reboot.h>
@@ -337,7 +332,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
 		inv_tbl->task[nents] = task;
 		nents++;
 	}
-	spin_unlock_bh(&session->back_lock);
+	spin_unlock(&session->back_lock);
 	spin_unlock_bh(&session->frwd_lock);
 
 	rc = SUCCESS;
@@ -636,7 +631,6 @@ static void beiscsi_get_params(struct beiscsi_hba *phba)
 			 (total_cid_count + BE2_TMFS + BE2_NOPOUT_REQ));
 	phba->params.cxns_per_ctrl = total_cid_count;
-	phba->params.asyncpdus_per_ctrl = total_cid_count;
 	phba->params.icds_per_ctrl = total_icd_count;
 	phba->params.num_sge_per_io = BE2_SGE;
 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -802,12 +796,12 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
 	struct pci_dev *pcidev = phba->pcidev;
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
-	int ret, msix_vec, i, j;
+	int ret, i, j;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
-	if (phba->msix_enabled) {
+	if (pcidev->msix_enabled) {
 		for (i = 0; i < phba->num_cpus; i++) {
 			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
 						    GFP_KERNEL);
@@ -818,9 +812,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
 
 			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
 				phba->shost->host_no, i);
-			msix_vec = phba->msix_entries[i].vector;
-			ret = request_irq(msix_vec, be_isr_msix, 0,
-					  phba->msi_name[i],
+			ret = request_irq(pci_irq_vector(pcidev, i),
+					  be_isr_msix, 0, phba->msi_name[i],
 					  &phwi_context->be_eq[i]);
 			if (ret) {
 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -838,9 +831,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
 		}
 		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
 			phba->shost->host_no);
-		msix_vec = phba->msix_entries[i].vector;
-		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
-				  &phwi_context->be_eq[i]);
+		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
+				  phba->msi_name[i], &phwi_context->be_eq[i]);
 		if (ret) {
 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
 				    "BM_%d : beiscsi_init_irqs-"
@@ -862,9 +854,8 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
 	return 0;
 free_msix_irqs:
 	for (j = i - 1; j >= 0; j--) {
+		free_irq(pci_irq_vector(pcidev, i), &phwi_context->be_eq[j]);
 		kfree(phba->msi_name[j]);
-		msix_vec = phba->msix_entries[j].vector;
-		free_irq(msix_vec, &phwi_context->be_eq[j]);
 	}
 	return ret;
 }
@@ -1454,30 +1445,45 @@ static inline void
 beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
 		       struct hd_async_handle *pasync_handle)
 {
-	if (pasync_handle->is_header) {
-		list_add_tail(&pasync_handle->link,
-			      &pasync_ctx->async_header.free_list);
-		pasync_ctx->async_header.free_entries++;
-	} else {
-		list_add_tail(&pasync_handle->link,
-			      &pasync_ctx->async_data.free_list);
-		pasync_ctx->async_data.free_entries++;
-	}
+	pasync_handle->is_final = 0;
+	pasync_handle->buffer_len = 0;
+	pasync_handle->in_use = 0;
+	list_del_init(&pasync_handle->link);
+}
+
+static void
+beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
+			  struct hd_async_context *pasync_ctx,
+			  u16 cri)
+{
+	struct hd_async_handle *pasync_handle, *tmp_handle;
+	struct list_head *plist;
+
+	plist = &pasync_ctx->async_entry[cri].wq.list;
+	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
+		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+
+	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
+	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
+	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
+	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
 }
 
 static struct hd_async_handle *
 beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
 		       struct hd_async_context *pasync_ctx,
-		       struct i_t_dpdu_cqe *pdpdu_cqe)
+		       struct i_t_dpdu_cqe *pdpdu_cqe,
+		       u8 *header)
 {
 	struct beiscsi_hba *phba = beiscsi_conn->phba;
 	struct hd_async_handle *pasync_handle;
 	struct be_bus_address phys_addr;
+	u16 cid, code, ci, cri;
 	u8 final, error = 0;
-	u16 cid, code, ci;
 	u32 dpl;
 
 	cid = beiscsi_conn->beiscsi_conn_cid;
+	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
 	/**
 	 * This function is invoked to get the right async_handle structure
 	 * from a given DEF PDU CQ entry.
@@ -1516,6 +1522,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
 	switch (code) {
 	case UNSOL_HDR_NOTIFY:
 		pasync_handle = pasync_ctx->async_entry[ci].header;
+		*header = 1;
 		break;
 	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
 		error = 1;
@@ -1524,15 +1531,7 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
 		break;
 	/* called only for above codes */
 	default:
-		pasync_handle = NULL;
-		break;
-	}
-
-	if (!pasync_handle) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
-			    "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
-			    cid, code, ci, phys_addr.u.a64.address);
-		return pasync_handle;
+		return NULL;
 	}
 
 	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
@@ -1549,47 +1548,32 @@ beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
 	}
 
 	/**
-	 * Each CID is associated with unique CRI.
-	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different.
-	 **/
-	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
-	pasync_handle->is_final = final;
-	pasync_handle->buffer_len = dpl;
-	/* empty the slot */
-	if (pasync_handle->is_header)
-		pasync_ctx->async_entry[ci].header = NULL;
-	else
-		pasync_ctx->async_entry[ci].data = NULL;
-
-	/**
 	 * DEF PDU header and data buffers with errors should be simply
 	 * dropped as there are no consumers for it.
 	 */
 	if (error) {
 		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
-		pasync_handle = NULL;
+		return NULL;
 	}
 
-	return pasync_handle;
-}
-
-static void
-beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
-			  struct hd_async_context *pasync_ctx,
-			  u16 cri)
-{
-	struct hd_async_handle *pasync_handle, *tmp_handle;
-	struct list_head *plist;
-
-	plist = &pasync_ctx->async_entry[cri].wq.list;
-	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
-		list_del(&pasync_handle->link);
-		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
+	if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
+			    "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
+			    cid, code, ci, phys_addr.u.a64.address);
+		beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
 	}
 
-	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
-	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
-	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
-	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
+	list_del_init(&pasync_handle->link);
+	/**
+	 * Each CID is associated with unique CRI.
+	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different.
	 **/
+	pasync_handle->cri = cri;
+	pasync_handle->is_final = final;
+	pasync_handle->buffer_len = dpl;
+	pasync_handle->in_use = 1;
+
+	return pasync_handle;
 }
 
 static unsigned int
@@ -1619,6 +1603,10 @@ beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
 			dlen = pasync_handle->buffer_len;
 			continue;
 		}
+		if (!pasync_handle->buffer_len ||
+		    (dlen + pasync_handle->buffer_len) >
+		    pasync_ctx->async_data.buffer_size)
+			break;
 		memcpy(pdata + dlen, pasync_handle->pbuffer,
 		       pasync_handle->buffer_len);
 		dlen += pasync_handle->buffer_len;
@@ -1627,8 +1615,9 @@ beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
 	if (!plast_handle->is_final) {
 		/* last handle should have final PDU notification from FW */
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
-			    "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
+			    "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
 			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
+			    AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
 			    pasync_ctx->async_entry[cri].wq.hdr_len,
 			    pasync_ctx->async_entry[cri].wq.bytes_needed,
 			    pasync_ctx->async_entry[cri].wq.bytes_received);
@@ -1709,85 +1698,53 @@ drop_pdu:
 
 static void
 beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
-			 u8 header, u8 ulp_num)
+			 u8 header, u8 ulp_num, u16 nbuf)
 {
-	struct hd_async_handle *pasync_handle, *tmp, **slot;
+	struct hd_async_handle *pasync_handle;
 	struct hd_async_context *pasync_ctx;
 	struct hwi_controller *phwi_ctrlr;
-	struct list_head *hfree_list;
 	struct phys_addr *pasync_sge;
 	u32 ring_id, doorbell = 0;
 	u32 doorbell_offset;
-	u16 prod = 0, cons;
-	u16 index;
+	u16 prod, pi;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
 	if (header) {
-		cons = pasync_ctx->async_header.free_entries;
-		hfree_list = &pasync_ctx->async_header.free_list;
+		pasync_sge = pasync_ctx->async_header.ring_base;
+		pi = pasync_ctx->async_header.pi;
 		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
 		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
 				  doorbell_offset;
 	} else {
-		cons = pasync_ctx->async_data.free_entries;
-		hfree_list = &pasync_ctx->async_data.free_list;
+		pasync_sge = pasync_ctx->async_data.ring_base;
+		pi = pasync_ctx->async_data.pi;
 		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
 		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
 				  doorbell_offset;
 	}
 
-	/* number of entries posted must be in multiples of 8 */
-	if (cons % 8)
-		return;
-
-	list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
-		list_del_init(&pasync_handle->link);
-		pasync_handle->is_final = 0;
-		pasync_handle->buffer_len = 0;
-
-		/* handles can be consumed out of order, use index in handle */
-		index = pasync_handle->index;
-		WARN_ON(pasync_handle->is_header != header);
+	for (prod = 0; prod < nbuf; prod++) {
 		if (header)
-			slot = &pasync_ctx->async_entry[index].header;
+			pasync_handle = pasync_ctx->async_entry[pi].header;
 		else
-			slot = &pasync_ctx->async_entry[index].data;
-		/**
-		 * The slot just tracks handle's hold and release, so
-		 * overwriting at the same index won't do any harm but
-		 * needs to be caught.
-		 */
-		if (*slot != NULL) {
-			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
-				    "BM_%d : async PDU %s slot at %u not empty\n",
-				    header ? "header" : "data", index);
+			pasync_handle = pasync_ctx->async_entry[pi].data;
		WARN_ON(pasync_handle->is_header != header);
+		WARN_ON(pasync_handle->index != pi);
+		/* setup the ring only once */
+		if (nbuf == pasync_ctx->num_entries) {
+			/* note hi is lo */
+			pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
+			pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
 		}
-		/**
-		 * We use same freed index as in completion to post so this
-		 * operation is not required for refills. Its required only
-		 * for ring creation.
-		 */
-		if (header)
-			pasync_sge = pasync_ctx->async_header.ring_base;
-		else
-			pasync_sge = pasync_ctx->async_data.ring_base;
-		pasync_sge += index;
-		/* if its a refill then address is same; hi is lo */
-		WARN_ON(pasync_sge->hi &&
-			pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
-		WARN_ON(pasync_sge->lo &&
-			pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
-
-		pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
-		pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
-
-		*slot = pasync_handle;
-		if (++prod == cons)
-			break;
+		if (++pi == pasync_ctx->num_entries)
+			pi = 0;
 	}
+
 	if (header)
-		pasync_ctx->async_header.free_entries -= prod;
+		pasync_ctx->async_header.pi = pi;
 	else
-		pasync_ctx->async_data.free_entries -= prod;
+		pasync_ctx->async_data.pi = pi;
 
 	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
 	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
@@ -1804,20 +1761,26 @@ beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
 	struct hd_async_handle *pasync_handle = NULL;
 	struct hd_async_context *pasync_ctx;
 	struct hwi_controller *phwi_ctrlr;
+	u8 ulp_num, consumed, header = 0;
 	u16 cid_cri;
-	u8 ulp_num;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
 	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
 	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
-					       pdpdu_cqe);
-	if (!pasync_handle)
-		return;
-
-	beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
-	beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
+					       pdpdu_cqe, &header);
+	if (is_chip_be2_be3r(phba))
+		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+					 num_cons, pdpdu_cqe);
+	else
+		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+					 num_cons, pdpdu_cqe);
+	if (pasync_handle)
+		beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
+	/* num_cons indicates number of 8 RQEs consumed */
+	if (consumed)
+		beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
 }
 
 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
@@ -2407,22 +2370,22 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
 
 			num_async_pdu_buf_sgl_pages =
-				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
 					       phba, ulp_num) *
 					       sizeof(struct phys_addr));
 
 			num_async_pdu_buf_pages =
-				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
 					       phba->params.defpdu_hdr_sz);
 
 			num_async_pdu_data_pages =
-				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
 					       phba, ulp_num) *
 					       phba->params.defpdu_data_sz);
 
 			num_async_pdu_data_sgl_pages =
-				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
+				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
 					       phba, ulp_num) *
 					       sizeof(struct phys_addr));
 
@@ -2459,21 +2422,21 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
 					  (ulp_num * MEM_DESCR_OFFSET));
 			phba->mem_req[mem_descr_index] =
-				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-				sizeof(struct hd_async_handle);
+				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
+				sizeof(struct hd_async_handle);
 
 			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
 					  (ulp_num * MEM_DESCR_OFFSET));
 			phba->mem_req[mem_descr_index] =
-				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-				sizeof(struct hd_async_handle);
+				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
+				sizeof(struct hd_async_handle);
 
 			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
 					  (ulp_num * MEM_DESCR_OFFSET));
 			phba->mem_req[mem_descr_index] =
-				sizeof(struct hd_async_context) +
-				(BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-				 sizeof(struct hd_async_entry));
+				sizeof(struct hd_async_context) +
+				(BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
+				 sizeof(struct hd_async_entry));
 		}
 	}
 }
@@ -2757,7 +2720,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 				((long unsigned int)pasync_ctx +
 				sizeof(struct hd_async_context));
 
-			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
+			pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba,
 						  ulp_num);
 			/* setup header buffers */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
@@ -2776,6 +2739,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					    "BM_%d : No Virtual address for ULP : %d\n",
 					    ulp_num);
 
+			pasync_ctx->async_header.pi = 0;
 			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
 			pasync_ctx->async_header.va_base =
 				mem_descr->mem_array[0].virtual_address;
@@ -2823,7 +2787,6 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
 			pasync_ctx->async_header.handle_base =
 				mem_descr->mem_array[0].virtual_address;
-			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
 
 			/* setup data buffer sgls */
 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
@@ -2857,7 +2820,6 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 
 			pasync_ctx->async_data.handle_base =
 				mem_descr->mem_array[0].virtual_address;
-			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
 
 			pasync_header_h =
 				(struct hd_async_handle *)
@@ -2884,6 +2846,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					    ulp_num);
 
 			idx = 0;
+			pasync_ctx->async_data.pi = 0;
 			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
 			pasync_ctx->async_data.va_base =
 				mem_descr->mem_array[idx].virtual_address;
@@ -2895,7 +2858,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 				 phba->params.defpdu_data_sz);
 
 			num_per_mem = 0;
-			for (index = 0; index < BEISCSI_GET_CID_COUNT
+			for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE
 					(phba, ulp_num); index++) {
 				pasync_header_h->cri = -1;
 				pasync_header_h->is_header = 1;
@@ -2911,14 +2874,11 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					pasync_ctx->async_header.pa_base.u.a64.
 					address + (p->defpdu_hdr_sz * index);
 
-				list_add_tail(&pasync_header_h->link,
-					      &pasync_ctx->async_header.
-					      free_list);
+				pasync_ctx->async_entry[index].header =
+							pasync_header_h;
 				pasync_header_h++;
-				pasync_ctx->async_header.free_entries++;
 				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
 					       wq.list);
-				pasync_ctx->async_entry[index].header = NULL;
 
 				pasync_data_h->cri = -1;
 				pasync_data_h->is_header = 0;
@@ -2952,12 +2912,9 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
 					num_per_mem++;
 				num_async_data--;
 
-				list_add_tail(&pasync_data_h->link,
-					      &pasync_ctx->async_data.
-					      free_list);
+				pasync_ctx->async_entry[index].data =
+							pasync_data_h;
 				pasync_data_h++;
-				pasync_ctx->async_data.free_entries++;
-				pasync_ctx->async_entry[index].data = NULL;
 			}
 		}
 	}
@@ -3040,7 +2997,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
 				      sizeof(struct be_eq_entry));
 
-	if (phba->msix_enabled)
+	if (phba->pcidev->msix_enabled)
 		eq_for_mcc = 1;
 	else
 		eq_for_mcc = 0;
@@ -3550,7 +3507,7 @@ static int be_mcc_queues_create(struct beiscsi_hba *phba,
 			sizeof(struct be_mcc_compl)))
 		goto err;
 	/* Ask BE to create MCC compl queue; */
-	if (phba->msix_enabled) {
+	if (phba->pcidev->msix_enabled) {
 		if (beiscsi_cmd_cq_create(ctrl, cq,
 					&phwi_context->be_eq[phba->num_cpus].q,
 					false, true, 0))
 			goto mcc_cq_free;
@@ -3581,42 +3538,35 @@ err:
 	return -ENOMEM;
 }
 
-/**
- * find_num_cpus()- Get the CPU online count
- * @phba: ptr to priv structure
- *
- * CPU count is used for creating EQ.
- **/
-static void find_num_cpus(struct beiscsi_hba *phba)
+static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
 {
-	int num_cpus = 0;
-
-	num_cpus = num_online_cpus();
+	int nvec = 1;
 
 	switch (phba->generation) {
 	case BE_GEN2:
 	case BE_GEN3:
-		phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
-				  BEISCSI_MAX_NUM_CPUS : num_cpus;
+		nvec = BEISCSI_MAX_NUM_CPUS + 1;
 		break;
 	case BE_GEN4:
-		/*
-		 * If eqid_count == 1 fall back to
-		 * INTX mechanism
-		 **/
-		if (phba->fw_config.eqid_count == 1) {
-			enable_msix = 0;
-			phba->num_cpus = 1;
-			return;
-		}
-
-		phba->num_cpus =
-			(num_cpus > (phba->fw_config.eqid_count - 1)) ?
-			(phba->fw_config.eqid_count - 1) : num_cpus;
+		nvec = phba->fw_config.eqid_count;
 		break;
 	default:
-		phba->num_cpus = 1;
+		nvec = 2;
+		break;
 	}
+
+	/* if eqid_count == 1 fall back to INTX */
+	if (enable_msix && nvec > 1) {
+		const struct irq_affinity desc = { .post_vectors = 1 };
+
+		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
+			    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) {
+			phba->num_cpus = nvec - 1;
+			return;
+		}
+	}
+
+	phba->num_cpus = 1;
 }
 
 static void hwi_purge_eq(struct beiscsi_hba *phba)
@@ -3633,7 +3583,7 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 
-	if (phba->msix_enabled)
+	if (phba->pcidev->msix_enabled)
 		eq_msix = 1;
 	else
 		eq_msix = 0;
@@ -3711,7 +3661,7 @@ static void hwi_cleanup_port(struct beiscsi_hba *phba)
 	}
 	be_mcc_queues_destroy(phba);
-	if (phba->msix_enabled)
+	if (phba->pcidev->msix_enabled)
 		eq_for_mcc = 1;
 	else
 		eq_for_mcc = 0;
@@ -3735,6 +3685,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 	unsigned int def_pdu_ring_sz;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	int status, ulp_num;
+	u16 nbufs;
 
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3771,9 +3722,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
-			def_pdu_ring_sz =
-				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
-				sizeof(struct phys_addr);
+			nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries;
+			def_pdu_ring_sz = nbufs * sizeof(struct phys_addr);
 
 			status = beiscsi_create_def_hdr(phba, phwi_context,
 							phwi_ctrlr,
@@ -3801,9 +3751,9 @@
 			 * let EP know about it.
 			 */
 			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
-						 ulp_num);
+						 ulp_num, nbufs);
 			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
-						 ulp_num);
+						 ulp_num, nbufs);
 		}
 	}
 
@@ -4157,7 +4107,7 @@ static void hwi_enable_intr(struct beiscsi_hba *phba)
 		iowrite32(reg, addr);
 	}
 
-	if (!phba->msix_enabled) {
+	if (!phba->pcidev->msix_enabled) {
 		eq = &phwi_context->be_eq[0].q;
 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 			    "BM_%d : eq->id=%d\n", eq->id);
@@ -5280,19 +5230,6 @@ static void beiscsi_eqd_update_work(struct work_struct *work)
 			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
 }
 
-static void beiscsi_msix_enable(struct beiscsi_hba *phba)
-{
-	int i, status;
-
-	for (i = 0; i <= phba->num_cpus; i++)
-		phba->msix_entries[i].entry = i;
-
-	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
-				       phba->num_cpus + 1, phba->num_cpus + 1);
-	if (status > 0)
-		phba->msix_enabled = true;
-}
-
 static void beiscsi_hw_tpe_check(unsigned long ptr)
 {
 	struct beiscsi_hba *phba;
@@ -5360,15 +5297,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
 	if (ret)
 		return ret;
 
-	if (enable_msix)
-		find_num_cpus(phba);
-	else
-		phba->num_cpus = 1;
-	if (enable_msix) {
-		beiscsi_msix_enable(phba);
-		if (!phba->msix_enabled)
-			phba->num_cpus = 1;
-	}
+	be2iscsi_enable_msix(phba);
 
 	beiscsi_get_params(phba);
 	/* Re-enable UER. If different TPE occurs then it is recoverable. */
@@ -5397,7 +5326,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
 		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
 	}
 
-	i = (phba->msix_enabled) ? i : 0;
+	i = (phba->pcidev->msix_enabled) ? i : 0;
 	/* Work item for MCC handling */
 	pbe_eq = &phwi_context->be_eq[i];
 	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
@@ -5435,9 +5364,7 @@ cleanup_port:
 	hwi_cleanup_port(phba);
 
 disable_msix:
-	if (phba->msix_enabled)
-		pci_disable_msix(phba->pcidev);
-
+	pci_free_irq_vectors(phba->pcidev);
 	return ret;
 }
 
@@ -5454,7 +5381,7 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
 	struct hwi_context_memory *phwi_context;
 	struct hwi_controller *phwi_ctrlr;
 	struct be_eq_obj *pbe_eq;
-	unsigned int i, msix_vec;
+	unsigned int i;
 
 	if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
 		return;
@@ -5462,16 +5389,16 @@ static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
 	phwi_ctrlr = phba->phwi_ctrlr;
 	phwi_context = phwi_ctrlr->phwi_ctxt;
 	hwi_disable_intr(phba);
-	if (phba->msix_enabled) {
+	if (phba->pcidev->msix_enabled) {
 		for (i = 0; i <= phba->num_cpus; i++) {
-			msix_vec = phba->msix_entries[i].vector;
-			free_irq(msix_vec, &phwi_context->be_eq[i]);
+			free_irq(pci_irq_vector(phba->pcidev, i),
+				 &phwi_context->be_eq[i]);
 			kfree(phba->msi_name[i]);
 		}
 	} else if (phba->pcidev->irq)
 		free_irq(phba->pcidev->irq, phba);
-	pci_disable_msix(phba->pcidev);
+	pci_free_irq_vectors(phba->pcidev);
 
 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
@@ -5681,21 +5608,12 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 	beiscsi_get_params(phba);
 	beiscsi_set_uer_feature(phba);
 
-	if (enable_msix)
-		find_num_cpus(phba);
-	else
-		phba->num_cpus = 1;
+	be2iscsi_enable_msix(phba);
 
 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
 		    "BM_%d : num_cpus = %d\n", phba->num_cpus);
 
-	if (enable_msix) {
-		beiscsi_msix_enable(phba);
-		if (!phba->msix_enabled)
-			phba->num_cpus = 1;
-	}
-
 	phba->shost->max_id = phba->params.cxns_per_ctrl;
 	phba->shost->can_queue = phba->params.ios_per_ctrl;
 	ret = beiscsi_get_memory(phba);
@@ -5745,7 +5663,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
 	}
 
-	i = (phba->msix_enabled) ? i : 0;
+	i = (phba->pcidev->msix_enabled) ? i : 0;
 	/* Work item for MCC handling */
 	pbe_eq = &phwi_context->be_eq[i];
 	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
@@ -5816,8 +5734,7 @@ free_port:
 			  phba->ctrl.mbox_mem_alloced.dma);
 	beiscsi_unmap_pci_function(phba);
 hba_free:
-	if (phba->msix_enabled)
-		pci_disable_msix(phba->pcidev);
+	pci_disable_msix(phba->pcidev);
 	pci_dev_put(phba->pcidev);
 	iscsi_host_free(phba->shost);
 	pci_set_drvdata(pcidev, NULL);
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 218857926566..338dbe0800c1 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #ifndef _BEISCSI_MAIN_
@@ -36,7 +31,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"11.2.1.0"
+#define BUILD_STR		"11.4.0.0"
 #define BE_NAME			"Emulex OneConnect" \
 				"Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -235,7 +230,6 @@ struct sgl_handle {
 struct hba_parameters {
 	unsigned int ios_per_ctrl;
 	unsigned int cxns_per_ctrl;
-	unsigned int asyncpdus_per_ctrl;
 	unsigned int icds_per_ctrl;
 	unsigned int num_sge_per_io;
 	unsigned int defpdu_hdr_sz;
@@ -323,9 +317,7 @@ struct beiscsi_hba {
 	struct pci_dev *pcidev;
 	unsigned int num_cpus;
 	unsigned int nxt_cqid;
-	struct msix_entry msix_entries[MAX_CPUS];
 	char *msi_name[MAX_CPUS];
-	bool msix_enabled;
 	struct be_mem_descriptor *init_mem;
 	unsigned short io_sgl_alloc_index;
@@ -597,8 +589,12 @@ struct hd_async_handle {
 	u16 cri;
 	u8 is_header;
 	u8 is_final;
+	u8 in_use;
 };
 
+#define BEISCSI_ASYNC_HDQ_SIZE(phba, ulp) \
+	(BEISCSI_GET_CID_COUNT((phba), (ulp)) * 2)
+
 /**
  * This has list of async PDUs that are waiting to be processed.
  * Buffers live in this list for a brief duration before they get
@@ -624,14 +620,8 @@ struct hd_async_buf_context {
 	void *va_base;
 	void *ring_base;
 	struct hd_async_handle *handle_base;
-	u16 free_entries;
 	u32 buffer_size;
-	/**
-	 * Once iSCSI layer finishes processing an async PDU, the
-	 * handles used for the PDU are added to this list.
-	 * They are posted back to FW in groups of 8.
-	 */
-	struct list_head free_list;
+	u16 pi;
 };
 
 /**
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 2f6d5c2ac329..c73775368d09 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,20 +1,15 @@
-/**
- * Copyright (C) 2005 - 2016 Broadcom
- * All rights reserved.
+/*
+ * Copyright 2017 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation.  The full GNU General
+ * as published by the Free Software Foundation. The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
- *
  * Contact Information:
  * linux-drivers@broadcom.com
  *
- * Emulex
- * 3333 Susan Street
- * Costa Mesa, CA 92626
  */
 
 #include <linux/bsg-lib.h>
@@ -126,67 +121,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
 	return tag;
 }
 
-unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
-					struct beiscsi_endpoint *beiscsi_ep,
-					unsigned short cid,
-					unsigned short issue_reset,
-					unsigned short savecfg_flag)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct iscsi_invalidate_connection_params_in *req;
-	unsigned int tag = 0;
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		return 0;
-	}
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
-			   OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
-			   sizeof(*req));
-	req->session_handle = beiscsi_ep->fw_handle;
-	req->cid = cid;
-	if (issue_reset)
-		req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST;
-	else
-		req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE;
-	req->save_cfg = savecfg_flag;
-	be_mcc_notify(phba, tag);
-	mutex_unlock(&ctrl->mbox_lock);
-	return tag;
-}
-
-unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,
-		unsigned short cid, unsigned int upload_flag)
-{
-	struct be_ctrl_info *ctrl = &phba->ctrl;
-	struct be_mcc_wrb *wrb;
-	struct tcp_upload_params_in *req;
-	unsigned int tag;
-
-	mutex_lock(&ctrl->mbox_lock);
-	wrb = alloc_mcc_wrb(phba, &tag);
-	if (!wrb) {
-		mutex_unlock(&ctrl->mbox_lock);
-		return 0;
-	}
-
-	req = embedded_payload(wrb);
-	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
-	be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD,
-			   OPCODE_COMMON_TCP_UPLOAD, sizeof(*req));
-	req->id = (unsigned short)cid;
-	req->upload_type = (unsigned char)upload_flag;
-	be_mcc_notify(phba, tag);
-	mutex_unlock(&ctrl->mbox_lock);
-	return tag;
-}
-
 /**
  * mgmt_open_connection()- Establish a TCP CXN
  * @dst_addr: Destination Address
@@ -1449,6 +1383,72 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
 			exp_statsn) / 32] + 1));
 }
 
+unsigned int beiscsi_invalidate_cxn(struct beiscsi_hba *phba,
+				    struct beiscsi_endpoint *beiscsi_ep)
+{
+	struct be_invalidate_connection_params_in *req;
+	struct be_ctrl_info *ctrl = &phba->ctrl;
+	struct be_mcc_wrb *wrb;
+	unsigned int tag = 0;
+
+	mutex_lock(&ctrl->mbox_lock);
+	wrb = alloc_mcc_wrb(phba, &tag);
+	if (!wrb) {
+		mutex_unlock(&ctrl->mbox_lock);
+		return 0;
+	}
+
+	req = embedded_payload(wrb);
+	be_wrb_hdr_prepare(wrb, sizeof(union be_invalidate_connection_params),
+			   true, 0);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
+			   OPCODE_ISCSI_INI_DRIVER_INVALIDATE_CONNECTION,
+			   sizeof(*req));
+	req->session_handle = beiscsi_ep->fw_handle;
+	req->cid = beiscsi_ep->ep_cid;
+	if (beiscsi_ep->conn)
+		req->cleanup_type = BE_CLEANUP_TYPE_INVALIDATE;
+	else
+		req->cleanup_type = BE_CLEANUP_TYPE_ISSUE_TCP_RST;
+	/**
+	 * 0 - non-persistent targets
+	 * 1 - save session info on flash
+	 */
+	req->save_cfg = 0;
+	be_mcc_notify(phba, tag);
+
mutex_unlock(&ctrl->mbox_lock); + return tag; +} + +unsigned int beiscsi_upload_cxn(struct beiscsi_hba *phba, + struct beiscsi_endpoint *beiscsi_ep) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb; + struct be_tcp_upload_params_in *req; + unsigned int tag; + + mutex_lock(&ctrl->mbox_lock); + wrb = alloc_mcc_wrb(phba, &tag); + if (!wrb) { + mutex_unlock(&ctrl->mbox_lock); + return 0; + } + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(union be_tcp_upload_params), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, + OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); + req->id = beiscsi_ep->ep_cid; + if (beiscsi_ep->conn) + req->upload_type = BE_UPLOAD_TYPE_GRACEFUL; + else + req->upload_type = BE_UPLOAD_TYPE_ABORT; + be_mcc_notify(phba, tag); + mutex_unlock(&ctrl->mbox_lock); + return tag; +} + int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba, struct invldt_cmd_tbl *inv_tbl, unsigned int nents) diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 308f1472f98a..06ddc5ad6874 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,20 +1,15 @@ -/** - * Copyright (C) 2005 - 2016 Broadcom - * All rights reserved. +/* + * Copyright 2017 Broadcom. All Rights Reserved. + * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. The full GNU General + * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * - * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com) - * * Contact Information: * linux-drivers@broadcom.com * - * Emulex - * 3333 Susan Street - * Costa Mesa, CA 92626 */ #ifndef _BEISCSI_MGMT_ @@ -41,35 +36,11 @@ int mgmt_open_connection(struct beiscsi_hba *phba, struct beiscsi_endpoint *beiscsi_ep, struct be_dma_mem *nonemb_cmd); -unsigned int mgmt_upload_connection(struct beiscsi_hba *phba, - unsigned short cid, - unsigned int upload_flag); unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba, struct bsg_job *job, struct be_dma_mem *nonemb_cmd); -#define BEISCSI_NO_RST_ISSUE 0 -struct iscsi_invalidate_connection_params_in { - struct be_cmd_req_hdr hdr; - unsigned int session_handle; - unsigned short cid; - unsigned short unused; - unsigned short cleanup_type; - unsigned short save_cfg; -} __packed; - -struct iscsi_invalidate_connection_params_out { - unsigned int session_handle; - unsigned short cid; - unsigned short unused; -} __packed; - -union iscsi_invalidate_connection_params { - struct iscsi_invalidate_connection_params_in request; - struct iscsi_invalidate_connection_params_out response; -} __packed; - #define BE_INVLDT_CMD_TBL_SZ 128 struct invldt_cmd_tbl { unsigned short icd; @@ -265,6 +236,12 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params, struct wrb_handle *pwrb_handle, struct hwi_wrb_context *pwrb_context); +unsigned int beiscsi_invalidate_cxn(struct beiscsi_hba *phba, + struct beiscsi_endpoint *beiscsi_ep); + +unsigned int beiscsi_upload_cxn(struct beiscsi_hba *phba, + struct beiscsi_endpoint *beiscsi_ep); + int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, int num); diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 7209afad82f7..3e1caec82554 
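The deleted mgmt_invalidate_connection()/mgmt_upload_connection() helpers took raw cid, issue_reset and savecfg_flag arguments from the caller; their replacements beiscsi_invalidate_cxn()/beiscsi_upload_cxn() take only the endpoint and derive the firmware action from whether a connection is still bound to it. A self-contained distillation of that decision, with toy names standing in for the driver types:

enum example_cleanup {
	EXAMPLE_INVALIDATE,	/* graceful: a connection is still bound */
	EXAMPLE_ISSUE_TCP_RST	/* forceful: endpoint never got a conn */
};

struct example_ep {
	void *conn;		/* non-NULL once an iSCSI conn is bound */
};

static enum example_cleanup example_cleanup_type(const struct example_ep *ep)
{
	return ep->conn ? EXAMPLE_INVALIDATE : EXAMPLE_ISSUE_TCP_RST;
}

Moving the decision into the helper removes two parameters from every call site and keeps the invalidate/upload policies in one place.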
100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -23,22 +23,6 @@ BFA_TRC_FILE(HAL, CORE); /* - * BFA module list terminated by NULL - */ -static struct bfa_module_s *hal_mods[] = { - &hal_mod_fcdiag, - &hal_mod_sgpg, - &hal_mod_fcport, - &hal_mod_fcxp, - &hal_mod_lps, - &hal_mod_uf, - &hal_mod_rport, - &hal_mod_fcp, - &hal_mod_dconf, - NULL -}; - -/* * Message handlers for various modules. */ static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { @@ -1191,8 +1175,13 @@ bfa_iocfc_start_submod(struct bfa_s *bfa) for (i = 0; i < BFI_IOC_MAX_CQS; i++) bfa_isr_rspq_ack(bfa, i, bfa_rspq_ci(bfa, i)); - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->start(bfa); + bfa_fcport_start(bfa); + bfa_uf_start(bfa); + /* + * bfa_init() with flash read is complete. now invalidate the stale + * content of lun mask like unit attention, rp tag and lp tag. + */ + bfa_ioim_lm_init(BFA_FCP_MOD(bfa)->bfa); bfa->iocfc.submod_enabled = BFA_TRUE; } @@ -1203,13 +1192,16 @@ bfa_iocfc_start_submod(struct bfa_s *bfa) static void bfa_iocfc_disable_submod(struct bfa_s *bfa) { - int i; - if (bfa->iocfc.submod_enabled == BFA_FALSE) return; - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->iocdisable(bfa); + bfa_fcdiag_iocdisable(bfa); + bfa_fcport_iocdisable(bfa); + bfa_fcxp_iocdisable(bfa); + bfa_lps_iocdisable(bfa); + bfa_rport_iocdisable(bfa); + bfa_fcp_iocdisable(bfa); + bfa_dconf_iocdisable(bfa); bfa->iocfc.submod_enabled = BFA_FALSE; } @@ -1773,7 +1765,6 @@ void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa) { - int i; struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa); struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa); struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa); @@ -1792,9 +1783,14 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, INIT_LIST_HEAD(&meminfo->kva_info.qe); bfa_iocfc_meminfo(cfg, meminfo, bfa); - - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->meminfo(cfg, meminfo, bfa); + bfa_sgpg_meminfo(cfg, meminfo, bfa); + bfa_fcport_meminfo(cfg, meminfo, bfa); + bfa_fcxp_meminfo(cfg, meminfo, bfa); + bfa_lps_meminfo(cfg, meminfo, bfa); + bfa_uf_meminfo(cfg, meminfo, bfa); + bfa_rport_meminfo(cfg, meminfo, bfa); + bfa_fcp_meminfo(cfg, meminfo, bfa); + bfa_dconf_meminfo(cfg, meminfo, bfa); /* dma info setup */ bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo()); @@ -1840,7 +1836,6 @@ void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev) { - int i; struct bfa_mem_dma_s *dma_info, *dma_elem; struct bfa_mem_kva_s *kva_info, *kva_elem; struct list_head *dm_qe, *km_qe; @@ -1869,10 +1864,15 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, } bfa_iocfc_attach(bfa, bfad, cfg, pcidev); - - for (i = 0; hal_mods[i]; i++) - hal_mods[i]->attach(bfa, bfad, cfg, pcidev); - + bfa_fcdiag_attach(bfa, bfad, cfg, pcidev); + bfa_sgpg_attach(bfa, bfad, cfg, pcidev); + bfa_fcport_attach(bfa, bfad, cfg, pcidev); + bfa_fcxp_attach(bfa, bfad, cfg, pcidev); + bfa_lps_attach(bfa, bfad, cfg, pcidev); + bfa_uf_attach(bfa, bfad, cfg, pcidev); + bfa_rport_attach(bfa, bfad, cfg, pcidev); + bfa_fcp_attach(bfa, bfad, cfg, pcidev); + bfa_dconf_attach(bfa, bfad, cfg); bfa_com_port_attach(bfa); bfa_com_ablk_attach(bfa); bfa_com_cee_attach(bfa); @@ -1899,10 +1899,6 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, void bfa_detach(struct bfa_s *bfa) { - int i; - - for (i = 0; hal_mods[i]; i++) - 
hal_mods[i]->detach(bfa); bfa_ioc_detach(&bfa->ioc); } diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 20982e7cdd81..5f53b3276234 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -25,7 +25,6 @@ BFA_TRC_FILE(HAL, FCPIM); * BFA ITNIM Related definitions */ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); -static void bfa_ioim_lm_init(struct bfa_s *bfa); #define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \ (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))) @@ -339,7 +338,7 @@ bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad, bfa_ioim_attach(fcpim); } -static void +void bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp) { struct bfa_fcpim_s *fcpim = &fcp->fcpim; @@ -2105,7 +2104,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) * is complete by driver. now invalidate the stale content of lun mask * like unit attention, rp tag and lp tag. */ -static void +void bfa_ioim_lm_init(struct bfa_s *bfa) { struct bfa_lun_mask_s *lunm_list; @@ -3634,11 +3633,7 @@ bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw) } } -/* BFA FCP module - parent module for fcpim */ - -BFA_MODULE(fcp); - -static void +void bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { @@ -3696,7 +3691,7 @@ bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, bfa_mem_kva_setup(minfo, fcp_kva, km_len); } -static void +void bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { @@ -3739,29 +3734,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, (fcp->num_itns * sizeof(struct bfa_itn_s))); } -static void -bfa_fcp_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_fcp_start(struct bfa_s *bfa) -{ - struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); - - /* - * bfa_init() with flash read is complete. now invalidate the stale - * content of lun mask like unit attention, rp tag and lp tag. 
- */ - bfa_ioim_lm_init(fcp->bfa); -} - -static void -bfa_fcp_stop(struct bfa_s *bfa) -{ -} - -static void +void bfa_fcp_iocdisable(struct bfa_s *bfa) { struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 4ddda72f60e6..638c0a2857f7 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -89,16 +89,27 @@ static struct { void (*online) (struct bfa_fcs_lport_s *port); void (*offline) (struct bfa_fcs_lport_s *port); } __port_action[] = { - { - bfa_fcs_lport_unknown_init, bfa_fcs_lport_unknown_online, - bfa_fcs_lport_unknown_offline}, { - bfa_fcs_lport_fab_init, bfa_fcs_lport_fab_online, - bfa_fcs_lport_fab_offline}, { - bfa_fcs_lport_n2n_init, bfa_fcs_lport_n2n_online, - bfa_fcs_lport_n2n_offline}, { - bfa_fcs_lport_loop_init, bfa_fcs_lport_loop_online, - bfa_fcs_lport_loop_offline}, - }; + [BFA_FCS_FABRIC_UNKNOWN] = { + .init = bfa_fcs_lport_unknown_init, + .online = bfa_fcs_lport_unknown_online, + .offline = bfa_fcs_lport_unknown_offline + }, + [BFA_FCS_FABRIC_SWITCHED] = { + .init = bfa_fcs_lport_fab_init, + .online = bfa_fcs_lport_fab_online, + .offline = bfa_fcs_lport_fab_offline + }, + [BFA_FCS_FABRIC_N2N] = { + .init = bfa_fcs_lport_n2n_init, + .online = bfa_fcs_lport_n2n_online, + .offline = bfa_fcs_lport_n2n_offline + }, + [BFA_FCS_FABRIC_LOOP] = { + .init = bfa_fcs_lport_loop_init, + .online = bfa_fcs_lport_loop_online, + .offline = bfa_fcs_lport_loop_offline + }, +}; /* * fcs_port_sm FCS logical port state machine diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index a1ada4a31c97..256f4afaccf9 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -5822,12 +5822,6 @@ bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg) } /* - * DCONF module specific - */ - -BFA_MODULE(dconf); - -/* * DCONF state machine events */ enum bfa_dconf_event { @@ -6073,7 +6067,7 @@ bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf, /* * Compute and return memory needed by DRV_CFG module. 
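The __port_action rewrite above swaps positional brace initializers for C99 designated initializers indexed by the fabric enum, which keeps each entry glued to its BFA_FCS_FABRIC_* slot even if the enum is reordered or grows. A minimal sketch of the idiom with illustrative names:

enum example_fabric {
	EXAMPLE_FABRIC_UNKNOWN,
	EXAMPLE_FABRIC_SWITCHED,
	EXAMPLE_FABRIC_N2N,
	EXAMPLE_FABRIC_LOOP
};

struct example_port_ops {
	void (*init)(void);
	void (*online)(void);
	void (*offline)(void);
};

static void example_noop(void) { }

static const struct example_port_ops example_port_action[] = {
	[EXAMPLE_FABRIC_UNKNOWN]  = { .init = example_noop,
				      .online = example_noop,
				      .offline = example_noop },
	/* entries may appear in any order; unnamed slots are zeroed */
	[EXAMPLE_FABRIC_LOOP]     = { .init = example_noop,
				      .online = example_noop,
				      .offline = example_noop },
	[EXAMPLE_FABRIC_SWITCHED] = { .init = example_noop,
				      .online = example_noop,
				      .offline = example_noop },
	[EXAMPLE_FABRIC_N2N]      = { .init = example_noop,
				      .online = example_noop,
				      .offline = example_noop },
};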
*/ -static void +void bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, struct bfa_s *bfa) { @@ -6087,9 +6081,8 @@ bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, sizeof(struct bfa_dconf_s)); } -static void -bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, - struct bfa_pcidev_s *pcidev) +void +bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); @@ -6134,33 +6127,20 @@ bfa_dconf_modinit(struct bfa_s *bfa) struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT); } -static void -bfa_dconf_start(struct bfa_s *bfa) -{ -} - -static void -bfa_dconf_stop(struct bfa_s *bfa) -{ -} static void bfa_dconf_timer(void *cbarg) { struct bfa_dconf_mod_s *dconf = cbarg; bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT); } -static void + +void bfa_dconf_iocdisable(struct bfa_s *bfa) { struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa); bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE); } -static void -bfa_dconf_detach(struct bfa_s *bfa) -{ -} - static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf) { diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 53135f21fa0e..1c2ab395e616 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -61,54 +61,8 @@ enum { BFA_TRC_HAL_IOCFC_CB = 5, }; -/* - * Macro to define a new BFA module - */ -#define BFA_MODULE(__mod) \ - static void bfa_ ## __mod ## _meminfo( \ - struct bfa_iocfc_cfg_s *cfg, \ - struct bfa_meminfo_s *meminfo, \ - struct bfa_s *bfa); \ - static void bfa_ ## __mod ## _attach(struct bfa_s *bfa, \ - void *bfad, struct bfa_iocfc_cfg_s *cfg, \ - struct bfa_pcidev_s *pcidev); \ - static void bfa_ ## __mod ## _detach(struct bfa_s *bfa); \ - static void bfa_ ## __mod ## _start(struct bfa_s *bfa); \ - static void bfa_ ## __mod ## _stop(struct bfa_s *bfa); \ - static void bfa_ ## __mod ## _iocdisable(struct bfa_s *bfa); \ - \ - extern struct bfa_module_s hal_mod_ ## __mod; \ - struct bfa_module_s hal_mod_ ## __mod = { \ - bfa_ ## __mod ## _meminfo, \ - bfa_ ## __mod ## _attach, \ - bfa_ ## __mod ## _detach, \ - bfa_ ## __mod ## _start, \ - bfa_ ## __mod ## _stop, \ - bfa_ ## __mod ## _iocdisable, \ - } - #define BFA_CACHELINE_SZ (256) -/* - * Structure used to interact between different BFA sub modules - * - * Each sub module needs to implement only the entry points relevant to it (and - * can leave entry points as NULL) - */ -struct bfa_module_s { - void (*meminfo) (struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *meminfo, - struct bfa_s *bfa); - void (*attach) (struct bfa_s *bfa, void *bfad, - struct bfa_iocfc_cfg_s *cfg, - struct bfa_pcidev_s *pcidev); - void (*detach) (struct bfa_s *bfa); - void (*start) (struct bfa_s *bfa); - void (*stop) (struct bfa_s *bfa); - void (*iocdisable) (struct bfa_s *bfa); -}; - - struct bfa_s { void *bfad; /* BFA driver instance */ struct bfa_plog_s *plog; /* portlog buffer */ @@ -127,14 +81,51 @@ struct bfa_s { }; extern bfa_boolean_t bfa_auto_recover; -extern struct bfa_module_s hal_mod_fcdiag; -extern struct bfa_module_s hal_mod_sgpg; -extern struct bfa_module_s hal_mod_fcport; -extern struct bfa_module_s hal_mod_fcxp; -extern struct bfa_module_s hal_mod_lps; -extern struct bfa_module_s hal_mod_uf; -extern struct bfa_module_s hal_mod_rport; -extern struct bfa_module_s hal_mod_fcp; -extern struct bfa_module_s hal_mod_dconf; + +void 
bfa_dconf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *); +void bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_dconf_iocdisable(struct bfa_s *); +void bfa_fcp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_fcp_iocdisable(struct bfa_s *bfa); +void bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *); +void bfa_fcport_start(struct bfa_s *); +void bfa_fcport_iocdisable(struct bfa_s *); +void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_fcport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_fcxp_iocdisable(struct bfa_s *); +void bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_fcxp_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_fcdiag_iocdisable(struct bfa_s *); +void bfa_fcdiag_attach(struct bfa_s *bfa, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_ioim_lm_init(struct bfa_s *); +void bfa_lps_iocdisable(struct bfa_s *bfa); +void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_lps_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_rport_iocdisable(struct bfa_s *bfa); +void bfa_rport_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_rport_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_sgpg_attach(struct bfa_s *, void *bfad, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_uf_iocdisable(struct bfa_s *); +void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *, struct bfa_meminfo_s *, + struct bfa_s *); +void bfa_uf_attach(struct bfa_s *, void *, struct bfa_iocfc_cfg_s *, + struct bfa_pcidev_s *); +void bfa_uf_start(struct bfa_s *); #endif /* __BFA_MODULES_H__ */ diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 12de292175ef..e640223bab3c 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -23,13 +23,6 @@ #include "bfa_modules.h" BFA_TRC_FILE(HAL, FCXP); -BFA_MODULE(fcdiag); -BFA_MODULE(fcxp); -BFA_MODULE(sgpg); -BFA_MODULE(lps); -BFA_MODULE(fcport); -BFA_MODULE(rport); -BFA_MODULE(uf); /* * LPS related definitions @@ -121,15 +114,6 @@ static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, /* * forward declarations for LPS functions */ -static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, - struct bfa_meminfo_s *minfo, struct bfa_s *bfa); -static void bfa_lps_attach(struct bfa_s *bfa, void *bfad, - struct bfa_iocfc_cfg_s *cfg, - struct bfa_pcidev_s *pcidev); -static void bfa_lps_detach(struct bfa_s *bfa); -static void bfa_lps_start(struct bfa_s *bfa); -static void bfa_lps_stop(struct bfa_s *bfa); -static void bfa_lps_iocdisable(struct bfa_s *bfa); static void bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp); static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count); @@ -484,7 +468,7 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod) bfa_mem_kva_curp(mod) = (void *)fcxp; } -static void +void bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { @@ -522,7 +506,7 @@ bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct 
bfa_meminfo_s *minfo, cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s)); } -static void +void bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { @@ -544,22 +528,7 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, claim_fcxps_mem(mod); } -static void -bfa_fcxp_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_fcxp_start(struct bfa_s *bfa) -{ -} - -static void -bfa_fcxp_stop(struct bfa_s *bfa) -{ -} - -static void +void bfa_fcxp_iocdisable(struct bfa_s *bfa) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); @@ -1510,7 +1479,7 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) /* * return memory requirement */ -static void +void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { @@ -1527,7 +1496,7 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, /* * bfa module attach at initialization time */ -static void +void bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { @@ -1557,25 +1526,10 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, } } -static void -bfa_lps_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_lps_start(struct bfa_s *bfa) -{ -} - -static void -bfa_lps_stop(struct bfa_s *bfa) -{ -} - /* * IOC in disabled state -- consider all lps offline */ -static void +void bfa_lps_iocdisable(struct bfa_s *bfa) { struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa); @@ -3055,7 +3009,7 @@ bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event) #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \ BFA_CACHELINE_SZ)) -static void +void bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { @@ -3086,7 +3040,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport) /* * Memory initialization. */ -static void +void bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { @@ -3131,34 +3085,16 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport); } -static void -bfa_fcport_detach(struct bfa_s *bfa) -{ -} - -/* - * Called when IOC is ready. - */ -static void +void bfa_fcport_start(struct bfa_s *bfa) { bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); } /* - * Called before IOC is stopped. - */ -static void -bfa_fcport_stop(struct bfa_s *bfa) -{ - bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP); - bfa_trunk_iocdisable(bfa); -} - -/* * Called when IOC failure is detected. 
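The bfa churn through these files all serves one refactor: the BFA_MODULE() macro and the NULL-terminated hal_mods[] table of struct bfa_module_s function pointers are gone, every surviving hook loses its static and is called by name, and the many empty detach/start/stop stubs that existed only to fill table slots are deleted outright. A toy before/after of the pattern (all names illustrative):

struct example_ops {
	void (*start)(void);
	void (*stop)(void);
};

static void example_a_start(void) { }
static void example_a_stop(void) { }

/* before: an ops table walked in a loop, so every module had to
 * supply a function for every slot, even a do-nothing one */
static struct example_ops example_a = { example_a_start, example_a_stop };
static struct example_ops *example_mods[] = { &example_a, NULL };

static void example_start_all_indirect(void)
{
	int i;

	for (i = 0; example_mods[i]; i++)
		example_mods[i]->start();
}

/* after: callers name exactly the hooks that do work; the empty
 * stubs disappear and the calls become visible to the compiler */
static void example_start_all_direct(void)
{
	example_a_start();
}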
*/ -static void +void bfa_fcport_iocdisable(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); @@ -4886,7 +4822,7 @@ bfa_rport_qresume(void *cbarg) bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME); } -static void +void bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { @@ -4900,7 +4836,7 @@ bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s)); } -static void +void bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { @@ -4940,22 +4876,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_mem_kva_curp(mod) = (u8 *) rp; } -static void -bfa_rport_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_rport_start(struct bfa_s *bfa) -{ -} - -static void -bfa_rport_stop(struct bfa_s *bfa) -{ -} - -static void +void bfa_rport_iocdisable(struct bfa_s *bfa) { struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa); @@ -5246,7 +5167,7 @@ bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp) /* * Compute and return memory needed by FCP(im) module. */ -static void +void bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { @@ -5281,7 +5202,7 @@ bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s)); } -static void +void bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { @@ -5344,26 +5265,6 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_mem_kva_curp(mod) = (u8 *) hsgpg; } -static void -bfa_sgpg_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_sgpg_start(struct bfa_s *bfa) -{ -} - -static void -bfa_sgpg_stop(struct bfa_s *bfa) -{ -} - -static void -bfa_sgpg_iocdisable(struct bfa_s *bfa) -{ -} - bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs) { @@ -5547,7 +5448,7 @@ uf_mem_claim(struct bfa_uf_mod_s *ufm) claim_uf_post_msgs(ufm); } -static void +void bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, struct bfa_s *bfa) { @@ -5575,7 +5476,7 @@ bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo, (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s))); } -static void +void bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { @@ -5590,11 +5491,6 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, uf_mem_claim(ufm); } -static void -bfa_uf_detach(struct bfa_s *bfa) -{ -} - static struct bfa_uf_s * bfa_uf_get(struct bfa_uf_mod_s *uf_mod) { @@ -5682,12 +5578,7 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf); } -static void -bfa_uf_stop(struct bfa_s *bfa) -{ -} - -static void +void bfa_uf_iocdisable(struct bfa_s *bfa) { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); @@ -5704,7 +5595,7 @@ bfa_uf_iocdisable(struct bfa_s *bfa) } } -static void +void bfa_uf_start(struct bfa_s *bfa) { bfa_uf_post_all(BFA_UF_MOD(bfa)); @@ -5845,13 +5736,7 @@ bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag) fcport->diag_busy = BFA_FALSE; } -static void -bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo, - struct bfa_s *bfa) -{ -} - -static void +void bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev) { 
@@ -5870,7 +5755,7 @@ bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s)); } -static void +void bfa_fcdiag_iocdisable(struct bfa_s *bfa) { struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa); @@ -5888,21 +5773,6 @@ bfa_fcdiag_iocdisable(struct bfa_s *bfa) } static void -bfa_fcdiag_detach(struct bfa_s *bfa) -{ -} - -static void -bfa_fcdiag_start(struct bfa_s *bfa) -{ -} - -static void -bfa_fcdiag_stop(struct bfa_s *bfa) -{ -} - -static void bfa_fcdiag_queuetest_timeout(void *cbarg) { struct bfa_fcdiag_s *fcdiag = cbarg; diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h index 029bef82c057..62758e830d3b 100644 --- a/drivers/scsi/csiostor/csio_hw.h +++ b/drivers/scsi/csiostor/csio_hw.h @@ -95,7 +95,6 @@ enum { }; struct csio_msix_entries { - unsigned short vector; /* Assigned MSI-X vector */ void *dev_id; /* Priv object associated w/ this msix*/ char desc[24]; /* Description of this vector */ }; diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c index 2fb71c6c3b37..7c8814715711 100644 --- a/drivers/scsi/csiostor/csio_isr.c +++ b/drivers/scsi/csiostor/csio_isr.c @@ -383,17 +383,15 @@ csio_request_irqs(struct csio_hw *hw) int rv, i, j, k = 0; struct csio_msix_entries *entryp = &hw->msix_entries[0]; struct csio_scsi_cpu_info *info; + struct pci_dev *pdev = hw->pdev; if (hw->intr_mode != CSIO_IM_MSIX) { - rv = request_irq(hw->pdev->irq, csio_fcoe_isr, - (hw->intr_mode == CSIO_IM_MSI) ? - 0 : IRQF_SHARED, - KBUILD_MODNAME, hw); + rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr, + hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED, + KBUILD_MODNAME, hw); if (rv) { - if (hw->intr_mode == CSIO_IM_MSI) - pci_disable_msi(hw->pdev); csio_err(hw, "Failed to allocate interrupt line.\n"); - return -EINVAL; + goto out_free_irqs; } goto out; @@ -402,22 +400,22 @@ csio_request_irqs(struct csio_hw *hw) /* Add the MSIX vector descriptions */ csio_add_msix_desc(hw); - rv = request_irq(entryp[k].vector, csio_nondata_isr, 0, + rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0, entryp[k].desc, hw); if (rv) { csio_err(hw, "IRQ request failed for vec %d err:%d\n", - entryp[k].vector, rv); - goto err; + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; } - entryp[k++].dev_id = (void *)hw; + entryp[k++].dev_id = hw; - rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0, + rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0, entryp[k].desc, hw); if (rv) { csio_err(hw, "IRQ request failed for vec %d err:%d\n", - entryp[k].vector, rv); - goto err; + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; } entryp[k++].dev_id = (void *)hw; @@ -429,51 +427,31 @@ csio_request_irqs(struct csio_hw *hw) struct csio_scsi_qset *sqset = &hw->sqset[i][j]; struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; - rv = request_irq(entryp[k].vector, csio_scsi_isr, 0, + rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0, entryp[k].desc, q); if (rv) { csio_err(hw, "IRQ request failed for vec %d err:%d\n", - entryp[k].vector, rv); - goto err; + pci_irq_vector(pdev, k), rv); + goto out_free_irqs; } - entryp[k].dev_id = (void *)q; + entryp[k].dev_id = q; } /* for all scsi cpus */ } /* for all ports */ out: hw->flags |= CSIO_HWF_HOST_INTR_ENABLED; - return 0; -err: - for (i = 0; i < k; i++) { - entryp = &hw->msix_entries[i]; - free_irq(entryp->vector, entryp->dev_id); - } - pci_disable_msix(hw->pdev); - +out_free_irqs: + for (i = 0; i < k; i++) + 
free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id); + pci_free_irq_vectors(hw->pdev); return -EINVAL; } -static void -csio_disable_msix(struct csio_hw *hw, bool free) -{ - int i; - struct csio_msix_entries *entryp; - int cnt = hw->num_sqsets + CSIO_EXTRA_VECS; - - if (free) { - for (i = 0; i < cnt; i++) { - entryp = &hw->msix_entries[i]; - free_irq(entryp->vector, entryp->dev_id); - } - } - pci_disable_msix(hw->pdev); -} - /* Reduce per-port max possible CPUs */ static void csio_reduce_sqsets(struct csio_hw *hw, int cnt) @@ -500,10 +478,9 @@ static int csio_enable_msix(struct csio_hw *hw) { int i, j, k, n, min, cnt; - struct csio_msix_entries *entryp; - struct msix_entry *entries; int extra = CSIO_EXTRA_VECS; struct csio_scsi_cpu_info *info; + struct irq_affinity desc = { .pre_vectors = 2 }; min = hw->num_pports + extra; cnt = hw->num_sqsets + extra; @@ -512,50 +489,35 @@ csio_enable_msix(struct csio_hw *hw) if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw)) cnt = min_t(uint8_t, hw->cfg_niq, cnt); - entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL); - if (!entries) - return -ENOMEM; - - for (i = 0; i < cnt; i++) - entries[i].entry = (uint16_t)i; - csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); - cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt); - if (cnt < 0) { - kfree(entries); + cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt, + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); + if (cnt < 0) return cnt; - } if (cnt < (hw->num_sqsets + extra)) { csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra); csio_reduce_sqsets(hw, cnt - extra); } - /* Save off vectors */ - for (i = 0; i < cnt; i++) { - entryp = &hw->msix_entries[i]; - entryp->vector = entries[i].vector; - } - /* Distribute vectors */ k = 0; - csio_set_nondata_intr_idx(hw, entries[k].entry); - csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry); - csio_set_fwevt_intr_idx(hw, entries[k++].entry); + csio_set_nondata_intr_idx(hw, k); + csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++); + csio_set_fwevt_intr_idx(hw, k++); for (i = 0; i < hw->num_pports; i++) { info = &hw->scsi_cpu_info[i]; for (j = 0; j < hw->num_scsi_msix_cpus; j++) { n = (j % info->max_cpus) + k; - hw->sqset[i][j].intr_idx = entries[n].entry; + hw->sqset[i][j].intr_idx = n; } k += info->max_cpus; } - kfree(entries); return 0; } @@ -597,22 +559,26 @@ csio_intr_disable(struct csio_hw *hw, bool free) { csio_hw_intr_disable(hw); - switch (hw->intr_mode) { - case CSIO_IM_MSIX: - csio_disable_msix(hw, free); - break; - case CSIO_IM_MSI: - if (free) - free_irq(hw->pdev->irq, hw); - pci_disable_msi(hw->pdev); - break; - case CSIO_IM_INTX: - if (free) - free_irq(hw->pdev->irq, hw); - break; - default: - break; + if (free) { + int i; + + switch (hw->intr_mode) { + case CSIO_IM_MSIX: + for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) { + free_irq(pci_irq_vector(hw->pdev, i), + hw->msix_entries[i].dev_id); + } + break; + case CSIO_IM_MSI: + case CSIO_IM_INTX: + free_irq(pci_irq_vector(hw->pdev, 0), hw); + break; + default: + break; + } } + + pci_free_irq_vectors(hw->pdev); hw->intr_mode = CSIO_IM_NONE; hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED; } diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 3fb3f5708ff7..1076c1578322 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -36,7 +36,7 @@ static unsigned int dbg_level; #include "../libcxgbi.h" #define DRV_MODULE_NAME "cxgb4i" -#define DRV_MODULE_DESC "Chelsio T4/T5 
iSCSI Driver" +#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver" #define DRV_MODULE_VERSION "0.9.5-ko" #define DRV_MODULE_RELDATE "Apr. 2015" diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h index d11dcc59ff46..256af819377d 100644 --- a/drivers/scsi/cxlflash/common.h +++ b/drivers/scsi/cxlflash/common.h @@ -15,6 +15,7 @@ #ifndef _CXLFLASH_COMMON_H #define _CXLFLASH_COMMON_H +#include <linux/irq_poll.h> #include <linux/list.h> #include <linux/rwsem.h> #include <linux/types.h> @@ -24,30 +25,32 @@ extern const struct file_operations cxlflash_cxl_fops; -#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */ +#define MAX_CONTEXT CXLFLASH_MAX_CONTEXT /* num contexts per afu */ +#define MAX_FC_PORTS CXLFLASH_MAX_FC_PORTS /* max ports per AFU */ +#define LEGACY_FC_PORTS 2 /* legacy ports per AFU */ -#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */ +#define CHAN2PORTBANK(_x) ((_x) >> ilog2(CXLFLASH_NUM_FC_PORTS_PER_BANK)) +#define CHAN2BANKPORT(_x) ((_x) & (CXLFLASH_NUM_FC_PORTS_PER_BANK - 1)) + +#define CHAN2PORTMASK(_x) (1 << (_x)) /* channel to port mask */ +#define PORTMASK2CHAN(_x) (ilog2((_x))) /* port mask to channel */ +#define PORTNUM2CHAN(_x) ((_x) - 1) /* port number to channel */ + +#define CXLFLASH_BLOCK_SIZE 4096 /* 4K blocks */ #define CXLFLASH_MAX_XFER_SIZE 16777216 /* 16MB transfer */ #define CXLFLASH_MAX_SECTORS (CXLFLASH_MAX_XFER_SIZE/512) /* SCSI wants - max_sectors - in units of - 512 byte - sectors - */ + * max_sectors + * in units of + * 512 byte + * sectors + */ #define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry)) /* AFU command retry limit */ -#define MC_RETRY_CNT 5 /* sufficient for SCSI check and - certain AFU errors */ +#define MC_RETRY_CNT 5 /* Sufficient for SCSI and certain AFU errors */ /* Command management definitions */ -#define CXLFLASH_NUM_CMDS (2 * CXLFLASH_MAX_CMDS) /* Must be a pow2 for - alignment and more - efficient array - index derivation - */ - #define CXLFLASH_MAX_CMDS 256 #define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS @@ -57,10 +60,16 @@ extern const struct file_operations cxlflash_cxl_fops; /* SQ for master issued cmds */ #define NUM_SQ_ENTRY CXLFLASH_MAX_CMDS +/* Hardware queue definitions */ +#define CXLFLASH_DEF_HWQS 1 +#define CXLFLASH_MAX_HWQS 8 +#define PRIMARY_HWQ 0 + static inline void check_sizes(void) { - BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS); + BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_FC_PORTS_PER_BANK); + BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_MAX_CMDS); } /* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */ @@ -80,11 +89,20 @@ enum cxlflash_init_state { }; enum cxlflash_state { + STATE_PROBING, /* Initial state during probe */ + STATE_PROBED, /* Temporary state, probe completed but EEH occurred */ STATE_NORMAL, /* Normal running state, everything good */ STATE_RESET, /* Reset state, trying to reset/recover */ STATE_FAILTERM /* Failed/terminating state, error out users/threads */ }; +enum cxlflash_hwq_mode { + HWQ_MODE_RR, /* Roundrobin (default) */ + HWQ_MODE_TAG, /* Distribute based on block MQ tag */ + HWQ_MODE_CPU, /* CPU affinity */ + MAX_HWQ_MODE +}; + /* * Each context has its own set of resource handles that is visible * only from that context. 
@@ -92,11 +110,11 @@ enum cxlflash_state { struct cxlflash_cfg { struct afu *afu; - struct cxl_context *mcctx; struct pci_dev *dev; struct pci_device_id *dev_id; struct Scsi_Host *host; + int num_fc_ports; ulong cxlflash_regs_pci; @@ -117,7 +135,7 @@ struct cxlflash_cfg { struct file_operations cxl_fops; /* Parameters that are LUN table related */ - int last_lun_index[CXLFLASH_NUM_FC_PORTS]; + int last_lun_index[MAX_FC_PORTS]; int promote_lun_index; struct list_head lluns; /* list of llun_info structs */ @@ -134,6 +152,8 @@ struct afu_cmd { struct afu *parent; struct scsi_cmnd *scp; struct completion cevent; + struct list_head queue; + u32 hwq_index; u8 cmd_tmf:1; @@ -156,7 +176,7 @@ static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc) return afuc; } -struct afu { +struct hwq { /* Stuff requiring alignment go first. */ struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */ @@ -164,40 +184,67 @@ struct afu { /* Beware of alignment till here. Preferably introduce new * fields after this point */ - - int (*send_cmd)(struct afu *, struct afu_cmd *); - void (*context_reset)(struct afu_cmd *); - - /* AFU HW */ + struct afu *afu; + struct cxl_context *ctx; struct cxl_ioctl_start_work work; - struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */ struct sisl_host_map __iomem *host_map; /* MC host map */ struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */ - ctx_hndl_t ctx_hndl; /* master's context handle */ + u32 index; /* Index of this hwq */ atomic_t hsq_credits; spinlock_t hsq_slock; struct sisl_ioarcb *hsq_start; struct sisl_ioarcb *hsq_end; struct sisl_ioarcb *hsq_curr; + spinlock_t hrrq_slock; u64 *hrrq_start; u64 *hrrq_end; u64 *hrrq_curr; bool toggle; - atomic_t cmds_active; /* Number of currently active AFU commands */ + s64 room; spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */ + + struct irq_poll irqpoll; +} __aligned(cache_line_size()); + +struct afu { + struct hwq hwqs[CXLFLASH_MAX_HWQS]; + int (*send_cmd)(struct afu *, struct afu_cmd *); + void (*context_reset)(struct afu_cmd *); + + /* AFU HW */ + struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */ + + atomic_t cmds_active; /* Number of currently active AFU commands */ u64 hb; u32 internal_lun; /* User-desired LUN mode for this AFU */ + u32 num_hwqs; /* Number of hardware queues */ + u32 desired_hwqs; /* Desired h/w queues, effective on AFU reset */ + enum cxlflash_hwq_mode hwq_mode; /* Steering mode for h/w queues */ + u32 hwq_rr_count; /* Count to distribute traffic for roundrobin */ + char version[16]; u64 interface_version; + u32 irqpoll_weight; struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */ - }; +static inline struct hwq *get_hwq(struct afu *afu, u32 index) +{ + WARN_ON(index >= CXLFLASH_MAX_HWQS); + + return &afu->hwqs[index]; +} + +static inline bool afu_is_irqpoll_enabled(struct afu *afu) +{ + return !!afu->irqpoll_weight; +} + static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode) { u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT; @@ -223,14 +270,36 @@ static inline u64 lun_to_lunid(u64 lun) return be64_to_cpu(lun_id); } -int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8); +static inline struct fc_port_bank __iomem *get_fc_port_bank( + struct cxlflash_cfg *cfg, int i) +{ + struct afu *afu = cfg->afu; + + return &afu->afu_map->global.bank[CHAN2PORTBANK(i)]; +} + +static inline __be64 __iomem *get_fc_port_regs(struct cxlflash_cfg *cfg, int i) +{ + struct fc_port_bank 
__iomem *fcpb = get_fc_port_bank(cfg, i); + + return &fcpb->fc_port_regs[CHAN2BANKPORT(i)][0]; +} + +static inline __be64 __iomem *get_fc_port_luns(struct cxlflash_cfg *cfg, int i) +{ + struct fc_port_bank __iomem *fcpb = get_fc_port_bank(cfg, i); + + return &fcpb->fc_port_luns[CHAN2BANKPORT(i)][0]; +} + +int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t c, res_hndl_t r, u8 mode); void cxlflash_list_init(void); void cxlflash_term_global_luns(void); void cxlflash_free_errpage(void); -int cxlflash_ioctl(struct scsi_device *, int, void __user *); -void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *); -int cxlflash_mark_contexts_error(struct cxlflash_cfg *); -void cxlflash_term_local_luns(struct cxlflash_cfg *); -void cxlflash_restore_luntable(struct cxlflash_cfg *); +int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg); +void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg); +int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg); +void cxlflash_term_local_luns(struct cxlflash_cfg *cfg); +void cxlflash_restore_luntable(struct cxlflash_cfg *cfg); #endif /* ifndef _CXLFLASH_COMMON_H */ diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c index 0efed177cc8b..4d232e271af6 100644 --- a/drivers/scsi/cxlflash/lunmgt.c +++ b/drivers/scsi/cxlflash/lunmgt.c @@ -252,7 +252,7 @@ int cxlflash_manage_lun(struct scsi_device *sdev, * in unpacked, AFU-friendly format, and hang LUN reference in * the sdev. */ - lli->port_sel |= CHAN2PORT(chan); + lli->port_sel |= CHAN2PORTMASK(chan); lli->lun_id[chan] = lun_to_lunid(sdev->lun); sdev->hostdata = lli; } else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) { @@ -264,7 +264,7 @@ int cxlflash_manage_lun(struct scsi_device *sdev, * tracking when no more references exist. */ sdev->hostdata = NULL; - lli->port_sel &= ~CHAN2PORT(chan); + lli->port_sel &= ~CHAN2PORTMASK(chan); if (lli->port_sel == 0U) lli->in_table = false; } diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 3061d8045382..a7d57c343492 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -176,7 +176,6 @@ static void cmd_complete(struct afu_cmd *cmd) dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n", __func__, scp, scp->result, cmd->sa.ioasc); - scsi_dma_unmap(scp); scp->scsi_done(scp); if (cmd_is_tmf) { @@ -224,8 +223,9 @@ static void context_reset(struct afu_cmd *cmd, __be64 __iomem *reset_reg) static void context_reset_ioarrin(struct afu_cmd *cmd) { struct afu *afu = cmd->parent; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - context_reset(cmd, &afu->host_map->ioarrin); + context_reset(cmd, &hwq->host_map->ioarrin); } /** @@ -235,8 +235,9 @@ static void context_reset_ioarrin(struct afu_cmd *cmd) static void context_reset_sq(struct afu_cmd *cmd) { struct afu *afu = cmd->parent; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); - context_reset(cmd, &afu->host_map->sq_ctx_reset); + context_reset(cmd, &hwq->host_map->sq_ctx_reset); } /** @@ -251,6 +252,7 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) { struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); int rc = 0; s64 room; ulong lock_flags; @@ -259,23 +261,23 @@ static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd) * To avoid the performance penalty of MMIO, spread the update of * 'room' over multiple commands. 
*/ - spin_lock_irqsave(&afu->rrin_slock, lock_flags); - if (--afu->room < 0) { - room = readq_be(&afu->host_map->cmd_room); + spin_lock_irqsave(&hwq->rrin_slock, lock_flags); + if (--hwq->room < 0) { + room = readq_be(&hwq->host_map->cmd_room); if (room <= 0) { dev_dbg_ratelimited(dev, "%s: no cmd_room to send " "0x%02X, room=0x%016llX\n", __func__, cmd->rcb.cdb[0], room); - afu->room = 0; + hwq->room = 0; rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } - afu->room = room - 1; + hwq->room = room - 1; } - writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin); + writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin); out: - spin_unlock_irqrestore(&afu->rrin_slock, lock_flags); + spin_unlock_irqrestore(&hwq->rrin_slock, lock_flags); dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n", __func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc); return rc; @@ -293,11 +295,12 @@ static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd) { struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(afu, cmd->hwq_index); int rc = 0; int newval; ulong lock_flags; - newval = atomic_dec_if_positive(&afu->hsq_credits); + newval = atomic_dec_if_positive(&hwq->hsq_credits); if (newval <= 0) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; @@ -305,22 +308,22 @@ static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd) cmd->rcb.ioasa = &cmd->sa; - spin_lock_irqsave(&afu->hsq_slock, lock_flags); + spin_lock_irqsave(&hwq->hsq_slock, lock_flags); - *afu->hsq_curr = cmd->rcb; - if (afu->hsq_curr < afu->hsq_end) - afu->hsq_curr++; + *hwq->hsq_curr = cmd->rcb; + if (hwq->hsq_curr < hwq->hsq_end) + hwq->hsq_curr++; else - afu->hsq_curr = afu->hsq_start; - writeq_be((u64)afu->hsq_curr, &afu->host_map->sq_tail); + hwq->hsq_curr = hwq->hsq_start; + writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail); - spin_unlock_irqrestore(&afu->hsq_slock, lock_flags); + spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags); out: dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p " "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len, - cmd->rcb.data_ea, cmd->rcb.ioasa, rc, afu->hsq_curr, - readq_be(&afu->host_map->sq_head), - readq_be(&afu->host_map->sq_tail)); + cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr, + readq_be(&hwq->host_map->sq_head), - readq_be(&hwq->host_map->sq_tail)); return rc; } @@ -355,6 +358,43 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) } /** + * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command + * @host: SCSI host associated with device. + * @scp: SCSI command to send. + * @afu: AFU associated with the host. + * + * Hashes a command based upon the hardware queue mode. + * + * Return: Trusted index of target hardware queue + */ +static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp, + struct afu *afu) +{ + u32 tag; + u32 hwq = 0; + + if (afu->num_hwqs == 1) + return 0; + + switch (afu->hwq_mode) { + case HWQ_MODE_RR: + hwq = afu->hwq_rr_count++ % afu->num_hwqs; + break; + case HWQ_MODE_TAG: + tag = blk_mq_unique_tag(scp->request); + hwq = blk_mq_unique_tag_to_hwq(tag); + break; + case HWQ_MODE_CPU: + hwq = smp_processor_id() % afu->num_hwqs; + break; + default: + WARN_ON_ONCE(1); + } + + return hwq; +} + +/** * send_tmf() - sends a Task Management Function (TMF) * @afu: AFU to checkout from. * @scp: SCSI command from stack. 
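send_cmd_ioarrin(), now operating on a hwq instead of the afu, keeps cxlflash's command-room trick: the MMIO cmd_room register is read only when a locally cached credit count runs out, so most submissions pay no MMIO read at all. A self-contained sketch of that scheme, with example_* names and the driver's big-endian readq_be()/writeq_be() accessors assumed:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_hwq {
	spinlock_t rrin_slock;
	s64 room;			/* cached submission credits */
	__be64 __iomem *cmd_room_reg;	/* hardware's current room */
	__be64 __iomem *ioarrin_reg;	/* command submission register */
};

static int example_submit(struct example_hwq *q, u64 rcb_pa)
{
	ulong flags;
	int rc = 0;

	spin_lock_irqsave(&q->rrin_slock, flags);
	if (--q->room < 0) {			/* credits exhausted */
		s64 room = readq_be(q->cmd_room_reg);	/* one MMIO read */

		if (room <= 0) {		/* hardware really is full */
			q->room = 0;
			rc = -EBUSY;
			goto out;
		}
		q->room = room - 1;		/* refill the cache */
	}
	writeq_be(rcb_pa, q->ioarrin_reg);	/* post the command */
out:
	spin_unlock_irqrestore(&q->rrin_slock, flags);
	return rc;
}

The cmd_to_target_hwq() helper added in the same hunk picks which hwq this runs against: round-robin by default, or by block-MQ tag (via blk_mq_unique_tag_to_hwq()) or by submitting CPU when so configured.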
@@ -365,10 +405,12 @@ static int wait_resp(struct afu *afu, struct afu_cmd *cmd) */ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) { - u32 port_sel = scp->device->channel + 1; - struct cxlflash_cfg *cfg = shost_priv(scp->device->host); + struct Scsi_Host *host = scp->device->host; + struct cxlflash_cfg *cfg = shost_priv(host); struct afu_cmd *cmd = sc_to_afucz(scp); struct device *dev = &cfg->dev->dev; + int hwq_index = cmd_to_target_hwq(host, scp, afu); + struct hwq *hwq = get_hwq(afu, hwq_index); ulong lock_flags; int rc = 0; ulong to; @@ -385,10 +427,11 @@ static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd) cmd->scp = scp; cmd->parent = afu; cmd->cmd_tmf = true; + cmd->hwq_index = hwq_index; - cmd->rcb.ctx_id = afu->ctx_hndl; + cmd->rcb.ctx_id = hwq->ctx_hndl; cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; - cmd->rcb.port_sel = port_sel; + cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID | SISL_REQ_FLAGS_SUP_UNDERRUN | @@ -444,10 +487,10 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) struct device *dev = &cfg->dev->dev; struct afu_cmd *cmd = sc_to_afucz(scp); struct scatterlist *sg = scsi_sglist(scp); - u32 port_sel = scp->device->channel + 1; + int hwq_index = cmd_to_target_hwq(host, scp, afu); + struct hwq *hwq = get_hwq(afu, hwq_index); u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN; ulong lock_flags; - int nseg = 0; int rc = 0; dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu " @@ -472,6 +515,8 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags); switch (cfg->state) { + case STATE_PROBING: + case STATE_PROBED: case STATE_RESET: dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__); rc = SCSI_MLQUEUE_HOST_BUSY; @@ -487,23 +532,17 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) } if (likely(sg)) { - nseg = scsi_dma_map(scp); - if (unlikely(nseg < 0)) { - dev_err(dev, "%s: Fail DMA map\n", __func__); - rc = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - - cmd->rcb.data_len = sg_dma_len(sg); - cmd->rcb.data_ea = sg_dma_address(sg); + cmd->rcb.data_len = sg->length; + cmd->rcb.data_ea = (uintptr_t)sg_virt(sg); } cmd->scp = scp; cmd->parent = afu; + cmd->hwq_index = hwq_index; - cmd->rcb.ctx_id = afu->ctx_hndl; + cmd->rcb.ctx_id = hwq->ctx_hndl; cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; - cmd->rcb.port_sel = port_sel; + cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel); cmd->rcb.lun_id = lun_to_lunid(scp->device->lun); if (scp->sc_data_direction == DMA_TO_DEVICE) @@ -513,8 +552,6 @@ static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp) memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb)); rc = afu->send_cmd(afu, cmd); - if (unlikely(rc)) - scsi_dma_unmap(scp); out: return rc; } @@ -554,17 +591,28 @@ static void free_mem(struct cxlflash_cfg *cfg) * Safe to call with AFU in a partially allocated/initialized state. * * Cancels scheduled worker threads, waits for any active internal AFU - * commands to timeout and then unmaps the MMIO space. + * commands to timeout, disables IRQ polling and then unmaps the MMIO space. 
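stop_afu() now has irq_poll contexts to disable because the series moves cxlflash's RRQ processing onto the kernel's irq_poll machinery: the hard interrupt only schedules a softirq poller, and the poller drains completions in budget-sized slices. A minimal sketch of that division of labor; the irq_poll_* calls are the real <linux/irq_poll.h> API, everything named example_* is hypothetical:

#include <linux/interrupt.h>
#include <linux/irq_poll.h>

struct example_queue {
	struct irq_poll irqpoll;
};

int example_drain(int budget);		/* hypothetical: returns entries done */
void example_mask_irq(void);		/* hypothetical */
void example_unmask_irq(void);		/* hypothetical */

static int example_poll(struct irq_poll *iop, int budget)
{
	int done = example_drain(budget);

	if (done < budget) {
		irq_poll_complete(iop);	/* ring drained: leave poll mode */
		example_unmask_irq();
	}
	return done;
}

static irqreturn_t example_hard_irq(int irq, void *data)
{
	struct example_queue *q = data;

	example_mask_irq();		/* stay quiet until polling ends */
	irq_poll_sched(&q->irqpoll);	/* defer the real work to softirq */
	return IRQ_HANDLED;
}

/* at setup: irq_poll_init(&q->irqpoll, weight, example_poll); the
 * weight caps each poll invocation's budget */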
*/ static void stop_afu(struct cxlflash_cfg *cfg) { struct afu *afu = cfg->afu; + struct hwq *hwq; + int i; cancel_work_sync(&cfg->work_q); if (likely(afu)) { while (atomic_read(&afu->cmds_active)) ssleep(1); + + if (afu_is_irqpoll_enabled(afu)) { + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + irq_poll_disable(&hwq->irqpoll); + } + } + if (likely(afu->afu_map)) { cxl_psa_unmap((void __iomem *)afu->afu_map); afu->afu_map = NULL; @@ -576,28 +624,40 @@ static void stop_afu(struct cxlflash_cfg *cfg) * term_intr() - disables all AFU interrupts * @cfg: Internal structure associated with the host. * @level: Depth of allocation, where to begin waterfall tear down. + * @index: Index of the hardware queue. * * Safe to call with AFU/MC in partially allocated/initialized state. */ -static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level) +static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level, + u32 index) { struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; + struct hwq *hwq; - if (!afu || !cfg->mcctx) { - dev_err(dev, "%s: returning with NULL afu or MC\n", __func__); + if (!afu) { + dev_err(dev, "%s: returning with NULL afu\n", __func__); + return; + } + + hwq = get_hwq(afu, index); + + if (!hwq->ctx) { + dev_err(dev, "%s: returning with NULL MC\n", __func__); return; } switch (level) { case UNMAP_THREE: - cxl_unmap_afu_irq(cfg->mcctx, 3, afu); + /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ + if (index == PRIMARY_HWQ) + cxl_unmap_afu_irq(hwq->ctx, 3, hwq); case UNMAP_TWO: - cxl_unmap_afu_irq(cfg->mcctx, 2, afu); + cxl_unmap_afu_irq(hwq->ctx, 2, hwq); case UNMAP_ONE: - cxl_unmap_afu_irq(cfg->mcctx, 1, afu); + cxl_unmap_afu_irq(hwq->ctx, 1, hwq); case FREE_IRQ: - cxl_free_afu_irqs(cfg->mcctx); + cxl_free_afu_irqs(hwq->ctx); /* fall through */ case UNDO_NOOP: /* No action required */ @@ -608,24 +668,32 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level) /** * term_mc() - terminates the master context * @cfg: Internal structure associated with the host. - * @level: Depth of allocation, where to begin waterfall tear down. + * @index: Index of the hardware queue. * * Safe to call with AFU/MC in partially allocated/initialized state. */ -static void term_mc(struct cxlflash_cfg *cfg) +static void term_mc(struct cxlflash_cfg *cfg, u32 index) { - int rc = 0; struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; + struct hwq *hwq; - if (!afu || !cfg->mcctx) { - dev_err(dev, "%s: returning with NULL afu or MC\n", __func__); + if (!afu) { + dev_err(dev, "%s: returning with NULL afu\n", __func__); return; } - rc = cxl_stop_context(cfg->mcctx); - WARN_ON(rc); - cfg->mcctx = NULL; + hwq = get_hwq(afu, index); + + if (!hwq->ctx) { + dev_err(dev, "%s: returning with NULL MC\n", __func__); + return; + } + + WARN_ON(cxl_stop_context(hwq->ctx)); + if (index != PRIMARY_HWQ) + WARN_ON(cxl_release_context(hwq->ctx)); + hwq->ctx = NULL; } /** @@ -637,21 +705,25 @@ static void term_mc(struct cxlflash_cfg *cfg) static void term_afu(struct cxlflash_cfg *cfg) { struct device *dev = &cfg->dev->dev; + int k; /* * Tear down is carefully orchestrated to ensure * no interrupts can come in when the problem state * area is unmapped. 
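term_intr()'s switch is the classic waterfall teardown: the undo_level argument names how far interrupt setup got, and deliberate fall-through unwinds everything below that point, so one function serves both probe error paths and full teardown. The shape, reduced to illustrative names (the real function additionally unmaps interrupt 3 only on the primary hardware queue, since SISL_MSI_ASYNC_ERROR is registered there alone):

enum example_undo {
	EXAMPLE_UNDO_NOOP,
	EXAMPLE_FREE_IRQ,
	EXAMPLE_UNMAP_ONE,
	EXAMPLE_UNMAP_TWO,
	EXAMPLE_UNMAP_THREE
};

void example_unmap_irq(int n);		/* hypothetical */
void example_free_irqs(void);		/* hypothetical */

static void example_teardown(enum example_undo level)
{
	switch (level) {
	case EXAMPLE_UNMAP_THREE:
		example_unmap_irq(3);
		/* fall through */
	case EXAMPLE_UNMAP_TWO:
		example_unmap_irq(2);
		/* fall through */
	case EXAMPLE_UNMAP_ONE:
		example_unmap_irq(1);
		/* fall through */
	case EXAMPLE_FREE_IRQ:
		example_free_irqs();
		/* fall through */
	case EXAMPLE_UNDO_NOOP:
		break;
	}
}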
* - * 1) Disable all AFU interrupts + * 1) Disable all AFU interrupts for each master * 2) Unmap the problem state area - * 3) Stop the master context + * 3) Stop each master context */ - term_intr(cfg, UNMAP_THREE); + for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) + term_intr(cfg, UNMAP_THREE, k); + if (cfg->afu) stop_afu(cfg); - term_mc(cfg); + for (k = cfg->afu->num_hwqs - 1; k >= 0; k--) + term_mc(cfg, k); dev_dbg(dev, "%s: returning\n", __func__); } @@ -670,8 +742,8 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) { struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; - struct sisl_global_map __iomem *global; struct dev_dependent_vals *ddv; + __be64 __iomem *fc_port_regs; u64 reg, status; int i, retry_cnt = 0; @@ -684,23 +756,25 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) return; } - global = &afu->afu_map->global; - /* Notify AFU */ - for (i = 0; i < NUM_FC_PORTS; i++) { - reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]); + for (i = 0; i < cfg->num_fc_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); + + reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); reg |= SISL_FC_SHUTDOWN_NORMAL; - writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]); + writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); } if (!wait) return; /* Wait up to 1.5 seconds for shutdown processing to complete */ - for (i = 0; i < NUM_FC_PORTS; i++) { + for (i = 0; i < cfg->num_fc_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); retry_cnt = 0; + while (true) { - status = readq_be(&global->fc_regs[i][FC_STATUS / 8]); + status = readq_be(&fc_port_regs[FC_STATUS / 8]); if (status & SISL_STATUS_SHUTDOWN_COMPLETE) break; if (++retry_cnt >= MC_RETRY_CNT) { @@ -717,7 +791,8 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait) * cxlflash_remove() - PCI entry point to tear down host * @pdev: PCI device associated with the host. * - * Safe to use as a cleanup in partially allocated/initialized state. + * Safe to use as a cleanup in partially allocated/initialized state. Note that + * the reset_waitq is flushed as part of the stop/termination of user contexts. 
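term_afu()'s comment spells out the ordering constraint the multi-queue rework has to preserve: every master's interrupts go quiet before the problem-state area is unmapped, and only then are the contexts stopped, walking the hardware queues in reverse so the primary queue, which owns the async-error interrupt, is torn down last. As a compressed sketch (all names hypothetical):

struct example_afu {
	int num_hwqs;
};

void example_term_intr(struct example_afu *afu, int k);	/* hypothetical */
void example_unmap_mmio(struct example_afu *afu);	/* hypothetical */
void example_term_mc(struct example_afu *afu, int k);	/* hypothetical */

static void example_term(struct example_afu *afu)
{
	int k;

	/* 1) silence every master first ... */
	for (k = afu->num_hwqs - 1; k >= 0; k--)
		example_term_intr(afu, k);

	/* 2) ... so unmapping MMIO cannot race a late interrupt ... */
	example_unmap_mmio(afu);

	/* 3) ... then stop each context */
	for (k = afu->num_hwqs - 1; k >= 0; k--)
		example_term_mc(afu, k);
}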
*/ static void cxlflash_remove(struct pci_dev *pdev) { @@ -750,7 +825,6 @@ static void cxlflash_remove(struct pci_dev *pdev) case INIT_STATE_SCSI: cxlflash_term_local_luns(cfg); scsi_remove_host(cfg->host); - /* fall through */ case INIT_STATE_AFU: term_afu(cfg); case INIT_STATE_PCI: @@ -789,6 +863,7 @@ static int alloc_mem(struct cxlflash_cfg *cfg) goto out; } cfg->afu->parent = cfg; + cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS; cfg->afu->afu_map = NULL; out: return rc; @@ -1024,53 +1099,16 @@ static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs) dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel); } -/* - * Asynchronous interrupt information table - */ -static const struct asyc_intr_info ainfo[] = { - {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET}, - {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0}, - {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET}, - {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET}, - {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR}, - {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST}, - {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0}, - {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0}, - {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET}, - {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0}, - {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET}, - {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET}, - {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR}, - {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST}, - {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0}, - {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0}, - {0x0, "", 0, 0} /* terminator */ -}; - -/** - * find_ainfo() - locates and returns asynchronous interrupt information - * @status: Status code set by AFU on error. - * - * Return: The located information or NULL when the status code is invalid. - */ -static const struct asyc_intr_info *find_ainfo(u64 status) -{ - const struct asyc_intr_info *info; - - for (info = &ainfo[0]; info->status; info++) - if (info->status == status) - return info; - - return NULL; -} - /** * afu_err_intr_init() - clears and initializes the AFU for error interrupts * @afu: AFU associated with the host. 
*/ static void afu_err_intr_init(struct afu *afu) { + struct cxlflash_cfg *cfg = afu->parent; + __be64 __iomem *fc_port_regs; int i; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); u64 reg; /* global async interrupts: AFU clears afu_ctrl on context exit @@ -1082,8 +1120,8 @@ static void afu_err_intr_init(struct afu *afu) /* mask all */ writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask); - /* set LISN# to send and point to master context */ - reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); + /* set LISN# to send and point to primary master context */ + reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40); if (afu->internal_lun) reg |= 1; /* Bit 63 indicates local lun */ @@ -1098,17 +1136,19 @@ static void afu_err_intr_init(struct afu *afu) writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear); /* Clear/Set internal lun bits */ - reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]); + fc_port_regs = get_fc_port_regs(cfg, 0); + reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]); reg &= SISL_FC_INTERNAL_MASK; if (afu->internal_lun) reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT); - writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]); + writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]); /* now clear FC errors */ - for (i = 0; i < NUM_FC_PORTS; i++) { - writeq_be(0xFFFFFFFFU, - &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]); - writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]); + for (i = 0; i < cfg->num_fc_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); + + writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]); + writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); } /* sync interrupts for master's IOARRIN write */ @@ -1117,8 +1157,12 @@ static void afu_err_intr_init(struct afu *afu) /* IOARRIN yet), so there is nothing to clear. */ /* set LISN#, it is always sent to the context that wrote IOARRIN */ - writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl); - writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask); + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl); + writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); + } } /** @@ -1130,13 +1174,13 @@ static void afu_err_intr_init(struct afu *afu) */ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) { - struct afu *afu = (struct afu *)data; - struct cxlflash_cfg *cfg = afu->parent; + struct hwq *hwq = (struct hwq *)data; + struct cxlflash_cfg *cfg = hwq->afu->parent; struct device *dev = &cfg->dev->dev; u64 reg; u64 reg_unmasked; - reg = readq_be(&afu->host_map->intr_status); + reg = readq_be(&hwq->host_map->intr_status); reg_unmasked = (reg & SISL_ISTATUS_UNMASK); if (reg_unmasked == 0UL) { @@ -1148,32 +1192,36 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n", __func__, reg); - writeq_be(reg_unmasked, &afu->host_map->intr_clear); + writeq_be(reg_unmasked, &hwq->host_map->intr_clear); cxlflash_sync_err_irq_exit: return IRQ_HANDLED; } /** - * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) - * @irq: Interrupt number. - * @data: Private data provided at interrupt registration, the AFU. + * process_hrrq() - process the read-response queue + * @afu: AFU associated with the host. + * @doneq: Queue of commands harvested from the RRQ. + * @budget: Threshold of RRQ entries to process. * - * Return: Always return IRQ_HANDLED. 
+ * This routine must be called holding the disabled RRQ spin lock. + * + * Return: The number of entries processed. */ -static irqreturn_t cxlflash_rrq_irq(int irq, void *data) +static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget) { - struct afu *afu = (struct afu *)data; + struct afu *afu = hwq->afu; struct afu_cmd *cmd; struct sisl_ioasa *ioasa; struct sisl_ioarcb *ioarcb; - bool toggle = afu->toggle; + bool toggle = hwq->toggle; + int num_hrrq = 0; u64 entry, - *hrrq_start = afu->hrrq_start, - *hrrq_end = afu->hrrq_end, - *hrrq_curr = afu->hrrq_curr; + *hrrq_start = hwq->hrrq_start, + *hrrq_end = hwq->hrrq_end, + *hrrq_curr = hwq->hrrq_curr; - /* Process however many RRQ entries that are ready */ + /* Process ready RRQ entries up to the specified budget (if any) */ while (true) { entry = *hrrq_curr; @@ -1190,7 +1238,7 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data) cmd = container_of(ioarcb, struct afu_cmd, rcb); } - cmd_complete(cmd); + list_add_tail(&cmd->queue, doneq); /* Advance to next entry or wrap and flip the toggle bit */ if (hrrq_curr < hrrq_end) @@ -1200,15 +1248,123 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data) toggle ^= SISL_RESP_HANDLE_T_BIT; } - atomic_inc(&afu->hsq_credits); + atomic_inc(&hwq->hsq_credits); + num_hrrq++; + + if (budget > 0 && num_hrrq >= budget) + break; + } + + hwq->hrrq_curr = hrrq_curr; + hwq->toggle = toggle; + + return num_hrrq; +} + +/** + * process_cmd_doneq() - process a queue of harvested RRQ commands + * @doneq: Queue of completed commands. + * + * Note that upon return the queue can no longer be trusted. + */ +static void process_cmd_doneq(struct list_head *doneq) +{ + struct afu_cmd *cmd, *tmp; + + WARN_ON(list_empty(doneq)); + + list_for_each_entry_safe(cmd, tmp, doneq, queue) + cmd_complete(cmd); +} + +/** + * cxlflash_irqpoll() - process a queue of harvested RRQ commands + * @irqpoll: IRQ poll structure associated with queue to poll. + * @budget: Threshold of RRQ entries to process per poll. + * + * Return: The number of entries processed. + */ +static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget) +{ + struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll); + unsigned long hrrq_flags; + LIST_HEAD(doneq); + int num_entries = 0; + + spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); + + num_entries = process_hrrq(hwq, &doneq, budget); + if (num_entries < budget) + irq_poll_complete(irqpoll); + + spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); + + process_cmd_doneq(&doneq); + return num_entries; +} + +/** + * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) + * @irq: Interrupt number. + * @data: Private data provided at interrupt registration, the AFU. + * + * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found. 
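process_hrrq() decides whether an entry is ready by comparing its low toggle bit against an expected value that flips on every wrap, and it stops early once the caller's budget is spent (a non-positive budget means drain everything). A standalone sketch of that consumption scheme, with an invented four-entry ring and payload encoding rather than the driver's SISL structures:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_ENTRIES 4
    #define TOGGLE_BIT   0x1ULL    /* stands in for SISL_RESP_HANDLE_T_BIT */

    struct ring {
        uint64_t entry[RING_ENTRIES];
        uint64_t *curr;
        uint64_t toggle;    /* value that marks an entry ready on this lap */
    };

    /* Consume ready entries, at most 'budget' when budget > 0, mirroring
     * how process_hrrq() bounds its walk; returns the number consumed. */
    static int consume(struct ring *r, int budget)
    {
        int n = 0;

        while ((*r->curr & TOGGLE_BIT) == r->toggle) {
            printf("payload=%llu\n", (unsigned long long)(*r->curr >> 1));

            if (r->curr < &r->entry[RING_ENTRIES - 1]) {
                r->curr++;
            } else {
                r->curr = &r->entry[0];
                r->toggle ^= TOGGLE_BIT;    /* wrapped: flip expectation */
            }

            n++;
            if (budget > 0 && n >= budget)
                break;
        }
        return n;
    }

    int main(void)
    {
        struct ring r = { { 0 }, NULL, TOGGLE_BIT };
        int i;

        r.curr = r.entry;
        for (i = 0; i < 3; i++)    /* producer posts three entries, lap one */
            r.entry[i] = ((uint64_t)(i + 10) << 1) | TOGGLE_BIT;

        printf("budgeted pass: %d\n", consume(&r, 2));  /* consumes 2 */
        printf("drain pass: %d\n", consume(&r, -1));    /* consumes 1 */
        return 0;
    }

The toggle trick lets the producer publish entries without a separate valid flag to clear: the consumer simply expects the opposite bit value on the next lap around the ring.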
+ */ +static irqreturn_t cxlflash_rrq_irq(int irq, void *data) +{ + struct hwq *hwq = (struct hwq *)data; + struct afu *afu = hwq->afu; + unsigned long hrrq_flags; + LIST_HEAD(doneq); + int num_entries = 0; + + spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags); + + if (afu_is_irqpoll_enabled(afu)) { + irq_poll_sched(&hwq->irqpoll); + spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); + return IRQ_HANDLED; } - afu->hrrq_curr = hrrq_curr; - afu->toggle = toggle; + num_entries = process_hrrq(hwq, &doneq, -1); + spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags); + + if (num_entries == 0) + return IRQ_NONE; + process_cmd_doneq(&doneq); return IRQ_HANDLED; } +/* + * Asynchronous interrupt information table + * + * NOTE: + * - Order matters here as this array is indexed by bit position. + * + * - The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro + * as complex and complains due to a lack of parentheses/braces. + */ +#define ASTATUS_FC(_a, _b, _c, _d) \ + { SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) } + +#define BUILD_SISL_ASTATUS_FC_PORT(_a) \ + ASTATUS_FC(_a, LINK_UP, "link up", 0), \ + ASTATUS_FC(_a, LINK_DN, "link down", 0), \ + ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST), \ + ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR), \ + ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \ + ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET), \ + ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0), \ + ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET) + +static const struct asyc_intr_info ainfo[] = { + BUILD_SISL_ASTATUS_FC_PORT(1), + BUILD_SISL_ASTATUS_FC_PORT(0), + BUILD_SISL_ASTATUS_FC_PORT(3), + BUILD_SISL_ASTATUS_FC_PORT(2) +}; + /** * cxlflash_async_err_irq() - interrupt handler for asynchronous errors * @irq: Interrupt number. 
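The rebuilt ainfo[] above is indexed directly by bit position instead of being searched linearly, which is why the BUILD_SISL_ASTATUS_FC_PORT expansions must appear in an order that mirrors the register's bit layout, and why the handler double-checks info->status against 1ULL << bit before trusting an entry. A reduced sketch of the same idea with an invented two-port, two-event layout:

    #include <stdint.h>
    #include <stdio.h>

    struct intr_info {
        uint64_t status;    /* must equal 1ULL << (its own index) */
        const char *desc;
        int port;
    };

    /* Table indexed by bit position: entry i describes status bit i. */
    static const struct intr_info ainfo[] = {
        { 1ULL << 0, "link down", 0 },
        { 1ULL << 1, "link up",   0 },
        { 1ULL << 2, "link down", 1 },
        { 1ULL << 3, "link up",   1 },
    };

    static void decode(uint64_t reg)
    {
        unsigned int bit;

        /* Open-coded stand-in for for_each_set_bit(). */
        for (bit = 0; bit < 64; bit++) {
            if (!(reg & (1ULL << bit)))
                continue;
            /* Same guards the handler applies before using the table. */
            if (bit >= sizeof(ainfo) / sizeof(ainfo[0]) ||
                ainfo[bit].status != 1ULL << bit) {
                fprintf(stderr, "bit %u: no table entry\n", bit);
                continue;
            }
            printf("FC port %d: %s\n", ainfo[bit].port, ainfo[bit].desc);
        }
    }

    int main(void)
    {
        decode((1ULL << 1) | (1ULL << 2));  /* port 0 up, port 1 down */
        return 0;
    }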
@@ -1218,20 +1374,22 @@ static irqreturn_t cxlflash_rrq_irq(int irq, void *data) */ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) { - struct afu *afu = (struct afu *)data; + struct hwq *hwq = (struct hwq *)data; + struct afu *afu = hwq->afu; struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; - u64 reg_unmasked; const struct asyc_intr_info *info; struct sisl_global_map __iomem *global = &afu->afu_map->global; + __be64 __iomem *fc_port_regs; + u64 reg_unmasked; u64 reg; + u64 bit; u8 port; - int i; reg = readq_be(&global->regs.aintr_status); reg_unmasked = (reg & SISL_ASTATUS_UNMASK); - if (reg_unmasked == 0) { + if (unlikely(reg_unmasked == 0)) { dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n", __func__, reg); goto out; @@ -1241,16 +1399,24 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) writeq_be(reg_unmasked, &global->regs.aintr_clear); /* Check each bit that is on */ - for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) { - info = find_ainfo(1ULL << i); - if (((reg_unmasked & 0x1) == 0) || !info) + for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) { + if (unlikely(bit >= ARRAY_SIZE(ainfo))) { + WARN_ON_ONCE(1); continue; + } + + info = &ainfo[bit]; + if (unlikely(info->status != 1ULL << bit)) { + WARN_ON_ONCE(1); + continue; + } port = info->port; + fc_port_regs = get_fc_port_regs(cfg, port); dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n", __func__, port, info->desc, - readq_be(&global->fc_regs[port][FC_STATUS / 8])); + readq_be(&fc_port_regs[FC_STATUS / 8])); /* * Do link reset first, some OTHER errors will set FC_ERROR @@ -1265,7 +1431,7 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) } if (info->action & CLR_FC_ERROR) { - reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]); + reg = readq_be(&fc_port_regs[FC_ERROR / 8]); /* * Since all errors are unmasked, FC_ERROR and FC_ERRCAP @@ -1275,8 +1441,8 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data) dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n", __func__, port, reg); - writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]); - writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]); + writeq_be(reg, &fc_port_regs[FC_ERROR / 8]); + writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]); } if (info->action & SCAN_HOST) { @@ -1292,16 +1458,18 @@ out: /** * start_context() - starts the master context * @cfg: Internal structure associated with the host. + * @index: Index of the hardware queue. * * Return: A success or failure value from CXL services. */ -static int start_context(struct cxlflash_cfg *cfg) +static int start_context(struct cxlflash_cfg *cfg, u32 index) { struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(cfg->afu, index); int rc = 0; - rc = cxl_start_context(cfg->mcctx, - cfg->afu->work.work_element_descriptor, + rc = cxl_start_context(hwq->ctx, + hwq->work.work_element_descriptor, NULL); dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); @@ -1311,7 +1479,7 @@ static int start_context(struct cxlflash_cfg *cfg) /** * read_vpd() - obtains the WWPNs from VPD * @cfg: Internal structure associated with the host.
- * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs + * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs * * Return: 0 on success, -errno on failure */ @@ -1324,7 +1492,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) ssize_t vpd_size; char vpd_data[CXLFLASH_VPD_LEN]; char tmp_buf[WWPN_BUF_LEN] = { 0 }; - char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" }; + char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" }; /* Get the VPD data from the device */ vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); @@ -1362,7 +1530,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) * because the conversion service requires that the ASCII * string be terminated. */ - for (k = 0; k < NUM_FC_PORTS; k++) { + for (k = 0; k < cfg->num_fc_ports; k++) { j = ro_size; i = ro_start + PCI_VPD_LRDT_TAG_SIZE; @@ -1391,6 +1559,8 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) rc = -ENODEV; goto out; } + + dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]); } out: @@ -1409,6 +1579,7 @@ static void init_pcr(struct cxlflash_cfg *cfg) { struct afu *afu = cfg->afu; struct sisl_ctrl_map __iomem *ctrl_map; + struct hwq *hwq; int i; for (i = 0; i < MAX_CONTEXT; i++) { @@ -1420,13 +1591,17 @@ static void init_pcr(struct cxlflash_cfg *cfg) writeq_be(0, &ctrl_map->ctx_cap); } - /* Copy frequently used fields into afu */ - afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx); - afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host; - afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl; + /* Copy frequently used fields into hwq */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx); + hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; + hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; - /* Program the Endian Control for the master context */ - writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl); + /* Program the Endian Control for the master context */ + writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); + } } /** @@ -1437,7 +1612,10 @@ static int init_global(struct cxlflash_cfg *cfg) { struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; - u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */ + struct hwq *hwq; + struct sisl_host_map __iomem *hmap; + __be64 __iomem *fc_port_regs; + u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ int i = 0, num_ports = 0; int rc = 0; u64 reg; @@ -1448,16 +1626,18 @@ static int init_global(struct cxlflash_cfg *cfg) goto out; } - dev_dbg(dev, "%s: wwpn0=%016llx wwpn1=%016llx\n", - __func__, wwpn[0], wwpn[1]); + /* Set up RRQ and SQ in HWQ for master issued cmds */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + hmap = hwq->host_map; - /* Set up RRQ and SQ in AFU for master issued cmds */ - writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start); - writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end); + writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); + writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); - if (afu_is_sq_cmd_mode(afu)) { - writeq_be((u64)afu->hsq_start, &afu->host_map->sq_start); - writeq_be((u64)afu->hsq_end, &afu->host_map->sq_end); + if (afu_is_sq_cmd_mode(afu)) { + writeq_be((u64)hwq->hsq_start, &hmap->sq_start); + writeq_be((u64)hwq->hsq_end, &hmap->sq_end); + } } /* AFU configuration */ @@ -1473,26 +1653,25 @@ static int init_global(struct cxlflash_cfg *cfg) if (afu->internal_lun) { /* Only use port 0 */ writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); - 
num_ports = NUM_FC_PORTS - 1; + num_ports = 0; } else { - writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel); - num_ports = NUM_FC_PORTS; + writeq_be(PORT_MASK(cfg->num_fc_ports), + &afu->afu_map->global.regs.afu_port_sel); + num_ports = cfg->num_fc_ports; } for (i = 0; i < num_ports; i++) { + fc_port_regs = get_fc_port_regs(cfg, i); + /* Unmask all errors (but they are still masked at AFU) */ - writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]); + writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); /* Clear CRC error cnt & set a threshold */ - (void)readq_be(&afu->afu_map->global. - fc_regs[i][FC_CNT_CRCERR / 8]); - writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i] - [FC_CRC_THRESH / 8]); + (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); + writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); /* Set WWPNs. If already programmed, wwpn[i] is 0 */ if (wwpn[i] != 0) - afu_set_wwpn(afu, i, - &afu->afu_map->global.fc_regs[i][0], - wwpn[i]); + afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]); /* Programming WWPN back to back causes additional * offline/online transitions and a PLOGI */ @@ -1502,11 +1681,15 @@ static int init_global(struct cxlflash_cfg *cfg) /* Set up master's own CTX_CAP to allow real mode, host translation */ /* tables, afu cmds and read/write GSCSI cmds. */ /* First, unlock ctx_cap write by reading mbox */ - (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */ - writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | - SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | - SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), - &afu->ctrl_map->ctx_cap); + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ + writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | + SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | + SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), + &hwq->ctrl_map->ctx_cap); + } /* Initialize heartbeat */ afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); out: @@ -1521,28 +1704,42 @@ static int start_afu(struct cxlflash_cfg *cfg) { struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; + struct hwq *hwq; int rc = 0; + int i; init_pcr(cfg); - /* After an AFU reset, RRQ entries are stale, clear them */ - memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry)); + /* Initialize each HWQ */ + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); - /* Initialize RRQ pointers */ - afu->hrrq_start = &afu->rrq_entry[0]; - afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1]; - afu->hrrq_curr = afu->hrrq_start; - afu->toggle = 1; + /* After an AFU reset, RRQ entries are stale, clear them */ + memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); - /* Initialize SQ */ - if (afu_is_sq_cmd_mode(afu)) { - memset(&afu->sq, 0, sizeof(afu->sq)); - afu->hsq_start = &afu->sq[0]; - afu->hsq_end = &afu->sq[NUM_SQ_ENTRY - 1]; - afu->hsq_curr = afu->hsq_start; + /* Initialize RRQ pointers */ + hwq->hrrq_start = &hwq->rrq_entry[0]; + hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; + hwq->hrrq_curr = hwq->hrrq_start; + hwq->toggle = 1; + spin_lock_init(&hwq->hrrq_slock); + + /* Initialize SQ */ + if (afu_is_sq_cmd_mode(afu)) { + memset(&hwq->sq, 0, sizeof(hwq->sq)); + hwq->hsq_start = &hwq->sq[0]; + hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; + hwq->hsq_curr = hwq->hsq_start; + + spin_lock_init(&hwq->hsq_slock); + atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1); + } + + /* Initialize IRQ poll */ + if (afu_is_irqpoll_enabled(afu)) + 
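/* The weight handed to irq_poll_init() below is the per-poll budget: it caps how many RRQ entries cxlflash_irqpoll() may reap in one softirq pass before the poller yields the CPU. */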
irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, + cxlflash_irqpoll); - spin_lock_init(&afu->hsq_slock); - atomic_set(&afu->hsq_credits, NUM_SQ_ENTRY - 1); } rc = init_global(cfg); @@ -1554,18 +1751,21 @@ static int start_afu(struct cxlflash_cfg *cfg) /** * init_intr() - setup interrupt handlers for the master context * @cfg: Internal structure associated with the host. + * @hwq: Hardware queue to initialize. * * Return: 0 on success, -errno on failure */ static enum undo_level init_intr(struct cxlflash_cfg *cfg, - struct cxl_context *ctx) + struct hwq *hwq) { - struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; + struct cxl_context *ctx = hwq->ctx; int rc = 0; enum undo_level level = UNDO_NOOP; + bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); + int num_irqs = is_primary_hwq ? 3 : 2; - rc = cxl_allocate_afu_irqs(ctx, 3); + rc = cxl_allocate_afu_irqs(ctx, num_irqs); if (unlikely(rc)) { dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n", __func__, rc); @@ -1573,7 +1773,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, goto out; } - rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu, + rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq, "SISL_MSI_SYNC_ERROR"); if (unlikely(rc <= 0)) { dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__); @@ -1581,7 +1781,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, goto out; } - rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu, + rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq, "SISL_MSI_RRQ_UPDATED"); if (unlikely(rc <= 0)) { dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__); @@ -1589,7 +1789,11 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg, goto out; } - rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu, + /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ + if (!is_primary_hwq) + goto out; + + rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq, "SISL_MSI_ASYNC_ERROR"); if (unlikely(rc <= 0)) { dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__); @@ -1603,55 +1807,106 @@ out: /** * init_mc() - create and register as the master context * @cfg: Internal structure associated with the host. + * index: HWQ Index of the master context. 
* * Return: 0 on success, -errno on failure */ -static int init_mc(struct cxlflash_cfg *cfg) +static int init_mc(struct cxlflash_cfg *cfg, u32 index) { struct cxl_context *ctx; struct device *dev = &cfg->dev->dev; + struct hwq *hwq = get_hwq(cfg->afu, index); int rc = 0; enum undo_level level; - ctx = cxl_get_context(cfg->dev); + hwq->afu = cfg->afu; + hwq->index = index; + + if (index == PRIMARY_HWQ) + ctx = cxl_get_context(cfg->dev); + else + ctx = cxl_dev_context_init(cfg->dev); if (unlikely(!ctx)) { rc = -ENOMEM; - goto ret; + goto err1; } - cfg->mcctx = ctx; + + WARN_ON(hwq->ctx); + hwq->ctx = ctx; /* Set it up as a master with the CXL */ cxl_set_master(ctx); - /* During initialization reset the AFU to start from a clean slate */ - rc = cxl_afu_reset(cfg->mcctx); - if (unlikely(rc)) { - dev_err(dev, "%s: AFU reset failed rc=%d\n", __func__, rc); - goto ret; + /* Reset AFU when initializing primary context */ + if (index == PRIMARY_HWQ) { + rc = cxl_afu_reset(ctx); + if (unlikely(rc)) { + dev_err(dev, "%s: AFU reset failed rc=%d\n", + __func__, rc); + goto err1; + } } - level = init_intr(cfg, ctx); + level = init_intr(cfg, hwq); if (unlikely(level)) { dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc); - goto out; + goto err2; } /* This performs the equivalent of the CXL_IOCTL_START_WORK. * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process * element (pe) that is embedded in the context (ctx) */ - rc = start_context(cfg); + rc = start_context(cfg, index); if (unlikely(rc)) { dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc); level = UNMAP_THREE; - goto out; + goto err2; } -ret: + +out: dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; -out: - term_intr(cfg, level); - goto ret; +err2: + term_intr(cfg, level, index); + if (index != PRIMARY_HWQ) + cxl_release_context(ctx); +err1: + hwq->ctx = NULL; + goto out; +} + +/** + * get_num_afu_ports() - determines and configures the number of AFU ports + * @cfg: Internal structure associated with the host. + * + * This routine determines the number of AFU ports by converting the global + * port selection mask. The converted value is only valid following an AFU + * reset (explicit or power-on). This routine must be invoked shortly after + * mapping as other routines are dependent on the number of ports during the + * initialization sequence. + * + * To support legacy AFUs that might not have reflected an initial global + * port mask (value read is 0), default to the number of ports originally + * supported by the cxlflash driver (2) before hardware with other port + * offerings was introduced. 
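In other words, the port count is one past the highest bit set in the mask, clamped to the maximum the driver supports. A userspace sketch of the conversion, with ilog2() open-coded as the index of the highest set bit:

    #include <stdint.h>
    #include <stdio.h>

    #define LEGACY_FC_PORTS 2
    #define MAX_FC_PORTS    4

    static int ilog2_u64(uint64_t v)    /* highest set bit, v != 0 */
    {
        int n = -1;

        while (v) {
            v >>= 1;
            n++;
        }
        return n;
    }

    static int ports_from_mask(uint64_t port_mask)
    {
        int n;

        if (port_mask == 0)     /* legacy AFU: mask never reflected */
            return LEGACY_FC_PORTS;

        n = ilog2_u64(port_mask) + 1;
        return n < MAX_FC_PORTS ? n : MAX_FC_PORTS;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               ports_from_mask(0x0),    /* 2, the legacy default */
               ports_from_mask(0x3),    /* 2 */
               ports_from_mask(0xf));   /* 4 */
        return 0;
    }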
+ */ +static void get_num_afu_ports(struct cxlflash_cfg *cfg) +{ + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + u64 port_mask; + int num_fc_ports = LEGACY_FC_PORTS; + + port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); + if (port_mask != 0ULL) + num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); + + dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n", + __func__, port_mask, num_fc_ports); + + cfg->num_fc_ports = num_fc_ports; + cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); } /** @@ -1669,18 +1924,24 @@ static int init_afu(struct cxlflash_cfg *cfg) int rc = 0; struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; + struct hwq *hwq; + int i; cxl_perst_reloads_same_image(cfg->cxl_afu, true); - rc = init_mc(cfg); - if (rc) { - dev_err(dev, "%s: init_mc failed rc=%d\n", - __func__, rc); - goto out; + afu->num_hwqs = afu->desired_hwqs; + for (i = 0; i < afu->num_hwqs; i++) { + rc = init_mc(cfg, i); + if (rc) { + dev_err(dev, "%s: init_mc failed rc=%d index=%d\n", + __func__, rc, i); + goto err1; + } } - /* Map the entire MMIO space of the AFU */ - afu->afu_map = cxl_psa_map(cfg->mcctx); + /* Map the entire MMIO space of the AFU using the first context */ + hwq = get_hwq(afu, PRIMARY_HWQ); + afu->afu_map = cxl_psa_map(hwq->ctx); if (!afu->afu_map) { dev_err(dev, "%s: cxl_psa_map failed\n", __func__); rc = -ENOMEM; @@ -1711,6 +1972,8 @@ static int init_afu(struct cxlflash_cfg *cfg) dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__, afu->version, afu->interface_version); + get_num_afu_ports(cfg); + rc = start_afu(cfg); if (rc) { dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc); @@ -1718,8 +1981,12 @@ static int init_afu(struct cxlflash_cfg *cfg) } afu_err_intr_init(cfg->afu); - spin_lock_init(&afu->rrin_slock); - afu->room = readq_be(&afu->host_map->cmd_room); + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + spin_lock_init(&hwq->rrin_slock); + hwq->room = readq_be(&hwq->host_map->cmd_room); + } /* Restore the LUN mappings */ cxlflash_restore_luntable(cfg); @@ -1728,8 +1995,10 @@ out: return rc; err1: - term_intr(cfg, UNMAP_THREE); - term_mc(cfg); + for (i = afu->num_hwqs - 1; i >= 0; i--) { + term_intr(cfg, UNMAP_THREE, i); + term_mc(cfg, i); + } goto out; } @@ -1761,6 +2030,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; struct afu_cmd *cmd = NULL; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); char *buf = NULL; int rc = 0; static DEFINE_MUTEX(sync_active); @@ -1783,11 +2053,12 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u, cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); init_completion(&cmd->cevent); cmd->parent = afu; + cmd->hwq_index = hwq->index; dev_dbg(dev, "%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u); cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; - cmd->rcb.ctx_id = afu->ctx_hndl; + cmd->rcb.ctx_id = hwq->ctx_hndl; cmd->rcb.msi = SISL_MSI_RRQ_UPDATED; cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT; @@ -1971,22 +2242,30 @@ static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) /** * cxlflash_show_port_status() - queries and presents the current port status * @port: Desired port for status reporting. - * @afu: AFU owning the specified port. + * @cfg: Internal structure associated with the host. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. * - * Return: The size of the ASCII string returned in @buf. 
+ * Return: The size of the ASCII string returned in @buf or -EINVAL. */ -static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf) +static ssize_t cxlflash_show_port_status(u32 port, + struct cxlflash_cfg *cfg, + char *buf) { + struct device *dev = &cfg->dev->dev; char *disp_status; u64 status; - __be64 __iomem *fc_regs; + __be64 __iomem *fc_port_regs; - if (port >= NUM_FC_PORTS) - return 0; + WARN_ON(port >= MAX_FC_PORTS); + + if (port >= cfg->num_fc_ports) { + dev_info(dev, "%s: Port %d not supported on this card.\n", + __func__, port); + return -EINVAL; + } - fc_regs = &afu->afu_map->global.fc_regs[port][0]; - status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); + fc_port_regs = get_fc_port_regs(cfg, port); + status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]); status &= FC_MTIP_STATUS_MASK; if (status == FC_MTIP_STATUS_ONLINE) @@ -2012,9 +2291,8 @@ static ssize_t port0_show(struct device *dev, char *buf) { struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - return cxlflash_show_port_status(0, afu, buf); + return cxlflash_show_port_status(0, cfg, buf); } /** @@ -2030,9 +2308,42 @@ static ssize_t port1_show(struct device *dev, char *buf) { struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - return cxlflash_show_port_status(1, afu, buf); + return cxlflash_show_port_status(1, cfg, buf); +} + +/** + * port2_show() - queries and presents the current status of port 2 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port2_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_status(2, cfg, buf); +} + +/** + * port3_show() - queries and presents the current status of port 3 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port3_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_status(3, cfg, buf); } /** @@ -2090,12 +2401,13 @@ static ssize_t lun_mode_store(struct device *dev, /* * When configured for internal LUN, there is only one channel, - * channel number 0, else there will be 2 (default). + * channel number 0, else there will be one less than the number + * of fc ports for this card. */ if (afu->internal_lun) shost->max_channel = 0; else - shost->max_channel = NUM_FC_PORTS - 1; + shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports); afu_reset(cfg); scsi_scan_host(cfg->host); @@ -2121,27 +2433,34 @@ static ssize_t ioctl_version_show(struct device *dev, /** * cxlflash_show_port_lun_table() - queries and presents the port LUN table * @port: Desired port for status reporting. - * @afu: AFU owning the specified port. + * @cfg: Internal structure associated with the host. * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. * - * Return: The size of the ASCII string returned in @buf. + * Return: The size of the ASCII string returned in @buf or -EINVAL. 
 */ static ssize_t cxlflash_show_port_lun_table(u32 port, - struct afu *afu, + struct cxlflash_cfg *cfg, char *buf) { + struct device *dev = &cfg->dev->dev; + __be64 __iomem *fc_port_luns; int i; ssize_t bytes = 0; - __be64 __iomem *fc_port; - if (port >= NUM_FC_PORTS) - return 0; + WARN_ON(port >= MAX_FC_PORTS); - fc_port = &afu->afu_map->global.fc_port[port][0]; + if (port >= cfg->num_fc_ports) { + dev_info(dev, "%s: Port %d not supported on this card.\n", + __func__, port); + return -EINVAL; + } + + fc_port_luns = get_fc_port_luns(cfg, port); for (i = 0; i < CXLFLASH_NUM_VLUNS; i++) bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, - "%03d: %016llx\n", i, readq_be(&fc_port[i])); + "%03d: %016llx\n", + i, readq_be(&fc_port_luns[i])); return bytes; } @@ -2158,9 +2477,8 @@ static ssize_t port0_lun_table_show(struct device *dev, char *buf) { struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); - struct afu *afu = cfg->afu; - return cxlflash_show_port_lun_table(0, afu, buf); + return cxlflash_show_port_lun_table(0, cfg, buf); } /** @@ -2176,9 +2494,272 @@ static ssize_t port1_lun_table_show(struct device *dev, char *buf) { struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_lun_table(1, cfg, buf); +} + +/** + * port2_lun_table_show() - presents the current LUN table of port 2 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port2_lun_table_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_lun_table(2, cfg, buf); +} + +/** + * port3_lun_table_show() - presents the current LUN table of port 3 + * @dev: Generic device associated with the host owning the port. + * @attr: Device attribute representing the port. + * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t port3_lun_table_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + + return cxlflash_show_port_lun_table(3, cfg, buf); +} + +/** + * irqpoll_weight_show() - presents the current IRQ poll weight for the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the IRQ poll weight. + * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll + * weight in ASCII. + * + * An IRQ poll weight of 0 indicates polling is disabled. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t irqpoll_weight_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + struct afu *afu = cfg->afu; + + return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight); +} + +/** + * irqpoll_weight_store() - sets the current IRQ poll weight for the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the IRQ poll weight. + * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll + * weight in ASCII. + * @count: Length of data residing in @buf. + * + * An IRQ poll weight of 0 indicates polling is disabled. + * + * Return: @count on success, -EINVAL on failure.
 + */ +static ssize_t irqpoll_weight_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + struct device *cfgdev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + struct hwq *hwq; + u32 weight; + int rc, i; + + rc = kstrtouint(buf, 10, &weight); + if (rc) + return -EINVAL; + + if (weight > 256) { + dev_info(cfgdev, + "Invalid IRQ poll weight. It must be 256 or less.\n"); + return -EINVAL; + } + + if (weight == afu->irqpoll_weight) { + dev_info(cfgdev, + "Specified IRQ poll weight is the same as current.\n"); + return -EINVAL; + } + + if (afu_is_irqpoll_enabled(afu)) { + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + irq_poll_disable(&hwq->irqpoll); + } + } + + afu->irqpoll_weight = weight; + + if (weight > 0) { + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + + irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll); + } + } + + return count; +} + +/** + * num_hwqs_show() - presents the number of hardware queues for the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the number of hardware queues. + * @buf: Buffer of length PAGE_SIZE to report back the number of hardware + * queues in ASCII. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t num_hwqs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); struct afu *afu = cfg->afu; - return cxlflash_show_port_lun_table(1, afu, buf); + return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs); +} + +/** + * num_hwqs_store() - sets the number of hardware queues for the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the number of hardware queues. + * @buf: Buffer of length PAGE_SIZE containing the number of hardware + * queues in ASCII. + * @count: Length of data residing in @buf. + * + * n > 0: num_hwqs = n + * n = 0: num_hwqs = num_online_cpus() + * n < 0: num_hwqs = num_online_cpus() / abs(n) + * + * Return: @count on success, -EINVAL on failure. + */ +static ssize_t num_hwqs_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + struct afu *afu = cfg->afu; + int rc; + int nhwqs, num_hwqs; + + rc = kstrtoint(buf, 10, &nhwqs); + if (rc) + return -EINVAL; + + if (nhwqs >= 1) + num_hwqs = nhwqs; + else if (nhwqs == 0) + num_hwqs = num_online_cpus(); + else + num_hwqs = num_online_cpus() / abs(nhwqs); + + afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS); + WARN_ON_ONCE(afu->desired_hwqs == 0); + +retry: + switch (cfg->state) { + case STATE_NORMAL: + cfg->state = STATE_RESET; + drain_ioctls(cfg); + cxlflash_mark_contexts_error(cfg); + rc = afu_reset(cfg); + if (rc) + cfg->state = STATE_FAILTERM; + else + cfg->state = STATE_NORMAL; + wake_up_all(&cfg->reset_waitq); + break; + case STATE_RESET: + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); + if (cfg->state == STATE_NORMAL) + goto retry; + default: + /* Ideally should not happen */ + dev_err(dev, "%s: Device is not ready, state=%d\n", + __func__, cfg->state); + break; + } + + return count; +} + +static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
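The n-to-queue-count mapping that num_hwqs_store() implements can be seen in isolation below; the sketch mirrors the min() clamp to CXLFLASH_MAX_HWQS, with an invented CPU count and cap:

    #include <stdio.h>

    /* Mirrors num_hwqs_store()'s mapping; CPU count and cap are invented. */
    static int map_num_hwqs(int n, int online_cpus, int max_hwqs)
    {
        int hwqs;

        if (n >= 1)
            hwqs = n;                   /* explicit queue count */
        else if (n == 0)
            hwqs = online_cpus;         /* one queue per CPU */
        else
            hwqs = online_cpus / -n;    /* a fraction of the CPUs */

        return hwqs < max_hwqs ? hwqs : max_hwqs;   /* min(hwqs, cap) */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               map_num_hwqs(4, 16, 8),      /* 4 */
               map_num_hwqs(0, 16, 8),      /* 8, clamped from 16 */
               map_num_hwqs(-4, 16, 8));    /* 4 */
        return 0;
    }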
 + +/** + * hwq_mode_show() - presents the HWQ steering mode for the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the HWQ steering mode. + * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode + * as a character string. + * + * Return: The size of the ASCII string returned in @buf. + */ +static ssize_t hwq_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); + struct afu *afu = cfg->afu; + + return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]); +} + +/** + * hwq_mode_store() - sets the HWQ steering mode for the host + * @dev: Generic device associated with the host. + * @attr: Device attribute representing the HWQ steering mode. + * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode + * as a character string. + * @count: Length of data residing in @buf. + * + * rr = Round-Robin + * tag = Block MQ Tagging + * cpu = CPU Affinity + * + * Return: @count on success, -EINVAL on failure. + */ +static ssize_t hwq_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct cxlflash_cfg *cfg = shost_priv(shost); + struct device *cfgdev = &cfg->dev->dev; + struct afu *afu = cfg->afu; + int i; + u32 mode = MAX_HWQ_MODE; + + for (i = 0; i < MAX_HWQ_MODE; i++) { + if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) { + mode = i; + break; + } + } + + if (mode >= MAX_HWQ_MODE) { + dev_info(cfgdev, "Invalid HWQ steering mode.\n"); + return -EINVAL; + } + + if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) { + dev_info(cfgdev, "SCSI-MQ is not enabled, use a different " + "HWQ steering mode.\n"); + return -EINVAL; + } + + afu->hwq_mode = mode; + + return count; } /** @@ -2203,18 +2784,32 @@ static ssize_t mode_show(struct device *dev, */ static DEVICE_ATTR_RO(port0); static DEVICE_ATTR_RO(port1); +static DEVICE_ATTR_RO(port2); +static DEVICE_ATTR_RO(port3); static DEVICE_ATTR_RW(lun_mode); static DEVICE_ATTR_RO(ioctl_version); static DEVICE_ATTR_RO(port0_lun_table); static DEVICE_ATTR_RO(port1_lun_table); +static DEVICE_ATTR_RO(port2_lun_table); +static DEVICE_ATTR_RO(port3_lun_table); +static DEVICE_ATTR_RW(irqpoll_weight); +static DEVICE_ATTR_RW(num_hwqs); +static DEVICE_ATTR_RW(hwq_mode); static struct device_attribute *cxlflash_host_attrs[] = { &dev_attr_port0, &dev_attr_port1, + &dev_attr_port2, + &dev_attr_port3, &dev_attr_lun_mode, &dev_attr_ioctl_version, &dev_attr_port0_lun_table, &dev_attr_port1_lun_table, + &dev_attr_port2_lun_table, + &dev_attr_port3_lun_table, + &dev_attr_irqpoll_weight, + &dev_attr_num_hwqs, + &dev_attr_hwq_mode, NULL }; @@ -2292,6 +2887,7 @@ static void cxlflash_worker_thread(struct work_struct *work) work_q); struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; + __be64 __iomem *fc_port_regs; int port; ulong lock_flags; @@ -2312,8 +2908,8 @@ static void cxlflash_worker_thread(struct work_struct *work) lock_flags); /* The reset can block... */ - afu_link_reset(afu, port, - &afu->afu_map->global.fc_regs[port][0]); + fc_port_regs = get_fc_port_regs(cfg, port); + afu_link_reset(afu, port, fc_port_regs); spin_lock_irqsave(cfg->host->host_lock, lock_flags); } @@ -2331,6 +2927,15 @@ static void cxlflash_worker_thread(struct work_struct *work) * @pdev: PCI device associated with the host. * @dev_id: PCI device id associated with device. * + * The device will initially start out in a 'probing' state and + * transition to the 'normal' state at the end of a successful + * probe.
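hwq_mode_store() above matches the written string by prefix (strncmp against each table entry), which conveniently tolerates the trailing newline that sysfs writes usually carry. The same parse in standalone form, using the table's actual mode names:

    #include <stdio.h>
    #include <string.h>

    static const char *mode_name[] = { "rr", "tag", "cpu" };
    #define NUM_MODES (sizeof(mode_name) / sizeof(mode_name[0]))

    /* Prefix-compare against each known name, as hwq_mode_store() does. */
    static int parse_mode(const char *buf)
    {
        unsigned int i;

        for (i = 0; i < NUM_MODES; i++)
            if (!strncmp(mode_name[i], buf, strlen(mode_name[i])))
                return i;
        return -1;              /* invalid mode */
    }

    int main(void)
    {
        printf("%d %d %d\n", parse_mode("rr\n"), parse_mode("cpu"),
               parse_mode("bogus"));   /* 0 2 -1 */
        return 0;
    }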
Should an EEH event occur during probe, the notification + * thread (error_detected()) will wait until the probe handler + * is nearly complete. At that time, the device will be moved to + * a 'probed' state and the EEH thread woken up to drive the slot + * reset and recovery (device moves to 'normal' state). Meanwhile, + * the probe will be allowed to exit successfully. + * * Return: 0 on success, -errno on failure */ static int cxlflash_probe(struct pci_dev *pdev, @@ -2341,6 +2946,7 @@ static int cxlflash_probe(struct pci_dev *pdev, struct device *dev = &pdev->dev; struct dev_dependent_vals *ddv; int rc = 0; + int k; dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n", __func__, pdev->irq); @@ -2357,7 +2963,6 @@ static int cxlflash_probe(struct pci_dev *pdev, host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; - host->max_channel = NUM_FC_PORTS - 1; host->unique_id = host->host_no; host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; @@ -2376,14 +2981,16 @@ static int cxlflash_probe(struct pci_dev *pdev, cfg->cxl_fops = cxlflash_cxl_fops; /* - * The promoted LUNs move to the top of the LUN table. The rest stay - * on the bottom half. The bottom half grows from the end - * (index = 255), whereas the top half grows from the beginning - * (index = 0). + * Promoted LUNs move to the top of the LUN table. The rest stay on + * the bottom half. The bottom half grows from the end (index = 255), + * whereas the top half grows from the beginning (index = 0). + * + * Initialize the last LUN index for all possible ports. */ - cfg->promote_lun_index = 0; - cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1; - cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1; + cfg->promote_lun_index = 0; + + for (k = 0; k < MAX_FC_PORTS; k++) + cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; cfg->dev_id = (struct pci_device_id *)dev_id; @@ -2412,7 +3019,7 @@ static int cxlflash_probe(struct pci_dev *pdev, cfg->init_state = INIT_STATE_PCI; rc = init_afu(cfg); - if (rc) { + if (rc && !wq_has_sleeper(&cfg->reset_waitq)) { dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc); goto out_remove; } @@ -2425,6 +3032,11 @@ static int cxlflash_probe(struct pci_dev *pdev, } cfg->init_state = INIT_STATE_SCSI; + if (wq_has_sleeper(&cfg->reset_waitq)) { + cfg->state = STATE_PROBED; + wake_up_all(&cfg->reset_waitq); + } else + cfg->state = STATE_NORMAL; out: dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; @@ -2455,7 +3067,8 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, switch (state) { case pci_channel_io_frozen: - wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); + wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && + cfg->state != STATE_PROBING); if (cfg->state == STATE_FAILTERM) return PCI_ERS_RESULT_DISCONNECT; @@ -2546,6 +3159,7 @@ static struct pci_driver cxlflash_driver = { */ static int __init init_cxlflash(void) { + check_sizes(); cxlflash_list_init(); return pci_register_driver(&cxlflash_driver); diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 0be2261e6312..49657f1f409e 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -37,8 +37,6 @@ #define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ) -#define NUM_FC_PORTS CXLFLASH_NUM_FC_PORTS /* ports per AFU */ - /* FC defines */ #define FC_MTIP_CMDCONFIG 0x010 #define FC_MTIP_STATUS 0x018 diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h index a6e48a893fef..a768360d2fa6 100644 --- 
a/drivers/scsi/cxlflash/sislite.h +++ b/drivers/scsi/cxlflash/sislite.h @@ -90,15 +90,15 @@ struct sisl_rc { #define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */ #define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */ #define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra - may retry if afu_retry is off - possible on master exit + * may retry if afu_retry is off + * possible on master exit */ #define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */ #define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */ #define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */ #define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra - may retry if afu_retry is off - possible on master exit + * may retry if afu_retry is off + * possible on master exit */ #define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */ @@ -111,11 +111,11 @@ struct sisl_rc { */ #define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */ #define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or - afu reset/master restart + * afu reset/master restart */ #define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */ #define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra - may retry if afu_retry is off + * may retry if afu_retry is off */ u8 scsi_rc; /* SCSI status byte, retry as appropriate */ @@ -149,8 +149,9 @@ struct sisl_rc { #define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */ #define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */ #define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI - reported len, possibly due to dropped - frames */ + * reported len, possibly due to dropped + * frames + */ #define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */ }; @@ -227,10 +228,10 @@ struct sisl_ioasa { /* per context host transport MMIO */ struct sisl_host_map { - __be64 endian_ctrl; /* Per context Endian Control. The AFU will - * operate on whatever the context is of the - * host application. - */ + __be64 endian_ctrl; /* Per context Endian Control. The AFU will + * operate on whatever the context is of the + * host application. + */ __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl. 
* Only recovery in a PERM_ERR is a context @@ -292,28 +293,54 @@ struct sisl_ctrl_map { /* single copy global regs */ struct sisl_global_regs { __be64 aintr_status; - /* In cxlflash, each FC port/link gets a byte of status */ -#define SISL_ASTATUS_FC0_OTHER 0x8000ULL /* b48, other err, - FC_ERRCAP[31:20] */ -#define SISL_ASTATUS_FC0_LOGO 0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO - while logged in */ -#define SISL_ASTATUS_FC0_CRC_T 0x2000ULL /* b50, CRC threshold exceeded */ -#define SISL_ASTATUS_FC0_LOGI_R 0x1000ULL /* b51, login state machine timed out - and retrying */ -#define SISL_ASTATUS_FC0_LOGI_F 0x0800ULL /* b52, login failed, - FC_ERROR[19:0] */ -#define SISL_ASTATUS_FC0_LOGI_S 0x0400ULL /* b53, login succeeded */ -#define SISL_ASTATUS_FC0_LINK_DN 0x0200ULL /* b54, link online to offline */ -#define SISL_ASTATUS_FC0_LINK_UP 0x0100ULL /* b55, link offline to online */ - -#define SISL_ASTATUS_FC1_OTHER 0x0080ULL /* b56 */ -#define SISL_ASTATUS_FC1_LOGO 0x0040ULL /* b57 */ -#define SISL_ASTATUS_FC1_CRC_T 0x0020ULL /* b58 */ -#define SISL_ASTATUS_FC1_LOGI_R 0x0010ULL /* b59 */ -#define SISL_ASTATUS_FC1_LOGI_F 0x0008ULL /* b60 */ -#define SISL_ASTATUS_FC1_LOGI_S 0x0004ULL /* b61 */ -#define SISL_ASTATUS_FC1_LINK_DN 0x0002ULL /* b62 */ -#define SISL_ASTATUS_FC1_LINK_UP 0x0001ULL /* b63 */ + /* + * In cxlflash, FC port/link are arranged in port pairs, each + * gets a byte of status: + * + * *_OTHER: other err, FC_ERRCAP[31:20] + * *_LOGO: target sent FLOGI/PLOGI/LOGO while logged in + * *_CRC_T: CRC threshold exceeded + * *_LOGI_R: login state machine timed out and retrying + * *_LOGI_F: login failed, FC_ERROR[19:0] + * *_LOGI_S: login succeeded + * *_LINK_DN: link online to offline + * *_LINK_UP: link offline to online + */ +#define SISL_ASTATUS_FC2_OTHER 0x80000000ULL /* b32 */ +#define SISL_ASTATUS_FC2_LOGO 0x40000000ULL /* b33 */ +#define SISL_ASTATUS_FC2_CRC_T 0x20000000ULL /* b34 */ +#define SISL_ASTATUS_FC2_LOGI_R 0x10000000ULL /* b35 */ +#define SISL_ASTATUS_FC2_LOGI_F 0x08000000ULL /* b36 */ +#define SISL_ASTATUS_FC2_LOGI_S 0x04000000ULL /* b37 */ +#define SISL_ASTATUS_FC2_LINK_DN 0x02000000ULL /* b38 */ +#define SISL_ASTATUS_FC2_LINK_UP 0x01000000ULL /* b39 */ + +#define SISL_ASTATUS_FC3_OTHER 0x00800000ULL /* b40 */ +#define SISL_ASTATUS_FC3_LOGO 0x00400000ULL /* b41 */ +#define SISL_ASTATUS_FC3_CRC_T 0x00200000ULL /* b42 */ +#define SISL_ASTATUS_FC3_LOGI_R 0x00100000ULL /* b43 */ +#define SISL_ASTATUS_FC3_LOGI_F 0x00080000ULL /* b44 */ +#define SISL_ASTATUS_FC3_LOGI_S 0x00040000ULL /* b45 */ +#define SISL_ASTATUS_FC3_LINK_DN 0x00020000ULL /* b46 */ +#define SISL_ASTATUS_FC3_LINK_UP 0x00010000ULL /* b47 */ + +#define SISL_ASTATUS_FC0_OTHER 0x00008000ULL /* b48 */ +#define SISL_ASTATUS_FC0_LOGO 0x00004000ULL /* b49 */ +#define SISL_ASTATUS_FC0_CRC_T 0x00002000ULL /* b50 */ +#define SISL_ASTATUS_FC0_LOGI_R 0x00001000ULL /* b51 */ +#define SISL_ASTATUS_FC0_LOGI_F 0x00000800ULL /* b52 */ +#define SISL_ASTATUS_FC0_LOGI_S 0x00000400ULL /* b53 */ +#define SISL_ASTATUS_FC0_LINK_DN 0x00000200ULL /* b54 */ +#define SISL_ASTATUS_FC0_LINK_UP 0x00000100ULL /* b55 */ + +#define SISL_ASTATUS_FC1_OTHER 0x00000080ULL /* b56 */ +#define SISL_ASTATUS_FC1_LOGO 0x00000040ULL /* b57 */ +#define SISL_ASTATUS_FC1_CRC_T 0x00000020ULL /* b58 */ +#define SISL_ASTATUS_FC1_LOGI_R 0x00000010ULL /* b59 */ +#define SISL_ASTATUS_FC1_LOGI_F 0x00000008ULL /* b60 */ +#define SISL_ASTATUS_FC1_LOGI_S 0x00000004ULL /* b61 */ +#define SISL_ASTATUS_FC1_LINK_DN 0x00000002ULL /* b62 */ +#define 
SISL_ASTATUS_FC1_LINK_UP 0x00000001ULL /* b63 */ #define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */ #define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK) @@ -325,7 +352,7 @@ struct sisl_global_regs { #define SISL_STATUS_SHUTDOWN_ACTIVE 0x0000000000000010ULL #define SISL_STATUS_SHUTDOWN_COMPLETE 0x0000000000000020ULL -#define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */ +#define SISL_ASTATUS_UNMASK 0xFFFFFFFFULL /* 1 means unmasked */ #define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */ __be64 aintr_clear; @@ -367,9 +394,18 @@ struct sisl_global_regs { #define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL }; -#define CXLFLASH_NUM_FC_PORTS 2 -#define CXLFLASH_MAX_CONTEXT 512 /* how many contexts per afu */ -#define CXLFLASH_NUM_VLUNS 512 +#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */ +#define CXLFLASH_MAX_FC_BANKS 2 /* max # of banks supported */ +#define CXLFLASH_MAX_FC_PORTS (CXLFLASH_NUM_FC_PORTS_PER_BANK * \ + CXLFLASH_MAX_FC_BANKS) +#define CXLFLASH_MAX_CONTEXT 512 /* number of contexts per AFU */ +#define CXLFLASH_NUM_VLUNS 512 /* number of vluns per AFU/port */ +#define CXLFLASH_NUM_REGS 512 /* number of registers per port */ + +struct fc_port_bank { + __be64 fc_port_regs[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_REGS]; + __be64 fc_port_luns[CXLFLASH_NUM_FC_PORTS_PER_BANK][CXLFLASH_NUM_VLUNS]; +}; struct sisl_global_map { union { @@ -379,11 +415,9 @@ struct sisl_global_map { char page1[SIZE_4K]; /* page 1 */ - /* pages 2 & 3 */ - __be64 fc_regs[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS]; + struct fc_port_bank bank[CXLFLASH_MAX_FC_BANKS]; /* pages 2 - 9 */ - /* pages 4 & 5 (lun tbl) */ - __be64 fc_port[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS]; + /* pages 10 - 15 are reserved */ }; @@ -402,7 +436,7 @@ struct sisl_global_map { * | 64 KB Global | * | Trusted Process accessible | * +-------------------------------+ -*/ + */ struct cxlflash_afu_map { union { struct sisl_host_map host; @@ -478,7 +512,9 @@ struct sisl_rht_entry_f1 { #define PORT0 0x01U #define PORT1 0x02U -#define BOTH_PORTS (PORT0 | PORT1) +#define PORT2 0x04U +#define PORT3 0x08U +#define PORT_MASK(_n) ((1 << (_n)) - 1) /* AFU Sync Mode byte */ #define AFU_LW_SYNC 0x0U diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index b46fd2f45628..fe9f17a6268b 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -78,17 +78,18 @@ void cxlflash_free_errpage(void) * memory freed. This is accomplished by putting the contexts in error * state which will notify the user and let them 'drive' the tear down. * Meanwhile, this routine camps until all user contexts have been removed. + * + * Note that the main loop in this routine will always execute at least once + * to flush the reset_waitq. 
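With the registers regrouped into struct fc_port_bank, a global port number now decomposes into a bank and a slot within that bank. A sketch of the likely arithmetic behind helpers such as get_fc_port_regs() and get_fc_port_luns(); their bodies are not shown in this diff, so the decomposition below is an assumption about their shape:

    #include <stdio.h>

    #define CXLFLASH_NUM_FC_PORTS_PER_BANK 2
    #define CXLFLASH_MAX_FC_BANKS          2

    /* Hypothetical decomposition: e.g. port 3 lives in bank 1, slot 1. */
    static void locate_port(int port, int *bank, int *slot)
    {
        *bank = port / CXLFLASH_NUM_FC_PORTS_PER_BANK;
        *slot = port % CXLFLASH_NUM_FC_PORTS_PER_BANK;
    }

    int main(void)
    {
        int port, bank, slot;

        for (port = 0; port < 4; port++) {
            locate_port(port, &bank, &slot);
            printf("port %d -> bank %d slot %d\n", port, bank, slot);
        }
        return 0;
    }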
*/ void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg) { struct device *dev = &cfg->dev->dev; - int i, found; + int i, found = true; cxlflash_mark_contexts_error(cfg); while (true) { - found = false; - for (i = 0; i < MAX_CONTEXT; i++) if (cfg->ctx_tbl[i]) { found = true; @@ -102,6 +103,7 @@ void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg) __func__); wake_up_all(&cfg->reset_waitq); ssleep(1); + found = false; } } @@ -252,6 +254,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) struct afu *afu = cfg->afu; struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map; int rc = 0; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); u64 val; /* Unlock cap and restrict user to read/write cmds in translated mode */ @@ -268,7 +271,7 @@ static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) /* Set up MMIO registers pointing to the RHT */ writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start); - val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl)); + val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl)); writeq_be(val, &ctrl_map->rht_cnt_id); out: dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); @@ -1624,6 +1627,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev, struct afu *afu = cfg->afu; struct ctx_info *ctxi = NULL; struct mutex *mutex = &cfg->ctx_recovery_mutex; + struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); u64 flags; u64 ctxid = DECODE_CTXID(recover->context_id), rctxid = recover->context_id; @@ -1694,7 +1698,7 @@ retry_recover: } /* Test if in error state */ - reg = readq_be(&afu->ctrl_map->mbox_r); + reg = readq_be(&hwq->ctrl_map->mbox_r); if (reg == -1) { dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__); @@ -1933,7 +1937,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) u64 lun_size = 0; u64 last_lba = 0; u64 rsrc_handle = -1; - u32 port = CHAN2PORT(sdev->channel); + u32 port = CHAN2PORTMASK(sdev->channel); int rc = 0; diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h index 9e62ff304e4b..0b5976829913 100644 --- a/drivers/scsi/cxlflash/superpipe.h +++ b/drivers/scsi/cxlflash/superpipe.h @@ -24,8 +24,8 @@ extern struct cxlflash_global global; */ /* Chunk size parms: note sislite minimum chunk size is - 0x10000 LBAs corresponding to a NMASK or 16. -*/ + * 0x10000 LBAs corresponding to a NMASK or 16. 
+ */ #define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */ #define CMD_TIMEOUT 30 /* 30 secs */ @@ -33,9 +33,6 @@ extern struct cxlflash_global global; #define MAX_SECTOR_UNIT 512 /* max_sector is in 512 byte multiples */ -#define CHAN2PORT(_x) ((_x) + 1) -#define PORT2CHAN(_x) ((_x) - 1) - enum lun_mode { MODE_NONE = 0, MODE_VIRTUAL, @@ -59,7 +56,7 @@ struct glun_info { /* Local (per-adapter) lun_info structure */ struct llun_info { - u64 lun_id[CXLFLASH_NUM_FC_PORTS]; /* from REPORT_LUNS */ + u64 lun_id[MAX_FC_PORTS]; /* from REPORT_LUNS */ u32 lun_index; /* Index in the LUN table */ u32 host_no; /* host_no from Scsi_host */ u32 port_sel; /* What port to use for this LUN */ @@ -92,7 +89,8 @@ enum ctx_ctrl { struct ctx_info { struct sisl_ctrl_map __iomem *ctrl_map; /* initialized at startup */ struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment), - alloc/free on attach/detach */ + * alloc/free on attach/detach + */ u32 rht_out; /* Number of checked out RHT entries */ u32 rht_perms; /* User-defined permissions for RHT entries */ struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */ @@ -120,34 +118,40 @@ struct cxlflash_global { struct page *err_page; /* One page of all 0xF for error notification */ }; -int cxlflash_vlun_resize(struct scsi_device *, struct dk_cxlflash_resize *); -int _cxlflash_vlun_resize(struct scsi_device *, struct ctx_info *, - struct dk_cxlflash_resize *); +int cxlflash_vlun_resize(struct scsi_device *sdev, + struct dk_cxlflash_resize *resize); +int _cxlflash_vlun_resize(struct scsi_device *sdev, struct ctx_info *ctxi, + struct dk_cxlflash_resize *resize); -int cxlflash_disk_release(struct scsi_device *, struct dk_cxlflash_release *); -int _cxlflash_disk_release(struct scsi_device *, struct ctx_info *, - struct dk_cxlflash_release *); +int cxlflash_disk_release(struct scsi_device *sdev, + struct dk_cxlflash_release *release); +int _cxlflash_disk_release(struct scsi_device *sdev, struct ctx_info *ctxi, + struct dk_cxlflash_release *release); -int cxlflash_disk_clone(struct scsi_device *, struct dk_cxlflash_clone *); +int cxlflash_disk_clone(struct scsi_device *sdev, + struct dk_cxlflash_clone *clone); -int cxlflash_disk_virtual_open(struct scsi_device *, void *); +int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg); -int cxlflash_lun_attach(struct glun_info *, enum lun_mode, bool); -void cxlflash_lun_detach(struct glun_info *); +int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked); +void cxlflash_lun_detach(struct glun_info *gli); -struct ctx_info *get_context(struct cxlflash_cfg *, u64, void *, enum ctx_ctrl); -void put_context(struct ctx_info *); +struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxit, void *arg, + enum ctx_ctrl ctrl); +void put_context(struct ctx_info *ctxi); -struct sisl_rht_entry *get_rhte(struct ctx_info *, res_hndl_t, - struct llun_info *); +struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, + struct llun_info *lli); -struct sisl_rht_entry *rhte_checkout(struct ctx_info *, struct llun_info *); -void rhte_checkin(struct ctx_info *, struct sisl_rht_entry *); +struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, + struct llun_info *lli); +void rhte_checkin(struct ctx_info *ctxi, struct sisl_rht_entry *rhte); -void cxlflash_ba_terminate(struct ba_lun *); +void cxlflash_ba_terminate(struct ba_lun *ba_lun); -int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *); +int cxlflash_manage_lun(struct scsi_device *sdev, + struct 
dk_cxlflash_manage_lun *manage); -int check_state(struct cxlflash_cfg *); +int check_state(struct cxlflash_cfg *cfg); #endif /* ifndef _CXLFLASH_SUPERPIPE_H */ diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index 7aa06ef229fd..90b5c19f81f0 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -819,11 +819,10 @@ int cxlflash_vlun_resize(struct scsi_device *sdev, void cxlflash_restore_luntable(struct cxlflash_cfg *cfg) { struct llun_info *lli, *temp; - u32 chan; u32 lind; - struct afu *afu = cfg->afu; + int k; struct device *dev = &cfg->dev->dev; - struct sisl_global_map __iomem *agm = &afu->afu_map->global; + __be64 __iomem *fc_port_luns; mutex_lock(&global.mutex); @@ -832,33 +831,41 @@ void cxlflash_restore_luntable(struct cxlflash_cfg *cfg) continue; lind = lli->lun_index; + dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind); - if (lli->port_sel == BOTH_PORTS) { - writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]); - writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]); - dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx " - "id1=%llx\n", __func__, lind, - lli->lun_id[0], lli->lun_id[1]); - } else { - chan = PORT2CHAN(lli->port_sel); - writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]); - dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d " - "id=%llx\n", __func__, lind, chan, - lli->lun_id[chan]); - } + for (k = 0; k < cfg->num_fc_ports; k++) + if (lli->port_sel & (1 << k)) { + fc_port_luns = get_fc_port_luns(cfg, k); + writeq_be(lli->lun_id[k], &fc_port_luns[lind]); + dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); + } } mutex_unlock(&global.mutex); } /** + * get_num_ports() - compute number of ports from port selection mask + * @psm: Port selection mask. + * + * Return: Population count of port selection mask + */ +static inline u8 get_num_ports(u32 psm) +{ + static const u8 bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3, + 1, 2, 2, 3, 2, 3, 3, 4 }; + + return bits[psm & 0xf]; +} + +/** * init_luntable() - write an entry in the LUN table * @cfg: Internal structure associated with the host. * @lli: Per adapter LUN information structure. * - * On successful return, a LUN table entry is created. - * At the top for LUNs visible on both ports. - * At the bottom for LUNs visible only on one port. + * On successful return, a LUN table entry is created: + * - at the top for LUNs visible on multiple ports. + * - at the bottom for LUNs visible only on one port. * * Return: 0 on success, -errno on failure */ @@ -866,48 +873,68 @@ static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli) { u32 chan; u32 lind; + u32 nports; int rc = 0; - struct afu *afu = cfg->afu; + int k; struct device *dev = &cfg->dev->dev; - struct sisl_global_map __iomem *agm = &afu->afu_map->global; + __be64 __iomem *fc_port_luns; mutex_lock(&global.mutex); if (lli->in_table) goto out; - if (lli->port_sel == BOTH_PORTS) { + nports = get_num_ports(lli->port_sel); + if (nports == 0 || nports > cfg->num_fc_ports) { + WARN(1, "Unsupported port configuration nports=%u", nports); + rc = -EIO; + goto out; + } + + if (nports > 1) { /* - * If this LUN is visible from both ports, we will put + * When LUN is visible from multiple ports, we will put * it in the top half of the LUN table. 
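The resulting allocator works a fixed table from both ends: LUNs whose port-selection mask has more than one bit set (get_num_ports(), the nibble popcount earlier in this hunk) take the next slot at the top, single-port LUNs take slots from the bottom, and the table is full when the cursors meet. A simplified sketch, with the driver's per-port bottom cursors collapsed into one for brevity:

    #include <stdint.h>

    #define TABLE_SIZE 64                   /* hypothetical LUN table size */

    static uint8_t get_num_ports(uint32_t psm)      /* 4-bit popcount */
    {
            static const uint8_t bits[16] = { 0, 1, 1, 2, 1, 2, 2, 3,
                                              1, 2, 2, 3, 2, 3, 3, 4 };
            return bits[psm & 0xf];
    }

    static int promote_idx;                 /* next free slot at the top */
    static int last_idx = TABLE_SIZE - 1;   /* next free slot at the bottom */

    /* Returns a table slot, or -1 when the cursors have met (-ENOSPC). */
    static int alloc_lun_slot(uint32_t port_sel)
    {
            if (promote_idx > last_idx)
                    return -1;
            return get_num_ports(port_sel) > 1 ? promote_idx++ : last_idx--;
    }

    int main(void)
    {
            int a = alloc_lun_slot(0x3);    /* ports 0+1: top half */
            int b = alloc_lun_slot(0x2);    /* port 1 only: bottom half */

            return (a == 0 && b == TABLE_SIZE - 1) ? 0 : 1;
    }
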
*/ - if ((cfg->promote_lun_index == cfg->last_lun_index[0]) || - (cfg->promote_lun_index == cfg->last_lun_index[1])) { - rc = -ENOSPC; - goto out; + for (k = 0; k < cfg->num_fc_ports; k++) { + if (!(lli->port_sel & (1 << k))) + continue; + + if (cfg->promote_lun_index == cfg->last_lun_index[k]) { + rc = -ENOSPC; + goto out; + } } lind = lli->lun_index = cfg->promote_lun_index; - writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]); - writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]); + dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n", __func__, lind); + + for (k = 0; k < cfg->num_fc_ports; k++) { + if (!(lli->port_sel & (1 << k))) + continue; + + fc_port_luns = get_fc_port_luns(cfg, k); + writeq_be(lli->lun_id[k], &fc_port_luns[lind]); + dev_dbg(dev, "\t%d=%llx\n", k, lli->lun_id[k]); + } + cfg->promote_lun_index++; - dev_dbg(dev, "%s: Virtual LUN on slot %d id0=%llx id1=%llx\n", - __func__, lind, lli->lun_id[0], lli->lun_id[1]); } else { /* - * If this LUN is visible only from one port, we will put + * When LUN is visible only from one port, we will put * it in the bottom half of the LUN table. */ - chan = PORT2CHAN(lli->port_sel); + chan = PORTMASK2CHAN(lli->port_sel); if (cfg->promote_lun_index == cfg->last_lun_index[chan]) { rc = -ENOSPC; goto out; } lind = lli->lun_index = cfg->last_lun_index[chan]; - writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]); + fc_port_luns = get_fc_port_luns(cfg, chan); + writeq_be(lli->lun_id[chan], &fc_port_luns[lind]); cfg->last_lun_index[chan]--; - dev_dbg(dev, "%s: Virtual LUN on slot %d chan=%d id=%llx\n", + dev_dbg(dev, "%s: Virtual LUNs on slot %d:\n\t%d=%llx\n", __func__, lind, chan, lli->lun_id[chan]); } @@ -1016,7 +1043,7 @@ int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg) virt->last_lba = last_lba; virt->rsrc_handle = rsrc_handle; - if (lli->port_sel == BOTH_PORTS) + if (get_num_ports(lli->port_sel) > 1) virt->hdr.return_flags |= DK_CXLFLASH_ALL_PORTS_ACTIVE; out: if (likely(ctxi)) diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h index 8b29a74946e4..27a63a0155ce 100644 --- a/drivers/scsi/cxlflash/vlun.h +++ b/drivers/scsi/cxlflash/vlun.h @@ -47,7 +47,7 @@ * not stored anywhere. * * The LXT table is re-allocated whenever it needs to cross into another group. 
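The group math that follows rounds the entry count up to whole groups of LXT_GROUP_SIZE, so a resize only forces a reallocation when it crosses a group boundary. A standalone check of that round-up; grow_needed is an illustrative helper, not part of the driver:

    #include <assert.h>

    #define LXT_GROUP_SIZE          8
    #define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7) / 8)   /* alloc'ed groups */

    /* Reallocation is needed only when the group count changes. */
    static int grow_needed(int old_cnt, int new_cnt)
    {
            return LXT_NUM_GROUPS(old_cnt) != LXT_NUM_GROUPS(new_cnt);
    }

    int main(void)
    {
            assert(LXT_NUM_GROUPS(1) == 1);
            assert(LXT_NUM_GROUPS(8) == 1);
            assert(LXT_NUM_GROUPS(9) == 2);
            assert(!grow_needed(5, 8));     /* stays inside one group */
            assert(grow_needed(8, 9));      /* crosses into another group */
            return 0;
    }
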
-*/ + */ #define LXT_GROUP_SIZE 8 #define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */ #define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */ diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c index a82030aa8577..65fdf22b0ba9 100644 --- a/drivers/scsi/esas2r/esas2r_log.c +++ b/drivers/scsi/esas2r/esas2r_log.c @@ -130,11 +130,6 @@ static int esas2r_log_master(const long level, spin_lock_irqsave(&event_buffer_lock, flags); - if (buffer == NULL) { - spin_unlock_irqrestore(&event_buffer_lock, flags); - return -1; - } - memset(buffer, 0, buflen); /* diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index ab7bc1505e0b..90939f66bc0d 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -63,11 +63,11 @@ unsigned int fcoe_debug_logging; module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); -unsigned int fcoe_e_d_tov = 2 * 1000; +static unsigned int fcoe_e_d_tov = 2 * 1000; module_param_named(e_d_tov, fcoe_e_d_tov, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(e_d_tov, "E_D_TOV in ms, default 2000"); -unsigned int fcoe_r_a_tov = 2 * 2 * 1000; +static unsigned int fcoe_r_a_tov = 2 * 2 * 1000; module_param_named(r_a_tov, fcoe_r_a_tov, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(r_a_tov, "R_A_TOV in ms, default 4000"); diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 9e4b7709043e..67aab965c0f4 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -39,7 +39,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.6.0.21" +#define DRV_VERSION "1.6.0.34" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -217,7 +217,6 @@ struct fnic { struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ struct vnic_dev_bar bar0; - struct msix_entry msix_entry[FNIC_MSIX_INTR_MAX]; struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; struct vnic_stats *stats; diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 3b7da66e2771..245dcd95e11f 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -342,8 +342,11 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic) fnic_fcoe_reset_vlans(fnic); fnic->set_vlan(fnic, 0); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, - "Sending VLAN request...\n"); + + if (printk_ratelimit()) + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Sending VLAN request...\n"); + skb = dev_alloc_skb(sizeof(struct fip_vlan)); if (!skb) return; @@ -359,7 +362,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic) vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); vlan->fip.fip_op = htons(FIP_OP_VLAN); - vlan->fip.fip_subcode = FIP_SC_VL_NOTE; + vlan->fip.fip_subcode = FIP_SC_VL_REQ; vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; @@ -1313,10 +1316,11 @@ void fnic_handle_fip_timer(struct fnic *fnic) spin_lock_irqsave(&fnic->vlans_lock, flags); if (list_empty(&fnic->vlans)) { - /* no vlans available, try again */ - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); spin_unlock_irqrestore(&fnic->vlans_lock, flags); + /* no vlans available, try again */ + if (printk_ratelimit()) + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); return; } @@ -1332,10 +1336,11 @@ void fnic_handle_fip_timer(struct fnic *fnic) spin_unlock_irqrestore(&fnic->vlans_lock, flags); 
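The VLAN-discovery hunks above wrap their periodic messages in printk_ratelimit() so a flapping FIP fabric cannot flood the log; in-tree code today would usually reach for the pr_*_ratelimited() helpers instead, which keep per-call-site state. A crude userspace analogue of the default policy, roughly a burst of 10 messages per 5-second window:

    #include <stdio.h>
    #include <time.h>

    static int log_ratelimit(void)
    {
            static time_t window;
            static int printed;
            time_t now = time(NULL);

            if (now - window >= 5) {        /* open a new 5-second window */
                    window = now;
                    printed = 0;
            }
            return printed++ < 10;          /* allow a burst of 10 per window */
    }

    int main(void)
    {
            for (int i = 0; i < 1000; i++)
                    if (log_ratelimit())
                            fprintf(stderr, "Sending VLAN request...\n");
            return 0;                       /* only ~10 lines reach the log */
    }
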
break; case FIP_VLAN_FAILED: - /* if all vlans are in failed state, restart vlan disc */ - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); spin_unlock_irqrestore(&fnic->vlans_lock, flags); + /* if all vlans are in failed state, restart vlan disc */ + if (printk_ratelimit()) + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); break; case FIP_VLAN_SENT: diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index a0dd1b67a467..4e3a50202e8c 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -154,13 +154,13 @@ void fnic_free_intr(struct fnic *fnic) switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: case VNIC_DEV_INTR_MODE_MSI: - free_irq(fnic->pdev->irq, fnic); + free_irq(pci_irq_vector(fnic->pdev, 0), fnic); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) if (fnic->msix[i].requested) - free_irq(fnic->msix_entry[i].vector, + free_irq(pci_irq_vector(fnic->pdev, i), fnic->msix[i].devid); break; @@ -177,12 +177,12 @@ int fnic_request_intr(struct fnic *fnic) switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: - err = request_irq(fnic->pdev->irq, &fnic_isr_legacy, - IRQF_SHARED, DRV_NAME, fnic); + err = request_irq(pci_irq_vector(fnic->pdev, 0), + &fnic_isr_legacy, IRQF_SHARED, DRV_NAME, fnic); break; case VNIC_DEV_INTR_MODE_MSI: - err = request_irq(fnic->pdev->irq, &fnic_isr_msi, + err = request_irq(pci_irq_vector(fnic->pdev, 0), &fnic_isr_msi, 0, fnic->name, fnic); break; @@ -210,7 +210,7 @@ int fnic_request_intr(struct fnic *fnic) fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic; for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) { - err = request_irq(fnic->msix_entry[i].vector, + err = request_irq(pci_irq_vector(fnic->pdev, i), fnic->msix[i].isr, 0, fnic->msix[i].devname, fnic->msix[i].devid); @@ -237,7 +237,6 @@ int fnic_set_intr_mode(struct fnic *fnic) unsigned int n = ARRAY_SIZE(fnic->rq); unsigned int m = ARRAY_SIZE(fnic->wq); unsigned int o = ARRAY_SIZE(fnic->wq_copy); - unsigned int i; /* * Set interrupt mode (INTx, MSI, MSI-X) depending @@ -248,23 +247,20 @@ int fnic_set_intr_mode(struct fnic *fnic) * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs * (last INTR is used for WQ/RQ errors and notification area) */ - - BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1); - for (i = 0; i < n + m + o + 1; i++) - fnic->msix_entry[i].entry = i; - if (fnic->rq_count >= n && fnic->raw_wq_count >= m && fnic->wq_copy_count >= o && fnic->cq_count >= n + m + o) { - if (!pci_enable_msix_exact(fnic->pdev, fnic->msix_entry, - n + m + o + 1)) { + int vecs = n + m + o + 1; + + if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs, + PCI_IRQ_MSIX) < 0) { fnic->rq_count = n; fnic->raw_wq_count = m; fnic->wq_copy_count = o; fnic->wq_count = m + o; fnic->cq_count = n + m + o; - fnic->intr_count = n + m + o + 1; + fnic->intr_count = vecs; fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY; FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, @@ -284,8 +280,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->wq_copy_count >= 1 && fnic->cq_count >= 3 && fnic->intr_count >= 1 && - !pci_enable_msi(fnic->pdev)) { - + pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) < 0) { fnic->rq_count = 1; fnic->raw_wq_count = 1; fnic->wq_copy_count = 1; @@ -334,17 +329,7 @@ int fnic_set_intr_mode(struct fnic *fnic) void fnic_clear_intr_mode(struct fnic *fnic) { - switch (vnic_dev_get_intr_mode(fnic->vdev)) { - case 
VNIC_DEV_INTR_MODE_MSIX: - pci_disable_msix(fnic->pdev); - break; - case VNIC_DEV_INTR_MODE_MSI: - pci_disable_msi(fnic->pdev); - break; - default: - break; - } - + pci_free_irq_vectors(fnic->pdev); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index adb3d5871e74..d048f3b5006f 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -823,6 +823,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, spinlock_t *io_lock; u64 cmd_trace; unsigned long start_time; + unsigned long io_duration_time; /* Decode the cmpl description to get the io_req id */ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); @@ -876,32 +877,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, /* * if SCSI-ML has already issued abort on this command, - * ignore completion of the IO. The abts path will clean it up + * set completion of the IO. The abts path will clean it up */ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { - spin_unlock_irqrestore(io_lock, flags); + + /* + * set the FNIC_IO_DONE so that this doesn't get + * flagged as 'out of order' if it was not aborted + */ + CMD_FLAGS(sc) |= FNIC_IO_DONE; CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING; - switch (hdr_status) { - case FCPIO_SUCCESS: - CMD_FLAGS(sc) |= FNIC_IO_DONE; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, - "icmnd_cmpl ABTS pending hdr status = %s " - "sc 0x%p scsi_status %x residual %d\n", - fnic_fcpio_status_to_str(hdr_status), sc, - icmnd_cmpl->scsi_status, - icmnd_cmpl->residual); - break; - case FCPIO_ABORTED: + spin_unlock_irqrestore(io_lock, flags); + if(FCPIO_ABORTED == hdr_status) CMD_FLAGS(sc) |= FNIC_IO_ABORTED; - break; - default: - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, - "icmnd_cmpl abts pending " - "hdr status = %s tag = 0x%x sc = 0x%p\n", - fnic_fcpio_status_to_str(hdr_status), - id, sc); - break; - } + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "icmnd_cmpl abts pending " + "hdr status = %s tag = 0x%x sc = 0x%p" + "scsi_status = %x residual = %d\n", + fnic_fcpio_status_to_str(hdr_status), + id, sc, + icmnd_cmpl->scsi_status, + icmnd_cmpl->residual); return; } @@ -919,6 +916,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) xfer_len -= icmnd_cmpl->residual; + if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION) + atomic64_inc(&fnic_stats->misc_stats.check_condition); + if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) atomic64_inc(&fnic_stats->misc_stats.queue_fulls); break; @@ -1017,6 +1017,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, else atomic64_inc(&fnic_stats->io_stats.io_completions); + + io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + + if(io_duration_time <= 10) + atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); + else if(io_duration_time <= 100) + atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec); + else if(io_duration_time <= 500) + atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec); + else if(io_duration_time <= 5000) + atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec); + else if(io_duration_time <= 10000) + atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec); + else if(io_duration_time <= 30000) + atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec); + else { + atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec); + + if(io_duration_time > 
atomic64_read(&fnic_stats->io_stats.current_max_io_time)) + atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time); + } + /* Call SCSI completion function to complete the IO */ if (sc->scsi_done) sc->scsi_done(sc); @@ -1128,18 +1150,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, } CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; + CMD_ABTS_STATUS(sc) = hdr_status; /* If the status is IO not found consider it as success */ if (hdr_status == FCPIO_IO_NOT_FOUND) CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS; - else - CMD_ABTS_STATUS(sc) = hdr_status; - - atomic64_dec(&fnic_stats->io_stats.active_ios); - if (atomic64_read(&fnic->io_cmpl_skip)) - atomic64_dec(&fnic->io_cmpl_skip); - else - atomic64_inc(&fnic_stats->io_stats.io_completions); if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); @@ -1181,6 +1196,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc))); sc->scsi_done(sc); + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); } } @@ -1793,6 +1813,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) struct terminate_stats *term_stats; enum fnic_ioreq_state old_ioreq_state; int tag; + unsigned long abt_issued_time; DECLARE_COMPLETION_ONSTACK(tm_done); /* Wait for rport to unblock */ @@ -1846,6 +1867,25 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) spin_unlock_irqrestore(io_lock, flags); goto wait_pending; } + + abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + if (abt_issued_time <= 6000) + atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec); + else if (abt_issued_time > 6000 && abt_issued_time <= 20000) + atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec); + else if (abt_issued_time > 20000 && abt_issued_time <= 30000) + atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec); + else if (abt_issued_time > 30000 && abt_issued_time <= 40000) + atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec); + else if (abt_issued_time > 40000 && abt_issued_time <= 50000) + atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec); + else if (abt_issued_time > 50000 && abt_issued_time <= 60000) + atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec); + else + atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); + + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "CDB Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time); /* * Command is still pending, need to abort it * If the firmware completes the command after this point, @@ -1970,6 +2010,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) /* Call SCSI completion function to complete the IO */ sc->result = (DID_ABORT << 16); sc->scsi_done(sc); + atomic64_dec(&fnic_stats->io_stats.active_ios); + if (atomic64_read(&fnic->io_cmpl_skip)) + atomic64_dec(&fnic->io_cmpl_skip); + else + atomic64_inc(&fnic_stats->io_stats.io_completions); } fnic_abort_cmd_end: diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h index 540cceb843cd..88c73cccb015 100644 --- a/drivers/scsi/fnic/fnic_stats.h +++ b/drivers/scsi/fnic/fnic_stats.h @@ -26,6 +26,14 @@ struct io_path_stats { atomic64_t sc_null; atomic64_t io_not_found; atomic64_t num_ios; + atomic64_t io_btw_0_to_10_msec; + atomic64_t io_btw_10_to_100_msec; + atomic64_t io_btw_100_to_500_msec; + atomic64_t io_btw_500_to_5000_msec; + atomic64_t io_btw_5000_to_10000_msec;
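These counters implement a fixed-bucket latency histogram: the completion path above compares each IO's duration against ascending bounds, increments exactly one bucket, and tracks a running maximum for the slowest class, all without per-IO storage. A standalone sketch; plain integers stand in for atomic64_t:

    #include <stdio.h>

    /* Bucket upper bounds in msec; the last bucket is open-ended. */
    static const unsigned long bound[6] = { 10, 100, 500, 5000, 10000, 30000 };
    static unsigned long long bucket[7];
    static unsigned long max_io_msec;

    static void account_io_time(unsigned long msec)
    {
            int i;

            for (i = 0; i < 6 && msec > bound[i]; i++)
                    ;                       /* find the first bound not exceeded */
            bucket[i]++;
            if (i == 6 && msec > max_io_msec)
                    max_io_msec = msec;     /* driver tracks max only past 30 sec */
    }

    int main(void)
    {
            account_io_time(3);
            account_io_time(250);
            account_io_time(45000);
            printf("<10ms=%llu 100-500ms=%llu >30s=%llu max=%lums\n",
                   bucket[0], bucket[2], bucket[6], max_io_msec);
            return 0;
    }
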
+ atomic64_t io_btw_10000_to_30000_msec; + atomic64_t io_greater_than_30000_msec; + atomic64_t current_max_io_time; }; struct abort_stats { @@ -34,6 +42,13 @@ atomic64_t abort_drv_timeouts; atomic64_t abort_fw_timeouts; atomic64_t abort_io_not_found; + atomic64_t abort_issued_btw_0_to_6_sec; + atomic64_t abort_issued_btw_6_to_20_sec; + atomic64_t abort_issued_btw_20_to_30_sec; + atomic64_t abort_issued_btw_30_to_40_sec; + atomic64_t abort_issued_btw_40_to_50_sec; + atomic64_t abort_issued_btw_50_to_60_sec; + atomic64_t abort_issued_greater_than_60_sec; }; struct terminate_stats { @@ -88,6 +103,7 @@ struct misc_stats { atomic64_t devrst_cpwq_alloc_failures; atomic64_t io_cpwq_alloc_failures; atomic64_t no_icmnd_itmf_cmpls; + atomic64_t check_condition; atomic64_t queue_fulls; atomic64_t rport_not_ready; atomic64_t frame_errors; diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 5a5fa01576b7..b5ac5381a0d7 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -229,7 +229,16 @@ int fnic_get_stats_data(struct stats_debug_info *debug, "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n" "Number of Memory alloc Failures: %lld\n" "Number of IOREQ Null: %lld\n" - "Number of SCSI cmd pointer Null: %lld\n", + "Number of SCSI cmd pointer Null: %lld\n" + + "\nIO completion times: \n" + " < 10 ms : %lld\n" + " 10 ms - 100 ms : %lld\n" + " 100 ms - 500 ms : %lld\n" + " 500 ms - 5 sec: %lld\n" + " 5 sec - 10 sec: %lld\n" + " 10 sec - 30 sec: %lld\n" + " > 30 sec: %lld\n", (u64)atomic64_read(&stats->io_stats.active_ios), (u64)atomic64_read(&stats->io_stats.max_active_ios), (u64)atomic64_read(&stats->io_stats.num_ios), @@ -238,28 +247,58 @@ int fnic_get_stats_data(struct stats_debug_info *debug, (u64)atomic64_read(&stats->io_stats.io_not_found), (u64)atomic64_read(&stats->io_stats.alloc_failures), (u64)atomic64_read(&stats->io_stats.ioreq_null), - (u64)atomic64_read(&stats->io_stats.sc_null)); + (u64)atomic64_read(&stats->io_stats.sc_null), + (u64)atomic64_read(&stats->io_stats.io_btw_0_to_10_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_10_to_100_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_100_to_500_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_500_to_5000_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_5000_to_10000_msec), + (u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec), + (u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec)); + + len += snprintf(debug->debug_buffer + len, buf_size - len, + "\nCurrent Max IO time : %lld\n", + (u64)atomic64_read(&stats->io_stats.current_max_io_time)); len += snprintf(debug->debug_buffer + len, buf_size - len, "\n------------------------------------------\n" "\t\tAbort Statistics\n" "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, "Number of Aborts: %lld\n" "Number of Abort Failures: %lld\n" "Number of Abort Driver Timeouts: %lld\n" "Number of Abort FW Timeouts: %lld\n" - "Number of Abort IO NOT Found: %lld\n", + "Number of Abort IO NOT Found: %lld\n" + + "Abort issued times: \n" + " < 6 sec : %lld\n" + " 6 sec - 20 sec : %lld\n" + " 20 sec - 30 sec : %lld\n" + " 30 sec - 40 sec : %lld\n" + " 40 sec - 50 sec : %lld\n" + " 50 sec - 60 sec : %lld\n" + " > 60 sec: %lld\n", + (u64)atomic64_read(&stats->abts_stats.aborts), (u64)atomic64_read(&stats->abts_stats.abort_failures), (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
(u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts), - (u64)atomic64_read(&stats->abts_stats.abort_io_not_found)); + (u64)atomic64_read(&stats->abts_stats.abort_io_not_found), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_0_to_6_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_6_to_20_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_20_to_30_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_30_to_40_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_40_to_50_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec), + (u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec)); len += snprintf(debug->debug_buffer + len, buf_size - len, "\n------------------------------------------\n" "\t\tTerminate Statistics\n" "------------------------------------------\n"); + len += snprintf(debug->debug_buffer + len, buf_size - len, "Number of Terminates: %lld\n" "Maximum Terminates: %lld\n" @@ -357,6 +396,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug, "Number of Copy WQ Alloc Failures for Device Reset: %lld\n" "Number of Copy WQ Alloc Failures for IOs: %lld\n" "Number of no icmnd itmf Completions: %lld\n" + "Number of Check Conditions encountered: %lld\n" "Number of QUEUE Fulls: %lld\n" "Number of rport not ready: %lld\n" "Number of receive frame errors: %lld\n", @@ -377,6 +417,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug, &stats->misc_stats.devrst_cpwq_alloc_failures), (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures), (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), + (u64)atomic64_read(&stats->misc_stats.check_condition), (u64)atomic64_read(&stats->misc_stats.queue_fulls), (u64)atomic64_read(&stats->misc_stats.rport_not_ready), (u64)atomic64_read(&stats->misc_stats.frame_errors)); diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig index d1dd1616f983..374a329b91fc 100644 --- a/drivers/scsi/hisi_sas/Kconfig +++ b/drivers/scsi/hisi_sas/Kconfig @@ -4,5 +4,6 @@ config SCSI_HISI_SAS depends on ARM64 || COMPILE_TEST select SCSI_SAS_LIBSAS select BLK_DEV_INTEGRITY + depends on ATA help This driver supports HiSilicon's SAS HBA diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 9216deaa3ff5..4e28f32e90b0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -31,6 +31,7 @@ #define HISI_SAS_QUEUE_SLOTS 512 #define HISI_SAS_MAX_ITCT_ENTRIES 2048 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES +#define HISI_SAS_RESET_BIT 0 #define HISI_SAS_STATUS_BUF_SZ \ (sizeof(struct hisi_sas_err_record) + 1024) @@ -90,7 +91,6 @@ struct hisi_sas_port { struct asd_sas_port sas_port; u8 port_attached; u8 id; /* from hw */ - struct list_head list; }; struct hisi_sas_cq { @@ -113,7 +113,9 @@ struct hisi_sas_device { u64 attached_phy; u64 device_id; atomic64_t running_req; + struct list_head list; u8 dev_status; + int sata_idx; }; struct hisi_sas_slot { @@ -136,6 +138,7 @@ struct hisi_sas_slot { struct hisi_sas_sge_page *sge_page; dma_addr_t sge_page_dma; struct work_struct abort_slot; + struct timer_list internal_abort_timer; }; struct hisi_sas_tmf_task { @@ -165,7 +168,8 @@ struct hisi_sas_hw { struct hisi_sas_slot *slot, int device_id, int abort_flag, int tag_to_abort); int (*slot_complete)(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, int abort); + struct hisi_sas_slot *slot); + void (*phys_init)(struct hisi_hba *hisi_hba); void (*phy_enable)(struct hisi_hba 
*hisi_hba, int phy_no); void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); void (*phy_hard_reset)(struct hisi_hba *hisi_hba, int phy_no); @@ -175,6 +179,7 @@ struct hisi_sas_hw { void (*free_device)(struct hisi_hba *hisi_hba, struct hisi_sas_device *dev); int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id); + int (*soft_reset)(struct hisi_hba *hisi_hba); int max_command_entries; int complete_hdr_size; }; @@ -193,7 +198,6 @@ struct hisi_hba { u8 sas_addr[SAS_ADDR_SIZE]; int n_phy; - int scan_finished; spinlock_t lock; struct timer_list timer; @@ -201,6 +205,7 @@ struct hisi_hba { int slot_index_count; unsigned long *slot_index_tags; + unsigned long reject_stp_links_msk; /* SCSI/SAS glue */ struct sas_ha_struct sha; @@ -233,7 +238,10 @@ struct hisi_hba { struct hisi_sas_breakpoint *sata_breakpoint; dma_addr_t sata_breakpoint_dma; struct hisi_sas_slot *slot_info; + unsigned long flags; const struct hisi_sas_hw *hw; /* Low level hw interface */ + unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; + struct work_struct rst_work; }; /* Generic HW DMA host memory structures */ @@ -346,6 +354,8 @@ union hisi_sas_command_table { struct hisi_sas_command_table_smp smp; struct hisi_sas_command_table_stp stp; }; + +extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); extern int hisi_sas_remove(struct platform_device *pdev); @@ -354,4 +364,7 @@ extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy); extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot); +extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); +extern void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, + u32 state); #endif diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 53637a941b94..d622db502ec9 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -21,12 +21,19 @@ static int hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, struct domain_device *device, int abort_flag, int tag); +static int hisi_sas_softreset_ata_disk(struct domain_device *device); static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) { return device->port->ha->lldd_ha; } +struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port) +{ + return container_of(sas_port, struct hisi_sas_port, sas_port); +} +EXPORT_SYMBOL_GPL(to_hisi_sas_port); + static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) { void *bitmap = hisi_hba->slot_index_tags; @@ -70,17 +77,22 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { - struct device *dev = &hisi_hba->pdev->dev; - struct domain_device *device = task->dev; - struct hisi_sas_device *sas_dev = device->lldd_dev; - if (!slot->task) - return; + if (task) { + struct device *dev = &hisi_hba->pdev->dev; + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; - if (!sas_protocol_ata(task->task_proto)) - if (slot->n_elem) - dma_unmap_sg(dev, task->scatter, slot->n_elem, - task->data_dir); + if (!sas_protocol_ata(task->task_proto)) + if (slot->n_elem) + dma_unmap_sg(dev, task->scatter, slot->n_elem, + task->data_dir); + + task->lldd_task = NULL; + + if (sas_dev) + 
atomic64_dec(&sas_dev->running_req); + } if (slot->command_table) dma_pool_free(hisi_hba->command_table_pool, @@ -95,12 +107,10 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, slot->sge_page_dma); list_del_init(&slot->entry); - task->lldd_task = NULL; slot->task = NULL; slot->port = NULL; hisi_sas_slot_index_free(hisi_hba, slot->idx); - if (sas_dev) - atomic64_dec(&sas_dev->running_req); + /* slot memory is fully zeroed when it is reused */ } EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); @@ -178,10 +188,12 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, struct hisi_sas_port *port; struct hisi_sas_slot *slot; struct hisi_sas_cmd_hdr *cmd_hdr_base; + struct asd_sas_port *sas_port = device->port; struct device *dev = &hisi_hba->pdev->dev; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; + unsigned long flags; - if (!device->port) { + if (!sas_port) { struct task_status_struct *ts = &task->task_status; ts->resp = SAS_TASK_UNDELIVERED; @@ -192,7 +204,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, */ if (device->dev_type != SAS_SATA_DEV) task->task_done(task); - return 0; + return SAS_PHY_DOWN; } if (DEV_IS_GONE(sas_dev)) { @@ -203,13 +215,13 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, dev_info(dev, "task prep: device %016llx not ready\n", SAS_ADDR(device->sas_addr)); - rc = SAS_PHY_DOWN; - return rc; + return SAS_PHY_DOWN; } - port = device->port->lldd_port; + + port = to_hisi_sas_port(sas_port); if (port && !port->port_attached) { dev_info(dev, "task prep: %s port%d not attach device\n", - (sas_protocol_ata(task->task_proto)) ? + (dev_is_sata(device)) ? "SATA/STP" : "SAS", device->port->id); @@ -299,10 +311,10 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba, goto err_out_command_table; } - list_add_tail(&slot->entry, &port->list); - spin_lock(&task->task_state_lock); + list_add_tail(&slot->entry, &sas_dev->list); + spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock(&task->task_state_lock); + spin_unlock_irqrestore(&task->task_state_lock, flags); hisi_hba->slot_prep = slot; @@ -343,6 +355,9 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); struct device *dev = &hisi_hba->pdev->dev; + if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) + return -EINVAL; + /* protect task_prep and start_delivery sequence */ spin_lock_irqsave(&hisi_hba->lock, flags); rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass); @@ -412,6 +427,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; + INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } } @@ -482,12 +498,8 @@ static int hisi_sas_slave_configure(struct scsi_device *sdev) static void hisi_sas_scan_start(struct Scsi_Host *shost) { struct hisi_hba *hisi_hba = shost_priv(shost); - int i; - - for (i = 0; i < hisi_hba->n_phy; ++i) - hisi_sas_bytes_dmaed(hisi_hba, i); - hisi_hba->scan_finished = 1; + hisi_hba->hw->phys_init(hisi_hba); } static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) @@ -495,7 +507,8 @@ static int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time) struct hisi_hba *hisi_hba = shost_priv(shost); struct sas_ha_struct *sha = 
&hisi_hba->sha; - if (hisi_hba->scan_finished == 0) + /* Wait for PHY up interrupt to occur */ + if (time < HZ) return 0; sas_drain_work(sha); @@ -545,7 +558,7 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; - struct hisi_sas_port *port = &hisi_hba->port[phy->port_id]; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); unsigned long flags; if (!sas_port) @@ -559,50 +572,54 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) spin_unlock_irqrestore(&hisi_hba->lock, flags); } -static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, int phy_no, - struct domain_device *device) +static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task, + struct hisi_sas_slot *slot) { - struct hisi_sas_phy *phy; - struct hisi_sas_port *port; - struct hisi_sas_slot *slot, *slot2; - struct device *dev = &hisi_hba->pdev->dev; - - phy = &hisi_hba->phy[phy_no]; - port = phy->port; - if (!port) - return; + if (task) { + unsigned long flags; + struct task_status_struct *ts; + + ts = &task->task_status; + + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + } - list_for_each_entry_safe(slot, slot2, &port->list, entry) { - struct sas_task *task; + hisi_sas_slot_task_free(hisi_hba, task, slot); +} - task = slot->task; - if (device && task->dev != device) - continue; +/* hisi_hba.lock should be locked */ +static void hisi_sas_release_task(struct hisi_hba *hisi_hba, + struct domain_device *device) +{ + struct hisi_sas_slot *slot, *slot2; + struct hisi_sas_device *sas_dev = device->lldd_dev; - dev_info(dev, "Release slot [%d:%d], task [%p]:\n", - slot->dlvry_queue, slot->dlvry_queue_slot, task); - hisi_hba->hw->slot_complete(hisi_hba, slot, 1); - } + list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) + hisi_sas_do_release_task(hisi_hba, slot->task, slot); } -static void hisi_sas_port_notify_deformed(struct asd_sas_phy *sas_phy) +static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba) { + struct hisi_sas_device *sas_dev; struct domain_device *device; - struct hisi_sas_phy *phy = sas_phy->lldd_phy; - struct asd_sas_port *sas_port = sas_phy->port; + int i; - list_for_each_entry(device, &sas_port->dev_list, dev_list_node) - hisi_sas_do_release_task(phy->hisi_hba, sas_phy->id, device); -} + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { + sas_dev = &hisi_hba->devices[i]; + device = sas_dev->sas_device; -static void hisi_sas_release_task(struct hisi_hba *hisi_hba, - struct domain_device *device) -{ - struct asd_sas_port *port = device->port; - struct asd_sas_phy *sas_phy; + if ((sas_dev->dev_type == SAS_PHY_UNUSED) || + !device) + continue; - list_for_each_entry(sas_phy, &port->phy_list, port_phy_el) - hisi_sas_do_release_task(hisi_hba, sas_phy->id, device); + hisi_sas_release_task(hisi_hba, device); + } } static void hisi_sas_dev_gone(struct domain_device *device) @@ -644,8 +661,9 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_LINK_RESET: + hisi_hba->hw->phy_disable(hisi_hba, phy_no); + msleep(100); hisi_hba->hw->phy_enable(hisi_hba, phy_no); - hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); 
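Several hunks above replace indexed port lookups with to_hisi_sas_port(), which is container_of() recovering the driver's private port from the libsas asd_sas_port embedded inside it. A self-contained sketch of the idiom, with a simplified container_of and abbreviated struct definitions:

    #include <assert.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct asd_sas_port { int id; };        /* the embedded libsas object */

    struct hisi_sas_port {
            struct asd_sas_port sas_port;   /* embedded, not a pointer */
            unsigned char port_attached;
    };

    static struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
    {
            return container_of(sas_port, struct hisi_sas_port, sas_port);
    }

    int main(void)
    {
            struct hisi_sas_port port = { .sas_port = { .id = 3 } };

            /* libsas hands back &port.sas_port; recover the outer struct */
            assert(to_hisi_sas_port(&port.sas_port) == &port);
            return 0;
    }
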
break; case PHY_FUNC_DISABLE: @@ -698,7 +716,12 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, task->dev = device; task->task_proto = device->tproto; - memcpy(&task->ssp_task, parameter, para_len); + if (dev_is_sata(device)) { + task->ata_task.device_control_reg_update = 1; + memcpy(&task->ata_task.fis, parameter, para_len); + } else { + memcpy(&task->ssp_task, parameter, para_len); + } task->task_done = hisi_sas_task_done; task->slow_task->timer.data = (unsigned long) task; @@ -720,15 +743,11 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, /* Even TMF timed out, return direct. */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { - dev_err(dev, "abort tmf: TMF task[%d] timeout\n", - tmf->tag_of_task_to_be_managed); - if (task->lldd_task) { - struct hisi_sas_slot *slot = - task->lldd_task; + struct hisi_sas_slot *slot = task->lldd_task; - hisi_sas_slot_task_free(hisi_hba, - task, slot); - } + dev_err(dev, "abort tmf: TMF task timeout\n"); + if (slot) + slot->task = NULL; goto ex_err; } @@ -781,6 +800,63 @@ ex_err: return res; } +static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev, + bool reset, int pmp, u8 *fis) +{ + struct ata_taskfile tf; + + ata_tf_init(dev, &tf); + if (reset) + tf.ctl |= ATA_SRST; + else + tf.ctl &= ~ATA_SRST; + tf.command = ATA_CMD_DEV_RESET; + ata_tf_to_fis(&tf, pmp, 0, fis); +} + +static int hisi_sas_softreset_ata_disk(struct domain_device *device) +{ + u8 fis[20] = {0}; + struct ata_port *ap = device->sata_dev.ap; + struct ata_link *link; + int rc = TMF_RESP_FUNC_FAILED; + struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); + struct device *dev = &hisi_hba->pdev->dev; + int s = sizeof(struct host_to_dev_fis); + unsigned long flags; + + ata_for_each_link(link, ap, EDGE) { + int pmp = sata_srst_pmp(link); + + hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); + rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); + if (rc != TMF_RESP_FUNC_COMPLETE) + break; + } + + if (rc == TMF_RESP_FUNC_COMPLETE) { + ata_for_each_link(link, ap, EDGE) { + int pmp = sata_srst_pmp(link); + + hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); + rc = hisi_sas_exec_internal_tmf_task(device, fis, + s, NULL); + if (rc != TMF_RESP_FUNC_COMPLETE) + dev_err(dev, "ata disk de-reset failed\n"); + } + } else { + dev_err(dev, "ata disk reset failed\n"); + } + + if (rc == TMF_RESP_FUNC_COMPLETE) { + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_task(hisi_hba, device); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + } + + return rc; +} + static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, u8 *lun, struct hisi_sas_tmf_task *tmf) { @@ -795,6 +871,40 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, sizeof(ssp_task), tmf); } +static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) +{ + int rc; + + if (!hisi_hba->hw->soft_reset) + return -1; + + if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { + struct device *dev = &hisi_hba->pdev->dev; + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + unsigned long flags; + + dev_dbg(dev, "controller reset begins!\n"); + scsi_block_requests(hisi_hba->shost); + rc = hisi_hba->hw->soft_reset(hisi_hba); + if (rc) { + dev_warn(dev, "controller reset failed (%d)\n", rc); + goto out; + } + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_tasks(hisi_hba); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + + 
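hisi_sas_controller_reset() above uses test_and_set_bit() on HISI_SAS_RESET_BIT as a single-owner gate: exactly one caller wins the bit and performs the reset, the I/O paths check the same bit and bail out, and the winner clears it when done. A userspace sketch of the same gate using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag resetting = ATOMIC_FLAG_INIT;

    static int controller_reset(void)
    {
            if (atomic_flag_test_and_set(&resetting))
                    return -1;              /* somebody else owns the reset */

            puts("controller reset begins");
            /* ... block requests, soft-reset HW, fail outstanding tasks ... */
            puts("controller reset successful");

            atomic_flag_clear(&resetting);  /* reopen the gate */
            return 0;
    }

    int main(void)
    {
            return controller_reset();      /* uncontended: wins the bit */
    }
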
sas_ha->notify_ha_event(sas_ha, HAE_RESET); + dev_dbg(dev, "controller reset successful!\n"); + } else + return -1; + +out: + scsi_unblock_requests(hisi_hba->shost); + clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); + return rc; +} + static int hisi_sas_abort_task(struct sas_task *task) { struct scsi_lun lun; @@ -811,19 +921,17 @@ static int hisi_sas_abort_task(struct sas_task *task) return TMF_RESP_FUNC_FAILED; } - spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_DONE) { - spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; goto out; } - spin_unlock_irqrestore(&task->task_state_lock, flags); sas_dev->dev_status = HISI_SAS_DEV_EH; if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { struct scsi_cmnd *cmnd = task->uldd_task; struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; + int rc2; int_to_scsilun(cmnd->device->lun, &lun); tmf_task.tmf = TMF_ABORT_TASK; @@ -832,35 +940,41 @@ static int hisi_sas_abort_task(struct sas_task *task) rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); - /* if successful, clear the task and callback forwards.*/ - if (rc == TMF_RESP_FUNC_COMPLETE) { + rc2 = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_CMD, tag); + /* + * If the TMF finds that the IO is not in the device and also + * the internal abort does not succeed, then it is safe to + * free the slot. + * Note: if the internal abort succeeds then the slot + * will have already been completed + */ + if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) { if (task->lldd_task) { - struct hisi_sas_slot *slot; - - slot = &hisi_hba->slot_info - [tmf_task.tag_of_task_to_be_managed]; spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_hba->hw->slot_complete(hisi_hba, slot, 1); + hisi_sas_do_release_task(hisi_hba, task, slot); spin_unlock_irqrestore(&hisi_hba->lock, flags); } } - - hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_CMD, tag); } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { if (task->dev->dev_type == SAS_SATA_DEV) { hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); - rc = TMF_RESP_FUNC_COMPLETE; + rc = hisi_sas_softreset_ata_disk(device); } } else if (task->task_proto & SAS_PROTOCOL_SMP) { /* SMP */ struct hisi_sas_slot *slot = task->lldd_task; u32 tag = slot->idx; - hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_CMD, tag); + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_CMD, tag); + if (rc == TMF_RESP_FUNC_FAILED) { + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_do_release_task(hisi_hba, task, slot); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + } } out: @@ -915,37 +1029,66 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device) rc = hisi_sas_debug_I_T_nexus_reset(device); - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_release_task(hisi_hba, device); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - - return 0; + if (rc == TMF_RESP_FUNC_COMPLETE) { + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_task(hisi_hba, device); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + } + return rc; } static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) { - struct hisi_sas_tmf_task tmf_task; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = &hisi_hba->pdev->dev; unsigned long flags; int rc = 
TMF_RESP_FUNC_FAILED; - tmf_task.tmf = TMF_LU_RESET; sas_dev->dev_status = HISI_SAS_DEV_EH; - rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); - if (rc == TMF_RESP_FUNC_COMPLETE) { - spin_lock_irqsave(&hisi_hba->lock, flags); - hisi_sas_release_task(hisi_hba, device); - spin_unlock_irqrestore(&hisi_hba->lock, flags); - } + if (dev_is_sata(device)) { + struct sas_phy *phy; + + /* Clear internal IO and then hardreset */ + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + if (rc == TMF_RESP_FUNC_FAILED) + goto out; + + phy = sas_get_local_phy(device); - /* If failed, fall-through I_T_Nexus reset */ - dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n", - sas_dev->device_id, rc); + rc = sas_phy_reset(phy, 1); + + if (rc == 0) { + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_task(hisi_hba, device); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + } + sas_put_local_phy(phy); + } else { + struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; + + rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); + if (rc == TMF_RESP_FUNC_COMPLETE) { + spin_lock_irqsave(&hisi_hba->lock, flags); + hisi_sas_release_task(hisi_hba, device); + spin_unlock_irqrestore(&hisi_hba->lock, flags); + } + } +out: + if (rc != TMF_RESP_FUNC_COMPLETE) + dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n", + sas_dev->device_id, rc); return rc; } +static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) +{ + struct hisi_hba *hisi_hba = sas_ha->lldd_ha; + + return hisi_sas_controller_reset(hisi_hba); +} + static int hisi_sas_query_task(struct sas_task *task) { struct scsi_lun lun; @@ -990,13 +1133,18 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id, struct device *dev = &hisi_hba->pdev->dev; struct hisi_sas_port *port; struct hisi_sas_slot *slot; + struct asd_sas_port *sas_port = device->port; struct hisi_sas_cmd_hdr *cmd_hdr_base; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; + unsigned long flags; + + if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))) + return -EINVAL; if (!device->port) return -1; - port = device->port->lldd_port; + port = to_hisi_sas_port(sas_port); /* simply get a slot and send abort command */ rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx); @@ -1027,14 +1175,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id, if (rc) goto err_out_tag; - /* Port structure is static for the HBA, so - * even if the port is deformed it is ok - * to reference. - */ - list_add_tail(&slot->entry, &port->list); - spin_lock(&task->task_state_lock); + + list_add_tail(&slot->entry, &sas_dev->list); + spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock(&task->task_state_lock); + spin_unlock_irqrestore(&task->task_state_lock, flags); hisi_hba->slot_prep = slot; @@ -1085,7 +1230,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, task->task_done = hisi_sas_task_done; task->slow_task->timer.data = (unsigned long)task; task->slow_task->timer.function = hisi_sas_tmf_timedout; - task->slow_task->timer.expires = jiffies + 20*HZ; + task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110); add_timer(&task->slow_task->timer); /* Lock as we are alloc'ing a slot, which cannot be interrupted */ @@ -1108,15 +1253,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, goto exit; } - /* TMF timed out, return direct. 
*/ + if (task->task_status.resp == SAS_TASK_COMPLETE && + task->task_status.stat == TMF_RESP_FUNC_SUCC) { + res = TMF_RESP_FUNC_SUCC; + goto exit; + } + + /* Internal abort timed out */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { dev_err(dev, "internal task abort: timeout.\n"); - if (task->lldd_task) { - struct hisi_sas_slot *slot = task->lldd_task; - - hisi_sas_slot_task_free(hisi_hba, task, slot); - } } } @@ -1137,11 +1283,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) hisi_sas_port_notify_formed(sas_phy); } -static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy) -{ - hisi_sas_port_notify_deformed(sas_phy); -} - static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) { phy->phy_attached = 0; @@ -1181,6 +1322,37 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) } EXPORT_SYMBOL_GPL(hisi_sas_phy_down); +void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, + u32 state) +{ + struct sas_ha_struct *sas_ha = &hisi_hba->sha; + int phy_no; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct domain_device *dev; + + if (sas_phy->enabled) { + /* Report PHY state change to libsas */ + if (state & (1 << phy_no)) + continue; + + if (old_state & (1 << phy_no)) + /* PHY down but was up before */ + hisi_sas_phy_down(hisi_hba, phy_no, 0); + } + if (!sas_port) + continue; + dev = sas_port->port_dev; + + if (DEV_IS_EXPANDER(dev->dev_type)) + sas_ha->notify_phy_event(sas_phy, PORTE_BROADCAST_RCVD); + } +} +EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology); + static struct scsi_transport_template *hisi_sas_stt; static struct scsi_host_template hisi_sas_sht = { @@ -1215,10 +1387,41 @@ static struct sas_domain_function_template hisi_sas_transport_ops = { .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset, .lldd_lu_reset = hisi_sas_lu_reset, .lldd_query_task = hisi_sas_query_task, + .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha, .lldd_port_formed = hisi_sas_port_formed, - .lldd_port_deformed = hisi_sas_port_deformed, }; +void hisi_sas_init_mem(struct hisi_hba *hisi_hba) +{ + int i, s, max_command_entries = hisi_hba->hw->max_command_entries; + + for (i = 0; i < hisi_hba->queue_count; i++) { + struct hisi_sas_cq *cq = &hisi_hba->cq[i]; + struct hisi_sas_dq *dq = &hisi_hba->dq[i]; + + s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; + memset(hisi_hba->cmd_hdr[i], 0, s); + dq->wr_point = 0; + + s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + memset(hisi_hba->complete_hdr[i], 0, s); + cq->rd_point = 0; + } + + s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; + memset(hisi_hba->initial_fis, 0, s); + + s = max_command_entries * sizeof(struct hisi_sas_iost); + memset(hisi_hba->iost, 0, s); + + s = max_command_entries * sizeof(struct hisi_sas_breakpoint); + memset(hisi_hba->breakpoint, 0, s); + + s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; + memset(hisi_hba->sata_breakpoint, 0, s); +} +EXPORT_SYMBOL_GPL(hisi_sas_init_mem); + static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) { struct platform_device *pdev = hisi_hba->pdev; @@ -1230,7 +1433,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) hisi_sas_phy_init(hisi_hba, i); hisi_hba->port[i].port_attached = 0; hisi_hba->port[i].id = -1; - 
INIT_LIST_HEAD(&hisi_hba->port[i].list); } for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { @@ -1257,7 +1459,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL); if (!hisi_hba->cmd_hdr[i]) goto err_out; - memset(hisi_hba->cmd_hdr[i], 0, s); /* Completion queue */ s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; @@ -1265,7 +1466,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) &hisi_hba->complete_hdr_dma[i], GFP_KERNEL); if (!hisi_hba->complete_hdr[i]) goto err_out; - memset(hisi_hba->complete_hdr[i], 0, s); } s = HISI_SAS_STATUS_BUF_SZ; @@ -1300,16 +1500,12 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) if (!hisi_hba->iost) goto err_out; - memset(hisi_hba->iost, 0, s); - s = max_command_entries * sizeof(struct hisi_sas_breakpoint); hisi_hba->breakpoint = dma_alloc_coherent(dev, s, &hisi_hba->breakpoint_dma, GFP_KERNEL); if (!hisi_hba->breakpoint) goto err_out; - memset(hisi_hba->breakpoint, 0, s); - hisi_hba->slot_index_count = max_command_entries; s = hisi_hba->slot_index_count / BITS_PER_BYTE; hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); @@ -1326,14 +1522,13 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) &hisi_hba->initial_fis_dma, GFP_KERNEL); if (!hisi_hba->initial_fis) goto err_out; - memset(hisi_hba->initial_fis, 0, s); s = max_command_entries * sizeof(struct hisi_sas_breakpoint) * 2; hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s, &hisi_hba->sata_breakpoint_dma, GFP_KERNEL); if (!hisi_hba->sata_breakpoint) goto err_out; - memset(hisi_hba->sata_breakpoint, 0, s); + hisi_sas_init_mem(hisi_hba); hisi_sas_slot_index_init(hisi_hba); @@ -1404,6 +1599,14 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba) destroy_workqueue(hisi_hba->wq); } +static void hisi_sas_rst_work_handler(struct work_struct *work) +{ + struct hisi_hba *hisi_hba = + container_of(work, struct hisi_hba, rst_work); + + hisi_sas_controller_reset(hisi_hba); +} + static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, const struct hisi_sas_hw *hw) { @@ -1421,6 +1624,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, } hisi_hba = shost_priv(shost); + INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); hisi_hba->hw = hw; hisi_hba->pdev = pdev; hisi_hba->shost = shost; @@ -1583,7 +1787,6 @@ int hisi_sas_remove(struct platform_device *pdev) struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->core.shost; - scsi_remove_host(sha->core.shost); sas_unregister_ha(sha); sas_remove_host(sha->core.shost); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 854fbeaade3e..fc1c1b2c1a19 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -508,6 +508,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, struct device *dev = &hisi_hba->pdev->dev; u64 qw0, device_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); memset(itct, 0, sizeof(*itct)); @@ -528,7 +530,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, (1 << ITCT_HDR_AWT_CONTROL_OFF) | (device->max_linkrate << ITCT_HDR_MAX_CONN_RATE_OFF) | (1 << ITCT_HDR_VALID_LINK_NUM_OFF) | - (device->port->id << ITCT_HDR_PORT_ID_OFF)); + (port->id << ITCT_HDR_PORT_ID_OFF)); 
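The rst_work plumbing added above (hisi_sas_rst_work_handler plus the INIT_WORK in shost_alloc) is the standard deferred-work idiom: the handler is bound once at setup, interrupt-context callers merely queue the work, and the handler uses container_of() to get back from the work_struct to its hisi_hba. A kernel-style sketch of the shape; the my_* names are illustrative, not driver functions:

    #include <linux/workqueue.h>

    struct my_hba {
            struct work_struct rst_work;
    };

    static void my_controller_reset(struct my_hba *hba)
    {
            /* block requests, soft-reset HW, fail outstanding commands */
    }

    /* Runs later in process context, where it is safe to sleep and reset. */
    static void my_rst_work_handler(struct work_struct *work)
    {
            struct my_hba *hba = container_of(work, struct my_hba, rst_work);

            my_controller_reset(hba);
    }

    static void my_hba_init(struct my_hba *hba)
    {
            INIT_WORK(&hba->rst_work, my_rst_work_handler);
    }

    /* From an IRQ or error path: just queue, never reset inline. */
    static void my_trigger_reset(struct my_hba *hba)
    {
            schedule_work(&hba->rst_work);
    }
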
itct->qw0 = cpu_to_le64(qw0); /* qw1 */ @@ -1275,7 +1277,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba, } static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot, int abort) + struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_device *sas_dev; @@ -1286,6 +1288,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_complete_v1_hdr *complete_queue = hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v1_hdr *complete_hdr; + unsigned long flags; u32 cmplt_hdr_data; complete_hdr = &complete_queue[slot->cmplt_queue_slot]; @@ -1298,16 +1301,17 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, device = task->dev; sas_dev = device->lldd_dev; + spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); memset(ts, 0, sizeof(*ts)); ts->resp = SAS_TASK_COMPLETE; - if (unlikely(!sas_dev || abort)) { - if (!sas_dev) - dev_dbg(dev, "slot complete: port has not device\n"); + if (unlikely(!sas_dev)) { + dev_dbg(dev, "slot complete: port has no device\n"); ts->stat = SAS_PHY_DOWN; goto out; } @@ -1620,7 +1624,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p) */ slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; - slot_complete_v1_hw(hisi_hba, slot, 0); + slot_complete_v1_hw(hisi_hba, slot); if (++rd_point >= HISI_SAS_QUEUE_SLOTS) rd_point = 0; @@ -1845,8 +1849,6 @@ static int hisi_sas_v1_init(struct hisi_hba *hisi_hba) if (rc) return rc; - phys_init_v1_hw(hisi_hba); - return 0; } @@ -1860,6 +1862,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = { .get_free_slot = get_free_slot_v1_hw, .start_delivery = start_delivery_v1_hw, .slot_complete = slot_complete_v1_hw, + .phys_init = phys_init_v1_hw, .phy_enable = enable_phy_v1_hw, .phy_disable = disable_phy_v1_hw, .phy_hard_reset = phy_hard_reset_v1_hw, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 1b214450dcb5..e241921bee10 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -194,9 +194,9 @@ #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) #define SL_CONTROL_CTA_OFF 17 #define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF) -#define RX_PRIMS_STATUS (PORT_BASE + 0x98) -#define RX_BCAST_CHG_OFF 1 -#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) +#define RX_PRIMS_STATUS (PORT_BASE + 0x98) +#define RX_BCAST_CHG_OFF 1 +#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) #define TX_ID_DWORD0 (PORT_BASE + 0x9c) #define TX_ID_DWORD1 (PORT_BASE + 0xa0) #define TX_ID_DWORD2 (PORT_BASE + 0xa4) @@ -207,8 +207,10 @@ #define TXID_AUTO (PORT_BASE + 0xb8) #define TXID_AUTO_CT3_OFF 1 #define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF) -#define TX_HARDRST_OFF 2 -#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) +#define TXID_AUTO_CTB_OFF 11 +#define TXID_AUTO_CTB_MSK (0x1 << TXID_AUTO_CTB_OFF) +#define TX_HARDRST_OFF 2 +#define TX_HARDRST_MSK (0x1 << TX_HARDRST_OFF) #define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) #define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) #define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) @@ -218,6 +220,9 @@ #define RX_IDAF_DWORD6 (PORT_BASE + 0xdc) #define RXOP_CHECK_CFG_H (PORT_BASE + 0xfc) #define CON_CONTROL (PORT_BASE + 0x118) +#define CON_CONTROL_CFG_OPEN_ACC_STP_OFF 0 +#define CON_CONTROL_CFG_OPEN_ACC_STP_MSK \ + (0x01 << 
CON_CONTROL_CFG_OPEN_ACC_STP_OFF) #define DONE_RECEIVED_TIME (PORT_BASE + 0x11c) #define CHL_INT0 (PORT_BASE + 0x1b4) #define CHL_INT0_HOTPLUG_TOUT_OFF 0 @@ -240,6 +245,17 @@ #define CHL_INT1_MSK (PORT_BASE + 0x1c4) #define CHL_INT2_MSK (PORT_BASE + 0x1c8) #define CHL_INT_COAL_EN (PORT_BASE + 0x1d0) +#define DMA_TX_DFX0 (PORT_BASE + 0x200) +#define DMA_TX_DFX1 (PORT_BASE + 0x204) +#define DMA_TX_DFX1_IPTT_OFF 0 +#define DMA_TX_DFX1_IPTT_MSK (0xffff << DMA_TX_DFX1_IPTT_OFF) +#define DMA_TX_FIFO_DFX0 (PORT_BASE + 0x240) +#define PORT_DFX0 (PORT_BASE + 0x258) +#define LINK_DFX2 (PORT_BASE + 0X264) +#define LINK_DFX2_RCVR_HOLD_STS_OFF 9 +#define LINK_DFX2_RCVR_HOLD_STS_MSK (0x1 << LINK_DFX2_RCVR_HOLD_STS_OFF) +#define LINK_DFX2_SEND_HOLD_STS_OFF 10 +#define LINK_DFX2_SEND_HOLD_STS_MSK (0x1 << LINK_DFX2_SEND_HOLD_STS_OFF) #define PHY_CTRL_RDY_MSK (PORT_BASE + 0x2b0) #define PHYCTRL_NOT_RDY_MSK (PORT_BASE + 0x2b4) #define PHYCTRL_DWS_RESET_MSK (PORT_BASE + 0x2b8) @@ -257,6 +273,10 @@ #define AM_CFG_MAX_TRANS (0x5010) #define AM_CFG_SINGLE_PORT_MAX_TRANS (0x5014) +#define AXI_MASTER_CFG_BASE (0x5000) +#define AM_CTRL_GLOBAL (0x0) +#define AM_CURR_TRANS_RETURN (0x150) + /* HW dma structures */ /* Delivery queue header */ /* dw0 */ @@ -309,6 +329,8 @@ /* Completion header */ /* dw0 */ +#define CMPLT_HDR_ERR_PHASE_OFF 2 +#define CMPLT_HDR_ERR_PHASE_MSK (0xff << CMPLT_HDR_ERR_PHASE_OFF) #define CMPLT_HDR_RSPNS_XFRD_OFF 10 #define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) #define CMPLT_HDR_ERX_OFF 12 @@ -385,10 +407,10 @@ enum { enum { TRANS_TX_FAIL_BASE = 0x0, /* dw0 */ - TRANS_RX_FAIL_BASE = 0x100, /* dw1 */ - DMA_TX_ERR_BASE = 0x200, /* dw2 bit 15-0 */ - SIPC_RX_ERR_BASE = 0x300, /* dw2 bit 31-16*/ - DMA_RX_ERR_BASE = 0x400, /* dw3 */ + TRANS_RX_FAIL_BASE = 0x20, /* dw1 */ + DMA_TX_ERR_BASE = 0x40, /* dw2 bit 15-0 */ + SIPC_RX_ERR_BASE = 0x50, /* dw2 bit 31-16*/ + DMA_RX_ERR_BASE = 0x60, /* dw3 */ /* trans tx*/ TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS = TRANS_TX_FAIL_BASE, /* 0x0 */ @@ -428,100 +450,104 @@ enum { TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, /* 0x1f for sata/stp */ /* trans rx */ - TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x100 */ - TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x101 for sata/stp */ - TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x102 for ssp/smp */ - /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x102 <] for sata/stp */ - TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x103 for sata/stp */ - TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x104 for sata/stp */ - TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x105 for smp */ - /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x105 <] for sata/stp */ - TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x106 for sata/stp*/ - TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x107 */ - TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x108 */ - TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x109 */ - TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x10a */ - RESERVED1, /* 0x10b */ - TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x10c */ - TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x10d */ - TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x10e */ - TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x10f */ - TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x110 for ssp/smp */ - TRANS_RX_ERR_WITH_BAD_HASH, /* 0x111 for ssp */ - /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x111 <] for sata/stp */ - TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x112 for ssp*/ - /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x112 <] for sata/stp */ - TRANS_RX_SSP_FRM_LEN_ERR, /* 0x113 for ssp */ - /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x113 <] for sata */ - RESERVED2, /* 0x114 */ - RESERVED3, /* 0x115 
*/ - RESERVED4, /* 0x116 */ - RESERVED5, /* 0x117 */ - TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x118 */ - TRANS_RX_SMP_FRM_LEN_ERR, /* 0x119 */ - TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x11a */ - RESERVED6, /* 0x11b */ - RESERVED7, /* 0x11c */ - RESERVED8, /* 0x11d */ - RESERVED9, /* 0x11e */ - TRANS_RX_R_ERR, /* 0x11f */ + TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR = TRANS_RX_FAIL_BASE, /* 0x20 */ + TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, /* 0x21 for sata/stp */ + TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, /* 0x22 for ssp/smp */ + /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x22 <] for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, /* 0x23 for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, /* 0x24 for sata/stp */ + TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, /* 0x25 for smp */ + /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x25 <] for sata/stp */ + TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, /* 0x26 for sata/stp*/ + TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, /* 0x27 */ + TRANS_RX_ERR_WITH_BREAK_TIMEOUT, /* 0x28 */ + TRANS_RX_ERR_WITH_BREAK_REQUEST, /* 0x29 */ + TRANS_RX_ERR_WITH_BREAK_RECEVIED, /* 0x2a */ + RESERVED1, /* 0x2b */ + TRANS_RX_ERR_WITH_CLOSE_NORMAL, /* 0x2c */ + TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, /* 0x2d */ + TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, /* 0x2e */ + TRANS_RX_ERR_WITH_CLOSE_COMINIT, /* 0x2f */ + TRANS_RX_ERR_WITH_DATA_LEN0, /* 0x30 for ssp/smp */ + TRANS_RX_ERR_WITH_BAD_HASH, /* 0x31 for ssp */ + /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x31 <] for sata/stp */ + TRANS_RX_XRDY_WLEN_ZERO_ERR, /* 0x32 for ssp*/ + /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x32 <] for sata/stp */ + TRANS_RX_SSP_FRM_LEN_ERR, /* 0x33 for ssp */ + /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x33 <] for sata */ + RESERVED2, /* 0x34 */ + RESERVED3, /* 0x35 */ + RESERVED4, /* 0x36 */ + RESERVED5, /* 0x37 */ + TRANS_RX_ERR_WITH_BAD_FRM_TYPE, /* 0x38 */ + TRANS_RX_SMP_FRM_LEN_ERR, /* 0x39 */ + TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x3a */ + RESERVED6, /* 0x3b */ + RESERVED7, /* 0x3c */ + RESERVED8, /* 0x3d */ + RESERVED9, /* 0x3e */ + TRANS_RX_R_ERR, /* 0x3f */ /* dma tx */ - DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x200 */ - DMA_TX_DIF_APP_ERR, /* 0x201 */ - DMA_TX_DIF_RPP_ERR, /* 0x202 */ - DMA_TX_DATA_SGL_OVERFLOW, /* 0x203 */ - DMA_TX_DIF_SGL_OVERFLOW, /* 0x204 */ - DMA_TX_UNEXP_XFER_ERR, /* 0x205 */ - DMA_TX_UNEXP_RETRANS_ERR, /* 0x206 */ - DMA_TX_XFER_LEN_OVERFLOW, /* 0x207 */ - DMA_TX_XFER_OFFSET_ERR, /* 0x208 */ - DMA_TX_RAM_ECC_ERR, /* 0x209 */ - DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x20a */ + DMA_TX_DIF_CRC_ERR = DMA_TX_ERR_BASE, /* 0x40 */ + DMA_TX_DIF_APP_ERR, /* 0x41 */ + DMA_TX_DIF_RPP_ERR, /* 0x42 */ + DMA_TX_DATA_SGL_OVERFLOW, /* 0x43 */ + DMA_TX_DIF_SGL_OVERFLOW, /* 0x44 */ + DMA_TX_UNEXP_XFER_ERR, /* 0x45 */ + DMA_TX_UNEXP_RETRANS_ERR, /* 0x46 */ + DMA_TX_XFER_LEN_OVERFLOW, /* 0x47 */ + DMA_TX_XFER_OFFSET_ERR, /* 0x48 */ + DMA_TX_RAM_ECC_ERR, /* 0x49 */ + DMA_TX_DIF_LEN_ALIGN_ERR, /* 0x4a */ + DMA_TX_MAX_ERR_CODE, /* sipc rx */ - SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x300 */ - SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x301 */ - SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x302 */ - SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x303 */ - SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x304 */ - SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x305 */ - SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x306 */ - SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x307 */ - SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x308 */ - SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x309 */ - SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x30a */ + SIPC_RX_FIS_STATUS_ERR_BIT_VLD = SIPC_RX_ERR_BASE, /* 0x50 */ + SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, /* 0x51 */ + 
SIPC_RX_FIS_STATUS_BSY_BIT_ERR, /* 0x52 */ + SIPC_RX_WRSETUP_LEN_ODD_ERR, /* 0x53 */ + SIPC_RX_WRSETUP_LEN_ZERO_ERR, /* 0x54 */ + SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, /* 0x55 */ + SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, /* 0x56 */ + SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, /* 0x57 */ + SIPC_RX_SATA_UNEXP_FIS_ERR, /* 0x58 */ + SIPC_RX_WRSETUP_ESTATUS_ERR, /* 0x59 */ + SIPC_RX_DATA_UNDERFLOW_ERR, /* 0x5a */ + SIPC_RX_MAX_ERR_CODE, /* dma rx */ - DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x400 */ - DMA_RX_DIF_APP_ERR, /* 0x401 */ - DMA_RX_DIF_RPP_ERR, /* 0x402 */ - DMA_RX_DATA_SGL_OVERFLOW, /* 0x403 */ - DMA_RX_DIF_SGL_OVERFLOW, /* 0x404 */ - DMA_RX_DATA_LEN_OVERFLOW, /* 0x405 */ - DMA_RX_DATA_LEN_UNDERFLOW, /* 0x406 */ - DMA_RX_DATA_OFFSET_ERR, /* 0x407 */ - RESERVED10, /* 0x408 */ - DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x409 */ - DMA_RX_RESP_BUF_OVERFLOW, /* 0x40a */ - DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x40b */ - DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x40c */ - DMA_RX_UNEXP_RDFRAME_ERR, /* 0x40d */ - DMA_RX_PIO_DATA_LEN_ERR, /* 0x40e */ - DMA_RX_RDSETUP_STATUS_ERR, /* 0x40f */ - DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x410 */ - DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x411 */ - DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x412 */ - DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x413 */ - DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x414 */ - DMA_RX_RDSETUP_OFFSET_ERR, /* 0x415 */ - DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x416 */ - DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x417 */ - DMA_RX_RAM_ECC_ERR, /* 0x418 */ - DMA_RX_UNKNOWN_FRM_ERR, /* 0x419 */ + DMA_RX_DIF_CRC_ERR = DMA_RX_ERR_BASE, /* 0x60 */ + DMA_RX_DIF_APP_ERR, /* 0x61 */ + DMA_RX_DIF_RPP_ERR, /* 0x62 */ + DMA_RX_DATA_SGL_OVERFLOW, /* 0x63 */ + DMA_RX_DIF_SGL_OVERFLOW, /* 0x64 */ + DMA_RX_DATA_LEN_OVERFLOW, /* 0x65 */ + DMA_RX_DATA_LEN_UNDERFLOW, /* 0x66 */ + DMA_RX_DATA_OFFSET_ERR, /* 0x67 */ + RESERVED10, /* 0x68 */ + DMA_RX_SATA_FRAME_TYPE_ERR, /* 0x69 */ + DMA_RX_RESP_BUF_OVERFLOW, /* 0x6a */ + DMA_RX_UNEXP_RETRANS_RESP_ERR, /* 0x6b */ + DMA_RX_UNEXP_NORM_RESP_ERR, /* 0x6c */ + DMA_RX_UNEXP_RDFRAME_ERR, /* 0x6d */ + DMA_RX_PIO_DATA_LEN_ERR, /* 0x6e */ + DMA_RX_RDSETUP_STATUS_ERR, /* 0x6f */ + DMA_RX_RDSETUP_STATUS_DRQ_ERR, /* 0x70 */ + DMA_RX_RDSETUP_STATUS_BSY_ERR, /* 0x71 */ + DMA_RX_RDSETUP_LEN_ODD_ERR, /* 0x72 */ + DMA_RX_RDSETUP_LEN_ZERO_ERR, /* 0x73 */ + DMA_RX_RDSETUP_LEN_OVER_ERR, /* 0x74 */ + DMA_RX_RDSETUP_OFFSET_ERR, /* 0x75 */ + DMA_RX_RDSETUP_ACTIVE_ERR, /* 0x76 */ + DMA_RX_RDSETUP_ESTATUS_ERR, /* 0x77 */ + DMA_RX_RAM_ECC_ERR, /* 0x78 */ + DMA_RX_UNKNOWN_FRM_ERR, /* 0x79 */ + DMA_RX_MAX_ERR_CODE, }; #define HISI_SAS_COMMAND_ENTRIES_V2_HW 4096 +#define HISI_MAX_SATA_SUPPORT_V2_HW (HISI_SAS_COMMAND_ENTRIES_V2_HW/64 - 1) #define DIR_NO_DATA 0 #define DIR_TO_INI 1 @@ -534,7 +560,13 @@ enum { #define SATA_PROTOCOL_FPDMA 0x8 #define SATA_PROTOCOL_ATAPI 0x10 -static void hisi_sas_link_timeout_disable_link(unsigned long data); +#define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \ + err_phase == 0x4 || err_phase == 0x8 ||\ + err_phase == 0x6 || err_phase == 0xa) +#define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \ + err_phase == 0x20 || err_phase == 0x40) + +static void link_timeout_disable_link(unsigned long data); static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { @@ -576,38 +608,86 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba, /* This function needs to be protected from pre-emption. 
*/ + static int slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx, - struct domain_device *device) + struct domain_device *device) { - unsigned int index = 0; - void *bitmap = hisi_hba->slot_index_tags; int sata_dev = dev_is_sata(device); + void *bitmap = hisi_hba->slot_index_tags; + struct hisi_sas_device *sas_dev = device->lldd_dev; + int sata_idx = sas_dev->sata_idx; + int start, end; + + if (!sata_dev) { + /* + * STP link SoC bug workaround: index starts from 1. + * Additionally, we can only allocate odd IPTTs (1~4095) + * for SAS/SMP devices. + */ + start = 1; + end = hisi_hba->slot_index_count; + } else { + if (sata_idx >= HISI_MAX_SATA_SUPPORT_V2_HW) + return -EINVAL; + + /* + * For SATA device: allocate even IPTT in this interval + * [64*(sata_idx+1), 64*(sata_idx+2)), so each SATA device + * owns 32 IPTTs. IPTT 0 shall not be used due to the STP link + * SoC bug workaround. So we ignore the first 32 even IPTTs. + */ + start = 64 * (sata_idx + 1); + end = 64 * (sata_idx + 2); + } while (1) { - index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, - index); - if (index >= hisi_hba->slot_index_count) + start = find_next_zero_bit(bitmap, + hisi_hba->slot_index_count, start); + if (start >= end) return -SAS_QUEUE_FULL; /* - * SAS IPTT bit0 should be 1 - */ - if (sata_dev || (index & 1)) + * SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0. + */ + if (sata_dev ^ (start & 1)) break; - index++; + start++; } - set_bit(index, bitmap); - *slot_idx = index; + set_bit(start, bitmap); + *slot_idx = start; return 0; } +static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx) +{ + unsigned int index; + struct device *dev = &hisi_hba->pdev->dev; + void *bitmap = hisi_hba->sata_dev_bitmap; + + index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW); + if (index >= HISI_MAX_SATA_SUPPORT_V2_HW) { + dev_warn(dev, "alloc sata index failed, index=%d\n", index); + return false; + } + + set_bit(index, bitmap); + *idx = index; + return true; +} + + static struct hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) { struct hisi_hba *hisi_hba = device->port->ha->lldd_ha; struct hisi_sas_device *sas_dev = NULL; int i, sata_dev = dev_is_sata(device); + int sata_idx = -1; spin_lock(&hisi_hba->lock); + + if (sata_dev) + if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx)) + goto out; + for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { /* * SATA device id bit0 should be 0 @@ -621,9 +701,13 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) sas_dev->dev_type = device->dev_type; sas_dev->hisi_hba = hisi_hba; sas_dev->sas_device = device; + sas_dev->sata_idx = sata_idx; + INIT_LIST_HEAD(&hisi_hba->devices[i].list); break; } } + +out: spin_unlock(&hisi_hba->lock); return sas_dev; @@ -676,7 +760,8 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, u64 qw0, device_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[device_id]; struct domain_device *parent_dev = device->parent; - struct hisi_sas_port *port = device->port->lldd_port; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); memset(itct, 0, sizeof(*itct)); @@ -729,6 +814,10 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba, u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); int i; + /* SoC bug workaround */ + if (dev_is_sata(sas_dev->sas_device)) + clear_bit(sas_dev->sata_idx, hisi_hba->sata_dev_bitmap); + /* clear the itct interrupt state */ if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
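	/*
	 * A minimal worked example of the IPTT quirk implemented by
	 * slot_index_alloc_quirk_v2_hw() above (illustrative only, taking
	 * dev_is_sata() to return 0 or 1): the predicate
	 * "sata_dev ^ (start & 1)" accepts odd tags for SAS/SMP and even
	 * tags for SATA, e.g.
	 *
	 *	sata_dev = 0, start = 7:  0 ^ 1 = 1 -> take odd IPTT 7
	 *	sata_dev = 1, start = 7:  1 ^ 1 = 0 -> skip, advance to 8
	 *	sata_dev = 1, start = 8:  1 ^ 0 = 1 -> take even IPTT 8
	 *
	 * For sata_idx = 2 the search window is [192, 256), i.e. the 32 even
	 * tags 192, 194, ..., 254; free_device_v2_hw() below releases the
	 * device's slot in sata_dev_bitmap so the window can be reused.
	 */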
hisi_sas_write32(hisi_hba, ENT_INT_SRC3, @@ -858,6 +947,46 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba) return 0; } +/* This function needs to be called after resetting SAS controller. */ +static void phys_reject_stp_links_v2_hw(struct hisi_hba *hisi_hba) +{ + u32 cfg; + int phy_no; + + hisi_hba->reject_stp_links_msk = (1 << hisi_hba->n_phy) - 1; + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + cfg = hisi_sas_phy_read32(hisi_hba, phy_no, CON_CONTROL); + if (!(cfg & CON_CONTROL_CFG_OPEN_ACC_STP_MSK)) + continue; + + cfg &= ~CON_CONTROL_CFG_OPEN_ACC_STP_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, CON_CONTROL, cfg); + } +} + +static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba) +{ + int phy_no; + u32 dma_tx_dfx1; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + if (!(hisi_hba->reject_stp_links_msk & BIT(phy_no))) + continue; + + dma_tx_dfx1 = hisi_sas_phy_read32(hisi_hba, phy_no, + DMA_TX_DFX1); + if (dma_tx_dfx1 & DMA_TX_DFX1_IPTT_MSK) { + u32 cfg = hisi_sas_phy_read32(hisi_hba, + phy_no, CON_CONTROL); + + cfg |= CON_CONTROL_CFG_OPEN_ACC_STP_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, + CON_CONTROL, cfg); + clear_bit(phy_no, &hisi_hba->reject_stp_links_msk); + } + } +} + static void init_reg_v2_hw(struct hisi_hba *hisi_hba) { struct device *dev = &hisi_hba->pdev->dev; @@ -876,7 +1005,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) (u32)((1ULL << hisi_hba->queue_count) - 1)); hisi_sas_write32(hisi_hba, AXI_USER1, 0xc0000000); hisi_sas_write32(hisi_hba, AXI_USER2, 0x10000); - hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); + hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x0); hisi_sas_write32(hisi_hba, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL, 0x7FF); hisi_sas_write32(hisi_hba, OPENA_WT_CONTI_TIME, 0x1); hisi_sas_write32(hisi_hba, I_T_NEXUS_LOSS_TIME, 0x1F4); @@ -885,9 +1014,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); hisi_sas_write32(hisi_hba, HGC_ERR_STAT_EN, 0x1); hisi_sas_write32(hisi_hba, HGC_GET_ITV_TIME, 0x1); - hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); - hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); + hisi_sas_write32(hisi_hba, INT_COAL_EN, 0xc); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x60); + hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x3); hisi_sas_write32(hisi_hba, ENT_INT_COAL_TIME, 0x1); hisi_sas_write32(hisi_hba, ENT_INT_COAL_CNT, 0x1); hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0x0); @@ -910,14 +1039,14 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0); hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2); - hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10); + hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x8); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xfff87fff); hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x8ffffbff); - hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x23f801fc); + hisi_sas_phy_write32(hisi_hba, i, SL_CFG, 0x13f801fc); hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0); hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 
0x0); @@ -989,12 +1118,15 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) upper_32_bits(hisi_hba->initial_fis_dma)); } -static void hisi_sas_link_timeout_enable_link(unsigned long data) +static void link_timeout_enable_link(unsigned long data) { struct hisi_hba *hisi_hba = (struct hisi_hba *)data; int i, reg_val; for (i = 0; i < hisi_hba->n_phy; i++) { + if (hisi_hba->reject_stp_links_msk & BIT(i)) + continue; + reg_val = hisi_sas_phy_read32(hisi_hba, i, CON_CONTROL); if (!(reg_val & BIT(0))) { hisi_sas_phy_write32(hisi_hba, i, @@ -1003,17 +1135,20 @@ static void hisi_sas_link_timeout_enable_link(unsigned long data) } } - hisi_hba->timer.function = hisi_sas_link_timeout_disable_link; + hisi_hba->timer.function = link_timeout_disable_link; mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900)); } -static void hisi_sas_link_timeout_disable_link(unsigned long data) +static void link_timeout_disable_link(unsigned long data) { struct hisi_hba *hisi_hba = (struct hisi_hba *)data; int i, reg_val; reg_val = hisi_sas_read32(hisi_hba, PHY_STATE); for (i = 0; i < hisi_hba->n_phy && reg_val; i++) { + if (hisi_hba->reject_stp_links_msk & BIT(i)) + continue; + if (reg_val & BIT(i)) { hisi_sas_phy_write32(hisi_hba, i, CON_CONTROL, 0x6); @@ -1021,14 +1156,14 @@ static void hisi_sas_link_timeout_disable_link(unsigned long data) } } - hisi_hba->timer.function = hisi_sas_link_timeout_enable_link; + hisi_hba->timer.function = link_timeout_enable_link; mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100)); } static void set_link_timer_quirk(struct hisi_hba *hisi_hba) { hisi_hba->timer.data = (unsigned long)hisi_hba; - hisi_hba->timer.function = hisi_sas_link_timeout_disable_link; + hisi_hba->timer.function = link_timeout_disable_link; hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000); add_timer(&hisi_hba->timer); } @@ -1058,12 +1193,138 @@ static void enable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); } +static bool is_sata_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 context; + + context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); + if (context & (1 << phy_no)) + return true; + + return false; +} + +static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + u32 dfx_val; + + dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); + + if (dfx_val & BIT(16)) + return false; + + return true; +} + +static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + int i, max_loop = 1000; + struct device *dev = &hisi_hba->pdev->dev; + u32 status, axi_status, dfx_val, dfx_tx_val; + + for (i = 0; i < max_loop; i++) { + status = hisi_sas_read32_relaxed(hisi_hba, + AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN); + + axi_status = hisi_sas_read32(hisi_hba, AXI_CFG); + dfx_val = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX1); + dfx_tx_val = hisi_sas_phy_read32(hisi_hba, + phy_no, DMA_TX_FIFO_DFX0); + + if ((status == 0x3) && (axi_status == 0x0) && + (dfx_val & BIT(20)) && (dfx_tx_val & BIT(10))) + return true; + udelay(10); + } + dev_err(dev, "bus is not idle phy%d, axi150:0x%x axi100:0x%x port204:0x%x port240:0x%x\n", + phy_no, status, axi_status, + dfx_val, dfx_tx_val); + return false; +} + +static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + int i, max_loop = 1000; + struct device *dev = &hisi_hba->pdev->dev; + u32 status, tx_dfx0; + + for (i = 0; i < max_loop; i++) { + status = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); + status = (status & 0x3fc0) >> 
6; + + if (status != 0x1) + return true; + + tx_dfx0 = hisi_sas_phy_read32(hisi_hba, phy_no, DMA_TX_DFX0); + if ((tx_dfx0 & 0x1ff) == 0x2) + return true; + udelay(10); + } + dev_err(dev, "IO not done phy%d, port264:0x%x port200:0x%x\n", + phy_no, status, tx_dfx0); + return false; +} + +static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) +{ + if (tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) + return true; + + if (!axi_bus_is_idle_v2_hw(hisi_hba, phy_no)) + return false; + + if (!wait_io_done_v2_hw(hisi_hba, phy_no)) + return false; + + return true; +} + + static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { - u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); + u32 cfg, axi_val, dfx0_val, txid_auto; + struct device *dev = &hisi_hba->pdev->dev; + + /* Close axi bus. */ + axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL); + axi_val |= 0x1; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, axi_val); + + if (is_sata_phy_v2_hw(hisi_hba, phy_no)) { + if (allowed_disable_phy_v2_hw(hisi_hba, phy_no)) + goto do_disable; + + /* Reset host controller. */ + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + return; + } + + dfx0_val = hisi_sas_phy_read32(hisi_hba, phy_no, PORT_DFX0); + dfx0_val = (dfx0_val & 0x1fc0) >> 6; + if (dfx0_val != 0x4) + goto do_disable; + if (!tx_fifo_is_empty_v2_hw(hisi_hba, phy_no)) { + dev_warn(dev, "phy%d, wait tx fifo need send break\n", + phy_no); + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, + TXID_AUTO); + txid_auto |= TXID_AUTO_CTB_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto); + } + +do_disable: + cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); cfg &= ~PHY_CFG_ENA_MSK; hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); + + /* Open axi bus. 
*/ + axi_val &= ~0x1; + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + + AM_CTRL_GLOBAL, axi_val); } static void start_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) @@ -1078,6 +1339,14 @@ static void stop_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no) disable_phy_v2_hw(hisi_hba, phy_no); } +static void stop_phys_v2_hw(struct hisi_hba *hisi_hba) +{ + int i; + + for (i = 0; i < hisi_hba->n_phy; i++) + stop_phy_v2_hw(hisi_hba, i); +} + static void phy_hard_reset_v2_hw(struct hisi_hba *hisi_hba, int phy_no) { struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; @@ -1437,10 +1706,205 @@ static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task, ts->buf_valid_size = sizeof(*resp); } +#define TRANS_TX_ERR 0 +#define TRANS_RX_ERR 1 +#define DMA_TX_ERR 2 +#define SIPC_RX_ERR 3 +#define DMA_RX_ERR 4 + +#define DMA_TX_ERR_OFF 0 +#define DMA_TX_ERR_MSK (0xffff << DMA_TX_ERR_OFF) +#define SIPC_RX_ERR_OFF 16 +#define SIPC_RX_ERR_MSK (0xffff << SIPC_RX_ERR_OFF) + +static int parse_trans_tx_err_code_v2_hw(u32 err_msk) +{ + const u8 trans_tx_err_code_prio[] = { + TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS, + TRANS_TX_ERR_PHY_NOT_ENABLE, + TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, + TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION, + TRANS_TX_OPEN_CNX_ERR_BY_OTHER, + RESERVED0, + TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT, + TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY, + TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED, + TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED, + TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION, + TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD, + TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER, + TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED, + TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT, + TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION, + TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED, + TRANS_TX_ERR_WITH_CLOSE_PHYDISALE, + TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT, + TRANS_TX_ERR_WITH_CLOSE_COMINIT, + TRANS_TX_ERR_WITH_BREAK_TIMEOUT, + TRANS_TX_ERR_WITH_BREAK_REQUEST, + TRANS_TX_ERR_WITH_BREAK_RECEVIED, + TRANS_TX_ERR_WITH_CLOSE_TIMEOUT, + TRANS_TX_ERR_WITH_CLOSE_NORMAL, + TRANS_TX_ERR_WITH_NAK_RECEVIED, + TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT, + TRANS_TX_ERR_WITH_CREDIT_TIMEOUT, + TRANS_TX_ERR_WITH_IPTT_CONFLICT, + TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS, + TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(trans_tx_err_code_prio); i++) { + index = trans_tx_err_code_prio[i] - TRANS_TX_FAIL_BASE; + if (err_msk & (1 << index)) + return trans_tx_err_code_prio[i]; + } + return -1; +} + +static int parse_trans_rx_err_code_v2_hw(u32 err_msk) +{ + const u8 trans_rx_err_code_prio[] = { + TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR, + TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, + TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, + TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR, + TRANS_RX_ERR_WITH_RXFIS_CRC_ERR, + TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN, + TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP, + TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN, + TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE, + TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT, + TRANS_RX_ERR_WITH_CLOSE_COMINIT, + TRANS_RX_ERR_WITH_BREAK_TIMEOUT, + TRANS_RX_ERR_WITH_BREAK_REQUEST, + TRANS_RX_ERR_WITH_BREAK_RECEVIED, + RESERVED1, + TRANS_RX_ERR_WITH_CLOSE_NORMAL, + TRANS_RX_ERR_WITH_DATA_LEN0, + TRANS_RX_ERR_WITH_BAD_HASH, + TRANS_RX_XRDY_WLEN_ZERO_ERR, + TRANS_RX_SSP_FRM_LEN_ERR, + RESERVED2, + RESERVED3, + RESERVED4, + RESERVED5, + TRANS_RX_ERR_WITH_BAD_FRM_TYPE, + TRANS_RX_SMP_FRM_LEN_ERR, + TRANS_RX_SMP_RESP_TIMEOUT_ERR, + RESERVED6, + RESERVED7, + RESERVED8, + RESERVED9, + TRANS_RX_R_ERR, + }; + int index, i; + + for (i = 0; i < 
ARRAY_SIZE(trans_rx_err_code_prio); i++) { + index = trans_rx_err_code_prio[i] - TRANS_RX_FAIL_BASE; + if (err_msk & (1 << index)) + return trans_rx_err_code_prio[i]; + } + return -1; +} + +static int parse_dma_tx_err_code_v2_hw(u32 err_msk) +{ + const u8 dma_tx_err_code_prio[] = { + DMA_TX_UNEXP_XFER_ERR, + DMA_TX_UNEXP_RETRANS_ERR, + DMA_TX_XFER_LEN_OVERFLOW, + DMA_TX_XFER_OFFSET_ERR, + DMA_TX_RAM_ECC_ERR, + DMA_TX_DIF_LEN_ALIGN_ERR, + DMA_TX_DIF_CRC_ERR, + DMA_TX_DIF_APP_ERR, + DMA_TX_DIF_RPP_ERR, + DMA_TX_DATA_SGL_OVERFLOW, + DMA_TX_DIF_SGL_OVERFLOW, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(dma_tx_err_code_prio); i++) { + index = dma_tx_err_code_prio[i] - DMA_TX_ERR_BASE; + err_msk = err_msk & DMA_TX_ERR_MSK; + if (err_msk & (1 << index)) + return dma_tx_err_code_prio[i]; + } + return -1; +} + +static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) +{ + const u8 sipc_rx_err_code_prio[] = { + SIPC_RX_FIS_STATUS_ERR_BIT_VLD, + SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, + SIPC_RX_FIS_STATUS_BSY_BIT_ERR, + SIPC_RX_WRSETUP_LEN_ODD_ERR, + SIPC_RX_WRSETUP_LEN_ZERO_ERR, + SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR, + SIPC_RX_NCQ_WRSETUP_OFFSET_ERR, + SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR, + SIPC_RX_SATA_UNEXP_FIS_ERR, + SIPC_RX_WRSETUP_ESTATUS_ERR, + SIPC_RX_DATA_UNDERFLOW_ERR, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(sipc_rx_err_code_prio); i++) { + index = sipc_rx_err_code_prio[i] - SIPC_RX_ERR_BASE; + err_msk = err_msk & SIPC_RX_ERR_MSK; + if (err_msk & (1 << (index + 0x10))) + return sipc_rx_err_code_prio[i]; + } + return -1; +} + +static int parse_dma_rx_err_code_v2_hw(u32 err_msk) +{ + const u8 dma_rx_err_code_prio[] = { + DMA_RX_UNKNOWN_FRM_ERR, + DMA_RX_DATA_LEN_OVERFLOW, + DMA_RX_DATA_LEN_UNDERFLOW, + DMA_RX_DATA_OFFSET_ERR, + RESERVED10, + DMA_RX_SATA_FRAME_TYPE_ERR, + DMA_RX_RESP_BUF_OVERFLOW, + DMA_RX_UNEXP_RETRANS_RESP_ERR, + DMA_RX_UNEXP_NORM_RESP_ERR, + DMA_RX_UNEXP_RDFRAME_ERR, + DMA_RX_PIO_DATA_LEN_ERR, + DMA_RX_RDSETUP_STATUS_ERR, + DMA_RX_RDSETUP_STATUS_DRQ_ERR, + DMA_RX_RDSETUP_STATUS_BSY_ERR, + DMA_RX_RDSETUP_LEN_ODD_ERR, + DMA_RX_RDSETUP_LEN_ZERO_ERR, + DMA_RX_RDSETUP_LEN_OVER_ERR, + DMA_RX_RDSETUP_OFFSET_ERR, + DMA_RX_RDSETUP_ACTIVE_ERR, + DMA_RX_RDSETUP_ESTATUS_ERR, + DMA_RX_RAM_ECC_ERR, + DMA_RX_DIF_CRC_ERR, + DMA_RX_DIF_APP_ERR, + DMA_RX_DIF_RPP_ERR, + DMA_RX_DATA_SGL_OVERFLOW, + DMA_RX_DIF_SGL_OVERFLOW, + }; + int index, i; + + for (i = 0; i < ARRAY_SIZE(dma_rx_err_code_prio); i++) { + index = dma_rx_err_code_prio[i] - DMA_RX_ERR_BASE; + if (err_msk & (1 << index)) + return dma_rx_err_code_prio[i]; + } + return -1; +} + /* by default, task resp is complete */ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task, - struct hisi_sas_slot *slot) + struct hisi_sas_slot *slot, + int err_phase) { struct task_status_struct *ts = &task->task_status; struct hisi_sas_err_record_v2 *err_record = slot->status_buffer; @@ -1451,21 +1915,23 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, u32 dma_rx_err_type = cpu_to_le32(err_record->dma_rx_err_type); int error = -1; - if (dma_rx_err_type) { - error = ffs(dma_rx_err_type) - - 1 + DMA_RX_ERR_BASE; - } else if (sipc_rx_err_type) { - error = ffs(sipc_rx_err_type) - - 1 + SIPC_RX_ERR_BASE; - } else if (dma_tx_err_type) { - error = ffs(dma_tx_err_type) - - 1 + DMA_TX_ERR_BASE; - } else if (trans_rx_fail_type) { - error = ffs(trans_rx_fail_type) - - 1 + TRANS_RX_FAIL_BASE; - } else if (trans_tx_fail_type) { - error = ffs(trans_tx_fail_type) - - 1 + TRANS_TX_FAIL_BASE; + if (err_phase == 1) { + /* error in 
TX phase, the priority of error is: DW2 > DW0 */ + error = parse_dma_tx_err_code_v2_hw(dma_tx_err_type); + if (error == -1) + error = parse_trans_tx_err_code_v2_hw( + trans_tx_fail_type); + } else if (err_phase == 2) { + /* error in RX phase, the priority is: DW1 > DW3 > DW2 */ + error = parse_trans_rx_err_code_v2_hw( + trans_rx_fail_type); + if (error == -1) { + error = parse_dma_rx_err_code_v2_hw( + dma_rx_err_type); + if (error == -1) + error = parse_sipc_rx_err_code_v2_hw( + sipc_rx_err_type); + } } switch (task->task_proto) { @@ -1478,12 +1944,6 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, ts->open_rej_reason = SAS_OREJ_NO_DEST; break; } - case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: - { - ts->stat = SAS_OPEN_REJECT; - ts->open_rej_reason = SAS_OREJ_PATH_BLOCKED; - break; - } case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: { ts->stat = SAS_OPEN_REJECT; @@ -1502,19 +1962,15 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, ts->open_rej_reason = SAS_OREJ_BAD_DEST; break; } - case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: - { - ts->stat = SAS_OPEN_REJECT; - ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; - break; - } case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_WRONG_DEST; break; } + case DMA_RX_UNEXP_NORM_RESP_ERR: case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: + case DMA_RX_RESP_BUF_OVERFLOW: { ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; @@ -1526,16 +1982,6 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, ts->stat = SAS_DEV_NO_RESPONSE; break; } - case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: - { - ts->stat = SAS_PHY_DOWN; - break; - } - case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: - { - ts->stat = SAS_OPEN_TO; - break; - } case DMA_RX_DATA_LEN_OVERFLOW: { ts->stat = SAS_DATA_OVERRUN; @@ -1543,60 +1989,65 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, break; } case DMA_RX_DATA_LEN_UNDERFLOW: - case SIPC_RX_DATA_UNDERFLOW_ERR: { - ts->residual = trans_tx_fail_type; + ts->residual = dma_rx_err_type; ts->stat = SAS_DATA_UNDERRUN; break; } - case TRANS_TX_ERR_FRAME_TXED: - { - /* This will request a retry */ - ts->stat = SAS_QUEUE_FULL; - slot->abort = 1; - break; - } case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: case TRANS_TX_ERR_PHY_NOT_ENABLE: case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: + case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: + case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: + case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: case TRANS_TX_ERR_WITH_BREAK_REQUEST: case TRANS_TX_ERR_WITH_BREAK_RECEVIED: case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_NORMAL: + case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_COMINIT: case TRANS_TX_ERR_WITH_NAK_RECEVIED: case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: - case TRANS_TX_ERR_WITH_IPTT_CONFLICT: case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: + case TRANS_TX_ERR_WITH_IPTT_CONFLICT: case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR: case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: + case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: case TRANS_RX_ERR_WITH_BREAK_REQUEST: case TRANS_RX_ERR_WITH_BREAK_RECEVIED: case TRANS_RX_ERR_WITH_CLOSE_NORMAL: case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: case TRANS_RX_ERR_WITH_CLOSE_COMINIT: + case TRANS_TX_ERR_FRAME_TXED: + case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: case 
TRANS_RX_ERR_WITH_DATA_LEN0: case TRANS_RX_ERR_WITH_BAD_HASH: case TRANS_RX_XRDY_WLEN_ZERO_ERR: case TRANS_RX_SSP_FRM_LEN_ERR: case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: + case DMA_TX_DATA_SGL_OVERFLOW: case DMA_TX_UNEXP_XFER_ERR: case DMA_TX_UNEXP_RETRANS_ERR: case DMA_TX_XFER_LEN_OVERFLOW: case DMA_TX_XFER_OFFSET_ERR: + case SIPC_RX_DATA_UNDERFLOW_ERR: + case DMA_RX_DATA_SGL_OVERFLOW: case DMA_RX_DATA_OFFSET_ERR: - case DMA_RX_UNEXP_NORM_RESP_ERR: - case DMA_RX_UNEXP_RDFRAME_ERR: + case DMA_RX_RDSETUP_LEN_ODD_ERR: + case DMA_RX_RDSETUP_LEN_ZERO_ERR: + case DMA_RX_RDSETUP_LEN_OVER_ERR: + case DMA_RX_SATA_FRAME_TYPE_ERR: case DMA_RX_UNKNOWN_FRM_ERR: { - ts->stat = SAS_OPEN_REJECT; - ts->open_rej_reason = SAS_OREJ_UNKNOWN; + /* This will request a retry */ + ts->stat = SAS_QUEUE_FULL; + slot->abort = 1; break; } default: @@ -1613,57 +2064,92 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { switch (error) { - case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: - case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION: { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_NO_DEST; + break; + } + case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER: + { ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEV_NO_RESPONSE; break; } case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + } case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + } case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION: - case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: + { + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + } case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION: - case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: - case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: { ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; break; } - case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: + case DMA_RX_RESP_BUF_OVERFLOW: + case DMA_RX_UNEXP_NORM_RESP_ERR: + case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION: { - ts->stat = SAS_OPEN_TO; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; } case DMA_RX_DATA_LEN_OVERFLOW: { ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + break; + } + case DMA_RX_DATA_LEN_UNDERFLOW: + { + ts->residual = dma_rx_err_type; + ts->stat = SAS_DATA_UNDERRUN; break; } case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS: case TRANS_TX_ERR_PHY_NOT_ENABLE: case TRANS_TX_OPEN_CNX_ERR_BY_OTHER: case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT: + case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD: + case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED: + case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT: case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED: case TRANS_TX_ERR_WITH_BREAK_TIMEOUT: case TRANS_TX_ERR_WITH_BREAK_REQUEST: case TRANS_TX_ERR_WITH_BREAK_RECEVIED: case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_NORMAL: + case TRANS_TX_ERR_WITH_CLOSE_PHYDISALE: case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT: case TRANS_TX_ERR_WITH_CLOSE_COMINIT: - case TRANS_TX_ERR_WITH_NAK_RECEVIED: case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT: case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT: + case TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS: case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT: - case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM: + case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR: case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR: case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR: case 
TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN: case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP: + case TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN: + case TRANS_RX_ERR_WITH_BREAK_TIMEOUT: + case TRANS_RX_ERR_WITH_BREAK_REQUEST: + case TRANS_RX_ERR_WITH_BREAK_RECEVIED: case TRANS_RX_ERR_WITH_CLOSE_NORMAL: case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE: case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT: @@ -1671,7 +2157,12 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, case TRANS_RX_ERR_WITH_DATA_LEN0: case TRANS_RX_ERR_WITH_BAD_HASH: case TRANS_RX_XRDY_WLEN_ZERO_ERR: - case TRANS_RX_SSP_FRM_LEN_ERR: + case TRANS_RX_ERR_WITH_BAD_FRM_TYPE: + case DMA_TX_DATA_SGL_OVERFLOW: + case DMA_TX_UNEXP_XFER_ERR: + case DMA_TX_UNEXP_RETRANS_ERR: + case DMA_TX_XFER_LEN_OVERFLOW: + case DMA_TX_XFER_OFFSET_ERR: case SIPC_RX_FIS_STATUS_ERR_BIT_VLD: case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR: case SIPC_RX_FIS_STATUS_BSY_BIT_ERR: @@ -1679,6 +2170,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, case SIPC_RX_WRSETUP_LEN_ZERO_ERR: case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR: case SIPC_RX_SATA_UNEXP_FIS_ERR: + case DMA_RX_DATA_SGL_OVERFLOW: + case DMA_RX_DATA_OFFSET_ERR: case DMA_RX_SATA_FRAME_TYPE_ERR: case DMA_RX_UNEXP_RDFRAME_ERR: case DMA_RX_PIO_DATA_LEN_ERR: @@ -1692,8 +2185,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, case DMA_RX_RDSETUP_ACTIVE_ERR: case DMA_RX_RDSETUP_ESTATUS_ERR: case DMA_RX_UNKNOWN_FRM_ERR: + case TRANS_RX_SSP_FRM_LEN_ERR: + case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY: { - ts->stat = SAS_OPEN_REJECT; + slot->abort = 1; + ts->stat = SAS_PHY_DOWN; break; } default: @@ -1711,8 +2207,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba, } static int -slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, - int abort) +slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) { struct sas_task *task = slot->task; struct hisi_sas_device *sas_dev; @@ -1724,6 +2219,8 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, hisi_hba->complete_hdr[slot->cmplt_queue]; struct hisi_sas_complete_v2_hdr *complete_hdr = &complete_queue[slot->cmplt_queue_slot]; + unsigned long flags; + int aborted; if (unlikely(!task || !task->lldd_task || !task->dev)) return -EINVAL; @@ -1732,16 +2229,23 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, device = task->dev; sas_dev = device->lldd_dev; + spin_lock_irqsave(&task->task_state_lock, flags); + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; task->task_state_flags &= ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); memset(ts, 0, sizeof(*ts)); ts->resp = SAS_TASK_COMPLETE; - if (unlikely(!sas_dev || abort)) { - if (!sas_dev) - dev_dbg(dev, "slot complete: port has not device\n"); + if (unlikely(aborted)) { + ts->stat = SAS_ABORTED_TASK; + hisi_sas_slot_task_free(hisi_hba, task, slot); + return -1; + } + + if (unlikely(!sas_dev)) { + dev_dbg(dev, "slot complete: port has no device\n"); ts->stat = SAS_PHY_DOWN; goto out; } @@ -1755,16 +2259,19 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, goto out; case STAT_IO_COMPLETE: /* internal abort command complete */ - ts->stat = TMF_RESP_FUNC_COMPLETE; + ts->stat = TMF_RESP_FUNC_SUCC; + del_timer(&slot->internal_abort_timer); goto out; case STAT_IO_NO_DEVICE: ts->stat = TMF_RESP_FUNC_COMPLETE; + del_timer(&slot->internal_abort_timer); goto out; case STAT_IO_NOT_VALID: /* abort single io, 
controller don't find * the io need to abort */ ts->stat = TMF_RESP_FUNC_FAILED; + del_timer(&slot->internal_abort_timer); goto out; default: break; @@ -1772,13 +2279,17 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) && (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { + u32 err_phase = (complete_hdr->dw0 & CMPLT_HDR_ERR_PHASE_MSK) + >> CMPLT_HDR_ERR_PHASE_OFF; + + /* Analyse error happens on which phase TX or RX */ + if (ERR_ON_TX_PHASE(err_phase)) + slot_err_v2_hw(hisi_hba, task, slot, 1); + else if (ERR_ON_RX_PHASE(err_phase)) + slot_err_v2_hw(hisi_hba, task, slot, 2); - slot_err_v2_hw(hisi_hba, task, slot); - if (unlikely(slot->abort)) { - queue_work(hisi_hba->wq, &slot->abort_slot); - /* immediately return and do not complete */ + if (unlikely(slot->abort)) return ts->stat; - } goto out; } @@ -1830,7 +2341,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, } out: - + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); hisi_sas_slot_task_free(hisi_hba, task, slot); sts = ts->stat; @@ -1920,7 +2433,8 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct domain_device *parent_dev = device->parent; struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; - struct hisi_sas_port *port = device->port->lldd_port; + struct asd_sas_port *sas_port = device->port; + struct hisi_sas_port *port = to_hisi_sas_port(sas_port); u8 *buf_cmd; int has_data = 0, rc = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; @@ -1947,7 +2461,8 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, dw1 &= ~CMD_HDR_DIR_MSK; } - if (0 == task->ata_task.fis.command) + if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) && + (task->ata_task.fis.control & ATA_SRST)) dw1 |= 1 << CMD_HDR_RESET_OFF; dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir)) @@ -1990,6 +2505,40 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, return 0; } +static void hisi_sas_internal_abort_quirk_timeout(unsigned long data) +{ + struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data; + struct hisi_sas_port *port = slot->port; + struct asd_sas_port *asd_sas_port; + struct asd_sas_phy *sas_phy; + + if (!port) + return; + + asd_sas_port = &port->sas_port; + + /* Kick the hardware - send break command */ + list_for_each_entry(sas_phy, &asd_sas_port->phy_list, port_phy_el) { + struct hisi_sas_phy *phy = sas_phy->lldd_phy; + struct hisi_hba *hisi_hba = phy->hisi_hba; + int phy_no = sas_phy->id; + u32 link_dfx2; + + link_dfx2 = hisi_sas_phy_read32(hisi_hba, phy_no, LINK_DFX2); + if ((link_dfx2 == LINK_DFX2_RCVR_HOLD_STS_MSK) || + (link_dfx2 & LINK_DFX2_SEND_HOLD_STS_MSK)) { + u32 txid_auto; + + txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, + TXID_AUTO); + txid_auto |= TXID_AUTO_CTB_MSK; + hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO, + txid_auto); + return; + } + } +} + static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int device_id, int abort_flag, int tag_to_abort) @@ -1998,6 +2547,13 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, struct domain_device *dev = task->dev; struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct hisi_sas_port *port = slot->port; + struct timer_list *timer = &slot->internal_abort_timer; + + /* setup the quirk timer */ + setup_timer(timer, hisi_sas_internal_abort_quirk_timeout, + (unsigned long)slot); + 
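	/*
	 * Sketch of the quirk timer's life cycle (illustrative only): the
	 * timer armed here fires hisi_sas_internal_abort_quirk_timeout(),
	 * which sends a break primitive (TXID_AUTO_CTB_MSK) if the link is
	 * stuck in a receive/send hold state; on the normal path,
	 * slot_complete_v2_hw() above deletes it before expiry once the
	 * internal abort completes:
	 *
	 *	setup_timer(timer, fn, data);	arm the callback
	 *	mod_timer(timer, jiffies + t);	start the countdown
	 *	del_timer(timer);		completion won the race
	 */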
/* Set the timeout to 10ms less than internal abort timeout */ + mod_timer(timer, jiffies + msecs_to_jiffies(100)); /* dw0 */ hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/ @@ -2018,8 +2574,8 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) { - int i, res = 0; - u32 context, port_id, link_rate, hard_phy_linkrate; + int i, res = IRQ_HANDLED; + u32 port_id, link_rate, hard_phy_linkrate; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct device *dev = &hisi_hba->pdev->dev; @@ -2028,9 +2584,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1); - /* Check for SATA dev */ - context = hisi_sas_read32(hisi_hba, PHY_CONTEXT); - if (context & (1 << phy_no)) + if (is_sata_phy_v2_hw(hisi_hba, phy_no)) goto end; if (phy_no == 8) { @@ -2106,7 +2660,6 @@ static bool check_any_wideports_v2_hw(struct hisi_hba *hisi_hba) static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) { - int res = 0; u32 phy_state, sl_ctrl, txid_auto; struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct hisi_sas_port *port = phy->port; @@ -2131,7 +2684,7 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); - return res; + return IRQ_HANDLED; } static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) @@ -2139,35 +2692,58 @@ static irqreturn_t int_phy_updown_v2_hw(int irq_no, void *p) struct hisi_hba *hisi_hba = p; u32 irq_msk; int phy_no = 0; - irqreturn_t res = IRQ_HANDLED; irq_msk = (hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO) >> HGC_INVLD_DQE_INFO_FB_CH0_OFF) & 0x1ff; while (irq_msk) { if (irq_msk & 1) { - u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, - CHL_INT0); + u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, + CHL_INT0); - if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK) + switch (reg_value & (CHL_INT0_NOT_RDY_MSK | + CHL_INT0_SL_PHY_ENABLE_MSK)) { + + case CHL_INT0_SL_PHY_ENABLE_MSK: /* phy up */ - if (phy_up_v2_hw(phy_no, hisi_hba)) { - res = IRQ_NONE; - goto end; - } + if (phy_up_v2_hw(phy_no, hisi_hba) == + IRQ_NONE) + return IRQ_NONE; + break; - if (irq_value & CHL_INT0_NOT_RDY_MSK) + case CHL_INT0_NOT_RDY_MSK: /* phy down */ - if (phy_down_v2_hw(phy_no, hisi_hba)) { - res = IRQ_NONE; - goto end; + if (phy_down_v2_hw(phy_no, hisi_hba) == + IRQ_NONE) + return IRQ_NONE; + break; + + case (CHL_INT0_NOT_RDY_MSK | + CHL_INT0_SL_PHY_ENABLE_MSK): + reg_value = hisi_sas_read32(hisi_hba, + PHY_STATE); + if (reg_value & BIT(phy_no)) { + /* phy up */ + if (phy_up_v2_hw(phy_no, hisi_hba) == + IRQ_NONE) + return IRQ_NONE; + } else { + /* phy down */ + if (phy_down_v2_hw(phy_no, hisi_hba) == + IRQ_NONE) + return IRQ_NONE; } + break; + + default: + break; + } + } irq_msk >>= 1; phy_no++; } -end: - return res; + return IRQ_HANDLED; } static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) @@ -2342,94 +2918,105 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR); - panic("%s: hgc_dqe_accbad_intr (0x%x) found: \ + dev_warn(dev, "hgc_dqe_accbad_intr (0x%x) found: \ Ram address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_DQE_ECC_MB_ADDR_MSK) >> HGC_DQE_ECC_MB_ADDR_OFF); + 
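	/*
	 * Recovery-path summary (illustrative only): each multi-bit ECC
	 * error below used to panic(); it now logs via dev_warn() and queues
	 * hisi_hba->rst_work instead. From the core-driver hunk above, the
	 * handler runs hisi_sas_controller_reset(), which presumably lands
	 * in the new hw->soft_reset() hook (soft_reset_v2_hw, registered
	 * later in this patch):
	 *
	 *	queue_work(wq, &rst_work)
	 *	  -> hisi_sas_rst_work_handler()
	 *	       -> hisi_sas_controller_reset()
	 */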
queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_IOST_ECC_ADDR); - panic("%s: hgc_iost_accbad_intr (0x%x) found: \ + dev_warn(dev, "hgc_iost_accbad_intr (0x%x) found: \ Ram address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_IOST_ECC_MB_ADDR_MSK) >> HGC_IOST_ECC_MB_ADDR_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_ITCT_ECC_ADDR); - panic("%s: hgc_itct_accbad_intr (0x%x) found: \ + dev_warn(dev,"hgc_itct_accbad_intr (0x%x) found: \ Ram address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_ITCT_ECC_MB_ADDR_MSK) >> HGC_ITCT_ECC_MB_ADDR_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); - panic("%s: hgc_iostl_accbad_intr (0x%x) found: \ + dev_warn(dev, "hgc_iostl_accbad_intr (0x%x) found: \ memory address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_LM_DFX_STATUS2_IOSTLIST_MSK) >> HGC_LM_DFX_STATUS2_IOSTLIST_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_LM_DFX_STATUS2); - panic("%s: hgc_itctl_accbad_intr (0x%x) found: \ + dev_warn(dev, "hgc_itctl_accbad_intr (0x%x) found: \ memory address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_LM_DFX_STATUS2_ITCTLIST_MSK) >> HGC_LM_DFX_STATUS2_ITCTLIST_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_CQE_ECC_ADDR); - panic("%s: hgc_cqe_accbad_intr (0x%x) found: \ + dev_warn(dev, "hgc_cqe_accbad_intr (0x%x) found: \ Ram address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_CQE_ECC_MB_ADDR_MSK) >> HGC_CQE_ECC_MB_ADDR_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - panic("%s: rxm_mem0_accbad_intr (0x%x) found: \ + dev_warn(dev, "rxm_mem0_accbad_intr (0x%x) found: \ memory address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_RXM_DFX_STATUS14_MEM0_MSK) >> HGC_RXM_DFX_STATUS14_MEM0_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - panic("%s: rxm_mem1_accbad_intr (0x%x) found: \ + dev_warn(dev, "rxm_mem1_accbad_intr (0x%x) found: \ memory address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_RXM_DFX_STATUS14_MEM1_MSK) >> HGC_RXM_DFX_STATUS14_MEM1_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS14); - panic("%s: rxm_mem2_accbad_intr (0x%x) found: \ + dev_warn(dev, "rxm_mem2_accbad_intr (0x%x) found: \ memory address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_RXM_DFX_STATUS14_MEM2_MSK) >> HGC_RXM_DFX_STATUS14_MEM2_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF)) { reg_val = hisi_sas_read32(hisi_hba, HGC_RXM_DFX_STATUS15); - panic("%s: rxm_mem3_accbad_intr (0x%x) found: \ + dev_warn(dev, "rxm_mem3_accbad_intr (0x%x) found: \ 
memory address is 0x%08X\n", - dev_name(dev), irq_value, + irq_value, (reg_val & HGC_RXM_DFX_STATUS15_MEM3_MSK) >> HGC_RXM_DFX_STATUS15_MEM3_OFF); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } + return; } static irqreturn_t fatal_ecc_int_v2_hw(int irq_no, void *p) @@ -2487,23 +3074,27 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) if (irq_value & BIT(ENT_INT_SRC3_WP_DEPTH_OFF)) { hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 1 << ENT_INT_SRC3_WP_DEPTH_OFF); - panic("%s: write pointer and depth error (0x%x) \ + dev_warn(dev, "write pointer and depth error (0x%x) \ found!\n", - dev_name(dev), irq_value); + irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF)) { hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 1 << ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF); - panic("%s: iptt no match slot error (0x%x) found!\n", - dev_name(dev), irq_value); + dev_warn(dev, "iptt no match slot error (0x%x) found!\n", + irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } - if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) - panic("%s: read pointer and depth error (0x%x) \ + if (irq_value & BIT(ENT_INT_SRC3_RP_DEPTH_OFF)) { + dev_warn(dev, "read pointer and depth error (0x%x) \ found!\n", - dev_name(dev), irq_value); + irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } if (irq_value & BIT(ENT_INT_SRC3_AXI_OFF)) { int i; @@ -2514,10 +3105,11 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) HGC_AXI_FIFO_ERR_INFO); for (i = 0; i < AXI_ERR_NR; i++) { - if (err_value & BIT(i)) - panic("%s: %s (0x%x) found!\n", - dev_name(dev), + if (err_value & BIT(i)) { + dev_warn(dev, "%s (0x%x) found!\n", axi_err_info[i], irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } } } @@ -2530,10 +3122,11 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) HGC_AXI_FIFO_ERR_INFO); for (i = 0; i < FIFO_ERR_NR; i++) { - if (err_value & BIT(AXI_ERR_NR + i)) - panic("%s: %s (0x%x) found!\n", - dev_name(dev), + if (err_value & BIT(AXI_ERR_NR + i)) { + dev_warn(dev, "%s (0x%x) found!\n", fifo_err_info[i], irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); + } } } @@ -2541,15 +3134,17 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) if (irq_value & BIT(ENT_INT_SRC3_LM_OFF)) { hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 1 << ENT_INT_SRC3_LM_OFF); - panic("%s: LM add/fetch list error (0x%x) found!\n", - dev_name(dev), irq_value); + dev_warn(dev, "LM add/fetch list error (0x%x) found!\n", + irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } if (irq_value & BIT(ENT_INT_SRC3_ABT_OFF)) { hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 1 << ENT_INT_SRC3_ABT_OFF); - panic("%s: SAS_HGC_ABT fetch LM list error (0x%x) found!\n", - dev_name(dev), irq_value); + dev_warn(dev, "SAS_HGC_ABT fetch LM list error (0x%x) found!\n", + irq_value); + queue_work(hisi_hba->wq, &hisi_hba->rst_work); } } @@ -2568,6 +3163,9 @@ static void cq_tasklet_v2_hw(unsigned long val) u32 rd_point = cq->rd_point, wr_point, dev_id; int queue = cq->id; + if (unlikely(hisi_hba->reject_stp_links_msk)) + phys_try_accept_stp_links_v2_hw(hisi_hba); + complete_queue = hisi_hba->complete_hdr[queue]; spin_lock(&hisi_hba->lock); @@ -2600,7 +3198,7 @@ static void cq_tasklet_v2_hw(unsigned long val) slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; - slot_complete_v2_hw(hisi_hba, slot, 0); + slot_complete_v2_hw(hisi_hba, slot); act_tmp &= ~(1 << ncq_tag_count); ncq_tag_count = ffs(act_tmp); @@ 
-2610,7 +3208,7 @@ static void cq_tasklet_v2_hw(unsigned long val) slot = &hisi_hba->slot_info[iptt]; slot->cmplt_queue_slot = rd_point; slot->cmplt_queue = queue; - slot_complete_v2_hw(hisi_hba, slot, 0); + slot_complete_v2_hw(hisi_hba, slot); } if (++rd_point >= HISI_SAS_QUEUE_SLOTS) @@ -2842,6 +3440,8 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) { int rc; + memset(hisi_hba->sata_dev_bitmap, 0, sizeof(hisi_hba->sata_dev_bitmap)); + rc = hw_init_v2_hw(hisi_hba); if (rc) return rc; @@ -2850,7 +3450,88 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba) if (rc) return rc; - phys_init_v2_hw(hisi_hba); + return 0; +} + +static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba) +{ + struct platform_device *pdev = hisi_hba->pdev; + int i; + + for (i = 0; i < hisi_hba->queue_count; i++) + hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1); + + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff); + hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); + + for (i = 0; i < hisi_hba->n_phy; i++) { + hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff); + hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff); + } + + for (i = 0; i < 128; i++) + synchronize_irq(platform_get_irq(pdev, i)); +} + +static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) +{ + struct device *dev = &hisi_hba->pdev->dev; + u32 old_state, state; + int rc, cnt; + int phy_no; + + old_state = hisi_sas_read32(hisi_hba, PHY_STATE); + + interrupt_disable_v2_hw(hisi_hba); + hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); + + stop_phys_v2_hw(hisi_hba); + + mdelay(10); + + hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1); + + /* wait until bus idle */ + cnt = 0; + while (1) { + u32 status = hisi_sas_read32_relaxed(hisi_hba, + AXI_MASTER_CFG_BASE + AM_CURR_TRANS_RETURN); + + if (status == 0x3) + break; + + udelay(10); + if (cnt++ > 10) { + dev_info(dev, "wait axi bus state to idle timeout!\n"); + return -1; + } + } + + hisi_sas_init_mem(hisi_hba); + + rc = hw_init_v2_hw(hisi_hba); + if (rc) + return rc; + + phys_reject_stp_links_v2_hw(hisi_hba); + + /* Re-enable the PHYs */ + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + if (sas_phy->enabled) + start_phy_v2_hw(hisi_hba, phy_no); + } + + /* Wait for the PHYs to come up and read the PHY state */ + msleep(1000); + + state = hisi_sas_read32(hisi_hba, PHY_STATE); + + hisi_sas_rescan_topology(hisi_hba, old_state, state); return 0; } @@ -2870,6 +3551,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .get_free_slot = get_free_slot_v2_hw, .start_delivery = start_delivery_v2_hw, .slot_complete = slot_complete_v2_hw, + .phys_init = phys_init_v2_hw, .phy_enable = enable_phy_v2_hw, .phy_disable = disable_phy_v2_hw, .phy_hard_reset = phy_hard_reset_v2_hw, @@ -2877,6 +3559,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw, .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), + .soft_reset = soft_reset_v2_hw, }; static int hisi_sas_v2_probe(struct platform_device *pdev) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9d659aaace15..73daace478cb 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -60,7 +60,7 @@ * HPSA_DRIVER_VERSION must be 3 byte 
values (0-255) separated by '.' * with an optional trailing '-' followed by a byte value (0-255). */ -#define HPSA_DRIVER_VERSION "3.4.16-0" +#define HPSA_DRIVER_VERSION "3.4.18-0" #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" #define HPSA "hpsa" @@ -108,10 +108,12 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, @@ -171,10 +173,12 @@ static struct board_type products[] = { {0x3354103C, "Smart Array P420i", &SA5_access}, {0x3355103C, "Smart Array P220i", &SA5_access}, {0x3356103C, "Smart Array P721m", &SA5_access}, + {0x1920103C, "Smart Array P430i", &SA5_access}, {0x1921103C, "Smart Array P830i", &SA5_access}, {0x1922103C, "Smart Array P430", &SA5_access}, {0x1923103C, "Smart Array P431", &SA5_access}, {0x1924103C, "Smart Array P830", &SA5_access}, + {0x1925103C, "Smart Array P831", &SA5_access}, {0x1926103C, "Smart Array P731m", &SA5_access}, {0x1928103C, "Smart Array P230i", &SA5_access}, {0x1929103C, "Smart Array P530", &SA5_access}, diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 2c92dabb55f6..26cd3c28186a 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -3910,12 +3910,6 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) spin_unlock_irqrestore(vhost->host->host_lock, flags); tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); - if (!tgt) { - dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", - scsi_id); - return -ENOMEM; - } - memset(tgt, 0, sizeof(*tgt)); tgt->scsi_id = scsi_id; tgt->new_scsi_id = scsi_id; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 5d5e272fd815..b0c68d24db01 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -820,7 +820,7 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) } /** - * ipr_sata_eh_done - done function for aborted SATA commands + * __ipr_sata_eh_done - done function for aborted SATA commands * @ipr_cmd: ipr command struct * * This function is invoked for ops generated to SATA @@ -829,19 +829,41 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg) * Return value: * none **/ -static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) +static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) { struct ata_queued_cmd *qc = ipr_cmd->qc; struct ipr_sata_port *sata_port = qc->ap->private_data; qc->err_mask |= AC_ERR_OTHER; sata_port->ioasa.status |= ATA_BUSY; - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); ata_qc_complete(qc); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** - * ipr_scsi_eh_done - mid-layer done function for aborted ops + * ipr_sata_eh_done - done function for aborted SATA commands + * @ipr_cmd: ipr command struct + * + * This function is invoked for ops generated to 
SATA + * devices which are being aborted. + * + * Return value: + * none + **/ +static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; + unsigned long hrrq_flags; + + spin_lock_irqsave(&hrrq->_lock, hrrq_flags); + __ipr_sata_eh_done(ipr_cmd); + spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); +} + +/** + * __ipr_scsi_eh_done - mid-layer done function for aborted ops * @ipr_cmd: ipr command struct * * This function is invoked by the interrupt handler for @@ -850,7 +872,7 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) +static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; @@ -864,6 +886,26 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) } /** + * ipr_scsi_eh_done - mid-layer done function for aborted ops + * @ipr_cmd: ipr command struct + * + * This function is invoked by the interrupt handler for + * ops generated by the SCSI mid-layer which are being aborted. + * + * Return value: + * none + **/ +static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd) +{ + unsigned long hrrq_flags; + struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; + + spin_lock_irqsave(&hrrq->_lock, hrrq_flags); + __ipr_scsi_eh_done(ipr_cmd); + spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); +} + +/** * ipr_fail_all_ops - Fails all outstanding ops. * @ioa_cfg: ioa config struct * @@ -890,9 +932,9 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg) cpu_to_be32(IPR_DRIVER_ILID); if (ipr_cmd->scsi_cmd) - ipr_cmd->done = ipr_scsi_eh_done; + ipr_cmd->done = __ipr_scsi_eh_done; else if (ipr_cmd->qc) - ipr_cmd->done = ipr_sata_eh_done; + ipr_cmd->done = __ipr_sata_eh_done; ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET); @@ -5006,6 +5048,42 @@ static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device) } /** + * ipr_cmnd_is_free - Check if a command is free or not + * @ipr_cmd ipr command struct + * + * Returns: + * true / false + **/ +static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_cmnd *loop_cmd; + + list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) { + if (loop_cmd == ipr_cmd) + return true; + } + + return false; +} + +/** + * ipr_match_res - Match function for specified resource entry + * @ipr_cmd: ipr command struct + * @resource: resource entry to match + * + * Returns: + * 1 if command matches sdev / 0 if command does not match sdev + **/ +static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource) +{ + struct ipr_resource_entry *res = resource; + + if (res && ipr_cmd->ioarcb.res_handle == res->res_handle) + return 1; + return 0; +} + +/** * ipr_wait_for_ops - Wait for matching commands to complete * @ipr_cmd: ipr command struct * @device: device to match (sdev) @@ -5018,7 +5096,7 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, int (*match)(struct ipr_cmnd *, void *)) { struct ipr_cmnd *ipr_cmd; - int wait; + int wait, i; unsigned long flags; struct ipr_hrr_queue *hrrq; signed long timeout = IPR_ABORT_TASK_TIMEOUT; @@ -5030,10 +5108,13 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, for_each_hrrq(hrrq, ioa_cfg) { spin_lock_irqsave(hrrq->lock, flags); - list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { - if (match(ipr_cmd, device)) { - ipr_cmd->eh_comp = ∁ - wait++; + for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { + ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; + if 
(!ipr_cmnd_is_free(ipr_cmd)) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = &comp; + wait++; + } } } spin_unlock_irqrestore(hrrq->lock, flags); @@ -5047,10 +5128,13 @@ static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device, for_each_hrrq(hrrq, ioa_cfg) { spin_lock_irqsave(hrrq->lock, flags); - list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { - if (match(ipr_cmd, device)) { - ipr_cmd->eh_comp = NULL; - wait++; + for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { + ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; + if (!ipr_cmnd_is_free(ipr_cmd)) { + if (match(ipr_cmd, device)) { + ipr_cmd->eh_comp = NULL; + wait++; + } } } spin_unlock_irqrestore(hrrq->lock, flags); @@ -5179,7 +5263,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; struct ipr_resource_entry *res; unsigned long lock_flags = 0; - int rc = -ENXIO; + int rc = -ENXIO, ret; ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -5193,9 +5277,19 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, if (res) { rc = ipr_device_reset(ioa_cfg, res); *classes = res->ata_class; - } + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); + if (ret != SUCCESS) { + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); + ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); + + wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); + } + } else + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); - spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); LEAVE; return rc; } @@ -5217,16 +5311,13 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) struct ipr_ioa_cfg *ioa_cfg; struct ipr_resource_entry *res; struct ata_port *ap; - int rc = 0; + int rc = 0, i; struct ipr_hrr_queue *hrrq; ENTER; ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata; res = scsi_cmd->device->hostdata; - if (!res) - return FAILED; - /* * If we are currently going through reset/reload, return failed.
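The ipr hunks above all apply one pattern: instead of walking hrrq_pending_q, error handling scans each queue's slice of the command array, uses ipr_cmnd_is_free() to skip idle slots, attaches a completion to every in-flight command that matches, and the done paths complete ipr_cmd->eh_comp before putting the command back on the free list. A condensed sketch of the wait side, with the per-queue locking and the retry pass of the real driver omitted:

	/* Sketch only: simplified from ipr_wait_for_ops(). */
	static int wait_for_matching_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
					 int (*match)(struct ipr_cmnd *, void *))
	{
		DECLARE_COMPLETION_ONSTACK(comp);
		struct ipr_hrr_queue *hrrq;
		int i, wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
				struct ipr_cmnd *ipr_cmd = ioa_cfg->ipr_cmnd_list[i];

				/* Hook only commands that are still in flight. */
				if (!ipr_cmnd_is_free(ipr_cmd) &&
				    match(ipr_cmd, device)) {
					ipr_cmd->eh_comp = &comp;
					wait++;
				}
			}
		}

		/* Each completer signals comp once; consume one per command. */
		while (wait--)
			wait_for_completion_timeout(&comp, IPR_ABORT_TASK_TIMEOUT);

		return SUCCESS;
	}

Completing eh_comp before the list_add_tail() onto hrrq_free_q is what closes the race this series fixes: a waiter can no longer observe the command as free while still blocked on the completion.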
This will force the * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the @@ -5239,14 +5330,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) for_each_hrrq(hrrq, ioa_cfg) { spin_lock(&hrrq->_lock); - list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { + for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { + ipr_cmd = ioa_cfg->ipr_cmnd_list[i]; + if (ipr_cmd->ioarcb.res_handle == res->res_handle) { - if (ipr_cmd->scsi_cmd) - ipr_cmd->done = ipr_scsi_eh_done; - if (ipr_cmd->qc) - ipr_cmd->done = ipr_sata_eh_done; - if (ipr_cmd->qc && - !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { + if (!ipr_cmd->qc) + continue; + if (ipr_cmnd_is_free(ipr_cmd)) + continue; + + ipr_cmd->done = ipr_sata_eh_done; + if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) { ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT; ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED; } @@ -5262,19 +5356,6 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) spin_unlock_irq(scsi_cmd->device->host->host_lock); ata_std_error_handler(ap); spin_lock_irq(scsi_cmd->device->host->host_lock); - - for_each_hrrq(hrrq, ioa_cfg) { - spin_lock(&hrrq->_lock); - list_for_each_entry(ipr_cmd, - &hrrq->hrrq_pending_q, queue) { - if (ipr_cmd->ioarcb.res_handle == - res->res_handle) { - rc = -EIO; - break; - } - } - spin_unlock(&hrrq->_lock); - } } else rc = ipr_device_reset(ioa_cfg, res); res->resetting_device = 0; @@ -5288,15 +5369,24 @@ static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) { int rc; struct ipr_ioa_cfg *ioa_cfg; + struct ipr_resource_entry *res; ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; + res = cmd->device->hostdata; + + if (!res) + return FAILED; spin_lock_irq(cmd->device->host->host_lock); rc = __ipr_eh_dev_reset(cmd); spin_unlock_irq(cmd->device->host->host_lock); - if (rc == SUCCESS) - rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); + if (rc == SUCCESS) { + if (ipr_is_gata(res) && res->sata_port) + rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res); + else + rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun); + } return rc; } @@ -5393,7 +5483,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) struct ipr_resource_entry *res; struct ipr_cmd_pkt *cmd_pkt; u32 ioasc, int_reg; - int op_found = 0; + int i, op_found = 0; struct ipr_hrr_queue *hrrq; ENTER; @@ -5422,11 +5512,12 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) for_each_hrrq(hrrq, ioa_cfg) { spin_lock(&hrrq->_lock); - list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) { - if (ipr_cmd->scsi_cmd == scsi_cmd) { - ipr_cmd->done = ipr_scsi_eh_done; - op_found = 1; - break; + for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) { + if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) { + if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) { + op_found = 1; + break; + } } } spin_unlock(&hrrq->_lock); @@ -5917,7 +6008,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, } /** - * ipr_erp_done - Process completion of ERP for a device + * __ipr_erp_done - Process completion of ERP for a device * @ipr_cmd: ipr command struct * * This function copies the sense buffer into the scsi_cmd @@ -5926,7 +6017,7 @@ static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, * Return value: * nothing **/ -static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) +static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd) { struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; struct ipr_resource_entry *res = scsi_cmd->device->hostdata; @@ -5947,8 +6038,30 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) 
res->in_erp = 0; } scsi_dma_unmap(ipr_cmd->scsi_cmd); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); +} + +/** + * ipr_erp_done - Process completion of ERP for a device + * @ipr_cmd: ipr command struct + * + * This function copies the sense buffer into the scsi_cmd + * struct and pushes the scsi_done function. + * + * Return value: + * nothing + **/ +static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; + unsigned long hrrq_flags; + + spin_lock_irqsave(&hrrq->_lock, hrrq_flags); + __ipr_erp_done(ipr_cmd); + spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); } /** @@ -5983,7 +6096,7 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) } /** - * ipr_erp_request_sense - Send request sense to a device + * __ipr_erp_request_sense - Send request sense to a device * @ipr_cmd: ipr command struct * * This function sends a request sense to a device as a result @@ -5992,13 +6105,13 @@ static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd) * Return value: * nothing **/ -static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) +static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) { struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt; u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); if (IPR_IOASC_SENSE_KEY(ioasc) > 0) { - ipr_erp_done(ipr_cmd); + __ipr_erp_done(ipr_cmd); return; } @@ -6019,6 +6132,26 @@ static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) } /** + * ipr_erp_request_sense - Send request sense to a device + * @ipr_cmd: ipr command struct + * + * This function sends a request sense to a device as a result + * of a check condition. 
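The renames in this part of the ipr patch follow the usual kernel locking idiom: the double-underscore variant (__ipr_erp_done(), __ipr_erp_request_sense(), ...) assumes the hrrq lock is already held and is what lock-holding callers such as ipr_erp_cancel_all() invoke, while the plain-named function is a thin wrapper that takes the lock itself. In outline, with a hypothetical helper name:

	static void __do_erp_step(struct ipr_cmnd *ipr_cmd)
	{
		/* Caller must hold ipr_cmd->hrrq->_lock; a
		 * lockdep_assert_held() here would document that. */
		/* ... the actual ERP work ... */
	}

	static void do_erp_step(struct ipr_cmnd *ipr_cmd)
	{
		struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
		unsigned long flags;

		spin_lock_irqsave(&hrrq->_lock, flags);
		__do_erp_step(ipr_cmd);
		spin_unlock_irqrestore(&hrrq->_lock, flags);
	}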
+ * + * Return value: + * nothing + **/ +static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd) +{ + struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq; + unsigned long hrrq_flags; + + spin_lock_irqsave(&hrrq->_lock, hrrq_flags); + __ipr_erp_request_sense(ipr_cmd); + spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags); +} + +/** * ipr_erp_cancel_all - Send cancel all to a device * @ipr_cmd: ipr command struct * @@ -6041,7 +6174,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd) ipr_reinit_ipr_cmnd_for_erp(ipr_cmd); if (!scsi_cmd->device->simple_tags) { - ipr_erp_request_sense(ipr_cmd); + __ipr_erp_request_sense(ipr_cmd); return; } @@ -6261,7 +6394,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK; if (!res) { - ipr_scsi_eh_done(ipr_cmd); + __ipr_scsi_eh_done(ipr_cmd); return; } @@ -6343,8 +6476,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, } scsi_dma_unmap(ipr_cmd->scsi_cmd); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** @@ -6370,8 +6505,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) scsi_dma_unmap(scsi_cmd); spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); } else { spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index b7d2e98eb45b..e98a87a65335 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -39,8 +39,8 @@ /* * Literals */ -#define IPR_DRIVER_VERSION "2.6.3" -#define IPR_DRIVER_DATE "(October 17, 2015)" +#define IPR_DRIVER_VERSION "2.6.4" +#define IPR_DRIVER_DATE "(March 14, 2017)" /* * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 0b5b5db0d0f8..45371179ab87 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -272,7 +272,6 @@ static void isci_unregister(struct isci_host *isci_host) return; shost = to_shost(isci_host); - scsi_remove_host(shost); sas_unregister_ha(&isci_host->sas_ha); sas_remove_host(shost); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 0e67621477a8..a808e8ef1d08 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -154,7 +154,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) memset(fsp, 0, sizeof(*fsp)); fsp->lp = lport; fsp->xfer_ddp = FC_XID_UNKNOWN; - atomic_set(&fsp->ref_cnt, 1); + refcount_set(&fsp->ref_cnt, 1); init_timer(&fsp->timer); fsp->timer.data = (unsigned long)fsp; INIT_LIST_HEAD(&fsp->list); @@ -175,7 +175,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) */ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) { - if (atomic_dec_and_test(&fsp->ref_cnt)) { + if (refcount_dec_and_test(&fsp->ref_cnt)) { struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp); mempool_free(fsp, si->scsi_pkt_pool); @@ -188,7 +188,7 @@ static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp) */ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp) { - atomic_inc(&fsp->ref_cnt); + refcount_inc(&fsp->ref_cnt); } /** diff --git a/drivers/scsi/libfc/fc_lport.c 
b/drivers/scsi/libfc/fc_lport.c index aa76f36abe03..2fd0ec651170 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -887,8 +887,6 @@ out: static void fc_lport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) { - void (*recv)(struct fc_lport *, struct fc_frame *); - mutex_lock(&lport->lp_mutex); /* @@ -902,31 +900,31 @@ static void fc_lport_recv_els_req(struct fc_lport *lport, /* * Check opcode. */ - recv = fc_rport_recv_req; switch (fc_frame_payload_op(fp)) { case ELS_FLOGI: if (!lport->point_to_multipoint) - recv = fc_lport_recv_flogi_req; + fc_lport_recv_flogi_req(lport, fp); break; case ELS_LOGO: if (fc_frame_sid(fp) == FC_FID_FLOGI) - recv = fc_lport_recv_logo_req; + fc_lport_recv_logo_req(lport, fp); break; case ELS_RSCN: - recv = lport->tt.disc_recv_req; + lport->tt.disc_recv_req(lport, fp); break; case ELS_ECHO: - recv = fc_lport_recv_echo_req; + fc_lport_recv_echo_req(lport, fp); break; case ELS_RLIR: - recv = fc_lport_recv_rlir_req; + fc_lport_recv_rlir_req(lport, fp); break; case ELS_RNID: - recv = fc_lport_recv_rnid_req; + fc_lport_recv_rnid_req(lport, fp); + break; + default: + fc_rport_recv_req(lport, fp); break; } - - recv(lport, fp); } mutex_unlock(&lport->lp_mutex); } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 894b1e3ebd56..dd6828f7f772 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -517,13 +517,13 @@ static void iscsi_free_task(struct iscsi_task *task) void __iscsi_get_task(struct iscsi_task *task) { - atomic_inc(&task->refcount); + refcount_inc(&task->refcount); } EXPORT_SYMBOL_GPL(__iscsi_get_task); void __iscsi_put_task(struct iscsi_task *task) { - if (atomic_dec_and_test(&task->refcount)) + if (refcount_dec_and_test(&task->refcount)) iscsi_free_task(task); } EXPORT_SYMBOL_GPL(__iscsi_put_task); @@ -749,7 +749,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * released by the lld when it has transmitted the task for * pdus we do not expect a response for. 
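The fc_fcp and libiscsi hunks here (and the qedi one further down) are part of the tree-wide atomic_t to refcount_t conversion: refcount_t saturates instead of wrapping and warns on increment-from-zero, so refcount bugs become loud warnings rather than silent use-after-free. The mapping is mechanical; as a sketch against a hypothetical object:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct pkt {
		refcount_t ref_cnt;			/* was atomic_t */
	};

	static void pkt_init(struct pkt *p)
	{
		refcount_set(&p->ref_cnt, 1);		/* was atomic_set(..., 1) */
	}

	static void pkt_hold(struct pkt *p)
	{
		refcount_inc(&p->ref_cnt);		/* was atomic_inc() */
	}

	static void pkt_release(struct pkt *p)
	{
		if (refcount_dec_and_test(&p->ref_cnt))	/* was atomic_dec_and_test() */
			kfree(p);
	}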
*/ - atomic_set(&task->refcount, 1); + refcount_set(&task->refcount, 1); task->conn = conn; task->sc = NULL; INIT_LIST_HEAD(&task->running); @@ -1638,7 +1638,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, sc->SCp.phase = conn->session->age; sc->SCp.ptr = (char *) task; - atomic_set(&task->refcount, 1); + refcount_set(&task->refcount, 1); task->state = ISCSI_TASK_PENDING; task->conn = conn; task->sc = sc; diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 15ef8e2e685c..64e9cdda1c3c 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -566,13 +566,6 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft) } EXPORT_SYMBOL_GPL(sas_domain_attach_transport); - -void sas_domain_release_transport(struct scsi_transport_template *stt) -{ - sas_release_transport(stt); -} -EXPORT_SYMBOL_GPL(sas_domain_release_transport); - /* ---------- SAS Class register/unregister ---------- */ static int __init sas_class_init(void) diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 9bd55bce83af..87e5079d816b 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -491,9 +491,6 @@ int sas_eh_abort_handler(struct scsi_cmnd *cmd) struct Scsi_Host *host = cmd->device->host; struct sas_internal *i = to_sas_internal(host->transportt); - if (current != host->ehandler) - return FAILED; - if (!i->dft->lldd_abort_task) return FAILED; @@ -616,8 +613,6 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * SAS_DPRINTK("trying to find task 0x%p\n", task); res = sas_scsi_find_task(task); - cmd->eh_eflags = 0; - switch (res) { case TASK_IS_DONE: SAS_DPRINTK("%s: task 0x%p is done\n", __func__, diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 513fd07715cd..4830370bfab1 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -181,7 +181,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), phba->targetport->port_id); - len += snprintf(buf + len, PAGE_SIZE, + len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Target: Statistics\n"); tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; len += snprintf(buf+len, PAGE_SIZE-len, @@ -326,7 +326,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, } spin_unlock_irq(shost->host_lock); - len += snprintf(buf + len, PAGE_SIZE, "\nNVME Statistics\n"); + len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n"); len += snprintf(buf+len, PAGE_SIZE-len, "LS: Xmt %016llx Cmpl %016llx\n", phba->fc4NvmeLsRequests, diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index bb567d3b0693..cdb61eaa2d1f 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -55,6 +55,7 @@ struct mac_esp_priv { int error; }; static struct esp *esp_chips[2]; +static DEFINE_SPINLOCK(esp_chips_lock); #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ platform_get_drvdata((struct platform_device *) \ @@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev) } host->irq = IRQ_MAC_SCSI; - esp_chips[dev->id] = esp; - mb(); - if (esp_chips[!dev->id] == NULL) { - err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); - if (err < 0) { - esp_chips[dev->id] = NULL; - goto fail_free_priv; - } + + /* The request_irq() call is intended to succeed for the first device + * and fail for the second device. 
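The mac_esp change replaces a bare mb() with a real lock: both ESP channels share one interrupt line, so probe and remove must agree on which device owns the request_irq()/free_irq() pairing. With esp_chips[] only ever read and written under esp_chips_lock, the "am I the last user?" decision becomes race-free. A hypothetical teardown helper showing the shape:

	static void esp_chip_drop(int id, int irq)
	{
		bool last_user;

		spin_lock(&esp_chips_lock);
		esp_chips[id] = NULL;
		last_user = (esp_chips[!id] == NULL);
		spin_unlock(&esp_chips_lock);

		/* Only the last channel standing releases the shared IRQ. */
		if (last_user)
			free_irq(irq, NULL);
	}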
+ */ + err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); + spin_lock(&esp_chips_lock); + if (err < 0 && esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); + goto fail_free_priv; } + esp_chips[dev->id] = esp; + spin_unlock(&esp_chips_lock); err = scsi_esp_register(esp, &dev->dev); if (err) @@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev) return 0; fail_free_irq: - if (esp_chips[!dev->id] == NULL) - free_irq(host->irq, esp); + spin_lock(&esp_chips_lock); + esp_chips[dev->id] = NULL; + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); + free_irq(host->irq, NULL); + } else + spin_unlock(&esp_chips_lock); fail_free_priv: kfree(mep); fail_free_command_block: @@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev) scsi_esp_unregister(esp); + spin_lock(&esp_chips_lock); esp_chips[dev->id] = NULL; - if (!(esp_chips[0] || esp_chips[1])) + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); free_irq(irq, NULL); + } else + spin_unlock(&esp_chips_lock); kfree(mep); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 5b7aec5d575a..18039bba26c4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1025,7 +1025,6 @@ _base_interrupt(int irq, void *bus_id) 0 : ioc->reply_free_host_index + 1; ioc->reply_free[ioc->reply_free_host_index] = cpu_to_le32(reply); - wmb(); writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); } @@ -1074,7 +1073,6 @@ _base_interrupt(int irq, void *bus_id) return IRQ_NONE; } - wmb(); if (ioc->is_warpdrive) { writel(reply_q->reply_post_host_index, ioc->reply_post_host_index[msix_index]); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 919ba2bb15f1..a5d872664257 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -8283,7 +8283,6 @@ static void scsih_remove(struct pci_dev *pdev) } sas_remove_host(shost); - scsi_remove_host(shost); mpt3sas_base_detach(ioc); spin_lock(&gioc_lock); list_del(&ioc->list); diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 8280046fd1f0..4e047b5001a6 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -642,7 +642,6 @@ static void mvs_pci_remove(struct pci_dev *pdev) tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet); #endif - scsi_remove_host(mvi->shost); sas_unregister_ha(sha); sas_remove_host(mvi->shost); diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index 247df5e79b71..fe97401ad192 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -210,39 +210,27 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, unsigned int sgnum = scsi_sg_count(scmd); dma_addr_t busaddr; - if (sgnum) { - sg = scsi_sglist(scmd); - *sg_count = pci_map_sg(mhba->pdev, sg, sgnum, - (int) scmd->sc_data_direction); - if (*sg_count > mhba->max_sge) { - dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger " - "than max sg[0x%x].\n", - *sg_count, mhba->max_sge); - return -1; - } - for (i = 0; i < *sg_count; i++) { - busaddr = sg_dma_address(&sg[i]); - m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); - m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); - m_sg->flags = 0; - sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i]))); - if ((i + 1) == *sg_count) - m_sg->flags |= 1U << mhba->eot_flag; - - sgd_inc(mhba, m_sg); - } - } else { - scmd->SCp.dma_handle = scsi_bufflen(scmd) ? 
- pci_map_single(mhba->pdev, scsi_sglist(scmd), - scsi_bufflen(scmd), - (int) scmd->sc_data_direction) - : 0; - busaddr = scmd->SCp.dma_handle; + sg = scsi_sglist(scmd); + *sg_count = pci_map_sg(mhba->pdev, sg, sgnum, + (int) scmd->sc_data_direction); + if (*sg_count > mhba->max_sge) { + dev_err(&mhba->pdev->dev, + "sg count[0x%x] is bigger than max sg[0x%x].\n", + *sg_count, mhba->max_sge); + pci_unmap_sg(mhba->pdev, sg, sgnum, + (int) scmd->sc_data_direction); + return -1; + } + for (i = 0; i < *sg_count; i++) { + busaddr = sg_dma_address(&sg[i]); m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); - m_sg->flags = 1U << mhba->eot_flag; - sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd))); - *sg_count = 1; + m_sg->flags = 0; + sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i]))); + if ((i + 1) == *sg_count) + m_sg->flags |= 1U << mhba->eot_flag; + + sgd_inc(mhba, m_sg); } return 0; @@ -1350,21 +1338,10 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, break; } - if (scsi_bufflen(scmd)) { - if (scsi_sg_count(scmd)) { - pci_unmap_sg(mhba->pdev, - scsi_sglist(scmd), - scsi_sg_count(scmd), - (int) scmd->sc_data_direction); - } else { - pci_unmap_single(mhba->pdev, - scmd->SCp.dma_handle, - scsi_bufflen(scmd), - (int) scmd->sc_data_direction); - - scmd->SCp.dma_handle = 0; - } - } + if (scsi_bufflen(scmd)) + pci_unmap_sg(mhba->pdev, scsi_sglist(scmd), + scsi_sg_count(scmd), + (int) scmd->sc_data_direction); cmd->scmd->scsi_done(scmd); mvumi_return_cmd(mhba, cmd); } @@ -2171,19 +2148,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16); scmd->SCp.ptr = NULL; if (scsi_bufflen(scmd)) { - if (scsi_sg_count(scmd)) { - pci_unmap_sg(mhba->pdev, - scsi_sglist(scmd), - scsi_sg_count(scmd), - (int)scmd->sc_data_direction); - } else { - pci_unmap_single(mhba->pdev, - scmd->SCp.dma_handle, - scsi_bufflen(scmd), - (int)scmd->sc_data_direction); - - scmd->SCp.dma_handle = 0; - } + pci_unmap_sg(mhba->pdev, scsi_sglist(scmd), + scsi_sg_count(scmd), + (int)scmd->sc_data_direction); } mvumi_return_cmd(mhba, cmd); spin_unlock_irqrestore(mhba->shost->host_lock, flags); diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index e0ce5d2fd14d..ed948025112c 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -464,14 +464,15 @@ static int osd_probe(struct device *dev) /* hold one more reference to the scsi_device that will get released * in __release, in case a logout is happening while fs is mounted */ - scsi_device_get(scsi_device); + if (scsi_device_get(scsi_device)) + goto err_put_disk; osd_dev_init(&oud->od, scsi_device); /* Detect the OSD Version */ error = __detect_osd(oud); if (error) { OSD_ERR("osd detection failed, non-compatible OSD device\n"); - goto err_put_disk; + goto err_put_sdev; } /* init the char-device for communication with user-mode */ @@ -508,8 +509,9 @@ static int osd_probe(struct device *dev) err_put_cdev: cdev_del(&oud->cdev); -err_put_disk: +err_put_sdev: scsi_device_put(scsi_device); +err_put_disk: put_disk(disk); err_free_osd: dev_set_drvdata(dev, NULL); @@ -524,10 +526,9 @@ static int osd_remove(struct device *dev) struct scsi_device *scsi_device = to_scsi_device(dev); struct osd_uld_device *oud = dev_get_drvdata(dev); - if (!oud || (oud->od.scsi_device != scsi_device)) { - OSD_ERR("Half cooked osd-device %p,%p || %p!=%p", - dev, oud, oud ? 
oud->od.scsi_device : NULL, - scsi_device); + if (oud->od.scsi_device != scsi_device) { + OSD_ERR("Half cooked osd-device %p, || %p!=%p", + dev, oud->od.scsi_device, scsi_device); } device_unregister(&oud->class_dev); diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 417368ccb686..034b2f7d1135 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -1088,7 +1088,6 @@ static void pm8001_pci_remove(struct pci_dev *pdev) struct pm8001_hba_info *pm8001_ha; int i, j; pm8001_ha = sha->lldd_ha; - scsi_remove_host(pm8001_ha->shost); sas_unregister_ha(sha); sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 49e70a383afa..a4aadf5f4dc6 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -77,7 +77,7 @@ static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0); */ static unsigned int pmcraid_major; static struct class *pmcraid_class; -DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS); +static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS); /* * Module parameters @@ -175,7 +175,7 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev) if (fw_version <= PMCRAID_FW_VERSION_1) target = temp->cfg_entry.unique_flags1; else - target = temp->cfg_entry.array_id & 0xFF; + target = le16_to_cpu(temp->cfg_entry.array_id) & 0xFF; if (target > PMCRAID_MAX_VSET_TARGETS) continue; @@ -330,7 +330,7 @@ static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index) ioarcb->request_flags0 = 0; ioarcb->request_flags1 = 0; ioarcb->cmd_timeout = 0; - ioarcb->ioarcb_bus_addr &= (~0x1FULL); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL); ioarcb->ioadl_bus_addr = 0; ioarcb->ioadl_length = 0; ioarcb->data_transfer_length = 0; @@ -345,7 +345,7 @@ static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index) cmd->scsi_cmd = NULL; cmd->release = 0; cmd->completion_req = 0; - cmd->sense_buffer = 0; + cmd->sense_buffer = NULL; cmd->sense_buffer_dma = 0; cmd->dma_handle = 0; init_timer(&cmd->timer); @@ -898,8 +898,7 @@ static void _pmcraid_fire_command(struct pmcraid_cmd *cmd) /* driver writes lower 32-bit value of IOARCB address only */ mb(); - iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), - pinstance->ioarrin); + iowrite32(le64_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), pinstance->ioarrin); } /** @@ -1051,7 +1050,7 @@ static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd) offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); - ioarcb->ioarcb_bus_addr &= ~(0x1FULL); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); ioarcb->request_flags0 |= NO_LINK_DESCS; ioarcb->data_transfer_length = cpu_to_le32(data_size); @@ -1077,7 +1076,7 @@ static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; int index = cmd->hrrq_index; __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]); - u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD); + __be32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD); void (*done_function)(struct pmcraid_cmd *); pmcraid_reinit_cmdblk(cmd); @@ -1202,7 +1201,7 @@ static struct pmcraid_cmd *pmcraid_init_hcam ioadl[0].flags |= IOADL_FLAGS_READ_LAST; ioadl[0].data_len = cpu_to_le32(rcb_size); - ioadl[0].address = cpu_to_le32(dma); + ioadl[0].address = cpu_to_le64(dma); cmd->cmd_done = cmd_done; return cmd; @@ -1237,7 +1236,13 @@ static void 
pmcraid_prepare_cancel_cmd( ) { struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; - __be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr; + __be64 ioarcb_addr; + + /* IOARCB address of the command to be cancelled is given in + * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in + * IOARCB address are not masked. + */ + ioarcb_addr = cpu_to_be64(le64_to_cpu(cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr)); /* Get the resource handle to where the command to be aborted has been * sent. @@ -1247,11 +1252,6 @@ static void pmcraid_prepare_cancel_cmd( memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN); ioarcb->cdb[0] = PMCRAID_ABORT_CMD; - /* IOARCB address of the command to be cancelled is given in - * cdb[2]..cdb[9] is Big-Endian format. Note that length bits in - * IOARCB address are not masked. - */ - ioarcb_addr = cpu_to_be64(ioarcb_addr); memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr)); } @@ -1493,7 +1493,7 @@ static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance) { return pmcraid_notify_aen(pinstance, pinstance->ccn.msg, - pinstance->ccn.hcam->data_len + + le32_to_cpu(pinstance->ccn.hcam->data_len) + sizeof(struct pmcraid_hcam_hdr)); } @@ -1508,7 +1508,7 @@ static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance) { return pmcraid_notify_aen(pinstance, pinstance->ldn.msg, - pinstance->ldn.hcam->data_len + + le32_to_cpu(pinstance->ldn.hcam->data_len) + sizeof(struct pmcraid_hcam_hdr)); } @@ -1556,10 +1556,10 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \ res: %x:%x:%x:%x\n", - pinstance->ccn.hcam->ilid, + le32_to_cpu(pinstance->ccn.hcam->ilid), pinstance->ccn.hcam->op_code, - ((pinstance->ccn.hcam->timestamp1) | - ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)), + (le32_to_cpu(pinstance->ccn.hcam->timestamp1) | + ((le32_to_cpu(pinstance->ccn.hcam->timestamp2) & 0xffffffffLL) << 32)), pinstance->ccn.hcam->notification_type, pinstance->ccn.hcam->notification_lost, pinstance->ccn.hcam->flags, @@ -1570,7 +1570,7 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) RES_IS_VSET(*cfg_entry) ? (fw_version <= PMCRAID_FW_VERSION_1 ? 
cfg_entry->unique_flags1 : - cfg_entry->array_id & 0xFF) : + le16_to_cpu(cfg_entry->array_id) & 0xFF) : RES_TARGET(cfg_entry->resource_address), RES_LUN(cfg_entry->resource_address)); @@ -1658,7 +1658,7 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) if (fw_version <= PMCRAID_FW_VERSION_1) res->cfg_entry.unique_flags1 &= 0x7F; else - res->cfg_entry.array_id &= 0xFF; + res->cfg_entry.array_id &= cpu_to_le16(0xFF); res->change_detected = RES_CHANGE_DEL; res->cfg_entry.resource_handle = PMCRAID_INVALID_RES_HANDLE; @@ -1716,8 +1716,8 @@ static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd) /* log the error string */ pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n", cmd->ioa_cb->ioarcb.cdb[0], - cmd->ioa_cb->ioarcb.resource_handle, - le32_to_cpu(ioasc), error_info->error_string); + le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle), + ioasc, error_info->error_string); } /** @@ -2034,7 +2034,7 @@ static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance) cmd->ioa_cb->ioasa.ioasc = cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET); cmd->ioa_cb->ioasa.ilid = - cpu_to_be32(PMCRAID_DRIVER_ILID); + cpu_to_le32(PMCRAID_DRIVER_ILID); /* In case the command timer is still running */ del_timer(&cmd->timer); @@ -2373,46 +2373,43 @@ static int pmcraid_reset_reload( spin_lock_irqsave(pinstance->host->host_lock, lock_flags); if (pinstance->ioa_state == IOA_STATE_DEAD) { - spin_unlock_irqrestore(pinstance->host->host_lock, - lock_flags); pmcraid_info("reset_reload: IOA is dead\n"); - return reset; - } else if (pinstance->ioa_state == target_state) { + goto out_unlock; + } + + if (pinstance->ioa_state == target_state) { reset = 0; + goto out_unlock; } } - if (reset) { - pmcraid_info("reset_reload: proceeding with reset\n"); - scsi_block_requests(pinstance->host); - reset_cmd = pmcraid_get_free_cmd(pinstance); - - if (reset_cmd == NULL) { - pmcraid_err("no free cmnd for reset_reload\n"); - spin_unlock_irqrestore(pinstance->host->host_lock, - lock_flags); - return reset; - } + pmcraid_info("reset_reload: proceeding with reset\n"); + scsi_block_requests(pinstance->host); + reset_cmd = pmcraid_get_free_cmd(pinstance); + if (reset_cmd == NULL) { + pmcraid_err("no free cmnd for reset_reload\n"); + goto out_unlock; + } - if (shutdown_type == SHUTDOWN_NORMAL) - pinstance->ioa_bringdown = 1; + if (shutdown_type == SHUTDOWN_NORMAL) + pinstance->ioa_bringdown = 1; - pinstance->ioa_shutdown_type = shutdown_type; - pinstance->reset_cmd = reset_cmd; - pinstance->force_ioa_reset = reset; - pmcraid_info("reset_reload: initiating reset\n"); - pmcraid_ioa_reset(reset_cmd); - spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); - pmcraid_info("reset_reload: waiting for reset to complete\n"); - wait_event(pinstance->reset_wait_q, - !pinstance->ioa_reset_in_progress); + pinstance->ioa_shutdown_type = shutdown_type; + pinstance->reset_cmd = reset_cmd; + pinstance->force_ioa_reset = reset; + pmcraid_info("reset_reload: initiating reset\n"); + pmcraid_ioa_reset(reset_cmd); + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); + pmcraid_info("reset_reload: waiting for reset to complete\n"); + wait_event(pinstance->reset_wait_q, + !pinstance->ioa_reset_in_progress); - pmcraid_info("reset_reload: reset is complete !!\n"); - scsi_unblock_requests(pinstance->host); - if (pinstance->ioa_state == target_state) - reset = 0; - } + pmcraid_info("reset_reload: reset is complete !!\n"); + scsi_unblock_requests(pinstance->host); + return pinstance->ioa_state 
!= target_state; +out_unlock: + spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); return reset; } @@ -2529,7 +2526,7 @@ static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense) ioarcb->ioadl_bus_addr = 0; ioarcb->ioadl_length = 0; ioarcb->data_transfer_length = 0; - ioarcb->ioarcb_bus_addr &= (~0x1FULL); + ioarcb->ioarcb_bus_addr &= cpu_to_le64((~0x1FULL)); /* writing to IOARRIN must be protected by host_lock, as mid-layer * schedule queuecommand while we are doing this @@ -2692,8 +2689,8 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd) * mid-layer */ if (ioasa->auto_sense_length != 0) { - short sense_len = ioasa->auto_sense_length; - int data_size = min_t(u16, le16_to_cpu(sense_len), + short sense_len = le16_to_cpu(ioasa->auto_sense_length); + int data_size = min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE); memcpy(scsi_cmd->sense_buffer, @@ -2915,7 +2912,7 @@ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd) pmcraid_info("aborting command CDB[0]= %x with index = %d\n", cmd->ioa_cb->ioarcb.cdb[0], - cmd->ioa_cb->ioarcb.response_handle >> 2); + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2); init_completion(&cancel_cmd->wait_for_completion); cancel_cmd->completion_req = 1; @@ -3140,9 +3137,8 @@ pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount) int ioadl_count = 0; if (ioarcb->add_cmd_param_length) - ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16); - ioarcb->ioadl_length = - sizeof(struct pmcraid_ioadl_desc) * sgcount; + ioadl_count = DIV_ROUND_UP(le16_to_cpu(ioarcb->add_cmd_param_length), 16); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc) * sgcount); if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) { /* external ioadls start at offset 0x80 from control_block @@ -3150,7 +3146,7 @@ pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount) * It is necessary to indicate to firmware that driver is * using ioadls to be treated as external to IOARCB. */ - ioarcb->ioarcb_bus_addr &= ~(0x1FULL); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + offsetof(struct pmcraid_ioarcb, @@ -3164,7 +3160,7 @@ pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount) ioadl = &ioarcb->add_data.u.ioadl[ioadl_count]; ioarcb->ioarcb_bus_addr |= - DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8); + cpu_to_le64(DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8)); } return ioadl; @@ -3325,7 +3321,7 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) */ static int pmcraid_copy_sglist( struct pmcraid_sglist *sglist, - unsigned long buffer, + void __user *buffer, u32 len, int direction ) @@ -3346,11 +3342,9 @@ static int pmcraid_copy_sglist( kaddr = kmap(page); if (direction == DMA_TO_DEVICE) - rc = __copy_from_user(kaddr, - (void *)buffer, - bsize_elem); + rc = copy_from_user(kaddr, buffer, bsize_elem); else - rc = __copy_to_user((void *)buffer, kaddr, bsize_elem); + rc = copy_to_user(buffer, kaddr, bsize_elem); kunmap(page); @@ -3368,13 +3362,9 @@ static int pmcraid_copy_sglist( kaddr = kmap(page); if (direction == DMA_TO_DEVICE) - rc = __copy_from_user(kaddr, - (void *)buffer, - len % bsize_elem); + rc = copy_from_user(kaddr, buffer, len % bsize_elem); else - rc = __copy_to_user((void *)buffer, - kaddr, - len % bsize_elem); + rc = copy_to_user(buffer, kaddr, len % bsize_elem); kunmap(page); @@ -3496,7 +3486,7 @@ static int pmcraid_queuecommand_lck( RES_IS_VSET(res->cfg_entry) ? (fw_version <= PMCRAID_FW_VERSION_1 ? 
res->cfg_entry.unique_flags1 : - res->cfg_entry.array_id & 0xFF) : + le16_to_cpu(res->cfg_entry.array_id) & 0xFF) : RES_TARGET(res->cfg_entry.resource_address), RES_LUN(res->cfg_entry.resource_address)); @@ -3652,17 +3642,17 @@ static long pmcraid_ioctl_passthrough( struct pmcraid_instance *pinstance, unsigned int ioctl_cmd, unsigned int buflen, - unsigned long arg + void __user *arg ) { struct pmcraid_passthrough_ioctl_buffer *buffer; struct pmcraid_ioarcb *ioarcb; struct pmcraid_cmd *cmd; struct pmcraid_cmd *cancel_cmd; - unsigned long request_buffer; + void __user *request_buffer; unsigned long request_offset; unsigned long lock_flags; - void *ioasa; + void __user *ioasa; u32 ioasc; int request_size; int buffer_size; @@ -3701,13 +3691,10 @@ static long pmcraid_ioctl_passthrough( request_buffer = arg + request_offset; - rc = __copy_from_user(buffer, - (struct pmcraid_passthrough_ioctl_buffer *) arg, + rc = copy_from_user(buffer, arg, sizeof(struct pmcraid_passthrough_ioctl_buffer)); - ioasa = - (void *)(arg + - offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); + ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa); if (rc) { pmcraid_err("ioctl: can't copy passthrough buffer\n"); @@ -3715,7 +3702,7 @@ static long pmcraid_ioctl_passthrough( goto out_free_buffer; } - request_size = buffer->ioarcb.data_transfer_length; + request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length); if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) { access = VERIFY_READ; @@ -3725,20 +3712,14 @@ static long pmcraid_ioctl_passthrough( direction = DMA_FROM_DEVICE; } - if (request_size > 0) { - rc = access_ok(access, arg, request_offset + request_size); - - if (!rc) { - rc = -EFAULT; - goto out_free_buffer; - } - } else if (request_size < 0) { + if (request_size < 0) { rc = -EINVAL; goto out_free_buffer; } /* check if we have any additional command parameters */ - if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) { + if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length) + > PMCRAID_ADD_CMD_PARAM_LEN) { rc = -EINVAL; goto out_free_buffer; } @@ -3770,7 +3751,7 @@ static long pmcraid_ioctl_passthrough( buffer->ioarcb.add_cmd_param_offset; memcpy(ioarcb->add_data.u.add_cmd_params, buffer->ioarcb.add_data.u.add_cmd_params, - buffer->ioarcb.add_cmd_param_length); + le16_to_cpu(buffer->ioarcb.add_cmd_param_length)); } /* set hrrq number where the IOA should respond to. 
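The pmcraid ioctl rework above is the standard user-pointer cleanup: user addresses are typed void __user * end to end, and the access_ok() + __copy_{from,to}_user() pairs collapse into plain copy_from_user()/copy_to_user(), which validate the range themselves. Reduced to a fragment (the function name is illustrative):

	static long example_chr_ioctl(struct file *filp, unsigned int cmd,
				      unsigned long arg)
	{
		void __user *argp = (void __user *)arg;
		struct pmcraid_ioctl_header hdr;

		/* copy_from_user() fails cleanly on a bad range; no
		 * separate access_ok() check is needed. */
		if (copy_from_user(&hdr, argp, sizeof(hdr)))
			return -EFAULT;

		/* ... validate hdr and dispatch on cmd ... */
		return 0;
	}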
Note that all cmds @@ -3840,10 +3821,10 @@ static long pmcraid_ioctl_passthrough( wait_for_completion(&cmd->wait_for_completion); } else if (!wait_for_completion_timeout( &cmd->wait_for_completion, - msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) { + msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) { pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n", - le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle >> 2), + le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, cmd->ioa_cb->ioarcb.cdb[0]); spin_lock_irqsave(pinstance->host->host_lock, lock_flags); @@ -3852,7 +3833,7 @@ static long pmcraid_ioctl_passthrough( if (cancel_cmd) { wait_for_completion(&cancel_cmd->wait_for_completion); - ioasc = cancel_cmd->ioa_cb->ioasa.ioasc; + ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); pmcraid_return_cmd(cancel_cmd); /* if abort task couldn't find the command i.e it got @@ -3941,11 +3922,6 @@ static long pmcraid_ioctl_driver( { int rc = -ENOSYS; - if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) { - pmcraid_err("ioctl_driver: access fault in request buffer\n"); - return -EFAULT; - } - switch (cmd) { case PMCRAID_IOCTL_RESET_ADAPTER: pmcraid_reset_bringup(pinstance); @@ -3977,8 +3953,7 @@ static int pmcraid_check_ioctl_buffer( struct pmcraid_ioctl_header *hdr ) { - int rc = 0; - int access = VERIFY_READ; + int rc; if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) { pmcraid_err("couldn't copy ioctl header from user buffer\n"); @@ -3994,19 +3969,6 @@ static int pmcraid_check_ioctl_buffer( return -EINVAL; } - /* check for appropriate buffer access */ - if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ) - access = VERIFY_WRITE; - - rc = access_ok(access, - (arg + sizeof(struct pmcraid_ioctl_header)), - hdr->buffer_length); - if (!rc) { - pmcraid_err("access failed for user buffer of size %d\n", - hdr->buffer_length); - return -EFAULT; - } - return 0; } @@ -4021,6 +3983,7 @@ static long pmcraid_chr_ioctl( { struct pmcraid_instance *pinstance = NULL; struct pmcraid_ioctl_header *hdr = NULL; + void __user *argp = (void __user *)arg; int retval = -ENOTTY; hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL); @@ -4030,7 +3993,7 @@ static long pmcraid_chr_ioctl( return -ENOMEM; } - retval = pmcraid_check_ioctl_buffer(cmd, (void *)arg, hdr); + retval = pmcraid_check_ioctl_buffer(cmd, argp, hdr); if (retval) { pmcraid_info("chr_ioctl: header check failed\n"); @@ -4055,10 +4018,8 @@ static long pmcraid_chr_ioctl( if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) scsi_block_requests(pinstance->host); - retval = pmcraid_ioctl_passthrough(pinstance, - cmd, - hdr->buffer_length, - arg); + retval = pmcraid_ioctl_passthrough(pinstance, cmd, + hdr->buffer_length, argp); if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) scsi_unblock_requests(pinstance->host); @@ -4066,10 +4027,8 @@ static long pmcraid_chr_ioctl( case PMCRAID_DRIVER_IOCTL: arg += sizeof(struct pmcraid_ioctl_header); - retval = pmcraid_ioctl_driver(pinstance, - cmd, - hdr->buffer_length, - (void __user *)arg); + retval = pmcraid_ioctl_driver(pinstance, cmd, + hdr->buffer_length, argp); break; default: @@ -4470,7 +4429,7 @@ static void pmcraid_worker_function(struct work_struct *workp) if (fw_version <= PMCRAID_FW_VERSION_1) target = res->cfg_entry.unique_flags1; else - target = res->cfg_entry.array_id & 0xFF; + target = le16_to_cpu(res->cfg_entry.array_id) & 0xFF; lun = PMCRAID_VSET_LUN_ID; } else { bus = PMCRAID_PHYS_BUS_ID; @@ -4509,7 +4468,7 @@ static void pmcraid_tasklet_function(unsigned long 
instance) unsigned long host_lock_flags; spinlock_t *lockp; /* hrrq buffer lock */ int id; - __le32 resp; + u32 resp; hrrq_vector = (struct pmcraid_isr_param *)instance; pinstance = hrrq_vector->drv_inst; @@ -4833,7 +4792,7 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance) buffer_size, &(pinstance->hrrq_start_bus_addr[i])); - if (pinstance->hrrq_start[i] == 0) { + if (!pinstance->hrrq_start[i]) { pmcraid_err("pci_alloc failed for hrrq vector : %d\n", i); pmcraid_release_host_rrqs(pinstance, i); @@ -5549,8 +5508,7 @@ static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN); struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; - - __le64 timestamp; + u64 timestamp; timestamp = ktime_get_real_seconds() * 1000; @@ -5572,7 +5530,7 @@ static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd) offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); - ioarcb->ioarcb_bus_addr &= ~(0x1FULL); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL)); ioarcb->request_flags0 |= NO_LINK_DESCS; ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; @@ -5631,7 +5589,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd) list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) list_move_tail(&res->queue, &old_res); - for (i = 0; i < pinstance->cfg_table->num_entries; i++) { + for (i = 0; i < le16_to_cpu(pinstance->cfg_table->num_entries); i++) { if (be16_to_cpu(pinstance->inq_data->fw_version) <= PMCRAID_FW_VERSION_1) cfgte = &pinstance->cfg_table->entries[i]; @@ -5686,7 +5644,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd) res->cfg_entry.resource_type, (fw_version <= PMCRAID_FW_VERSION_1 ? 
res->cfg_entry.unique_flags1 : - res->cfg_entry.array_id & 0xFF), + le16_to_cpu(res->cfg_entry.array_id) & 0xFF), le32_to_cpu(res->cfg_entry.resource_address)); } } @@ -5724,7 +5682,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; struct pmcraid_instance *pinstance = cmd->drv_inst; - int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table)); + __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table)); if (be16_to_cpu(pinstance->inq_data->fw_version) <= PMCRAID_FW_VERSION_1) @@ -5749,7 +5707,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd) offsetof(struct pmcraid_ioarcb, add_data.u.ioadl[0])); ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); - ioarcb->ioarcb_bus_addr &= ~(0x1FULL); + ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL); ioarcb->request_flags0 |= NO_LINK_DESCS; ioarcb->data_transfer_length = diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index 568b18a2f47d..01eb2bc16dc1 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -554,7 +554,7 @@ struct pmcraid_inquiry_data { __u8 add_page_len; __u8 length; __u8 reserved2; - __le16 fw_version; + __be16 fw_version; __u8 reserved3[16]; }; @@ -697,13 +697,13 @@ struct pmcraid_instance { dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS]; /* Pointer to 1st entry of HRRQ */ - __be32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS]; + __le32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS]; /* Pointer to last entry of HRRQ */ - __be32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS]; + __le32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS]; /* Pointer to current pointer of hrrq */ - __be32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS]; + __le32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS]; /* Lock for HRRQ access */ spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS]; diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index cb08b625c594..00a1d6405ebe 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -449,7 +449,7 @@ const struct file_operations qedf_dbg_fops[] = { qedf_dbg_fileops(qedf, clear_stats), qedf_dbg_fileops_seq(qedf, offload_stats), /* This must be last */ - { NULL, NULL }, + { }, }; #else /* CONFIG_DEBUG_FS */ diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index 59417199bf36..39d77818a677 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -240,5 +240,5 @@ const struct file_operations qedi_dbg_fops[] = { qedi_dbg_fileops_seq(qedi, gbl_ctx), qedi_dbg_fileops(qedi, do_not_recover), qedi_dbg_fileops_seq(qedi, io_trace), - { NULL, NULL }, + { }, }; diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index d1de172bebac..3548d46f9b27 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -1370,7 +1370,7 @@ static void qedi_cleanup_task(struct iscsi_task *task) { if (!task->sc || task->state == ISCSI_TASK_PENDING) { QEDI_INFO(NULL, QEDI_LOG_IO, "Returning ref_cnt=%d\n", - atomic_read(&task->refcount)); + refcount_read(&task->refcount)); return; } diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 435ff7fd6384..7c8d6c54ab70 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -695,7 +695,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, case 0x2025e: if (!IS_P3P_TYPE(ha) || vha != base_vha) { ql_log(ql_log_info, vha, 
0x7071, - "FCoE ctx reset no supported.\n"); + "FCoE ctx reset not supported.\n"); return -EPERM; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index b6e40fd4c3c1..16d1cd50feed 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1822,7 +1822,7 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) /* Check if operating mode is P2P */ if (ha->operating_mode != P2P) { ql_log(ql_log_warn, vha, 0x70a4, - "Host is operating mode is not P2p\n"); + "Host operating mode is not P2p\n"); rval = EXT_STATUS_INVALID_CFG; goto done; } diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index ab0f873fd6a1..9bc9aa9e164a 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -144,7 +144,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, if (ct_rsp->header.response != cpu_to_be16(CT_ACCEPT_RESPONSE)) { ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, - "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n", + "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n", routine, vha->d_id.b.domain, vha->d_id.b.area, vha->d_id.b.al_pa, comp_status, ct_rsp->header.response); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f9d2fe7b1ade..034743309ada 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2289,7 +2289,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; /* Check product ID of chip */ - ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product Id of chip.\n"); + ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n"); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3203367a4f42..aac03504d9a3 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2100,14 +2100,14 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, case CS_DATA_OVERRUN: ql_dbg(ql_dbg_user, vha, 0x70b1, - "Command completed with date overrun thread_id=%d\n", + "Command completed with data overrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_OVERRUN; break; case CS_DATA_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b2, - "Command completed with date underrun thread_id=%d\n", + "Command completed with data underrun thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; @@ -2134,7 +2134,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, case CS_BIDIR_RD_UNDERRUN: ql_dbg(ql_dbg_user, vha, 0x70b6, - "Command completed with read data data underrun " + "Command completed with read data underrun " "thread_id=%d\n", thread_id); rval = EXT_STATUS_DATA_UNDERRUN; break; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 83d61d2142e9..1c7957903283 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -423,7 +423,6 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) kfree(req->outstanding_cmds); kfree(req); - req = NULL; } static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) @@ -439,7 +438,6 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) rsp->ring, rsp->dma); } kfree(rsp); - rsp = NULL; } static void qla2x00_free_queues(struct qla_hw_data *ha) @@ -653,7 +651,6 @@ qla2x00_sp_free_dma(void *ptr) ha->gbl_dsd_inuse -= 
ctx1->dsd_use_cnt; ha->gbl_dsd_avail += ctx1->dsd_use_cnt; mempool_free(ctx1, ha->ctx_mempool); - ctx1 = NULL; } CMD_SP(cmd) = NULL; @@ -3256,7 +3253,6 @@ iospace_config_failed: } pci_release_selected_regions(ha->pdev, ha->bars); kfree(ha); - ha = NULL; probe_out: pci_disable_device(pdev); @@ -3504,7 +3500,6 @@ qla2x00_remove_one(struct pci_dev *pdev) pci_release_selected_regions(ha->pdev, ha->bars); kfree(ha); - ha = NULL; pci_disable_pcie_error_reporting(pdev); @@ -3568,7 +3563,6 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha) list_del(&fcport->list); qla2x00_clear_loop_id(fcport); kfree(fcport); - fcport = NULL; } } diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 4180d6d9fe78..5d6d158bbfd6 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -389,7 +389,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha) goto alloc_cleanup; DEBUG2(ql4_printk(KERN_INFO, ha, - "Minidump Tempalate Size = 0x%x KB\n", + "Minidump Template Size = 0x%x KB\n", ha->fw_dump_tmplt_size)); DEBUG2(ql4_printk(KERN_INFO, ha, "Total Minidump size = 0x%x KB\n", ha->fw_dump_size)); diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index ac52150d1569..64c6fa563fdb 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -8664,7 +8664,6 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev, init_completion(&ha->disable_acb_comp); init_completion(&ha->idc_comp); init_completion(&ha->link_up_comp); - init_completion(&ha->disable_acb_comp); spin_lock_init(&ha->hardware_lock); spin_lock_init(&ha->work_lock); diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 2db412dd4b44..ecc07dab893d 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -46,6 +46,8 @@ #include <trace/events/scsi.h> +#include <asm/unaligned.h> + static void scsi_eh_done(struct scsi_cmnd *scmd); /* @@ -162,13 +164,7 @@ scmd_eh_abort_handler(struct work_struct *work) } } - if (!scsi_eh_scmd_add(scmd, 0)) { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_WARNING, scmd, - "terminate aborted command\n")); - set_host_byte(scmd, DID_TIME_OUT); - scsi_finish_command(scmd); - } + scsi_eh_scmd_add(scmd); } /** @@ -188,7 +184,6 @@ scsi_abort_command(struct scsi_cmnd *scmd) /* * Retry after abort failed, escalate to next level. */ - scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED; SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, "previous abort failed\n")); @@ -196,19 +191,7 @@ scsi_abort_command(struct scsi_cmnd *scmd) return FAILED; } - /* - * Do not try a command abort if - * SCSI EH has already started. - */ spin_lock_irqsave(shost->host_lock, flags); - if (scsi_host_in_recovery(shost)) { - spin_unlock_irqrestore(shost->host_lock, flags); - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "not aborting, host in recovery\n")); - return FAILED; - } - if (shost->eh_deadline != -1 && !shost->last_reset) shost->last_reset = jiffies; spin_unlock_irqrestore(shost->host_lock, flags); @@ -221,40 +204,47 @@ scsi_abort_command(struct scsi_cmnd *scmd) } /** - * scsi_eh_scmd_add - add scsi cmd to error handling. + * scsi_eh_reset - call into ->eh_action to reset internal counters * @scmd: scmd to run eh on. - * @eh_flag: optional SCSI_EH flag. * - * Return value: - * 0 on failure. + * The scsi driver might be carrying internal state about the + * devices, so we need to call into the driver to reset the + * internal state once the error handler is started. 
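[Annotation] The hook introduced just below dispatches through struct scsi_driver, so it only fires for filesystem requests that have an upper-level driver bound to the gendisk (the blk_rq_is_passthrough() check). A minimal sketch of a consumer, assuming a hypothetical ULD — the myd_* names are invented; sd's real hookup appears further down in this diff:

/* Called once per failed command as it enters SCSI EH, before any
 * recovery is attempted -- the right place to clear per-EH-run state.
 */
static void myd_eh_reset(struct scsi_cmnd *scmd)
{
        struct myd_disk *mdp = myd_disk(scmd->request->rq_disk); /* invented */

        mdp->counted_this_eh_run = false;       /* invented gate variable */
}

static struct scsi_driver myd_template = {
        .gendrv   = { .name = "myd" },
        .eh_reset = myd_eh_reset,
};
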
*/ -int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag) +static void scsi_eh_reset(struct scsi_cmnd *scmd) +{ + if (!blk_rq_is_passthrough(scmd->request)) { + struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); + if (sdrv->eh_reset) + sdrv->eh_reset(scmd); + } +} + +/** + * scsi_eh_scmd_add - add scsi cmd to error handling. + * @scmd: scmd to run eh on. + */ +void scsi_eh_scmd_add(struct scsi_cmnd *scmd) { struct Scsi_Host *shost = scmd->device->host; unsigned long flags; - int ret = 0; + int ret; - if (!shost->ehandler) - return 0; + WARN_ON_ONCE(!shost->ehandler); spin_lock_irqsave(shost->host_lock, flags); - if (scsi_host_set_state(shost, SHOST_RECOVERY)) - if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY)) - goto out_unlock; - + if (scsi_host_set_state(shost, SHOST_RECOVERY)) { + ret = scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY); + WARN_ON_ONCE(ret); + } if (shost->eh_deadline != -1 && !shost->last_reset) shost->last_reset = jiffies; - ret = 1; - if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) - eh_flag &= ~SCSI_EH_CANCEL_CMD; - scmd->eh_eflags |= eh_flag; + scsi_eh_reset(scmd); list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q); shost->host_failed++; scsi_eh_wakeup(shost); - out_unlock: spin_unlock_irqrestore(shost->host_lock, flags); - return ret; } /** @@ -283,13 +273,10 @@ enum blk_eh_timer_return scsi_times_out(struct request *req) rtn = host->hostt->eh_timed_out(scmd); if (rtn == BLK_EH_NOT_HANDLED) { - if (!host->hostt->no_async_abort && - scsi_abort_command(scmd) == SUCCESS) - return BLK_EH_NOT_HANDLED; - - set_host_byte(scmd, DID_TIME_OUT); - if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) - rtn = BLK_EH_HANDLED; + if (scsi_abort_command(scmd) != SUCCESS) { + set_host_byte(scmd, DID_TIME_OUT); + scsi_eh_scmd_add(scmd); + } } return rtn; @@ -341,7 +328,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, list_for_each_entry(scmd, work_q, eh_entry) { if (scmd->device == sdev) { ++total_failures; - if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) + if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ++cmd_cancel; else ++cmd_failed; @@ -931,6 +918,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, ses->result = scmd->result; ses->underflow = scmd->underflow; ses->prot_op = scmd->prot_op; + ses->eh_eflags = scmd->eh_eflags; scmd->prot_op = SCSI_PROT_NORMAL; scmd->eh_eflags = 0; @@ -994,6 +982,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) scmd->result = ses->result; scmd->underflow = ses->underflow; scmd->prot_op = ses->prot_op; + scmd->eh_eflags = ses->eh_eflags; } EXPORT_SYMBOL(scsi_eh_restore_cmnd); @@ -1126,7 +1115,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) */ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) { - scmd->eh_eflags = 0; list_move_tail(&scmd->eh_entry, done_q); } EXPORT_SYMBOL(scsi_eh_finish_cmd); @@ -1163,8 +1151,7 @@ int scsi_eh_get_sense(struct list_head *work_q, * should not get sense. */ list_for_each_entry_safe(scmd, next, work_q, eh_entry) { - if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) || - (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) || + if ((scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) || SCSI_SENSE_VALID(scmd)) continue; @@ -1304,61 +1291,6 @@ static int scsi_eh_test_devices(struct list_head *cmd_list, return list_empty(work_q); } - -/** - * scsi_eh_abort_cmds - abort pending commands. - * @work_q: &list_head for pending commands. - * @done_q: &list_head for processed commands. 
- * - * Decription: - * Try and see whether or not it makes sense to try and abort the - * running command. This only works out to be the case if we have one - * command that has timed out. If the command simply failed, it makes - * no sense to try and abort the command, since as far as the shost - * adapter is concerned, it isn't running. - */ -static int scsi_eh_abort_cmds(struct list_head *work_q, - struct list_head *done_q) -{ - struct scsi_cmnd *scmd, *next; - LIST_HEAD(check_list); - int rtn; - struct Scsi_Host *shost; - - list_for_each_entry_safe(scmd, next, work_q, eh_entry) { - if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD)) - continue; - shost = scmd->device->host; - if (scsi_host_eh_past_deadline(shost)) { - list_splice_init(&check_list, work_q); - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "%s: skip aborting cmd, past eh deadline\n", - current->comm)); - return list_empty(work_q); - } - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "%s: aborting cmd\n", current->comm)); - rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); - if (rtn == FAILED) { - SCSI_LOG_ERROR_RECOVERY(3, - scmd_printk(KERN_INFO, scmd, - "%s: aborting cmd failed\n", - current->comm)); - list_splice_init(&check_list, work_q); - return list_empty(work_q); - } - scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; - if (rtn == FAST_IO_FAIL) - scsi_eh_finish_cmd(scmd, done_q); - else - list_move_tail(&scmd->eh_entry, &check_list); - } - - return scsi_eh_test_devices(&check_list, work_q, done_q, 0); -} - /** * scsi_eh_try_stu - Send START_UNIT to device. * @scmd: &scsi_cmnd to send START_UNIT @@ -1701,11 +1633,6 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q, sdev_printk(KERN_INFO, scmd->device, "Device offlined - " "not ready after error recovery\n"); scsi_device_set_state(scmd->device, SDEV_OFFLINE); - if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) { - /* - * FIXME: Handle lost cmds. - */ - } scsi_eh_finish_cmd(scmd, done_q); } return; @@ -2149,8 +2076,7 @@ static void scsi_unjam_host(struct Scsi_Host *shost) SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q)); if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q)) - if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q)) - scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); + scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); spin_lock_irqsave(shost->host_lock, flags); if (shost->eh_deadline != -1) @@ -2437,44 +2363,34 @@ EXPORT_SYMBOL(scsi_command_normalize_sense); * field will be placed if found. * * Return value: - * 1 if information field found, 0 if not found. + * true if information field found, false if not found. 
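[Annotation] For reference, the sense formats the helper below handles: fixed-format sense (response codes 0x70/0x71) carries a 32-bit big-endian INFORMATION field in bytes 3-6, guarded by the VALID bit in byte 0, while descriptor-format sense (0x72/0x73) carries a 64-bit field in an information descriptor (type 0x00, additional length 0x0a). A self-contained userspace restatement under those SPC rules — sense_info_fld() and the sample buffer are made up for the demo:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool sense_info_fld(const uint8_t *sb, int len, uint64_t *out)
{
        if (len < 7)
                return false;
        switch (sb[0] & 0x7f) {
        case 0x70: case 0x71:                   /* fixed format */
                if (!(sb[0] & 0x80))            /* VALID bit not set */
                        return false;
                *out = ((uint64_t)sb[3] << 24) | (sb[4] << 16) |
                       (sb[5] << 8) | sb[6];
                return true;
        case 0x72: case 0x73: {                 /* descriptor format */
                int off = 8;                    /* descriptors start here */
                int end = 8 + sb[7] < len ? 8 + sb[7] : len;

                while (off + 12 <= end) {
                        const uint8_t *d = &sb[off];

                        if (d[0] == 0x00 && d[1] == 0x0a) {
                                uint64_t v = 0;
                                for (int i = 0; i < 8; i++)
                                        v = (v << 8) | d[4 + i];
                                *out = v;
                                return true;
                        }
                        off += d[1] + 2;        /* next descriptor */
                }
                return false;
        }
        default:
                return false;
        }
}

int main(void)
{
        /* fixed-format MEDIUM ERROR with INFORMATION = 0x1234 */
        uint8_t sense[18] = { 0x70 | 0x80, 0, 0x03, 0x00, 0x00, 0x12, 0x34 };
        uint64_t info;

        if (sense_info_fld(sense, sizeof(sense), &info))
                printf("bad LBA = 0x%llx\n", (unsigned long long)info);
        return 0;
}
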
*/ -int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, - u64 * info_out) +bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len, + u64 *info_out) { - int j; const u8 * ucp; - u64 ull; if (sb_len < 7) - return 0; + return false; switch (sense_buffer[0] & 0x7f) { case 0x70: case 0x71: if (sense_buffer[0] & 0x80) { - *info_out = (sense_buffer[3] << 24) + - (sense_buffer[4] << 16) + - (sense_buffer[5] << 8) + sense_buffer[6]; - return 1; - } else - return 0; + *info_out = get_unaligned_be32(&sense_buffer[3]); + return true; + } + return false; case 0x72: case 0x73: ucp = scsi_sense_desc_find(sense_buffer, sb_len, 0 /* info desc */); if (ucp && (0xa == ucp[1])) { - ull = 0; - for (j = 0; j < 8; ++j) { - if (j > 0) - ull <<= 8; - ull |= ucp[4 + j]; - } - *info_out = ull; - return 1; - } else - return 0; + *info_out = get_unaligned_be64(&ucp[4]); + return true; + } + return false; default: - return 0; + return false; } } EXPORT_SYMBOL(scsi_get_sense_info_fld); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1c3e87d6c48f..a1effcce67b3 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1593,8 +1593,8 @@ static void scsi_softirq_done(struct request *rq) scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); break; default: - if (!scsi_eh_scmd_add(cmd, 0)) - scsi_finish_command(cmd); + scsi_eh_scmd_add(cmd); + break; } } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index f11bd102d6d5..59ebc1795bb3 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -18,7 +18,6 @@ struct scsi_nl_hdr; /* * Scsi Error Handler Flags */ -#define SCSI_EH_CANCEL_CMD 0x0001 /* Cancel this cmd */ #define SCSI_EH_ABORT_SCHEDULED 0x0002 /* Abort has been scheduled */ #define SCSI_SENSE_VALID(scmd) \ @@ -72,7 +71,7 @@ extern enum blk_eh_timer_return scsi_times_out(struct request *req); extern int scsi_error_handler(void *host); extern int scsi_decide_disposition(struct scsi_cmnd *cmd); extern void scsi_eh_wakeup(struct Scsi_Host *shost); -extern int scsi_eh_scmd_add(struct scsi_cmnd *, int); +extern void scsi_eh_scmd_add(struct scsi_cmnd *); void scsi_eh_ready_devs(struct Scsi_Host *shost, struct list_head *work_q, struct list_head *done_q); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2d753c93e07a..d4cf32d55546 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -289,9 +289,10 @@ static const struct { u32 value; char *name; } fc_port_role_names[] = { - { FC_PORT_ROLE_FCP_TARGET, "FCP Target" }, - { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, - { FC_PORT_ROLE_IP_PORT, "IP Port" }, + { FC_PORT_ROLE_FCP_TARGET, "FCP Target" }, + { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, + { FC_PORT_ROLE_IP_PORT, "IP Port" }, + { FC_PORT_ROLE_FCP_DUMMY_INITIATOR, "FCP Dummy Initiator" }, }; fc_bitfield_name_search(port_roles, fc_port_role_names) @@ -850,7 +851,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val) char *cp; *val = simple_strtoul(buf, &cp, 0); - if ((*cp && (*cp != '\n')) || (*val < 0)) + if (*cp && (*cp != '\n')) return -EINVAL; /* * Check for overflow; dev_loss_tmo is u32 @@ -2628,7 +2629,8 @@ fc_remote_port_create(struct Scsi_Host *shost, int channel, spin_lock_irqsave(shost->host_lock, flags); rport->number = fc_host->next_rport_number++; - if (rport->roles & FC_PORT_ROLE_FCP_TARGET) + if ((rport->roles & FC_PORT_ROLE_FCP_TARGET) || + (rport->roles & FC_PORT_ROLE_FCP_DUMMY_INITIATOR)) rport->scsi_target_id = 
fc_host->next_target_id++; else rport->scsi_target_id = -1; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 568c9f26a561..a424eaeafeb0 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -2158,7 +2158,6 @@ static int iscsi_iter_destroy_conn_fn(struct device *dev, void *data) void iscsi_remove_session(struct iscsi_cls_session *session) { - struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; int err; @@ -2185,7 +2184,7 @@ void iscsi_remove_session(struct iscsi_cls_session *session) scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); /* flush running scans then delete devices */ - scsi_flush_work(shost); + flush_work(&session->scan_work); __iscsi_unbind_session(&session->unbind_work); /* hw iscsi may not have removed all connections from session */ diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 9fdbd50c31b4..0ebe2f1bb908 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -370,12 +370,16 @@ EXPORT_SYMBOL(sas_remove_children); * sas_remove_host - tear down a Scsi_Host's SAS data structures * @shost: Scsi Host that is torn down * - * Removes all SAS PHYs and remote PHYs for a given Scsi_Host. - * Must be called just before scsi_remove_host for SAS HBAs. + * Removes all SAS PHYs and remote PHYs for a given Scsi_Host and remove the + * Scsi_Host as well. + * + * Note: Do not call scsi_remove_host() on the Scsi_Host any more, as it is + * already removed. */ void sas_remove_host(struct Scsi_Host *shost) { sas_remove_children(&shost->shost_gendev); + scsi_remove_host(shost); } EXPORT_SYMBOL(sas_remove_host); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 0dc95e102e69..f9d1432d7cc5 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -115,6 +115,7 @@ static void sd_rescan(struct device *); static int sd_init_command(struct scsi_cmnd *SCpnt); static void sd_uninit_command(struct scsi_cmnd *SCpnt); static int sd_done(struct scsi_cmnd *); +static void sd_eh_reset(struct scsi_cmnd *); static int sd_eh_action(struct scsi_cmnd *, int); static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); @@ -573,6 +574,7 @@ static struct scsi_driver sd_template = { .uninit_command = sd_uninit_command, .done = sd_done, .eh_action = sd_eh_action, + .eh_reset = sd_eh_reset, }; /* @@ -888,8 +890,8 @@ out: * sd_setup_write_same_cmnd - write the same data to multiple blocks * @cmd: command to prepare * - * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on - * preference indicated by target device. + * Will set up either WRITE SAME(10) or WRITE SAME(16) depending on + * the preference indicated by the target device. 
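[Annotation] On the CDB choice this comment refers to: the 10-byte WRITE SAME has only a 32-bit LBA field and a 16-bit block-count field, so large disks or long runs force the 16-byte variant regardless of preference. A standalone restatement of that rule of thumb — not lifted from sd.c; the predicate and its name are invented:

#include <stdbool.h>
#include <stdint.h>

static bool need_write_same_16(uint64_t lba, uint32_t nr_blocks,
                               bool device_prefers_ws16)
{
        if (device_prefers_ws16)
                return true;
        if (lba + nr_blocks - 1 > UINT32_MAX)   /* last LBA must fit in 32 bits */
                return true;
        if (nr_blocks > UINT16_MAX)             /* WS(10) transfer length field */
                return true;
        return false;
}
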
**/ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) { @@ -908,7 +910,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); if (sd_is_zoned(sdkp)) { - ret = sd_zbc_setup_write_cmnd(cmd); + ret = sd_zbc_write_lock_zone(cmd); if (ret != BLKPREP_OK) return ret; } @@ -980,7 +982,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) unsigned char protect; if (zoned_write) { - ret = sd_zbc_setup_write_cmnd(SCpnt); + ret = sd_zbc_write_lock_zone(SCpnt); if (ret != BLKPREP_OK) return ret; } @@ -1207,7 +1209,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) ret = BLKPREP_OK; out: if (zoned_write && ret != BLKPREP_OK) - sd_zbc_cancel_write_cmnd(SCpnt); + sd_zbc_write_unlock_zone(SCpnt); return ret; } @@ -1264,8 +1266,8 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) /** * sd_open - open a scsi disk device - * @inode: only i_rdev member may be used - * @filp: only f_mode and f_flags may be used + * @bdev: Block device of the scsi disk to open + * @mode: FMODE_* mask * * Returns 0 if successful. Returns a negated errno value in case * of error. @@ -1341,8 +1343,8 @@ error_out: /** * sd_release - invoked when the (last) close(2) is called on this * scsi disk. - * @inode: only i_rdev member may be used - * @filp: only f_mode and f_flags may be used + * @disk: disk to release + * @mode: FMODE_* mask * * Returns 0. * @@ -1398,8 +1400,8 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) /** * sd_ioctl - process an ioctl - * @inode: only i_rdev/i_bdev members may be used - * @filp: only f_mode and f_flags may be used + * @bdev: target block device + * @mode: FMODE_* mask * @cmd: ioctl command number * @arg: this is third argument given to ioctl(2) system call. * Often contains a pointer. @@ -1762,6 +1764,26 @@ static const struct block_device_operations sd_fops = { }; /** + * sd_eh_reset - reset error handling callback + * @scmd: sd-issued command that has failed + * + * This function is called by the SCSI midlayer before starting + * SCSI EH. When counting medium access failures we have to be + * careful to register it only once per device and SCSI EH run; + * there might be several timed out commands which will cause the + * 'max_medium_access_timeouts' counter to trigger after the first + * SCSI EH run already and set the device to offline. + * So this function resets the internal counter before starting SCSI EH. + **/ +static void sd_eh_reset(struct scsi_cmnd *scmd) +{ + struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk); + + /* New SCSI EH run, reset gate variable */ + sdkp->ignore_medium_access_errors = false; +} + +/** * sd_eh_action - error handling callback * @scmd: sd-issued command that has failed * @eh_disp: The recovery disposition suggested by the midlayer @@ -1790,7 +1812,10 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) * process of recovering or has it suffered an internal failure * that prevents access to the storage medium. */ - sdkp->medium_access_timed_out++; + if (!sdkp->ignore_medium_access_errors) { + sdkp->medium_access_timed_out++; + sdkp->ignore_medium_access_errors = true; + } /* * If the device keeps failing read/write commands but TEST UNIT @@ -1802,7 +1827,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) "Medium access timeout failure. 
Offlining disk!\n"); scsi_device_set_state(scmd->device, SDEV_OFFLINE); - return FAILED; + return SUCCESS; } return eh_disp; @@ -1810,41 +1835,44 @@ static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp) static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) { - u64 start_lba = blk_rq_pos(scmd->request); - u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); - u64 factor = scmd->device->sector_size / 512; - u64 bad_lba; - int info_valid; + struct request *req = scmd->request; + struct scsi_device *sdev = scmd->device; + unsigned int transferred, good_bytes; + u64 start_lba, end_lba, bad_lba; + /* - * resid is optional but mostly filled in. When it's unused, - * its value is zero, so we assume the whole buffer transferred + * Some commands have a payload smaller than the device logical + * block size (e.g. INQUIRY on a 4K disk). */ - unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); - unsigned int good_bytes; - - info_valid = scsi_get_sense_info_fld(scmd->sense_buffer, - SCSI_SENSE_BUFFERSIZE, - &bad_lba); - if (!info_valid) + if (scsi_bufflen(scmd) <= sdev->sector_size) return 0; - if (scsi_bufflen(scmd) <= scmd->device->sector_size) + /* Check if we have a 'bad_lba' information */ + if (!scsi_get_sense_info_fld(scmd->sense_buffer, + SCSI_SENSE_BUFFERSIZE, + &bad_lba)) return 0; - /* be careful ... don't want any overflows */ - do_div(start_lba, factor); - do_div(end_lba, factor); - - /* The bad lba was reported incorrectly, we have no idea where + /* + * If the bad lba was reported incorrectly, we have no idea where * the error is. */ - if (bad_lba < start_lba || bad_lba >= end_lba) + start_lba = sectors_to_logical(sdev, blk_rq_pos(req)); + end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd)); + if (bad_lba < start_lba || bad_lba >= end_lba) return 0; - /* This computation should always be done in terms of - * the resolution of the device's medium. + /* + * resid is optional but mostly filled in. When it's unused, + * its value is zero, so we assume the whole buffer transferred + */ + transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); + + /* This computation should always be done in terms of the + * resolution of the device's medium. 
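[Annotation] Plugging invented numbers into the computation below makes the unit handling concrete: request positions arrive in 512-byte sectors, the sense data reports the bad LBA in logical blocks, and the result must come back in bytes.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int sector_size = 4096;        /* logical block size */
        unsigned int shift = 12 - 9;            /* ilog2(4096) - ilog2(512) */
        uint64_t rq_pos = 8192;                 /* request start, 512 B sectors */
        unsigned int bufflen = 32768;           /* 8 logical blocks */

        uint64_t start_lba = rq_pos >> shift;                 /* 1024 */
        uint64_t end_lba = start_lba + bufflen / sector_size; /* 1032 */
        uint64_t bad_lba = 1027;                /* reported in sense data */

        if (bad_lba >= start_lba && bad_lba < end_lba)
                printf("good bytes = %llu\n",   /* (1027 - 1024) * 4096 = 12288 */
                       (unsigned long long)((bad_lba - start_lba) * sector_size));
        return 0;
}
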
*/ - good_bytes = (bad_lba - start_lba) * scmd->device->sector_size; + good_bytes = logical_to_bytes(sdev, bad_lba - start_lba); + return min(good_bytes, transferred); } @@ -1866,8 +1894,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) struct request *req = SCpnt->request; int sense_valid = 0; int sense_deferred = 0; - unsigned char op = SCpnt->cmnd[0]; - unsigned char unmap = SCpnt->cmnd[1] & 8; switch (req_op(req)) { case REQ_OP_DISCARD: @@ -1941,26 +1967,27 @@ static int sd_done(struct scsi_cmnd *SCpnt) good_bytes = sd_completed_bytes(SCpnt); break; case ILLEGAL_REQUEST: - if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ + switch (sshdr.asc) { + case 0x10: /* DIX: Host detected corruption */ good_bytes = sd_completed_bytes(SCpnt); - /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { - switch (op) { + break; + case 0x20: /* INVALID COMMAND OPCODE */ + case 0x24: /* INVALID FIELD IN CDB */ + switch (SCpnt->cmnd[0]) { case UNMAP: sd_config_discard(sdkp, SD_LBP_DISABLE); break; case WRITE_SAME_16: case WRITE_SAME: - if (unmap) + if (SCpnt->cmnd[1] & 8) { /* UNMAP */ sd_config_discard(sdkp, SD_LBP_DISABLE); - else { + } else { sdkp->device->no_write_same = 1; sd_config_write_same(sdkp); - - good_bytes = 0; req->__data_len = blk_rq_bytes(req); req->rq_flags |= RQF_QUIET; } + break; } } break; @@ -2798,7 +2825,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) /** * sd_read_block_limits - Query disk device for preferred I/O sizes. - * @disk: disk to query + * @sdkp: disk to query */ static void sd_read_block_limits(struct scsi_disk *sdkp) { @@ -2864,7 +2891,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) /** * sd_read_block_characteristics - Query block dev. 
characteristics - * @disk: disk to query + * @sdkp: disk to query */ static void sd_read_block_characteristics(struct scsi_disk *sdkp) { @@ -2912,7 +2939,7 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) /** * sd_read_block_provisioning - Query provisioning VPD page - * @disk: disk to query + * @sdkp: disk to query */ static void sd_read_block_provisioning(struct scsi_disk *sdkp) { diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index a2c4b5c35379..61d02efd366c 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -114,6 +114,7 @@ struct scsi_disk { unsigned rc_basis: 2; unsigned zoned: 2; unsigned urswrz : 1; + unsigned ignore_medium_access_errors : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) @@ -176,6 +177,11 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b return blocks * sdev->sector_size; } +static inline sector_t bytes_to_logical(struct scsi_device *sdev, unsigned int bytes) +{ + return bytes >> ilog2(sdev->sector_size); +} + static inline sector_t sectors_to_logical(struct scsi_device *sdev, sector_t sector) { return sector >> (ilog2(sdev->sector_size) - 9); @@ -274,8 +280,8 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp) extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); extern void sd_zbc_remove(struct scsi_disk *sdkp); extern void sd_zbc_print_zones(struct scsi_disk *sdkp); -extern int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd); -extern void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd); +extern int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd); +extern void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd); extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd); extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd); extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, @@ -293,13 +299,13 @@ static inline void sd_zbc_remove(struct scsi_disk *sdkp) {} static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {} -static inline int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd) +static inline int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd) { /* Let the drive fail requests */ return BLKPREP_OK; } -static inline void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) {} +static inline void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) {} static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd) { diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 1994f7799fce..96855df9f49d 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -237,7 +237,6 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); sector_t sector = blk_rq_pos(rq); sector_t block = sectors_to_logical(sdkp->device, sector); - unsigned int zno = block >> sdkp->zone_shift; if (!sd_is_zoned(sdkp)) /* Not a zoned device */ @@ -250,11 +249,6 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) /* Unaligned request */ return BLKPREP_KILL; - /* Do not allow concurrent reset and writes */ - if (sdkp->zones_wlock && - test_and_set_bit(zno, sdkp->zones_wlock)) - return BLKPREP_DEFER; - cmd->cmd_len = 16; memset(cmd->cmnd, 0, cmd->cmd_len); cmd->cmnd[0] = ZBC_OUT; @@ -269,7 +263,7 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) return BLKPREP_OK; } -int sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd) +int sd_zbc_write_lock_zone(struct scsi_cmnd *cmd) { struct request *rq = cmd->request; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); @@ -303,8 +297,9 @@ int 
sd_zbc_setup_write_cmnd(struct scsi_cmnd *cmd) return BLKPREP_OK; } -static void sd_zbc_unlock_zone(struct request *rq) +void sd_zbc_write_unlock_zone(struct scsi_cmnd *cmd) { + struct request *rq = cmd->request; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); if (sdkp->zones_wlock) { @@ -315,11 +310,6 @@ static void sd_zbc_unlock_zone(struct request *rq) } } -void sd_zbc_cancel_write_cmnd(struct scsi_cmnd *cmd) -{ - sd_zbc_unlock_zone(cmd->request); -} - void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr) @@ -328,39 +318,35 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, struct request *rq = cmd->request; switch (req_op(rq)) { + case REQ_OP_ZONE_RESET: + + if (result && + sshdr->sense_key == ILLEGAL_REQUEST && + sshdr->asc == 0x24) + /* + * INVALID FIELD IN CDB error: reset of a conventional + * zone was attempted. Nothing to worry about, so be + * quiet about the error. + */ + rq->rq_flags |= RQF_QUIET; + break; + case REQ_OP_WRITE: case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE_SAME: - case REQ_OP_ZONE_RESET: /* Unlock the zone */ - sd_zbc_unlock_zone(rq); + sd_zbc_write_unlock_zone(cmd); - if (!result || - sshdr->sense_key != ILLEGAL_REQUEST) - break; - - switch (sshdr->asc) { - case 0x24: - /* - * INVALID FIELD IN CDB error: For a zone reset, - * this means that a reset of a conventional - * zone was attempted. Nothing to worry about in - * this case, so be quiet about the error. - */ - if (req_op(rq) == REQ_OP_ZONE_RESET) - rq->rq_flags |= RQF_QUIET; - break; - case 0x21: + if (result && + sshdr->sense_key == ILLEGAL_REQUEST && + sshdr->asc == 0x21) /* * INVALID ADDRESS FOR WRITE error: It is unlikely that * retrying write requests failed with any kind of * alignement error will result in success. So don't. 
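[Annotation] The rename in these hunks spells out the scheme: one lock bit per zone, taken in the prep path before a write to a sequential zone is issued and dropped from the completion path, including the error cases above. The same idea modelled in userspace with C11 atomics — the driver itself uses a bitmap plus test_and_set_bit():

#include <stdatomic.h>
#include <stdbool.h>

/* Toy per-zone write lock; index = start LBA >> zone_shift. A failed
 * trylock corresponds to BLKPREP_DEFER (requeue the write and retry).
 */
static bool zone_write_trylock(atomic_flag *wlock, unsigned long lba,
                               unsigned int zone_shift)
{
        return !atomic_flag_test_and_set(&wlock[lba >> zone_shift]);
}

static void zone_write_unlock(atomic_flag *wlock, unsigned long lba,
                              unsigned int zone_shift)
{
        atomic_flag_clear(&wlock[lba >> zone_shift]);
}
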
*/ cmd->allowed = 0; - break; - } - break; case REQ_OP_ZONE_REPORT: @@ -565,8 +551,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp) int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) { - sector_t capacity; - int ret = 0; + int ret; if (!sd_is_zoned(sdkp)) /* @@ -598,7 +583,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, ret = sd_zbc_check_capacity(sdkp, buf); if (ret) goto err; - capacity = logical_to_sectors(sdkp->device, sdkp->capacity); /* * Check zone size: only devices with a constant zone size (except diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 50adabbb5808..f1cdf32d7514 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -548,7 +548,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, ecomp = &edev->component[components++]; if (!IS_ERR(ecomp)) { - ses_get_power_status(edev, ecomp); if (addl_desc_ptr) ses_process_descriptor( ecomp, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0b60245bd740..0a38ba01b7b4 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -122,7 +122,7 @@ struct sg_device; /* forward declarations */ struct sg_fd; typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ - struct sg_request *nextrp; /* NULL -> tail request (slist) */ + struct list_head entry; /* list entry */ struct sg_fd *parentfp; /* NULL -> not in use */ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ @@ -142,19 +142,19 @@ typedef struct sg_fd { /* holds the state of a file descriptor */ struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in req_arr */ + struct mutex f_mutex; /* protect against changes in this fd */ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ Sg_scatter_hold reserve; /* buffer held for this file descriptor */ - unsigned save_scat_len; /* original length of trunc. scat. element */ - Sg_request *headrp; /* head of request slist, NULL->empty */ + struct list_head rq_list; /* head of request list */ struct fasync_struct *async_qp; /* used by asynchronous notification */ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ - char low_dma; /* as in parent but possibly overridden to 1 */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ char mmap_called; /* 0 -> mmap() never called on this fd */ + char res_in_use; /* 1 -> 'reserve' array in use */ struct kref f_ref; struct execute_work ew; } Sg_fd; @@ -198,7 +198,6 @@ static void sg_remove_sfp(struct kref *); static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); -static int sg_res_in_use(Sg_fd * sfp); static Sg_device *sg_get_dev(int dev); static void sg_device_destroy(struct kref *kref); @@ -525,6 +524,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) } else count = (old_hdr->result == 0) ? 
0 : -EIO; sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); retval = count; free_old_hdr: kfree(old_hdr); @@ -565,6 +565,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) } err_out: err2 = sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return err ? : err2 ? : count; } @@ -614,6 +615,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) } buf += SZ_SG_HEADER; __get_user(opcode, buf); + mutex_lock(&sfp->f_mutex); if (sfp->next_cmd_len > 0) { cmd_size = sfp->next_cmd_len; sfp->next_cmd_len = 0; /* reset so only this write() effected */ @@ -622,6 +624,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } + mutex_unlock(&sfp->f_mutex); SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); /* Determine buffer size. */ @@ -662,18 +665,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) * is a non-zero input_size, so emit a warning. */ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { - static char cmd[TASK_COMM_LEN]; - if (strcmp(current->comm, cmd)) { - printk_ratelimited(KERN_WARNING - "sg_write: data in/out %d/%d bytes " - "for SCSI command 0x%x-- guessing " - "data in;\n program %s not setting " - "count and/or reply_len properly\n", - old_hdr.reply_len - (int)SZ_SG_HEADER, - input_size, (unsigned int) cmnd[0], - current->comm); - strcpy(cmd, current->comm); - } + printk_ratelimited(KERN_WARNING + "sg_write: data in/out %d/%d bytes " + "for SCSI command 0x%x-- guessing " + "data in;\n program %s not setting " + "count and/or reply_len properly\n", + old_hdr.reply_len - (int)SZ_SG_HEADER, + input_size, (unsigned int) cmnd[0], + current->comm); } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? 
k : count; @@ -721,7 +720,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, sg_remove_request(sfp, srp); return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ } - if (sg_res_in_use(sfp)) { + if (sfp->res_in_use) { sg_remove_request(sfp, srp); return -EBUSY; /* reserve buffer already being used */ } @@ -752,6 +751,29 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, return count; } +static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) +{ + switch (hp->dxfer_direction) { + case SG_DXFER_NONE: + if (hp->dxferp || hp->dxfer_len > 0) + return false; + return true; + case SG_DXFER_TO_DEV: + case SG_DXFER_FROM_DEV: + case SG_DXFER_TO_FROM_DEV: + if (!hp->dxferp || hp->dxfer_len == 0) + return false; + return true; + case SG_DXFER_UNKNOWN: + if ((!hp->dxferp && hp->dxfer_len) || + (hp->dxferp && hp->dxfer_len == 0)) + return false; + return true; + default: + return false; + } +} + static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) @@ -772,11 +794,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); + if (!sg_is_valid_dxfer(hp)) + return -EINVAL; + k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { @@ -787,6 +813,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, } sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return -ENODEV; } @@ -885,24 +912,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) /* strange ..., for backward compatibility */ return sfp->timeout_user; case SG_SET_FORCE_LOW_DMA: - result = get_user(val, ip); - if (result) - return result; - if (val) { - sfp->low_dma = 1; - if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { - val = (int) sfp->reserve.bufflen; - sg_remove_scat(sfp, &sfp->reserve); - sg_build_reserve(sfp, val); - } - } else { - if (atomic_read(&sdp->detaching)) - return -ENODEV; - sfp->low_dma = sdp->device->host->unchecked_isa_dma; - } + /* + * N.B. This ioctl never worked properly, but failed to + * return an error value. So returning '0' to keep compability + * with legacy applications. 
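[Annotation] The sg_is_valid_dxfer() check introduced above reduces to one invariant: the data pointer and length must agree with the declared transfer direction. A pared-down userspace restatement with a smoke test — the struct stands in for sg_io_hdr_t and the constants are renamed for brevity:

#include <stdbool.h>
#include <stdio.h>

enum { DX_NONE, DX_TO_DEV, DX_FROM_DEV, DX_TO_FROM, DX_UNKNOWN };

struct hdr { int dir; void *dxferp; unsigned int dxfer_len; };

static bool valid_dxfer(const struct hdr *h)
{
        switch (h->dir) {
        case DX_NONE:                   /* no transfer: both must be clear */
                return !h->dxferp && h->dxfer_len == 0;
        case DX_TO_DEV:
        case DX_FROM_DEV:
        case DX_TO_FROM:                /* a transfer: both must be set */
                return h->dxferp && h->dxfer_len > 0;
        case DX_UNKNOWN:                /* both set or both clear */
                return (h->dxferp != NULL) == (h->dxfer_len != 0);
        default:
                return false;
        }
}

int main(void)
{
        char buf[512];
        struct hdr ok  = { DX_FROM_DEV, buf, sizeof(buf) };
        struct hdr bad = { DX_NONE, buf, 0 };   /* pointer without a transfer */

        printf("%d %d\n", valid_dxfer(&ok), valid_dxfer(&bad)); /* 1 0 */
        return 0;
}
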
+ */ return 0; case SG_GET_LOW_DMA: - return put_user((int) sfp->low_dma, ip); + return put_user((int) sdp->device->host->unchecked_isa_dma, ip); case SG_GET_SCSI_ID: if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) return -EFAULT; @@ -936,7 +953,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) return -EFAULT; read_lock_irqsave(&sfp->rq_list_lock, iflags); - for (srp = sfp->headrp; srp; srp = srp->nextrp) { + list_for_each_entry(srp, &sfp->rq_list, entry) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); @@ -949,7 +966,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return 0; case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); - for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) { + val = 0; + list_for_each_entry(srp, &sfp->rq_list, entry) { if ((1 == srp->done) && (!srp->sg_io_owned)) ++val; } @@ -965,12 +983,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return -EINVAL; val = min_t(int, val, max_sectors_bytes(sdp->device->request_queue)); + mutex_lock(&sfp->f_mutex); if (val != sfp->reserve.bufflen) { - if (sg_res_in_use(sfp) || sfp->mmap_called) + if (sfp->mmap_called || + sfp->res_in_use) { + mutex_unlock(&sfp->f_mutex); return -EBUSY; + } + sg_remove_scat(sfp, &sfp->reserve); sg_build_reserve(sfp, val); } + mutex_unlock(&sfp->f_mutex); return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, @@ -1018,35 +1042,33 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); - for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE; - ++val, srp = srp ? srp->nextrp : srp) { + val = 0; + list_for_each_entry(srp, &sfp->rq_list, entry) { + if (val > SG_MAX_QUEUE) + break; memset(&rinfo[val], 0, SZ_SG_REQ_INFO); - if (srp) { - rinfo[val].req_state = srp->done + 1; - rinfo[val].problem = - srp->header.masked_status & - srp->header.host_status & - srp->header.driver_status; - if (srp->done) - rinfo[val].duration = - srp->header.duration; - else { - ms = jiffies_to_msecs(jiffies); - rinfo[val].duration = - (ms > srp->header.duration) ? - (ms - srp->header.duration) : 0; - } - rinfo[val].orphan = srp->orphan; - rinfo[val].sg_io_owned = - srp->sg_io_owned; - rinfo[val].pack_id = - srp->header.pack_id; - rinfo[val].usr_ptr = - srp->header.usr_ptr; + rinfo[val].req_state = srp->done + 1; + rinfo[val].problem = + srp->header.masked_status & + srp->header.host_status & + srp->header.driver_status; + if (srp->done) + rinfo[val].duration = + srp->header.duration; + else { + ms = jiffies_to_msecs(jiffies); + rinfo[val].duration = + (ms > srp->header.duration) ? + (ms - srp->header.duration) : 0; } + rinfo[val].orphan = srp->orphan; + rinfo[val].sg_io_owned = srp->sg_io_owned; + rinfo[val].pack_id = srp->header.pack_id; + rinfo[val].usr_ptr = srp->header.usr_ptr; + val++; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); - result = __copy_to_user(p, rinfo, + result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? 
-EFAULT : 0; kfree(rinfo); @@ -1152,7 +1174,7 @@ sg_poll(struct file *filp, poll_table * wait) return POLLERR; poll_wait(filp, &sfp->read_wait, wait); read_lock_irqsave(&sfp->rq_list_lock, iflags); - for (srp = sfp->headrp; srp; srp = srp->nextrp) { + list_for_each_entry(srp, &sfp->rq_list, entry) { /* if any read waiting, flag it */ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) res = POLLIN | POLLRDNORM; @@ -1269,6 +1291,7 @@ sg_rq_end_io_usercontext(struct work_struct *work) struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); kref_put(&sfp->f_ref, sg_remove_sfp); } @@ -1732,13 +1755,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) md = &map_data; if (md) { - if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) + mutex_lock(&sfp->f_mutex); + if (dxfer_len <= rsv_schp->bufflen && + !sfp->res_in_use) { + sfp->res_in_use = 1; sg_link_reserve(sfp, srp, dxfer_len); - else { + } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) { + mutex_unlock(&sfp->f_mutex); + return -EBUSY; + } else { res = sg_build_indirect(req_schp, sfp, dxfer_len); - if (res) + if (res) { + mutex_unlock(&sfp->f_mutex); return res; + } } + mutex_unlock(&sfp->f_mutex); md->pages = req_schp->pages; md->page_order = req_schp->page_order; @@ -1806,8 +1838,6 @@ sg_finish_rem_req(Sg_request *srp) else sg_remove_scat(sfp, req_schp); - sg_remove_request(sfp, srp); - return ret; } @@ -1831,6 +1861,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) int sg_tablesize = sfp->parentdp->sg_tablesize; int blk_size = buff_size, order; gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; + struct sg_device *sdp = sfp->parentdp; if (blk_size < 0) return -EFAULT; @@ -1856,7 +1887,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) scatter_elem_sz_prev = num; } - if (sfp->low_dma) + if (sdp->device->host->unchecked_isa_dma) gfp_mask |= GFP_DMA; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) @@ -2026,8 +2057,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) req_schp->pages = NULL; req_schp->page_order = 0; req_schp->sglist_len = 0; - sfp->save_scat_len = 0; srp->res_used = 0; + /* Called without mutex lock to avoid deadlock */ + sfp->res_in_use = 0; } static Sg_request * @@ -2037,7 +2069,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) unsigned long iflags; write_lock_irqsave(&sfp->rq_list_lock, iflags); - for (resp = sfp->headrp; resp; resp = resp->nextrp) { + list_for_each_entry(resp, &sfp->rq_list, entry) { /* look for requests that are ready + not SG_IO owned */ if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { @@ -2055,70 +2087,45 @@ sg_add_request(Sg_fd * sfp) { int k; unsigned long iflags; - Sg_request *resp; Sg_request *rp = sfp->req_arr; write_lock_irqsave(&sfp->rq_list_lock, iflags); - resp = sfp->headrp; - if (!resp) { - memset(rp, 0, sizeof (Sg_request)); - rp->parentfp = sfp; - resp = rp; - sfp->headrp = resp; - } else { - if (0 == sfp->cmd_q) - resp = NULL; /* command queuing disallowed */ - else { - for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { - if (!rp->parentfp) - break; - } - if (k < SG_MAX_QUEUE) { - memset(rp, 0, sizeof (Sg_request)); - rp->parentfp = sfp; - while (resp->nextrp) - resp = resp->nextrp; - resp->nextrp = rp; - resp = rp; - } else - resp = NULL; + if (!list_empty(&sfp->rq_list)) { + if (!sfp->cmd_q) + goto out_unlock; + + for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { + if (!rp->parentfp) + break; } + if (k >= SG_MAX_QUEUE) + goto 
out_unlock; } - if (resp) { - resp->nextrp = NULL; - resp->header.duration = jiffies_to_msecs(jiffies); - } + memset(rp, 0, sizeof (Sg_request)); + rp->parentfp = sfp; + rp->header.duration = jiffies_to_msecs(jiffies); + list_add_tail(&rp->entry, &sfp->rq_list); write_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return resp; + return rp; +out_unlock: + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return NULL; } /* Return of 1 for found; 0 for not found */ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { - Sg_request *prev_rp; - Sg_request *rp; unsigned long iflags; int res = 0; - if ((!sfp) || (!srp) || (!sfp->headrp)) + if (!sfp || !srp || list_empty(&sfp->rq_list)) return res; write_lock_irqsave(&sfp->rq_list_lock, iflags); - prev_rp = sfp->headrp; - if (srp == prev_rp) { - sfp->headrp = prev_rp->nextrp; - prev_rp->parentfp = NULL; + if (!list_empty(&srp->entry)) { + list_del(&srp->entry); + srp->parentfp = NULL; res = 1; - } else { - while ((rp = prev_rp->nextrp)) { - if (srp == rp) { - prev_rp->nextrp = rp->nextrp; - rp->parentfp = NULL; - res = 1; - break; - } - prev_rp = rp; - } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return res; @@ -2137,13 +2144,12 @@ sg_add_sfp(Sg_device * sdp) init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); - + INIT_LIST_HEAD(&sfp->rq_list); kref_init(&sfp->f_ref); + mutex_init(&sfp->f_mutex); sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; sfp->force_packid = SG_DEF_FORCE_PACK_ID; - sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? - sdp->device->host->unchecked_isa_dma : 1; sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; @@ -2177,10 +2183,18 @@ sg_remove_sfp_usercontext(struct work_struct *work) { struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; + Sg_request *srp; + unsigned long iflags; /* Cleanup any responses which were never read(). */ - while (sfp->headrp) - sg_finish_rem_req(sfp->headrp); + write_lock_irqsave(&sfp->rq_list_lock, iflags); + while (!list_empty(&sfp->rq_list)) { + srp = list_first_entry(&sfp->rq_list, Sg_request, entry); + sg_finish_rem_req(srp); + list_del(&srp->entry); + srp->parentfp = NULL; + } + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, @@ -2214,20 +2228,6 @@ sg_remove_sfp(struct kref *kref) schedule_work(&sfp->ew.work); } -static int -sg_res_in_use(Sg_fd * sfp) -{ - const Sg_request *srp; - unsigned long iflags; - - read_lock_irqsave(&sfp->rq_list_lock, iflags); - for (srp = sfp->headrp; srp; srp = srp->nextrp) - if (srp->res_used) - break; - read_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return srp ? 
1 : 0; -} - #ifdef CONFIG_SCSI_PROC_FS static int sg_idr_max_id(int id, void *p, void *data) @@ -2597,7 +2597,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) /* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { - int k, m, new_interface, blen, usg; + int k, new_interface, blen, usg; Sg_request *srp; Sg_fd *fp; const sg_io_hdr_t *hp; @@ -2613,17 +2613,15 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) jiffies_to_msecs(fp->timeout), fp->reserve.bufflen, (int) fp->reserve.k_use_sg, - (int) fp->low_dma); + (int) sdp->device->host->unchecked_isa_dma); seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", (int) fp->cmd_q, (int) fp->force_packid, (int) fp->keep_orphan); - for (m = 0, srp = fp->headrp; - srp != NULL; - ++m, srp = srp->nextrp) { + list_for_each_entry(srp, &fp->rq_list, entry) { hp = &srp->header; new_interface = (hp->interface_id == '\0') ? 0 : 1; if (srp->res_used) { - if (new_interface && + if (new_interface && (SG_FLAG_MMAP_IO & hp->flags)) cp = " mmap>> "; else @@ -2654,7 +2652,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, (int) srp->data.cmd_opcode); } - if (0 == m) + if (list_empty(&fp->rq_list)) seq_puts(s, " No requests active\n"); read_unlock(&fp->rq_list_lock); } diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c index 6d215e2fb46d..71b4b91d2215 100644 --- a/drivers/scsi/sgiwd93.c +++ b/drivers/scsi/sgiwd93.c @@ -297,7 +297,7 @@ out: return err; } -static int __exit sgiwd93_remove(struct platform_device *pdev) +static int sgiwd93_remove(struct platform_device *pdev) { struct Scsi_Host *host = platform_get_drvdata(pdev); struct ip22_hostdata *hdata = (struct ip22_hostdata *) host->hostdata; diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index 76278072147e..1f9a087daf69 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -117,7 +117,7 @@ static int snirm710_probe(struct platform_device *dev) return -ENODEV; } -static int __exit snirm710_driver_remove(struct platform_device *dev) +static int snirm710_driver_remove(struct platform_device *dev) { struct Scsi_Host *host = dev_get_drvdata(&dev->dev); struct NCR_700_Host_Parameters *hostdata = diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c index d30280326bde..269ddf791a73 100644 --- a/drivers/scsi/snic/snic_debugfs.c +++ b/drivers/scsi/snic/snic_debugfs.c @@ -548,7 +548,7 @@ snic_trc_debugfs_init(void) &snic_trc_fops); if (!de) { - SNIC_ERR("Cann't create trace file.\n"); + SNIC_ERR("Cannot create trace file.\n"); return ret; } diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 5b23175a584c..9b20643ab49d 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/spinlock.h> #include <linux/ktime.h> +#include <linux/reboot.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/byteorder.h> @@ -38,9 +39,9 @@ #include <scsi/scsi_eh.h> #define DRV_NAME "stex" -#define ST_DRIVER_VERSION "5.00.0000.01" -#define ST_VER_MAJOR 5 -#define ST_VER_MINOR 00 +#define ST_DRIVER_VERSION "6.02.0000.01" +#define ST_VER_MAJOR 6 +#define ST_VER_MINOR 02 #define ST_OEM 0000 #define ST_BUILD_VER 01 @@ -64,6 +65,13 @@ enum { YI2H_INT_C = 0xa0, YH2I_REQ = 0xc0, YH2I_REQ_HI = 0xc4, + PSCRATCH0 = 0xb0, + PSCRATCH1 = 0xb4, + PSCRATCH2 = 0xb8, + PSCRATCH3 = 0xbc, + PSCRATCH4 = 0xc8, + MAILBOX_BASE = 0x1000, 
+ MAILBOX_HNDSHK_STS = 0x0, /* MU register value */ MU_INBOUND_DOORBELL_HANDSHAKE = (1 << 0), @@ -87,7 +95,7 @@ enum { MU_STATE_STOP = 5, MU_STATE_NOCONNECT = 6, - MU_MAX_DELAY = 120, + MU_MAX_DELAY = 50, MU_HANDSHAKE_SIGNATURE = 0x55aaaa55, MU_HANDSHAKE_SIGNATURE_HALF = 0x5a5a0000, MU_HARD_RESET_WAIT = 30000, @@ -135,6 +143,7 @@ enum { st_yosemite = 2, st_seq = 3, st_yel = 4, + st_P3 = 5, PASSTHRU_REQ_TYPE = 0x00000001, PASSTHRU_REQ_NO_WAKEUP = 0x00000100, @@ -339,6 +348,7 @@ struct st_hba { u16 rq_size; u16 sts_count; u8 supports_pm; + int msi_lock; }; struct st_card_info { @@ -353,6 +363,12 @@ struct st_card_info { u16 sts_count; }; +static int S6flag; +static int stex_halt(struct notifier_block *nb, ulong event, void *buf); +static struct notifier_block stex_notifier = { + stex_halt, NULL, 0 +}; + static int msi; module_param(msi, int, 0); MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)"); @@ -540,11 +556,15 @@ stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag) ++hba->req_head; hba->req_head %= hba->rq_count+1; - - writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); - readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ - writel(addr, hba->mmio_base + YH2I_REQ); - readl(hba->mmio_base + YH2I_REQ); /* flush */ + if (hba->cardtype == st_P3) { + writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); + writel(addr, hba->mmio_base + YH2I_REQ); + } else { + writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI); + readl(hba->mmio_base + YH2I_REQ_HI); /* flush */ + writel(addr, hba->mmio_base + YH2I_REQ); + readl(hba->mmio_base + YH2I_REQ); /* flush */ + } } static void return_abnormal_state(struct st_hba *hba, int status) @@ -974,15 +994,31 @@ static irqreturn_t stex_ss_intr(int irq, void *__hba) spin_lock_irqsave(hba->host->host_lock, flags); - data = readl(base + YI2H_INT); - if (data && data != 0xffffffff) { - /* clear the interrupt */ - writel(data, base + YI2H_INT_C); - stex_ss_mu_intr(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); - if (unlikely(data & SS_I2H_REQUEST_RESET)) - queue_work(hba->work_q, &hba->reset_work); - return IRQ_HANDLED; + if (hba->cardtype == st_yel) { + data = readl(base + YI2H_INT); + if (data && data != 0xffffffff) { + /* clear the interrupt */ + writel(data, base + YI2H_INT_C); + stex_ss_mu_intr(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (unlikely(data & SS_I2H_REQUEST_RESET)) + queue_work(hba->work_q, &hba->reset_work); + return IRQ_HANDLED; + } + } else { + data = readl(base + PSCRATCH4); + if (data != 0xffffffff) { + if (data != 0) { + /* clear the interrupt */ + writel(data, base + PSCRATCH1); + writel((1 << 22), base + YH2I_INT); + } + stex_ss_mu_intr(hba); + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (unlikely(data & SS_I2H_REQUEST_RESET)) + queue_work(hba->work_q, &hba->reset_work); + return IRQ_HANDLED; + } } spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -1080,19 +1116,36 @@ static int stex_ss_handshake(struct st_hba *hba) struct st_msg_header *msg_h; struct handshake_frame *h; __le32 *scratch; - u32 data, scratch_size; + u32 data, scratch_size, mailboxdata, operationaldata; unsigned long before; int ret = 0; before = jiffies; - while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) { - if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { - printk(KERN_ERR DRV_NAME - "(%s): firmware not operational\n", - pci_name(hba->pdev)); - return -1; + + if (hba->cardtype == st_yel) { + operationaldata = readl(base + YIOA_STATUS); + while 
(operationaldata != SS_MU_OPERATIONAL) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): firmware not operational\n", + pci_name(hba->pdev)); + return -1; + } + msleep(1); + operationaldata = readl(base + YIOA_STATUS); + } + } else { + operationaldata = readl(base + PSCRATCH3); + while (operationaldata != SS_MU_OPERATIONAL) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): firmware not operational\n", + pci_name(hba->pdev)); + return -1; + } + msleep(1); + operationaldata = readl(base + PSCRATCH3); } - msleep(1); } msg_h = (struct st_msg_header *)hba->dma_mem; @@ -1111,30 +1164,60 @@ static int stex_ss_handshake(struct st_hba *hba) scratch_size = (hba->sts_count+1)*sizeof(u32); h->scratch_size = cpu_to_le32(scratch_size); - data = readl(base + YINT_EN); - data &= ~4; - writel(data, base + YINT_EN); - writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); - readl(base + YH2I_REQ_HI); - writel(hba->dma_handle, base + YH2I_REQ); - readl(base + YH2I_REQ); /* flush */ + if (hba->cardtype == st_yel) { + data = readl(base + YINT_EN); + data &= ~4; + writel(data, base + YINT_EN); + writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); + readl(base + YH2I_REQ_HI); + writel(hba->dma_handle, base + YH2I_REQ); + readl(base + YH2I_REQ); /* flush */ + } else { + data = readl(base + YINT_EN); + data &= ~(1 << 0); + data &= ~(1 << 2); + writel(data, base + YINT_EN); + if (hba->msi_lock == 0) { + /* P3 MSI Register cannot access twice */ + writel((1 << 6), base + YH2I_INT); + hba->msi_lock = 1; + } + writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI); + writel(hba->dma_handle, base + YH2I_REQ); + } - scratch = hba->scratch; before = jiffies; - while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { - if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { - printk(KERN_ERR DRV_NAME - "(%s): no signature after handshake frame\n", - pci_name(hba->pdev)); - ret = -1; - break; + scratch = hba->scratch; + if (hba->cardtype == st_yel) { + while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): no signature after handshake frame\n", + pci_name(hba->pdev)); + ret = -1; + break; + } + rmb(); + msleep(1); + } + } else { + mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); + while (mailboxdata != SS_STS_HANDSHAKE) { + if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) { + printk(KERN_ERR DRV_NAME + "(%s): no signature after handshake frame\n", + pci_name(hba->pdev)); + ret = -1; + break; + } + rmb(); + msleep(1); + mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS); } - rmb(); - msleep(1); } - memset(scratch, 0, scratch_size); msg_h->flag = 0; + return ret; } @@ -1144,8 +1227,10 @@ static int stex_handshake(struct st_hba *hba) unsigned long flags; unsigned int mu_status; - err = (hba->cardtype == st_yel) ? 
@@ -1144,8 +1227,10 @@ static int stex_handshake(struct st_hba *hba)
	unsigned long flags;
	unsigned int mu_status;

-	err = (hba->cardtype == st_yel) ?
-		stex_ss_handshake(hba) : stex_common_handshake(hba);
+	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
+		err = stex_ss_handshake(hba);
+	else
+		err = stex_common_handshake(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	mu_status = hba->mu_status;
	if (err == 0) {
@@ -1190,6 +1275,15 @@ static int stex_abort(struct scsi_cmnd *cmd)
			writel(data, base + YI2H_INT_C);
			stex_ss_mu_intr(hba);
		}
+	} else if (hba->cardtype == st_P3) {
+		data = readl(base + PSCRATCH4);
+		if (data == 0xffffffff)
+			goto fail_out;
+		if (data != 0) {
+			writel(data, base + PSCRATCH1);
+			writel((1 << 22), base + YH2I_INT);
+		}
+		stex_ss_mu_intr(hba);
	} else {
		data = readl(base + ODBL);
		if (data == 0 || data == 0xffffffff)
@@ -1197,7 +1291,6 @@
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
-		stex_mu_intr(hba, data);
	}

	if (hba->wait_ccb == NULL) {
@@ -1293,6 +1386,12 @@ static void stex_ss_reset(struct st_hba *hba)
	ssleep(5);
}

+static void stex_p3_reset(struct st_hba *hba)
+{
+	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
+	ssleep(5);
+}
+
static int stex_do_reset(struct st_hba *hba)
{
	unsigned long flags;
@@ -1329,7 +1428,8 @@ static int stex_do_reset(struct st_hba *hba)
		stex_hard_reset(hba);
	else if (hba->cardtype == st_yel)
		stex_ss_reset(hba);
-
+	else if (hba->cardtype == st_P3)
+		stex_p3_reset(hba);

	return_abnormal_state(hba, DID_RESET);
@@ -1414,6 +1514,26 @@ static struct pci_device_id stex_pci_tbl[] = {
	/* st_yel */
	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
+
+	/* st_P3, pluto */
+	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
+		0x8870, 0, 0, st_P3 },
+	/* st_P3, p3 */
+	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
+		0x4300, 0, 0, st_P3 },
+
+	/* st_P3, SymplyStor4E */
+	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+		0x4311, 0, 0, st_P3 },
+	/* st_P3, SymplyStor8E */
+	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+		0x4312, 0, 0, st_P3 },
+	/* st_P3, SymplyStor4 */
+	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+		0x4321, 0, 0, st_P3 },
+	/* st_P3, SymplyStor8 */
+	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
+		0x4322, 0, 0, st_P3 },
	{ }	/* terminate list */
};
@@ -1482,6 +1602,19 @@ static struct st_card_info stex_card_info[] = {
		.map_sg		= stex_ss_map_sg,
		.send		= stex_ss_send_cmd,
	},
+
+	/* st_P3 */
+	{
+		.max_id		= 129,
+		.max_lun	= 256,
+		.max_channel	= 0,
+		.rq_count	= 801,
+		.rq_size	= 512,
+		.sts_count	= 801,
+		.alloc_rq	= stex_ss_alloc_req,
+		.map_sg		= stex_ss_map_sg,
+		.send		= stex_ss_send_cmd,
+	},
};

static int stex_set_dma_mask(struct pci_dev * pdev)
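The new table entries match on vendor/device plus subsystem vendor/device, and carry the card type in the final driver_data field; stex_probe() reads it back through id->driver_data to pick an entry in stex_card_info[]. A stripped-down sketch of the pattern, with hypothetical IDs and names:

	#include <linux/pci.h>

	enum my_card { card_a, card_b };

	static const struct pci_device_id my_pci_tbl[] = {
		/* vendor, device, subvendor, subdevice, class, class_mask, driver_data */
		{ 0x105a, 0x8870, 0x105a, 0x8870, 0, 0, card_a },
		{ 0x105a, 0x8871, PCI_ANY_ID, PCI_ANY_ID, 0, 0, card_b },
		{ }	/* terminator */
	};

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		enum my_card type = id->driver_data;	/* filled from the matching row */

		return (type == card_a || type == card_b) ? 0 : -ENODEV;
	}

Matching on the subsystem IDs is what lets several boards that share device ID 0x8871 (SymplyStor4E, SymplyStor8E, and so on) be enumerated as distinct products while all mapping to the same st_P3 card type.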
@@ -1502,7 +1635,7 @@ static int stex_request_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;
	int status;

-	if (msi) {
+	if (msi || hba->cardtype == st_P3) {
		status = pci_enable_msi(pdev);
		if (status != 0)
			printk(KERN_ERR DRV_NAME
@@ -1513,7 +1646,8 @@ static int stex_request_irq(struct st_hba *hba)
	} else
		hba->msi_enabled = 0;

-	status = request_irq(pdev->irq, hba->cardtype == st_yel ?
+	status = request_irq(pdev->irq,
+		(hba->cardtype == st_yel || hba->cardtype == st_P3) ?
		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);

	if (status != 0) {
@@ -1546,6 +1680,9 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)

	pci_set_master(pdev);

+	S6flag = 0;
+	register_reboot_notifier(&stex_notifier);
+
	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

	if (!host) {
@@ -1597,12 +1734,12 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		case 0x4265:
			break;
		default:
-			if (hba->cardtype == st_yel)
+			if (hba->cardtype == st_yel || hba->cardtype == st_P3)
				hba->supports_pm = 1;
		}

	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
-	if (hba->cardtype == st_yel)
+	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
		sts_offset += (ci->sts_count+1) * sizeof(u32);
	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
	hba->dma_size = cp_offset + sizeof(struct st_frame);
@@ -1642,7 +1779,7 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		goto out_pci_free;
	}

-	if (hba->cardtype == st_yel)
+	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
	hba->copy_buffer = hba->dma_mem + cp_offset;
@@ -1653,8 +1790,9 @@ static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	hba->map_sg = ci->map_sg;
	hba->send = ci->send;
	hba->mu_status = MU_STATE_STARTING;
+	hba->msi_lock = 0;

-	if (hba->cardtype == st_yel)
+	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
		host->sg_tablesize = 38;
	else
		host->sg_tablesize = 32;
@@ -1736,28 +1874,29 @@ static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)

	spin_lock_irqsave(hba->host->host_lock, flags);

-	if (hba->cardtype == st_yel && hba->supports_pm == 1)
-	{
-		if(st_sleep_mic == ST_NOTHANDLED)
-		{
+	if ((hba->cardtype == st_yel || hba->cardtype == st_P3) &&
+		hba->supports_pm == 1) {
+		if (st_sleep_mic == ST_NOTHANDLED) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			return;
		}
	}
	req = hba->alloc_rq(hba);
-	if (hba->cardtype == st_yel) {
+	if (hba->cardtype == st_yel || hba->cardtype == st_P3) {
		msg_h = (struct st_msg_header *)req - 1;
		memset(msg_h, 0, hba->rq_size);
	} else
		memset(req, 0, hba->rq_size);

-	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel)
+	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel
+		|| hba->cardtype == st_P3)
		&& st_sleep_mic == ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = CTLR_SHUTDOWN;
-	} else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) {
+	} else if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
+		&& st_sleep_mic != ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
@@ -1768,16 +1907,13 @@ static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
		req->cdb[2] = CTLR_POWER_SAVING;
	}
-
	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
-
	hba->send(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
	before = jiffies;
	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
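The probe path above now registers a reboot notifier (stex_notifier, defined near the top of the file); its handler, stex_halt() further down, only sets S6flag so that stex_shutdown() can tell a reboot apart from a poweroff. The registration pattern in isolation, as a hedged sketch with hypothetical names:

	#include <linux/notifier.h>
	#include <linux/reboot.h>

	static int my_halt(struct notifier_block *nb, unsigned long event, void *buf)
	{
		/* event is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
		return NOTIFY_OK;
	}

	static struct notifier_block my_reboot_nb = {
		.notifier_call = my_halt,
	};

	/* register_reboot_notifier(&my_reboot_nb) at init,
	 * unregister_reboot_notifier(&my_reboot_nb) at teardown. */

Two quirks worth noting: the patch initializes its notifier_block positionally ({ stex_halt, NULL, 0 }) where designated initializers are the usual style, and registering a file-scope notifier from probe() means a second adapter would register the same node again.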
@@ -1821,24 +1957,30 @@ static void stex_remove(struct pci_dev *pdev)
	scsi_host_put(hba->host);

	pci_disable_device(pdev);
+
+	unregister_reboot_notifier(&stex_notifier);
}

static void stex_shutdown(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

-	if (hba->supports_pm == 0)
+	if (hba->supports_pm == 0) {
		stex_hba_stop(hba, ST_IGNORED);
-	else
+	} else if (hba->supports_pm == 1 && S6flag) {
+		unregister_reboot_notifier(&stex_notifier);
+		stex_hba_stop(hba, ST_S6);
+	} else
		stex_hba_stop(hba, ST_S5);
}

-static int stex_choice_sleep_mic(pm_message_t state)
+static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
{
	switch (state.event) {
	case PM_EVENT_SUSPEND:
		return ST_S3;
	case PM_EVENT_HIBERNATE:
+		hba->msi_lock = 0;
		return ST_S4;
	default:
		return ST_NOTHANDLED;
@@ -1849,8 +1991,9 @@ static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

-	if (hba->cardtype == st_yel && hba->supports_pm == 1)
-		stex_hba_stop(hba, stex_choice_sleep_mic(state));
+	if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
+		&& hba->supports_pm == 1)
+		stex_hba_stop(hba, stex_choice_sleep_mic(hba, state));
	else
		stex_hba_stop(hba, ST_IGNORED);
	return 0;
@@ -1864,6 +2007,12 @@ static int stex_resume(struct pci_dev *pdev)
	stex_handshake(hba);
	return 0;
}
+
+static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf)
+{
+	S6flag = 1;
+	return NOTIFY_OK;
+}
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);

static struct pci_driver stex_pci_driver = {
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 016639d7fef1..ae966dc3bbc5 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -476,6 +476,9 @@ struct storvsc_device {
	 */
	u64 node_name;
	u64 port_name;
+#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
+	struct fc_rport *rport;
+#endif
};

struct hv_host_device {
@@ -864,7 +867,7 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
	 * We will however populate all the slots to evenly distribute
	 * the load.
	 */
-	stor_device->stor_chns = kzalloc(sizeof(void *) * num_possible_cpus(),
+	stor_device->stor_chns = kcalloc(num_possible_cpus(), sizeof(void *),
					 GFP_KERNEL);
	if (stor_device->stor_chns == NULL)
		return -ENOMEM;
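The kzalloc-to-kcalloc conversion above is not purely cosmetic: kcalloc(n, size, flags) checks the n * size multiplication for overflow and returns NULL instead of quietly allocating a short buffer, which an open-coded sizeof(void *) * num_possible_cpus() would do if the product wrapped. Roughly what the helper guards against, as a sketch:

	#include <linux/limits.h>
	#include <linux/slab.h>

	static void **alloc_ptr_array(size_t n)
	{
		/* the overflow test kcalloc(n, sizeof(void *), GFP_KERNEL) performs */
		if (n != 0 && n > SIZE_MAX / sizeof(void *))
			return NULL;
		return kzalloc(n * sizeof(void *), GFP_KERNEL);
	}

With num_possible_cpus() the product cannot realistically overflow; the change is about keeping the allocation idiom uniformly safe rather than fixing a live bug.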
@@ -1189,8 +1192,6 @@ static void storvsc_on_channel_callback(void *context)
			break;
		}
	} while (1);
-
-	return;
}

static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
@@ -1823,19 +1824,27 @@ static int storvsc_probe(struct hv_device *device,
		target = (device->dev_instance.b[5] << 8 |
			 device->dev_instance.b[4]);
		ret = scsi_add_device(host, 0, target, 0);
-		if (ret) {
-			scsi_remove_host(host);
-			goto err_out2;
-		}
+		if (ret)
+			goto err_out3;
	}
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
	if (host->transportt == fc_transport_template) {
+		struct fc_rport_identifiers ids = {
+			.roles = FC_PORT_ROLE_FCP_DUMMY_INITIATOR,
+		};
+
		fc_host_node_name(host) = stor_device->node_name;
		fc_host_port_name(host) = stor_device->port_name;
+		stor_device->rport = fc_remote_port_add(host, 0, &ids);
+		if (!stor_device->rport)
+			goto err_out3;
	}
#endif
	return 0;

+err_out3:
+	scsi_remove_host(host);
+
err_out2:
	/*
	 * Once we have connected with the host, we would need to
@@ -1861,8 +1870,10 @@ static int storvsc_remove(struct hv_device *dev)
	struct Scsi_Host *host = stor_device->host;

#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
-	if (host->transportt == fc_transport_template)
+	if (host->transportt == fc_transport_template) {
+		fc_remote_port_delete(stor_device->rport);
		fc_remove_host(host);
+	}
#endif
	scsi_remove_host(host);
	storvsc_dev_remove(dev);
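The storvsc_probe() fix above swaps an open-coded scsi_remove_host()-plus-goto for a new err_out3 label, restoring the usual kernel unwind discipline: one label per acquired resource, jumped to in reverse order of acquisition. Schematically, with all helpers hypothetical:

	static int acquire_a(void);
	static int acquire_b(void);
	static void release_a(void);

	static int demo_probe(void)
	{
		int err;

		err = acquire_a();
		if (err)
			goto out;
		err = acquire_b();
		if (err)
			goto undo_a;	/* only A is held at this point */
		return 0;

	undo_a:
		release_a();
	out:
		return err;
	}

The payoff shows immediately: once fc_remote_port_add() can also fail after scsi_add_device(), both failure sites funnel through the same teardown instead of duplicating it.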
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 096e95b911bd..abc7e87937cc 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -130,19 +130,12 @@ enum {
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

-/* Interrupt configuration options */
-enum {
-	UFSHCD_INT_DISABLE,
-	UFSHCD_INT_ENABLE,
-	UFSHCD_INT_CLEAR,
-};
-
#define ufshcd_set_eh_in_progress(h) \
-	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
-	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
+	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
-	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
@@ -540,15 +533,14 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
-	/* allow fall through */
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
-	/* allow fall through */
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
+		break;
	}

	return intr_mask;
@@ -573,12 +565,12 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 * the host controller
 * @hba: pointer to adapter instance
 *
- * Returns 1 if device present, 0 if no device detected
+ * Returns true if device present, false if no device detected
 */
-static inline int ufshcd_is_device_present(struct ufs_hba *hba)
+static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
-						DEVICE_PRESENT) ? 1 : 0;
+						DEVICE_PRESENT) ? true : false;
}

@@ -668,16 +660,7 @@ static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
-	/*
-	 * The mask 0xFF is for the following HCS register bits
-	 *  Bit		Description
-	 *  0		Device Present
-	 *  1		UTRLRDY
-	 *  2		UTMRLRDY
-	 *  3		UCRDY
-	 * 4-7		reserved
-	 */
-	return ((reg & 0xFF) >> 1) ^ 0x07;
+	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}

@@ -820,11 +803,12 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba)
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
- * Returns zero if controller is active, 1 otherwise
+ * Returns false if controller is active, true otherwise
 */
-static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
+static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
-	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
+	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
+		? false : true;
}

static const char *ufschd_uic_link_state_to_string(
@@ -1478,7 +1462,7 @@ start:
		break;
	}
	/*
-	 * If we here, it means gating work is either done or
+	 * If we are here, it means gating work is either done or
	 * currently running. Hence, fall through to cancel gating
	 * work and to enable clocks.
	 */
@@ -3103,18 +3087,7 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba, u8 *buf, u32 size)
{
-	int err = 0;
-	int retries;
-
-	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-		/* Read descriptor*/
-		err = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
-		if (!err)
-			break;
-		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
-	}
-
-	return err;
+	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}

static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
@@ -4272,24 +4245,16 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
{
	int ret = 0;
	u8 lun_qdepth;
-	int retries;
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	lun_qdepth = hba->nutrs;
-	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-		/* Read descriptor*/
-		ret = ufshcd_read_unit_desc_param(hba,
-				  ufshcd_scsi_to_upiu_lun(sdev->lun),
-				  UNIT_DESC_PARAM_LU_Q_DEPTH,
-				  &lun_qdepth,
-				  sizeof(lun_qdepth));
-		if (!ret || ret == -ENOTSUPP)
-			break;
-
-		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, ret);
-	}
+	ret = ufshcd_read_unit_desc_param(hba,
+					  ufshcd_scsi_to_upiu_lun(sdev->lun),
+					  UNIT_DESC_PARAM_LU_Q_DEPTH,
+					  &lun_qdepth,
+					  sizeof(lun_qdepth));

	/* Some WLUN doesn't support unit descriptor */
	if (ret == -EOPNOTSUPP)
@@ -4717,7 +4682,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
-	val &= 0xFFFF; /* 2 bytes */
+	val &= MASK_EE_STATUS;
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
@@ -4745,7 +4710,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
-	val &= 0xFFFF; /* 2 bytes */
+	val &= MASK_EE_STATUS;
	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
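The rewritten ufshcd_get_lists_status() is the standard all-bits-set test: mask the register with a composite ready mask (UFSHCD_STATUS_READY, defined in ufshci.h below) and compare the result against the mask itself, so the function returns 0 only when every required bit is up. The shape in isolation, with invented bit values rather than the real HCS layout:

	#include <linux/types.h>

	#define RDY_A		(1U << 1)	/* illustrative bits only */
	#define RDY_B		(1U << 2)
	#define RDY_C		(1U << 3)
	#define ALL_READY	(RDY_A | RDY_B | RDY_C)

	/* 0 when all ready bits are set, non-zero otherwise */
	static inline int lists_not_ready(u32 reg)
	{
		return !((reg & ALL_READY) == ALL_READY);
	}

Unlike the old ((reg & 0xFF) >> 1) ^ 0x07 arithmetic, the masked comparison neither depends on the ready bits being adjacent nor needs a comment table to decode.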
@@ -5960,24 +5925,6 @@ out:
	return icc_level;
}

-static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
-{
-	int ret = 0;
-	int retries;
-
-	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
-		/* write attribute */
-		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
-			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
-		if (!ret)
-			break;
-
-		dev_dbg(hba->dev, "%s: failed with error %d\n", __func__, ret);
-	}
-
-	return ret;
-}
-
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
	int ret;
@@ -5998,8 +5945,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
			__func__, hba->init_prefetch_data.icc_level);

-	ret = ufshcd_set_icc_levels_attr(hba,
-		hba->init_prefetch_data.icc_level);
+	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+		&hba->init_prefetch_data.icc_level);

	if (ret)
		dev_err(hba->dev,
@@ -8000,7 +7948,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
		INIT_WORK(&hba->clk_scaling.resume_work,
			  ufshcd_clk_scaling_resume_work);

-		snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+		snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
			 host->host_no);
		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d14e9b965d1e..f60145d4a66e 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -48,6 +48,7 @@ enum {
	REG_UFS_VERSION				= 0x08,
	REG_CONTROLLER_DEV_ID			= 0x10,
	REG_CONTROLLER_PROD_ID			= 0x14,
+	REG_AUTO_HIBERNATE_IDLE_TIMER		= 0x18,
	REG_INTERRUPT_STATUS			= 0x20,
	REG_INTERRUPT_ENABLE			= 0x24,
	REG_CONTROLLER_STATUS			= 0x30,
@@ -159,6 +160,10 @@ enum {
#define DEVICE_ERROR_INDICATOR			UFS_BIT(5)
#define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK	UFS_MASK(0x7, 8)

+#define UFSHCD_STATUS_READY	(UTP_TRANSFER_REQ_LIST_READY |\
+				UTP_TASK_REQ_LIST_READY |\
+				UIC_COMMAND_READY)
+
enum {
	PWR_OK		= 0x0,
	PWR_LOCAL	= 0x01,
@@ -171,6 +176,7 @@ enum {
/* HCE - Host Controller Enable 34h */
#define CONTROLLER_ENABLE	UFS_BIT(0)
#define CONTROLLER_DISABLE	0x0
+#define CRYPTO_GENERAL_ENABLE	UFS_BIT(1)

/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR	UFS_BIT(31)
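Three hand-rolled retry loops disappear in the ufshcd hunks above because ufshcd_query_attr_retry() already centralizes that policy, so callers shrink to a single call. The generic shape of such a wrapper, sketched with hypothetical names and a made-up retry count:

	#include <linux/errno.h>
	#include <linux/types.h>

	#define MY_QUERY_RETRIES	3

	struct my_dev;
	static int do_query(struct my_dev *dev, int opcode, u32 *val);	/* assumed helper */

	static int query_with_retry(struct my_dev *dev, int opcode, u32 *val)
	{
		int err = -EIO;
		int i;

		for (i = 0; i < MY_QUERY_RETRIES; i++) {
			err = do_query(dev, opcode, val);
			if (!err)
				break;
		}
		return err;
	}

Folding the loop into one helper keeps the retry count and the give-up condition consistent across every query site instead of re-deciding them per caller.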
+	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
+
+	return 0;
+}
+
+
/**
 * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
 * @sdev:    Virtscsi target whose queue depth to change
@@ -783,6 +806,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
	.change_queue_depth = virtscsi_change_queue_depth,
	.eh_abort_handler = virtscsi_abort,
	.eh_device_reset_handler = virtscsi_device_reset,
+	.slave_alloc = virtscsi_device_alloc,

	.can_queue = 1024,
	.dma_boundary = UINT_MAX,
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 97ccb0383539..b2cf1faa819d 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -167,7 +167,7 @@ static struct parisc_device_id zalon_tbl[] = {

MODULE_DEVICE_TABLE(parisc, zalon_tbl);

-static int __exit zalon_remove(struct parisc_device *dev)
+static int zalon_remove(struct parisc_device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(&dev->dev);