| author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-30 20:57:07 +0200 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-30 20:57:07 +0200 |
| commit    | ca7ce08d6a063e0ccb91dc57f9bc213120d0d1a7 (patch) |
| tree      | 99ce2811fdf52befc76d2cad95b16040423ecd10 |
| parent    | Merge tag 'ata-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/dlem... (diff) |
| parent    | Merge patch series "scsi: fixes for targets with many LUNs, and scsi_target_b... (diff) |
| download  | linux-ca7ce08d6a063e0ccb91dc57f9bc213120d0d1a7.tar.xz, linux-ca7ce08d6a063e0ccb91dc57f9bc213120d0d1a7.zip |
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
"Updates to the usual drivers (ufs, pm80xx, libata-scsi, smartpqi,
lpfc, qla2xxx).
We have a couple of major core changes impacting other systems:
- Command Duration Limits, which spills into block and ATA
- block level Persistent Reservation Operations, which touches block,
nvme, target and dm
Both of these are added with merge commits containing a cover letter
explaining what's going on"
* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (187 commits)
scsi: core: Improve warning message in scsi_device_block()
scsi: core: Replace scsi_target_block() with scsi_block_targets()
scsi: core: Don't wait for quiesce in scsi_device_block()
scsi: core: Don't wait for quiesce in scsi_stop_queue()
scsi: core: Merge scsi_internal_device_block() and device_block()
scsi: sg: Increase number of devices
scsi: bsg: Increase number of devices
scsi: qla2xxx: Remove unused nvme_ls_waitq wait queue
scsi: ufs: ufs-pci: Add support for Intel Arrow Lake
scsi: sd: sd_zbc: Use PAGE_SECTORS_SHIFT
scsi: ufs: wb: Add explicit flush_threshold sysfs attribute
scsi: ufs: ufs-qcom: Switch to the new ICE API
scsi: ufs: dt-bindings: qcom: Add ICE phandle
scsi: ufs: ufs-mediatek: Set UFSHCD_QUIRK_MCQ_BROKEN_RTC quirk
scsi: ufs: ufs-mediatek: Set UFSHCD_QUIRK_MCQ_BROKEN_INTR quirk
scsi: ufs: core: Add host quirk UFSHCD_QUIRK_MCQ_BROKEN_RTC
scsi: ufs: core: Add host quirk UFSHCD_QUIRK_MCQ_BROKEN_INTR
scsi: ufs: core: Remove dedicated hwq for dev command
scsi: ufs: core: mcq: Fix the incorrect OCS value for the device command
scsi: ufs: dt-bindings: samsung,exynos: Drop unneeded quotes
...
177 files changed, 4837 insertions, 2244 deletions
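The command duration limits feature merged here is controlled through the new sysfs attributes documented in the first hunk below (Documentation/ABI/testing/sysfs-block-device). A minimal C sketch of how a tool might probe and enable CDL; the /dev/sda device path and the sysfs_rw() helper are hypothetical, illustrative names, not part of this series:

	#include <stdio.h>
	#include <string.h>

	/* Hypothetical helper: read or write a single-value sysfs attribute. */
	static int sysfs_rw(const char *path, const char *wr, char *rd, size_t len)
	{
		FILE *f = fopen(path, wr ? "w" : "r");

		if (!f)
			return -1;
		if (wr)
			fputs(wr, f);
		else if (!fgets(rd, len, f))
			rd[0] = '\0';
		return fclose(f);
	}

	int main(void)
	{
		char buf[8] = "";

		/* cdl_supported is RO: reads "1" if the disk implements CDL. */
		sysfs_rw("/sys/block/sda/device/cdl_supported", NULL, buf, sizeof(buf));
		if (strncmp(buf, "1", 1))
			return 1;

		/* cdl_enable is RW and defaults to "0"; writing "1" turns the
		 * feature on both in the kernel and on the device. */
		return sysfs_rw("/sys/block/sda/device/cdl_enable", "1", NULL, 0);
	}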
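Also visible below is an ioprio cleanup (block/bfq-iosched.c, block/ioprio.c) that validates priority levels with IOPRIO_PRIO_LEVEL() instead of IOPRIO_PRIO_DATA(). For context, this is the user-space interface being validated; a hedged sketch assuming the uapi <linux/ioprio.h> macros and the raw ioprio_set() syscall:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/ioprio.h>

	int main(void)
	{
		/* Best-effort class, level 4; valid levels are 0..IOPRIO_NR_LEVELS-1. */
		int prio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);

		/* No glibc wrapper; the kernel vets prio via ioprio_check_cap(). */
		if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio) < 0) {
			perror("ioprio_set");
			return 1;
		}
		printf("class=%d level=%d\n",
		       IOPRIO_PRIO_CLASS(prio), IOPRIO_PRIO_LEVEL(prio));
		return 0;
	}

ioprio_check_cap() now rejects any RT/BE level at or above IOPRIO_NR_LEVELS and any unknown class (IOPRIO_CLASS_INVALID included), which is what the block/ioprio.c hunk tightens.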
diff --git a/Documentation/ABI/testing/sysfs-block-device b/Documentation/ABI/testing/sysfs-block-device
index 7ac7b19b2f72..2d543cfa4079 100644
--- a/Documentation/ABI/testing/sysfs-block-device
+++ b/Documentation/ABI/testing/sysfs-block-device
@@ -95,3 +95,25 @@ Description:
 		This file does not exist if the HBA driver does not implement
 		support for the SATA NCQ priority feature, regardless of the
 		device support for this feature.
+
+
+What:		/sys/block/*/device/cdl_supported
+Date:		May, 2023
+KernelVersion:	v6.5
+Contact:	linux-scsi@vger.kernel.org
+Description:
+		(RO) Indicates if the device supports the command duration
+		limits feature found in some ATA and SCSI devices.
+
+
+What:		/sys/block/*/device/cdl_enable
+Date:		May, 2023
+KernelVersion:	v6.5
+Contact:	linux-scsi@vger.kernel.org
+Description:
+		(RW) For a device supporting the command duration limits
+		feature, write to the file to turn on or off the feature.
+		By default this feature is turned off.
+		Writing "1" to this file enables the use of command duration
+		limits for read and write commands in the kernel and turns on
+		the feature on the device. Writing "0" disables the feature.
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index 228aa43e14ed..d5f44fc5b9dc 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1426,6 +1426,17 @@ Description:	This entry shows the status of WriteBooster buffer flushing
 		If flushing is enabled, the device executes the flush
 		operation when the command queue is empty.
 
+What:		/sys/bus/platform/drivers/ufshcd/*/wb_flush_threshold
+What:		/sys/bus/platform/devices/*.ufs/wb_flush_threshold
+Date:		June 2023
+Contact:	Lu Hongfei <luhongfei@vivo.com>
+Description:
+		wb_flush_threshold represents the threshold for flushing WriteBooster buffer,
+		whose value expressed in unit of 10% granularity, such as '1' representing 10%,
+		'2' representing 20%, and so on.
+		If avail_wb_buff < wb_flush_threshold, it indicates that WriteBooster buffer needs to
+		be flushed, otherwise it is not necessary.
+
 What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_version
 What:		/sys/bus/platform/devices/*.ufs/device_descriptor/hpb_version
 Date:		June 2021
diff --git a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
index c5a06c048389..943dafb69529 100644
--- a/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/qcom,ufs.yaml
@@ -26,6 +26,7 @@ properties:
       - qcom,msm8994-ufshc
       - qcom,msm8996-ufshc
       - qcom,msm8998-ufshc
+      - qcom,sa8775p-ufshc
       - qcom,sc8280xp-ufshc
       - qcom,sdm845-ufshc
       - qcom,sm6350-ufshc
@@ -70,6 +71,10 @@ properties:
   power-domains:
     maxItems: 1
 
+  qcom,ice:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: phandle to the Inline Crypto Engine node
+
   reg:
     minItems: 1
     maxItems: 2
@@ -105,6 +110,7 @@ allOf:
         contains:
           enum:
             - qcom,msm8998-ufshc
+            - qcom,sa8775p-ufshc
             - qcom,sc8280xp-ufshc
            - qcom,sm8250-ufshc
            - qcom,sm8350-ufshc
@@ -187,6 +193,26 @@ allOf:
         # TODO: define clock bindings for qcom,msm8994-ufshc
 
+  - if:
+      properties:
+        qcom,ice:
+          maxItems: 1
+    then:
+      properties:
+        reg:
+          maxItems: 1
+        clocks:
+          minItems: 8
+          maxItems: 8
+    else:
+      properties:
+        reg:
+          minItems: 2
+          maxItems: 2
+        clocks:
+          minItems: 9
+          maxItems: 11
+
 unevaluatedProperties: false
 
 examples:
diff --git a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
index a9988798898d..88cc1e3a0c88 100644
--- a/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
+++ b/Documentation/devicetree/bindings/ufs/samsung,exynos-ufs.yaml
@@ -54,7 +54,7 @@ properties:
     const: ufs-phy
 
   samsung,sysreg:
-    $ref: '/schemas/types.yaml#/definitions/phandle-array'
+    $ref: /schemas/types.yaml#/definitions/phandle-array
     description: Should be phandle/offset pair. The phandle to the syscon node
                  which indicates the FSYSx sysreg interface and the offset of
                  the control register for UFS io coherency setting.
diff --git a/Documentation/scsi/arcmsr_spec.rst b/Documentation/scsi/arcmsr_spec.rst
index 83dd53bcff78..792c731b6570 100644
--- a/Documentation/scsi/arcmsr_spec.rst
+++ b/Documentation/scsi/arcmsr_spec.rst
@@ -1,3 +1,4 @@
+===================
 ARECA FIRMWARE SPEC
 ===================
diff --git a/Documentation/scsi/dc395x.rst b/Documentation/scsi/dc395x.rst
index d779e782b1cb..d92947c175a5 100644
--- a/Documentation/scsi/dc395x.rst
+++ b/Documentation/scsi/dc395x.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-======================================
-README file for the dc395x SCSI driver
-======================================
+==================
+dc395x SCSI driver
+==================
 
 Status
 ------
@@ -11,13 +11,10 @@ be safe to use. Testing with hard disks has not been done to any great
 degree and caution should be exercised if you want to attempt to use
 this driver with hard disks.
 
-This is a 2.5 only driver. For a 2.4 driver please see the original
-driver (which this driver started from) at
-http://www.garloff.de/kurt/linux/dc395/
-
-Problems, questions and patches should be submitted to the mailing
-list. Details on the list, including archives, are available at
-http://lists.twibble.org/mailman/listinfo/dc395x/
+This driver is evolved from `the original 2.4 driver
+<https://web.archive.org/web/20140129181343/http://www.garloff.de/kurt/linux/dc395/>`_.
+Problems, questions and patches should be submitted to the `Linux SCSI
+mailing list <linux-scsi@vger.kernel.org>`_.
 Parameters
 ----------
diff --git a/Documentation/scsi/g_NCR5380.rst b/Documentation/scsi/g_NCR5380.rst
index a282059fec43..b250c24fc760 100644
--- a/Documentation/scsi/g_NCR5380.rst
+++ b/Documentation/scsi/g_NCR5380.rst
@@ -1,9 +1,9 @@
 .. SPDX-License-Identifier: GPL-2.0
 .. include:: <isonum.txt>
 
-==========================================
-README file for the Linux g_NCR5380 driver
-==========================================
+================
+g_NCR5380 driver
+================
 
 Copyright |copy| 1993 Drew Eckhard
diff --git a/Documentation/scsi/index.rst b/Documentation/scsi/index.rst
index 919f3edfe1bf..f15a0f348ae4 100644
--- a/Documentation/scsi/index.rst
+++ b/Documentation/scsi/index.rst
@@ -7,6 +7,38 @@ SCSI Subsystem
 .. toctree::
    :maxdepth: 1
 
+Introduction
+============
+
+.. toctree::
+   :maxdepth: 1
+
+   scsi
+
+SCSI driver APIs
+================
+
+.. toctree::
+   :maxdepth: 1
+
+   scsi_mid_low_api
+   scsi_eh
+
+SCSI driver parameters
+======================
+
+.. toctree::
+   :maxdepth: 1
+
+   scsi-parameters
+   link_power_management_policy
+
+SCSI host adapter drivers
+=========================
+
+.. toctree::
+   :maxdepth: 1
+
    53c700
    aacraid
    advansys
@@ -25,7 +57,6 @@ SCSI Subsystem
    hpsa
    hptiop
    libsas
-   link_power_management_policy
    lpfc
    megaraid
    ncr53c8xx
@@ -33,12 +64,8 @@ SCSI Subsystem
    ppa
    qlogicfas
    scsi-changer
-   scsi_eh
    scsi_fc_transport
    scsi-generic
-   scsi_mid_low_api
-   scsi-parameters
-   scsi
    sd-parameters
    smartpqi
    st
diff --git a/Documentation/scsi/megaraid.rst b/Documentation/scsi/megaraid.rst
index 22b75a86ba72..10a6b05fc7c4 100644
--- a/Documentation/scsi/megaraid.rst
+++ b/Documentation/scsi/megaraid.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-==========================
-Notes on Management Module
-==========================
+=================================
+Megaraid Common Management Module
+=================================
 
 Overview
 --------
diff --git a/Documentation/scsi/ncr53c8xx.rst b/Documentation/scsi/ncr53c8xx.rst
index 1c79e08ec964..fd8d26dc5dab 100644
--- a/Documentation/scsi/ncr53c8xx.rst
+++ b/Documentation/scsi/ncr53c8xx.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-=================================================
-The Linux NCR53C8XX/SYM53C8XX drivers README file
-=================================================
+===========================
+NCR53C8XX/SYM53C8XX drivers
+===========================
 
 Written by Gerard Roudier <groudier@free.fr>
diff --git a/Documentation/scsi/scsi-changer.rst b/Documentation/scsi/scsi-changer.rst
index ab60e7e61a6c..5d828c7f492d 100644
--- a/Documentation/scsi/scsi-changer.rst
+++ b/Documentation/scsi/scsi-changer.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-========================================
-README for the SCSI media changer driver
-========================================
+=========================
+SCSI media changer driver
+=========================
 
 This is a driver for SCSI Medium Changer devices, which are listed
 with "Type: Medium Changer" in /proc/scsi/scsi.
diff --git a/Documentation/scsi/scsi-generic.rst b/Documentation/scsi/scsi-generic.rst
index 258505e557a6..b82ffe4d8892 100644
--- a/Documentation/scsi/scsi-generic.rst
+++ b/Documentation/scsi/scsi-generic.rst
@@ -1,15 +1,15 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-=======================================
-Notes on Linux SCSI Generic (sg) driver
-=======================================
+========================
+SCSI Generic (sg) driver
+========================
 
 20020126
 
 Introduction
 ============
 The SCSI Generic driver (sg) is one of the four "high level" SCSI device
-drivers along with sd, st and sr (disk, tape and CDROM respectively). Sg
+drivers along with sd, st and sr (disk, tape and CD-ROM respectively). Sg
 is more generalized (but lower level) than its siblings and tends to be
 used on SCSI devices that don't fit into the already serviced categories.
 Thus sg is used for scanners, CD writers and reading audio CDs digitally
@@ -22,7 +22,7 @@ and examples.
 
 Major versions of the sg driver
 ===============================
-There are three major versions of sg found in the linux kernel (lk):
+There are three major versions of sg found in the Linux kernel (lk):
 - sg version 1 (original) from 1992 to early 1999 (lk 2.2.5) . It is based
   in the sg_header interface structure.
 - sg version 2 from lk 2.2.6 in the 2.2 series. It is based on
@@ -33,34 +33,24 @@ There are three major versions of sg found in the linux kernel (lk):
 
 Sg driver documentation
 =======================
-The most recent documentation of the sg driver is kept at the Linux
-Documentation Project's (LDP) site:
+The most recent documentation of the sg driver is kept at
 
-- http://www.tldp.org/HOWTO/SCSI-Generic-HOWTO
+- https://sg.danny.cz/sg/
 
 This describes the sg version 3 driver found in the lk 2.4 series.
-The LDP renders documents in single and multiple page HTML, postscript
-and pdf. This document can also be found at:
+Documentation (large version) for the version 2 sg driver found in the
+lk 2.2 series can be found at
 
-- http://sg.danny.cz/sg/p/sg_v3_ho.html
-
-Documentation for the version 2 sg driver found in the lk 2.2 series can
-be found at http://sg.danny.cz/sg/. A larger version
-is at: http://sg.danny.cz/sg/p/scsi-generic_long.txt.
+- https://sg.danny.cz/sg/p/scsi-generic_long.txt.
 
 The original documentation for the sg driver (prior to lk 2.2.6) can be
-found at http://www.torque.net/sg/p/original/SCSI-Programming-HOWTO.txt
-and in the LDP archives.
+found in the LDP archives at
 
-A changelog with brief notes can be found in the
-/usr/src/linux/include/scsi/sg.h file. Note that the glibc maintainers copy
-and edit this file (removing its changelog for example) before placing it
-in /usr/include/scsi/sg.h . Driver debugging information and other notes
-can be found at the top of the /usr/src/linux/drivers/scsi/sg.c file.
+- https://tldp.org/HOWTO/archived/SCSI-Programming-HOWTO/index.html
 
 A more general description of the Linux SCSI subsystem of which sg is a
-part can be found at http://www.tldp.org/HOWTO/SCSI-2.4-HOWTO .
+part can be found at https://www.tldp.org/HOWTO/SCSI-2.4-HOWTO .
 
 
 Example code and utilities
@@ -73,8 +63,8 @@ There are two packages of sg utilities:
            and earlier
 =========  ==========================================================
 
-Both packages will work in the lk 2.4 series however sg3_utils offers more
-capabilities. They can be found at: http://sg.danny.cz/sg/sg3_utils.html and
+Both packages will work in the lk 2.4 series. However, sg3_utils offers more
+capabilities. They can be found at: https://sg.danny.cz/sg/sg3_utils.html and
 freecode.com
 
 Another approach is to look at the applications that use the sg driver.
@@ -83,7 +73,7 @@ These include cdrecord, cdparanoia, SANE and cdrdao.
 Mapping of Linux kernel versions to sg driver versions
 ======================================================
-Here is a list of linux kernels in the 2.4 series that had new version
+Here is a list of Linux kernels in the 2.4 series that had the new version
 of the sg driver:
 
 - lk 2.4.0 : sg version 3.1.17
@@ -92,10 +82,10 @@ of the sg driver:
 - lk 2.4.17 : sg version 3.1.22
 
 .. [#] There were 3 changes to sg version 3.1.20 by third parties in the
-       next six linux kernel versions.
+       next six Linux kernel versions.
 
-For reference here is a list of linux kernels in the 2.2 series that had
-new version of the sg driver:
+For reference here is a list of Linux kernels in the 2.2 series that had
+the new version of the sg driver:
 
 - lk 2.2.0 : original sg version [with no version number]
 - lk 2.2.6 : sg version 2.1.31
@@ -106,9 +96,8 @@ new version of the sg driver:
 - lk 2.2.17 : sg version 2.1.39
 - lk 2.2.20 : sg version 2.1.40
 
-The lk 2.5 development series has recently commenced and it currently
-contains sg version 3.5.23 which is functionally equivalent to sg
-version 3.1.22 found in lk 2.4.17.
+The lk 2.5 development series currently contains sg version 3.5.23
+which is functionally equivalent to sg version 3.1.22 found in lk 2.4.17.
 
 Douglas Gilbert
diff --git a/Documentation/scsi/scsi.rst b/Documentation/scsi/scsi.rst
index 276918eb4d74..8556846b9f63 100644
--- a/Documentation/scsi/scsi.rst
+++ b/Documentation/scsi/scsi.rst
@@ -6,30 +6,28 @@ SCSI subsystem documentation
 The Linux Documentation Project (LDP) maintains a document describing
 the SCSI subsystem in the Linux kernel (lk) 2.4 series. See:
-http://www.tldp.org/HOWTO/SCSI-2.4-HOWTO . The LDP has single
+https://www.tldp.org/HOWTO/SCSI-2.4-HOWTO . The LDP has single
 and multiple page HTML renderings as well as postscript and pdf.
-It can also be found at:
-http://web.archive.org/web/%2E/http://www.torque.net/scsi/SCSI-2.4-HOWTO
 
 Notes on using modules in the SCSI subsystem
 ============================================
-The scsi support in the linux kernel can be modularized in a number of
+The SCSI support in the Linux kernel can be modularized in a number of
 different ways depending upon the needs of the end user.  To understand
 your options, we should first define a few terms.
 
-The scsi-core (also known as the "mid level") contains the core of scsi
-support. Without it you can do nothing with any of the other scsi drivers.
-The scsi core support can be a module (scsi_mod.o), or it can be built into
-the kernel. If the core is a module, it must be the first scsi module
+The scsi-core (also known as the "mid level") contains the core of SCSI
+support. Without it you can do nothing with any of the other SCSI drivers.
+The SCSI core support can be a module (scsi_mod.o), or it can be built into
+the kernel. If the core is a module, it must be the first SCSI module
 loaded, and if you unload the modules, it will have to be the last one
-unloaded. In practice the modprobe and rmmod commands (and "autoclean")
+unloaded. In practice the modprobe and rmmod commands
 will enforce the correct ordering of loading and unloading modules in
 the SCSI subsystem.
 
 The individual upper and lower level drivers can be loaded in any order
-once the scsi core is present in the kernel (either compiled in or loaded
-as a module). The disk driver (sd_mod.o), cdrom driver (sr_mod.o),
-tape driver [1]_ (st.o) and scsi generics driver (sg.o) represent the upper
+once the SCSI core is present in the kernel (either compiled in or loaded
+as a module). The disk driver (sd_mod.o), CD-ROM driver (sr_mod.o),
+tape driver [1]_ (st.o) and SCSI generics driver (sg.o) represent the upper
 level drivers to support the various assorted devices which can be
 controlled. You can for example load the tape driver to use the tape drive,
 and then unload it once you have no further need for the driver (and release
@@ -44,4 +42,3 @@ built into the kernel.
 
 .. [1] There is a variant of the st driver for controlling OnStream tape
        devices. Its module name is osst.o .
-
diff --git a/Documentation/scsi/scsi_fc_transport.rst b/Documentation/scsi/scsi_fc_transport.rst
index 176c1862cb9b..e3ddcfb7f8fd 100644
--- a/Documentation/scsi/scsi_fc_transport.rst
+++ b/Documentation/scsi/scsi_fc_transport.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-================
-SCSI FC Tansport
-================
+=================
+SCSI FC Transport
+=================
 
 Date:  11/18/2008
 
@@ -556,5 +556,5 @@ The following people have contributed to this document:
 
 James Smart
-james.smart@emulex.com
+james.smart@broadcom.com
 
diff --git a/Documentation/scsi/sym53c8xx_2.rst b/Documentation/scsi/sym53c8xx_2.rst
index 004f1a750e7d..4eb047921dce 100644
--- a/Documentation/scsi/sym53c8xx_2.rst
+++ b/Documentation/scsi/sym53c8xx_2.rst
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-=========================================
-The Linux SYM-2 driver documentation file
-=========================================
+============
+SYM-2 driver
+============
 
 Written by Gerard Roudier <groudier@free.fr>
diff --git a/MAINTAINERS b/MAINTAINERS
index 1545c55b9173..d5cead4b03b2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5732,10 +5732,7 @@ DC395x SCSI driver
 M:	Oliver Neukum <oliver@neukum.org>
 M:	Ali Akcaagac <aliakc@web.de>
 M:	Jamie Lenehan <lenehan@twibble.org>
-L:	dc395x@twibble.org
 S:	Maintained
-W:	http://twibble.org/dist/dc395x/
-W:	http://lists.twibble.org/mailman/listinfo/dc395x/
 F:	Documentation/scsi/dc395x.rst
 F:	drivers/scsi/dc395x.*
@@ -18918,6 +18915,16 @@ F:	include/linux/wait.h
 F:	include/uapi/linux/sched.h
 F:	kernel/sched/
 
+SCSI LIBSAS SUBSYSTEM
+R:	John Garry <john.g.garry@oracle.com>
+R:	Jason Yan <yanaijie@huawei.com>
+L:	linux-scsi@vger.kernel.org
+S:	Supported
+F:	drivers/scsi/libsas/
+F:	include/scsi/libsas.h
+F:	include/scsi/sas_ata.h
+F:	Documentation/scsi/libsas.rst
+
 SCSI RDMA PROTOCOL (SRP) INITIATOR
 M:	Bart Van Assche <bvanassche@acm.org>
 L:	linux-rdma@vger.kernel.org
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 09bbbcf9e049..3cce6de464a7 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5528,16 +5528,16 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 		bfqq->new_ioprio_class = task_nice_ioclass(tsk);
 		break;
 	case IOPRIO_CLASS_RT:
-		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
+		bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
 		bfqq->new_ioprio_class = IOPRIO_CLASS_RT;
 		break;
 	case IOPRIO_CLASS_BE:
-		bfqq->new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
+		bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
 		bfqq->new_ioprio_class = IOPRIO_CLASS_BE;
 		break;
 	case IOPRIO_CLASS_IDLE:
 		bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE;
-		bfqq->new_ioprio = 7;
+		bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1;
 		break;
 	}
 
@@ -5834,7 +5834,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
 				       struct bfq_io_cq *bic, bool respawn)
 {
-	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
+	const int ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio);
 	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
 	struct ata_queued_cmd *qc;
 	struct bfq_queue **async_bfqq = NULL;
 	struct bfq_queue *bfqq;
diff --git a/block/blk-core.c b/block/blk-core.c
index 3fc68b944479..99d8b9812b18 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -155,7 +155,7 @@ static const struct {
 	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
 	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
 	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
-	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
+	[BLK_STS_RESV_CONFLICT]	= { -EBADE,	"reservation conflict" },
 	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
 	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
 	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
@@ -170,6 +170,9 @@ static const struct {
 	[BLK_STS_ZONE_OPEN_RESOURCE]	= { -ETOOMANYREFS, "open zones exceeded" },
 	[BLK_STS_ZONE_ACTIVE_RESOURCE]	= { -EOVERFLOW, "active zones exceeded" },
 
+	/* Command duration limit device-side timeout */
+	[BLK_STS_DURATION_LIMIT]	= { -ETIME, "duration limit exceeded" },
+
 	/* everything else not covered above: */
 	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
 };
diff --git a/block/bsg.c b/block/bsg.c
index 1a9396a3b7d7..72157a59b788 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -36,7 +36,7 @@ static inline struct bsg_device *to_bsg_device(struct inode *inode)
 }
 
 #define BSG_DEFAULT_CMDS	64
-#define BSG_MAX_DEVS		32768
+#define BSG_MAX_DEVS		(1 << MINORBITS)
 
 static DEFINE_IDA(bsg_minor_ida);
 static const struct class bsg_class;
diff --git a/block/ioprio.c b/block/ioprio.c
index 32a456b45804..b5a942519a79 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -33,7 +33,7 @@ int ioprio_check_cap(int ioprio)
 {
 	int class = IOPRIO_PRIO_CLASS(ioprio);
-	int data = IOPRIO_PRIO_DATA(ioprio);
+	int level = IOPRIO_PRIO_LEVEL(ioprio);
 
 	switch (class) {
 	case IOPRIO_CLASS_RT:
@@ -49,15 +49,16 @@ int ioprio_check_cap(int ioprio)
 		fallthrough;
 		/* rt has prio field too */
 	case IOPRIO_CLASS_BE:
-		if (data >= IOPRIO_NR_LEVELS || data < 0)
+		if (level >= IOPRIO_NR_LEVELS)
 			return -EINVAL;
 		break;
 	case IOPRIO_CLASS_IDLE:
 		break;
 	case IOPRIO_CLASS_NONE:
-		if (data)
+		if (level)
 			return -EINVAL;
 		break;
+	case IOPRIO_CLASS_INVALID:
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0e4c2da50821..d37ab6087f2f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -665,12 +665,33 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
 	return block;
 }
 
+/*
+ * Set a taskfile command duration limit index.
+ */
+static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)
+{
+	struct ata_taskfile *tf = &qc->tf;
+
+	if (tf->protocol == ATA_PROT_NCQ)
+		tf->auxiliary |= cdl;
+	else
+		tf->feature |= cdl;
+
+	/*
+	 * Mark this command as having a CDL and request the result
+	 * task file so that we can inspect the sense data available
+	 * bit on completion.
+	 */
+	qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF;
+}
+
 /**
  *	ata_build_rw_tf - Build ATA taskfile for given read/write request
  *	@qc: Metadata associated with the taskfile to build
  *	@block: Block address
  *	@n_block: Number of blocks
  *	@tf_flags: RW/FUA etc...
+ *	@cdl: Command duration limit index
  *	@class: IO priority class
  *
  *	LOCKING:
@@ -685,7 +706,7 @@ u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
  *	-EINVAL if the request is invalid.
  */
 int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
-		    unsigned int tf_flags, int class)
+		    unsigned int tf_flags, int cdl, int class)
 {
 	struct ata_taskfile *tf = &qc->tf;
 	struct ata_device *dev = qc->dev;
@@ -724,11 +745,20 @@ int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
 		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
 		    class == IOPRIO_CLASS_RT)
 			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;
+
+		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
+			ata_set_tf_cdl(qc, cdl);
+
 	} else if (dev->flags & ATA_DFLAG_LBA) {
 		tf->flags |= ATA_TFLAG_LBA;
 
-		/* We need LBA48 for FUA writes */
-		if (!(tf->flags & ATA_TFLAG_FUA) && lba_28_ok(block, n_block)) {
+		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
+			ata_set_tf_cdl(qc, cdl);
+
+		/* Both FUA writes and a CDL index require 48-bit commands */
+		if (!(tf->flags & ATA_TFLAG_FUA) &&
+		    !(qc->flags & ATA_QCFLAG_HAS_CDL) &&
+		    lba_28_ok(block, n_block)) {
 			/* use LBA28 */
 			tf->device |= (block >> 24) & 0xf;
 		} else if (lba_48_ok(block, n_block)) {
@@ -2367,6 +2397,139 @@ static void ata_dev_config_trusted(struct ata_device *dev)
 	dev->flags |= ATA_DFLAG_TRUSTED;
 }
 
+static void ata_dev_config_cdl(struct ata_device *dev)
+{
+	struct ata_port *ap = dev->link->ap;
+	unsigned int err_mask;
+	bool cdl_enabled;
+	u64 val;
+
+	if (ata_id_major_version(dev->id) < 12)
+		goto not_supported;
+
+	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
+	    !ata_identify_page_supported(dev, ATA_LOG_SUPPORTED_CAPABILITIES) ||
+	    !ata_identify_page_supported(dev, ATA_LOG_CURRENT_SETTINGS))
+		goto not_supported;
+
+	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
+				     ATA_LOG_SUPPORTED_CAPABILITIES,
+				     ap->sector_buf, 1);
+	if (err_mask)
+		goto not_supported;
+
+	/* Check Command Duration Limit Supported bits */
+	val = get_unaligned_le64(&ap->sector_buf[168]);
+	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
+		goto not_supported;
+
+	/* Warn the user if command duration guideline is not supported */
+	if (!(val & BIT_ULL(1)))
+		ata_dev_warn(dev,
+			"Command duration guideline is not supported\n");
+
+	/*
+	 * We must have support for the sense data for successful NCQ commands
+	 * log indicated by the successful NCQ command sense data supported bit.
+	 */
+	val = get_unaligned_le64(&ap->sector_buf[8]);
+	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
+		ata_dev_warn(dev,
+			"CDL supported but Successful NCQ Command Sense Data is not supported\n");
+		goto not_supported;
+	}
+
+	/* Without NCQ autosense, the successful NCQ commands log is useless. */
+	if (!ata_id_has_ncq_autosense(dev->id)) {
+		ata_dev_warn(dev,
+			"CDL supported but NCQ autosense is not supported\n");
+		goto not_supported;
+	}
+
+	/*
+	 * If CDL is marked as enabled, make sure the feature is enabled too.
+	 * Conversely, if CDL is disabled, make sure the feature is turned off.
+	 */
+	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
+				     ATA_LOG_CURRENT_SETTINGS,
+				     ap->sector_buf, 1);
+	if (err_mask)
+		goto not_supported;
+
+	val = get_unaligned_le64(&ap->sector_buf[8]);
+	cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
+	if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
+		if (!cdl_enabled) {
+			/* Enable CDL on the device */
+			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 1);
+			if (err_mask) {
+				ata_dev_err(dev,
+					    "Enable CDL feature failed\n");
+				goto not_supported;
+			}
+		}
+	} else {
+		if (cdl_enabled) {
+			/* Disable CDL on the device */
+			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 0);
+			if (err_mask) {
+				ata_dev_err(dev,
+					    "Disable CDL feature failed\n");
+				goto not_supported;
+			}
+		}
+	}
+
+	/*
+	 * While CDL itself has to be enabled using sysfs, CDL requires that
+	 * sense data for successful NCQ commands is enabled to work properly.
+	 * Just like ata_dev_config_sense_reporting(), enable it unconditionally
+	 * if supported.
+	 */
+	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(18))) {
+		err_mask = ata_dev_set_feature(dev,
+					SETFEATURE_SENSE_DATA_SUCC_NCQ, 0x1);
+		if (err_mask) {
+			ata_dev_warn(dev,
+				     "failed to enable Sense Data for successful NCQ commands, Emask 0x%x\n",
+				     err_mask);
+			goto not_supported;
+		}
+	}
+
+	/*
+	 * Allocate a buffer to handle reading the sense data for successful
+	 * NCQ Commands log page for commands using a CDL with one of the limit
+	 * policy set to 0xD (successful completion with sense data available
+	 * bit set).
+	 */
+	if (!ap->ncq_sense_buf) {
+		ap->ncq_sense_buf = kmalloc(ATA_LOG_SENSE_NCQ_SIZE, GFP_KERNEL);
+		if (!ap->ncq_sense_buf)
+			goto not_supported;
+	}
+
+	/*
+	 * Command duration limits is supported: cache the CDL log page 18h
+	 * (command duration descriptors).
+	 */
+	err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, ap->sector_buf, 1);
+	if (err_mask) {
+		ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
+		goto not_supported;
+	}
+
+	memcpy(dev->cdl, ap->sector_buf, ATA_LOG_CDL_SIZE);
+	dev->flags |= ATA_DFLAG_CDL;
+
+	return;
+
+not_supported:
+	dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
+	kfree(ap->ncq_sense_buf);
+	ap->ncq_sense_buf = NULL;
+}
+
 static int ata_dev_config_lba(struct ata_device *dev)
 {
 	const u16 *id = dev->id;
@@ -2534,13 +2697,14 @@ static void ata_dev_print_features(struct ata_device *dev)
 		return;
 
 	ata_dev_info(dev,
-		     "Features:%s%s%s%s%s%s%s\n",
+		     "Features:%s%s%s%s%s%s%s%s\n",
 		     dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
 		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
 		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
 		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
 		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
 		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
+		     dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
 		     dev->cpr_log ? " CPR" : "");
 }
 
@@ -2702,6 +2866,7 @@ int ata_dev_configure(struct ata_device *dev)
 		ata_dev_config_zac(dev);
 		ata_dev_config_trusted(dev);
 		ata_dev_config_cpr(dev);
+		ata_dev_config_cdl(dev);
 		dev->cdb_len = 32;
 
 	if (print_info)
@@ -4762,6 +4927,36 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 			fill_result_tf(qc);
 		trace_ata_qc_complete_done(qc);
+
+		/*
+		 * For CDL commands that completed without an error, check if
+		 * we have sense data (ATA_SENSE is set). If we do, then the
+		 * command may have been aborted by the device due to a limit
+		 * timeout using the policy 0xD. For these commands, invoke EH
+		 * to get the command sense data.
+		 */
+		if (qc->result_tf.status & ATA_SENSE &&
+		    ((ata_is_ncq(qc->tf.protocol) &&
+		      dev->flags & ATA_DFLAG_CDL_ENABLED) ||
+		     (!(ata_is_ncq(qc->tf.protocol) &&
+			ata_id_sense_reporting_enabled(dev->id))))) {
+			/*
+			 * Tell SCSI EH to not overwrite scmd->result even if
+			 * this command is finished with result SAM_STAT_GOOD.
+			 */
+			qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
+			qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
+			ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
+
+			/*
+			 * set pending so that ata_qc_schedule_eh() does not
+			 * trigger fast drain, and freeze the port.
+			 */
+			ap->pflags |= ATA_PFLAG_EH_PENDING;
+			ata_qc_schedule_eh(qc);
+			return;
+		}
+
 		/* Some commands need post-processing after successful
 		 * completion.
 		 */
@@ -5394,6 +5589,7 @@ static void ata_host_release(struct kref *kref)
 
 		kfree(ap->pmp_link);
 		kfree(ap->slave_link);
+		kfree(ap->ncq_sense_buf);
 		kfree(ap);
 		host->ports[i] = NULL;
 	}
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b3b526411f36..35e03679b0bf 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1401,8 +1401,11 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
  *
  *	LOCKING:
  *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	true if sense data could be fetched, false otherwise.
  */
-static void ata_eh_request_sense(struct ata_queued_cmd *qc)
+static bool ata_eh_request_sense(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *cmd = qc->scsicmd;
 	struct ata_device *dev = qc->dev;
@@ -1411,15 +1414,12 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc)
 
 	if (ata_port_is_frozen(qc->ap)) {
 		ata_dev_warn(dev, "sense data available but port frozen\n");
-		return;
+		return false;
 	}
 
-	if (!cmd || qc->flags & ATA_QCFLAG_SENSE_VALID)
-		return;
-
 	if (!ata_id_sense_reporting_enabled(dev->id)) {
 		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
-		return;
+		return false;
 	}
 
 	ata_tf_init(dev, &tf);
@@ -1432,13 +1432,19 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc)
 	/* Ignore err_mask; ATA_ERR might be set */
 	if (tf.status & ATA_SENSE) {
 		if (ata_scsi_sense_is_valid(tf.lbah, tf.lbam, tf.lbal)) {
-			ata_scsi_set_sense(dev, cmd, tf.lbah, tf.lbam, tf.lbal);
+			/* Set sense without also setting scsicmd->result */
+			scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
+						cmd->sense_buffer, tf.lbah,
+						tf.lbam, tf.lbal);
 			qc->flags |= ATA_QCFLAG_SENSE_VALID;
+			return true;
 		}
 	} else {
 		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
 			     tf.status, err_mask);
 	}
+
+	return false;
 }
 
 /**
@@ -1588,8 +1594,9 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
 	 * was not included in the NCQ command error log
 	 * (i.e. NCQ autosense is not supported by the device).
 	 */
-	if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) && (stat & ATA_SENSE))
-		ata_eh_request_sense(qc);
+	if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) &&
+	    (stat & ATA_SENSE) && ata_eh_request_sense(qc))
+		set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
 	if (err & ATA_ICRC)
 		qc->err_mask |= AC_ERR_ATA_BUS;
 	if (err & (ATA_UNC | ATA_AMNF))
@@ -1908,6 +1915,99 @@ static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
 	return qc->flags & ATA_QCFLAG_QUIET;
 }
 
+static int ata_eh_read_sense_success_non_ncq(struct ata_link *link)
+{
+	struct ata_port *ap = link->ap;
+	struct ata_queued_cmd *qc;
+
+	qc = __ata_qc_from_tag(ap, link->active_tag);
+	if (!qc)
+		return -EIO;
+
+	if (!(qc->flags & ATA_QCFLAG_EH) ||
+	    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
+	    qc->err_mask)
+		return -EIO;
+
+	if (!ata_eh_request_sense(qc))
+		return -EIO;
+
+	/*
+	 * If we have sense data, call scsi_check_sense() in order to set the
+	 * correct SCSI ML byte (if any). No point in checking the return value,
+	 * since the command has already completed successfully.
+	 */
+	scsi_check_sense(qc->scsicmd);
+
+	return 0;
+}
+
+static void ata_eh_get_success_sense(struct ata_link *link)
+{
+	struct ata_eh_context *ehc = &link->eh_context;
+	struct ata_device *dev = link->device;
+	struct ata_port *ap = link->ap;
+	struct ata_queued_cmd *qc;
+	int tag, ret = 0;
+
+	if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE))
+		return;
+
+	/* if frozen, we can't do much */
+	if (ata_port_is_frozen(ap)) {
+		ata_dev_warn(dev,
+			"successful sense data available but port frozen\n");
+		goto out;
+	}
+
+	/*
+	 * If the link has sactive set, then we have outstanding NCQ commands
+	 * and have to read the Successful NCQ Commands log to get the sense
+	 * data. Otherwise, we are dealing with a non-NCQ command and use
+	 * request sense ext command to retrieve the sense data.
+	 */
+	if (link->sactive)
+		ret = ata_eh_read_sense_success_ncq_log(link);
+	else
+		ret = ata_eh_read_sense_success_non_ncq(link);
+	if (ret)
+		goto out;
+
+	ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
+	return;
+
+out:
+	/*
+	 * If we failed to get sense data for a successful command that ought to
+	 * have sense data, we cannot simply return BLK_STS_OK to user space.
+	 * This is because we can't know if the sense data that we couldn't get
+	 * was actually "DATA CURRENTLY UNAVAILABLE". Reporting such a command
+	 * as success to user space would result in a silent data corruption.
+	 * Thus, add a bogus ABORTED_COMMAND sense data to such commands, such
+	 * that SCSI will report these commands as BLK_STS_IOERR to user space.
+	 */
+	ata_qc_for_each_raw(ap, qc, tag) {
+		if (!(qc->flags & ATA_QCFLAG_EH) ||
+		    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
+		    qc->err_mask ||
+		    ata_dev_phys_link(qc->dev) != link)
+			continue;
+
+		/* We managed to get sense for this success command, skip. */
+		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+			continue;
+
+		/* This success command did not have any sense data, skip. */
+		if (!(qc->result_tf.status & ATA_SENSE))
+			continue;
+
+		/* This success command had sense data, but we failed to get. */
+		ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+	}
+
+	ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
+}
+
 /**
  *	ata_eh_link_autopsy - analyze error and determine recovery action
  *	@link: host link to perform autopsy on
@@ -1948,6 +2048,14 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 	/* analyze NCQ failure */
 	ata_eh_analyze_ncq_error(link);
 
+	/*
+	 * Check if this was a successful command that simply needs sense data.
+	 * Since the sense data is not part of the completion, we need to fetch
+	 * it using an additional command. Since this can't be done from irq
+	 * context, the sense data for successful commands are fetched by EH.
+	 */
+	ata_eh_get_success_sense(link);
+
 	/* any real error trumps AC_ERR_OTHER */
 	if (ehc->i.err_mask & ~AC_ERR_OTHER)
 		ehc->i.err_mask &= ~AC_ERR_OTHER;
@@ -1957,6 +2065,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_EH) ||
 		    qc->flags & ATA_QCFLAG_RETRY ||
+		    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
 		    ata_dev_phys_link(qc->dev) != link)
 			continue;
 
@@ -3823,7 +3932,8 @@ void ata_eh_finish(struct ata_port *ap)
 				ata_eh_qc_complete(qc);
 			}
 		} else {
-			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+			if (qc->flags & ATA_QCFLAG_SENSE_VALID ||
+			    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) {
 				ata_eh_qc_complete(qc);
 			} else {
 				/* feed zero TF to sense generation */
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 6c07025011ed..85e279a12f62 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -11,7 +11,9 @@
 #include <linux/module.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
 #include <linux/libata.h>
+#include <asm/unaligned.h>
 
 #include "libata.h"
 #include "libata-transport.h"
@@ -907,10 +909,17 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device,
 		goto unlock;
 	}
 
-	if (input)
+	if (input) {
+		if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
+			ata_dev_err(dev,
+				"CDL must be disabled to enable NCQ priority\n");
+			rc = -EINVAL;
+			goto unlock;
+		}
 		dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLED;
-	else
+	} else {
 		dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
+	}
 
 unlock:
 	spin_unlock_irq(ap->lock);
@@ -1414,6 +1423,95 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
 }
 
 /**
+ *	ata_eh_read_sense_success_ncq_log - Read the sense data for successful
+ *					    NCQ commands log
+ *	@link: ATA link to get sense data for
+ *
+ *	Read the sense data for successful NCQ commands log page to obtain
+ *	sense data for all NCQ commands that completed successfully with
+ *	the sense data available bit set.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep).
+ *
+ *	RETURNS:
+ *	0 on success, -errno otherwise.
+ */
+int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
+{
+	struct ata_device *dev = link->device;
+	struct ata_port *ap = dev->link->ap;
+	u8 *buf = ap->ncq_sense_buf;
+	struct ata_queued_cmd *qc;
+	unsigned int err_mask, tag;
+	u8 *sense, sk = 0, asc = 0, ascq = 0;
+	u64 sense_valid, val;
+	int ret = 0;
+
+	err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
+	if (err_mask) {
+		ata_dev_err(dev,
+			"Failed to read Sense Data for Successful NCQ Commands log\n");
+		return -EIO;
+	}
+
+	/* Check the log header */
+	val = get_unaligned_le64(&buf[0]);
+	if ((val & 0xffff) != 1 || ((val >> 16) & 0xff) != 0x0f) {
+		ata_dev_err(dev,
+			"Invalid Sense Data for Successful NCQ Commands log\n");
+		return -EIO;
+	}
+
+	sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
+		((u64)buf[10] << 16) | ((u64)buf[11] << 24);
+
+	ata_qc_for_each_raw(ap, qc, tag) {
+		if (!(qc->flags & ATA_QCFLAG_EH) ||
+		    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
+		    qc->err_mask ||
+		    ata_dev_phys_link(qc->dev) != link)
+			continue;
+
+		/*
+		 * If the command does not have any sense data, clear ATA_SENSE.
+		 * Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
+		 */
+		if (!(sense_valid & (1ULL << tag))) {
+			qc->result_tf.status &= ~ATA_SENSE;
+			continue;
+		}
+
+		sense = &buf[32 + 24 * tag];
+		sk = sense[0];
+		asc = sense[1];
+		ascq = sense[2];
+
+		if (!ata_scsi_sense_is_valid(sk, asc, ascq)) {
+			ret = -EIO;
+			continue;
+		}
+
+		/* Set sense without also setting scsicmd->result */
+		scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
+					qc->scsicmd->sense_buffer, sk,
+					asc, ascq);
+		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+
+		/*
+		 * If we have sense data, call scsi_check_sense() in order to
+		 * set the correct SCSI ML byte (if any). No point in checking
+		 * the return value, since the command has already completed
+		 * successfully.
+		 */
+		scsi_check_sense(qc->scsicmd);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ata_eh_read_sense_success_ncq_log);
+
 /**
  *	ata_eh_analyze_ncq_error - analyze NCQ error
  *	@link: ATA link to analyze NCQ error for
  *
@@ -1493,6 +1591,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
 	ata_qc_for_each_raw(ap, qc, tag) {
 		if (!(qc->flags & ATA_QCFLAG_EH) ||
+		    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
 		    ata_dev_phys_link(qc->dev) != link)
 			continue;
 
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 4a5555cd8b6b..370d18aca71e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -37,7 +37,7 @@
 #include "libata.h"
 #include "libata-transport.h"
 
-#define ATA_SCSI_RBUF_SIZE	576
+#define ATA_SCSI_RBUF_SIZE	2048
 
 static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
 static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
@@ -47,15 +47,19 @@ typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
 static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap,
 					const struct scsi_device *scsidev);
 
-#define RW_RECOVERY_MPAGE 0x1
-#define RW_RECOVERY_MPAGE_LEN 12
-#define CACHE_MPAGE 0x8
-#define CACHE_MPAGE_LEN 20
-#define CONTROL_MPAGE 0xa
-#define CONTROL_MPAGE_LEN 12
-#define ALL_MPAGES 0x3f
-#define ALL_SUB_MPAGES 0xff
-
+#define RW_RECOVERY_MPAGE		0x1
+#define RW_RECOVERY_MPAGE_LEN		12
+#define CACHE_MPAGE			0x8
+#define CACHE_MPAGE_LEN			20
+#define CONTROL_MPAGE			0xa
+#define CONTROL_MPAGE_LEN		12
+#define ALL_MPAGES			0x3f
+#define ALL_SUB_MPAGES			0xff
+#define CDL_T2A_SUB_MPAGE		0x07
+#define CDL_T2B_SUB_MPAGE		0x08
+#define CDL_T2_SUB_MPAGE_LEN		232
+#define ATA_FEATURE_SUB_MPAGE		0xf2
+#define ATA_FEATURE_SUB_MPAGE_LEN	16
 
 static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
 	RW_RECOVERY_MPAGE,
@@ -209,9 +213,6 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
 {
 	bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
 
-	if (!cmd)
-		return;
-
 	scsi_build_sense(cmd, d_sense, sk, asc, ascq);
 }
 
@@ -221,9 +222,6 @@ void ata_scsi_set_sense_information(struct ata_device *dev,
 {
 	u64 information;
 
-	if (!cmd)
-		return;
-
 	information = ata_tf_read_block(tf, dev);
 	if (information == U64_MAX)
 		return;
@@ -1383,6 +1381,18 @@ static inline void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
 }
 
 /**
 *	scsi_dld - Get duration limit descriptor index
 *	@cdb: SCSI command to translate
 *
 *	Returns the dld bits indicating the index of a command duration limit
 *	descriptor.
+ */
+static inline int scsi_dld(const u8 *cdb)
+{
+	return ((cdb[1] & 0x01) << 2) | ((cdb[14] >> 6) & 0x03);
+}
+
+/**
  *	ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
  *	@qc: Storage for translated ATA taskfile
  *
@@ -1550,6 +1560,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 	struct request *rq = scsi_cmd_to_rq(scmd);
 	int class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
 	unsigned int tf_flags = 0;
+	int dld = 0;
 	u64 block;
 	u32 n_block;
 	int rc;
@@ -1600,6 +1611,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 			goto invalid_fld;
 		}
 		scsi_16_lba_len(cdb, &block, &n_block);
+		dld = scsi_dld(cdb);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
 		if (!ata_check_nblocks(scmd, n_block))
@@ -1624,7 +1636,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 	qc->flags |= ATA_QCFLAG_IO;
 	qc->nbytes = n_block * scmd->device->sector_size;
 
-	rc = ata_build_rw_tf(qc, block, n_block, tf_flags, class);
+	rc = ata_build_rw_tf(qc, block, n_block, tf_flags, dld, class);
 	if (likely(rc == 0))
 		return 0;
 
@@ -2203,10 +2215,123 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
 	return sizeof(def_cache_mpage);
 }
 
+/*
+ * Simulate MODE SENSE control mode page, sub-page 0.
+ */
+static unsigned int ata_msense_control_spg0(struct ata_device *dev, u8 *buf,
+					    bool changeable)
+{
+	modecpy(buf, def_control_mpage,
+		sizeof(def_control_mpage), changeable);
+	if (changeable) {
+		/* ata_mselect_control() */
+		buf[2] |= (1 << 2);
+	} else {
+		bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
+
+		/* descriptor format sense data */
+		buf[2] |= (d_sense << 2);
+	}
+
+	return sizeof(def_control_mpage);
+}
+
+/*
+ * Translate an ATA duration limit in microseconds to a SCSI duration limit
+ * using the t2cdlunits 0xa (10ms). Since the SCSI duration limits are 2-bytes
+ * only, take care of overflows.
+ */
+static inline u16 ata_xlat_cdl_limit(u8 *buf)
+{
+	u32 limit = get_unaligned_le32(buf);
+
+	return min_t(u32, limit / 10000, 65535);
+}
+
+/*
+ * Simulate MODE SENSE control mode page, sub-pages 07h and 08h
+ * (command duration limits T2A and T2B mode pages).
+ */
+static unsigned int ata_msense_control_spgt2(struct ata_device *dev, u8 *buf,
+					     u8 spg)
+{
+	u8 *b, *cdl = dev->cdl, *desc;
+	u32 policy;
+	int i;
+
+	/*
+	 * Fill the subpage. The first four bytes of the T2A/T2B mode pages
+	 * are a header. The PAGE LENGTH field is the size of the page
+	 * excluding the header.
+	 */
+	buf[0] = CONTROL_MPAGE;
+	buf[1] = spg;
+	put_unaligned_be16(CDL_T2_SUB_MPAGE_LEN - 4, &buf[2]);
+	if (spg == CDL_T2A_SUB_MPAGE) {
+		/*
+		 * Read descriptors map to the T2A page:
+		 * set perf_vs_duration_guidleine.
+		 */
+		buf[7] = (cdl[0] & 0x03) << 4;
+		desc = cdl + 64;
+	} else {
+		/* Write descriptors map to the T2B page */
+		desc = cdl + 288;
+	}
+
+	/* Fill the T2 page descriptors */
+	b = &buf[8];
+	policy = get_unaligned_le32(&cdl[0]);
+	for (i = 0; i < 7; i++, b += 32, desc += 32) {
+		/* t2cdlunits: fixed to 10ms */
+		b[0] = 0x0a;
+
+		/* Max inactive time and its policy */
+		put_unaligned_be16(ata_xlat_cdl_limit(&desc[8]), &b[2]);
+		b[6] = ((policy >> 8) & 0x0f) << 4;
+
+		/* Max active time and its policy */
+		put_unaligned_be16(ata_xlat_cdl_limit(&desc[4]), &b[4]);
+		b[6] |= (policy >> 4) & 0x0f;
+
+		/* Command duration guideline and its policy */
+		put_unaligned_be16(ata_xlat_cdl_limit(&desc[16]), &b[10]);
+		b[14] = policy & 0x0f;
+	}
+
+	return CDL_T2_SUB_MPAGE_LEN;
+}
+
+/*
+ * Simulate MODE SENSE control mode page, sub-page f2h
+ * (ATA feature control mode page).
+ */
+static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
+						   u8 *buf)
+{
+	/* PS=0, SPF=1 */
+	buf[0] = CONTROL_MPAGE | (1 << 6);
+	buf[1] = ATA_FEATURE_SUB_MPAGE;
+
+	/*
+	 * The first four bytes of ATA Feature Control mode page are a header.
+	 * The PAGE LENGTH field is the size of the page excluding the header.
+	 */
+	put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
+
+	if (dev->flags & ATA_DFLAG_CDL)
+		buf[4] = 0x02; /* Support T2A and T2B pages */
+	else
+		buf[4] = 0;
+
+	return ATA_FEATURE_SUB_MPAGE_LEN;
+}
+
 /**
  *	ata_msense_control - Simulate MODE SENSE control mode page
  *	@dev: ATA device of interest
  *	@buf: output buffer
+ *	@spg: sub-page code
  *	@changeable: whether changeable parameters are requested
  *
  *	Generate a generic MODE SENSE control mode page.
@@ -2215,17 +2340,27 @@ static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable)
  *	None.
  */
 static unsigned int ata_msense_control(struct ata_device *dev, u8 *buf,
-					bool changeable)
+				       u8 spg, bool changeable)
 {
-	modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable);
-	if (changeable) {
-		buf[2] |= (1 << 2);	/* ata_mselect_control() */
-	} else {
-		bool d_sense = (dev->flags & ATA_DFLAG_D_SENSE);
-
-		buf[2] |= (d_sense << 2);	/* descriptor format sense data */
+	unsigned int n;
+
+	switch (spg) {
+	case 0:
+		return ata_msense_control_spg0(dev, buf, changeable);
+	case CDL_T2A_SUB_MPAGE:
+	case CDL_T2B_SUB_MPAGE:
+		return ata_msense_control_spgt2(dev, buf, spg);
+	case ATA_FEATURE_SUB_MPAGE:
+		return ata_msense_control_ata_feature(dev, buf);
+	case ALL_SUB_MPAGES:
+		n = ata_msense_control_spg0(dev, buf, changeable);
+		n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
+		n += ata_msense_control_spgt2(dev, buf + n, CDL_T2A_SUB_MPAGE);
+		n += ata_msense_control_ata_feature(dev, buf + n);
+		return n;
+	default:
+		return 0;
 	}
-	return sizeof(def_control_mpage);
 }
 
 /**
@@ -2298,13 +2433,25 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
 	pg = scsicmd[2] & 0x3f;
 	spg = scsicmd[3];
+
 	/*
-	 * No mode subpages supported (yet) but asking for _all_
-	 * subpages may be valid
+	 * Supported subpages: all subpages and sub-pages 07h, 08h and f2h of
+	 * the control page.
 	 */
-	if (spg && (spg != ALL_SUB_MPAGES)) {
-		fp = 3;
-		goto invalid_fld;
+	if (spg) {
+		switch (spg) {
+		case ALL_SUB_MPAGES:
+			break;
+		case CDL_T2A_SUB_MPAGE:
+		case CDL_T2B_SUB_MPAGE:
+		case ATA_FEATURE_SUB_MPAGE:
+			if (dev->flags & ATA_DFLAG_CDL && pg == CONTROL_MPAGE)
+				break;
+			fallthrough;
+		default:
+			fp = 3;
+			goto invalid_fld;
+		}
 	}
 
 	switch(pg) {
@@ -2317,13 +2464,13 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
 		break;
 
 	case CONTROL_MPAGE:
-		p += ata_msense_control(args->dev, p, page_control == 1);
+		p += ata_msense_control(args->dev, p, spg, page_control == 1);
 		break;
 
 	case ALL_MPAGES:
 		p += ata_msense_rw_recovery(p, page_control == 1);
 		p += ata_msense_caching(args->id, p, page_control == 1);
-		p += ata_msense_control(args->dev, p, page_control == 1);
+		p += ata_msense_control(args->dev, p, spg, page_control == 1);
 		break;
 
 	default:		/* invalid page code */
@@ -2342,10 +2489,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
 			memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
 		}
 	} else {
-		unsigned int output_len = p - rbuf - 2;
-
-		rbuf[0] = output_len >> 8;
-		rbuf[1] = output_len;
+		put_unaligned_be16(p - rbuf - 2, &rbuf[0]);
 		rbuf[3] |= dpofua;
 		if (ebd) {
 			rbuf[7] = sizeof(sat_blk_desc);
@@ -3260,7 +3404,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
 {
 	struct ata_device *dev = args->dev;
 	u8 *cdb = args->cmd->cmnd;
-	u8 supported = 0;
+	u8 supported = 0, cdlp = 0, rwcdlp = 0;
 	unsigned int err = 0;
 
 	if (cdb[2] != 1 && cdb[2] != 3) {
@@ -3287,10 +3431,8 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
 	case MAINTENANCE_IN:
 	case READ_6:
 	case READ_10:
-	case READ_16:
 	case WRITE_6:
 	case WRITE_10:
-	case WRITE_16:
 	case ATA_12:
 	case ATA_16:
 	case VERIFY:
@@ -3300,6 +3442,28 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
 	case START_STOP:
 		supported = 3;
 		break;
+	case READ_16:
+		supported = 3;
+		if (dev->flags & ATA_DFLAG_CDL) {
+			/*
+			 * CDL read descriptors map to the T2A page, that is,
+			 * rwcdlp = 0x01 and cdlp = 0x01
+			 */
+			rwcdlp = 0x01;
+			cdlp = 0x01 << 3;
+		}
+		break;
+	case WRITE_16:
+		supported = 3;
+		if (dev->flags & ATA_DFLAG_CDL) {
+			/*
+			 * CDL write descriptors map to the T2B page, that is,
+			 * rwcdlp = 0x01 and cdlp = 0x02
+			 */
+			rwcdlp = 0x01;
+			cdlp = 0x02 << 3;
+		}
+		break;
 	case ZBC_IN:
 	case ZBC_OUT:
 		if (ata_id_zoned_cap(dev->id) ||
@@ -3315,7 +3479,9 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
 		break;
 	}
 out:
-	rbuf[1] = supported; /* supported */
+	/* One command format */
+	rbuf[0] = rwcdlp;
+	rbuf[1] = cdlp | supported;
 	return err;
 }
 
@@ -3605,20 +3771,11 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
 	return 0;
 }
 
-/**
- *	ata_mselect_control - Simulate MODE SELECT for control page
- *	@qc: Storage for translated ATA taskfile
- *	@buf: input buffer
- *	@len: number of valid bytes in the input buffer
- *	@fp: out parameter for the failed field on error
- *
- *	Prepare a taskfile to modify caching information for the device.
- *
- *	LOCKING:
- *	None.
+/*
+ * Simulate MODE SELECT control mode page, sub-page 0.
  */
-static int ata_mselect_control(struct ata_queued_cmd *qc,
-			       const u8 *buf, int len, u16 *fp)
+static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
+				    const u8 *buf, int len, u16 *fp)
 {
 	struct ata_device *dev = qc->dev;
 	u8 mpage[CONTROL_MPAGE_LEN];
@@ -3640,7 +3797,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
 	/*
 	 * Check that read-only bits are not modified.
 	 */
-	ata_msense_control(dev, mpage, false);
+	ata_msense_control_spg0(dev, mpage, false);
 	for (i = 0; i < CONTROL_MPAGE_LEN - 2; i++) {
 		if (i == 0)
 			continue;
@@ -3656,6 +3813,84 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
 	return 0;
 }
 
+/*
+ * Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
+ * page) into a SET FEATURES command.
+ */
+static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
+						    const u8 *buf, int len,
+						    u16 *fp)
+{
+	struct ata_device *dev = qc->dev;
+	struct ata_taskfile *tf = &qc->tf;
+	u8 cdl_action;
+
+	/*
+	 * The first four bytes of ATA Feature Control mode page are a header,
+	 * so offsets in mpage are off by 4 compared to buf. Same for len.
+	 */
+	if (len != ATA_FEATURE_SUB_MPAGE_LEN - 4) {
+		*fp = min(len, ATA_FEATURE_SUB_MPAGE_LEN - 4);
+		return -EINVAL;
+	}
+
+	/* Check cdl_ctrl */
+	switch (buf[0] & 0x03) {
+	case 0:
+		/* Disable CDL */
+		cdl_action = 0;
+		dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
+		break;
+	case 0x02:
+		/* Enable CDL T2A/T2B: NCQ priority must be disabled */
+		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
+			ata_dev_err(dev,
+				"NCQ priority must be disabled to enable CDL\n");
+			return -EINVAL;
+		}
+		cdl_action = 1;
+		dev->flags |= ATA_DFLAG_CDL_ENABLED;
+		break;
+	default:
+		*fp = 0;
+		return -EINVAL;
+	}
+
+	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+	tf->protocol = ATA_PROT_NODATA;
+	tf->command = ATA_CMD_SET_FEATURES;
+	tf->feature = SETFEATURES_CDL;
+	tf->nsect = cdl_action;
+
+	return 1;
+}
+
+/**
+ *	ata_mselect_control - Simulate MODE SELECT for control page
+ *	@qc: Storage for translated ATA taskfile
+ *	@spg: target sub-page of the control page
+ *	@buf: input buffer
+ *	@len: number of valid bytes in the input buffer
+ *	@fp: out parameter for the failed field on error
+ *
+ *	Prepare a taskfile to modify caching information for the device.
+ *
+ *	LOCKING:
+ *	None.
+ */
+static int ata_mselect_control(struct ata_queued_cmd *qc, u8 spg,
+			       const u8 *buf, int len, u16 *fp)
+{
+	switch (spg) {
+	case 0:
+		return ata_mselect_control_spg0(qc, buf, len, fp);
+	case ATA_FEATURE_SUB_MPAGE:
+		return ata_mselect_control_ata_feature(qc, buf, len, fp);
+	default:
+		return -EINVAL;
+	}
+}
+
 /**
  *	ata_scsi_mode_select_xlat - Simulate MODE SELECT 6, 10 commands
  *	@qc: Storage for translated ATA taskfile
@@ -3673,7 +3908,7 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
 	const u8 *cdb = scmd->cmnd;
 	u8 pg, spg;
 	unsigned six_byte, pg_len, hdr_len, bd_len;
-	int len;
+	int len, ret;
 	u16 fp = (u16)-1;
 	u8 bp = 0xff;
 	u8 buffer[64];
@@ -3758,13 +3993,29 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
 	}
 
 	/*
-	 * No mode subpages supported (yet) but asking for _all_
-	 * subpages may be valid
+	 * Supported subpages: all subpages and ATA feature sub-page f2h of
+	 * the control page.
 	 */
-	if (spg && (spg != ALL_SUB_MPAGES)) {
-		fp = (p[0] & 0x40) ? 1 : 0;
-		fp += hdr_len + bd_len;
-		goto invalid_param;
+	if (spg) {
+		switch (spg) {
+		case ALL_SUB_MPAGES:
+			/* All subpages is not supported for the control page */
+			if (pg == CONTROL_MPAGE) {
+				fp = (p[0] & 0x40) ? 1 : 0;
1 : 0; + fp += hdr_len + bd_len; + goto invalid_param; + } + break; + case ATA_FEATURE_SUB_MPAGE: + if (qc->dev->flags & ATA_DFLAG_CDL && + pg == CONTROL_MPAGE) + break; + fallthrough; + default: + fp = (p[0] & 0x40) ? 1 : 0; + fp += hdr_len + bd_len; + goto invalid_param; + } } if (pg_len > len) goto invalid_param_len; @@ -3777,14 +4028,16 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) } break; case CONTROL_MPAGE: - if (ata_mselect_control(qc, p, pg_len, &fp) < 0) { + ret = ata_mselect_control(qc, spg, p, pg_len, &fp); + if (ret < 0) { fp += hdr_len + bd_len; goto invalid_param; - } else { - goto skip; /* No ATA command to send */ } + if (!ret) + goto skip; /* No ATA command to send */ break; - default: /* invalid page code */ + default: + /* Invalid page code */ fp = bd_len + hdr_len; goto invalid_param; } diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 926d0d33cd29..cf993885d2b2 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -45,7 +45,7 @@ static inline void ata_force_cbl(struct ata_port *ap) { } extern u64 ata_tf_to_lba(const struct ata_taskfile *tf); extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf); extern int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block, - unsigned int tf_flags, int class); + unsigned int tf_flags, int dld, int class); extern u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev); extern unsigned ata_exec_internal(struct ata_device *dev, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index fe2d4750d9c7..c4cdab508287 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -3143,6 +3143,8 @@ struct dm_pr { bool fail_early; int ret; enum pr_type type; + struct pr_keys *read_keys; + struct pr_held_reservation *rsv; }; static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn, @@ -3375,12 +3377,79 @@ out: return r; } +static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_read_keys) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys); + if (!pr->ret) + return -1; + + return 0; +} + +static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys) +{ + struct dm_pr pr = { + .read_keys = keys, + }; + int ret; + + ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr); + if (ret) + return ret; + + return pr.ret; +} + +static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_pr *pr = data; + const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; + + if (!ops || !ops->pr_read_reservation) { + pr->ret = -EOPNOTSUPP; + return -1; + } + + pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv); + if (!pr->ret) + return -1; + + return 0; +} + +static int dm_pr_read_reservation(struct block_device *bdev, + struct pr_held_reservation *rsv) +{ + struct dm_pr pr = { + .rsv = rsv, + }; + int ret; + + ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr); + if (ret) + return ret; + + return pr.ret; +} + static const struct pr_ops dm_pr_ops = { .pr_register = dm_pr_register, .pr_reserve = dm_pr_reserve, .pr_release = dm_pr_release, .pr_preempt = dm_pr_preempt, .pr_clear = dm_pr_clear, + .pr_read_keys = dm_pr_read_keys, + .pr_read_reservation = dm_pr_read_reservation, }; static const struct block_device_operations dm_blk_dops = { diff --git 
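/*
 * Shape of the dm_call_pr() callbacks added above, reduced to a
 * skeleton (illustrative only): a nonzero return stops the iteration
 * over the table's devices, and the PR status travels in dm_pr.ret, so
 * a successful read stops at the first device while an ordinary failure
 * moves on to the next one (missing pr_ops support stops immediately
 * with -EOPNOTSUPP in the real callbacks).
 */
static int __dm_pr_example(struct dm_target *ti, struct dm_dev *dev,
			   sector_t start, sector_t len, void *data)
{
	struct dm_pr *pr = data;

	pr->ret = 0;	/* the real callbacks issue ops->pr_read_*() here */
	if (!pr->ret)
		return -1;	/* success: stop iterating */
	return 0;		/* error: try the next device */
}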
a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig index a3d0288fd0e2..88a6e506a942 100644 --- a/drivers/message/fusion/Kconfig +++ b/drivers/message/fusion/Kconfig @@ -2,7 +2,7 @@ menuconfig FUSION bool "Fusion MPT device support" - depends on PCI + depends on PCI && HAS_IOPORT help Say Y here to get to see options for Fusion Message Passing Technology (MPT) drivers. diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 4f0afce8428d..4bf669c55649 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -712,7 +712,7 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name) MptDriverClass[cb_idx] = dclass; MptEvHandlers[cb_idx] = NULL; last_drv_idx = cb_idx; - strlcpy(MptCallbacksName[cb_idx], func_name, + strscpy(MptCallbacksName[cb_idx], func_name, MPT_MAX_CALLBACKNAME_LEN+1); break; } @@ -7666,7 +7666,7 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) break; } if (ds) - strlcpy(evStr, ds, EVENT_DESCR_STR_SZ); + strscpy(evStr, ds, EVENT_DESCR_STR_SZ); devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 1decd09a08d8..dd028df4b283 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -2408,7 +2408,7 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size) if (mpt_config(ioc, &cfg) == 0) { ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; if (strlen(pdata->BoardTracerNumber) > 1) { - strlcpy(karg.serial_number, + strscpy(karg.serial_number, pdata->BoardTracerNumber, 24); } } diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index d3fc5063e4be..c7c3cf202d12 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -10,7 +10,7 @@ obj-$(CONFIG_NVME_FC) += nvme-fc.o obj-$(CONFIG_NVME_TCP) += nvme-tcp.o obj-$(CONFIG_NVME_APPLE) += nvme-apple.o -nvme-core-y += core.o ioctl.o sysfs.o +nvme-core-y += core.o ioctl.o sysfs.o pr.o nvme-core-$(CONFIG_NVME_VERBOSE_ERRORS) += constants.o nvme-core-$(CONFIG_TRACING) += trace.o nvme-core-$(CONFIG_NVME_MULTIPATH) += multipath.o diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index fdfcf2781c85..98bfb3d9c22a 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -279,7 +279,7 @@ static blk_status_t nvme_error_status(u16 status) case NVME_SC_INVALID_PI: return BLK_STS_PROTECTION; case NVME_SC_RESERVATION_CONFLICT: - return BLK_STS_NEXUS; + return BLK_STS_RESV_CONFLICT; case NVME_SC_HOST_PATH_ERROR: return BLK_STS_TRANSPORT; case NVME_SC_ZONE_TOO_MANY_ACTIVE: @@ -2105,153 +2105,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) } } -static char nvme_pr_type(enum pr_type type) -{ - switch (type) { - case PR_WRITE_EXCLUSIVE: - return 1; - case PR_EXCLUSIVE_ACCESS: - return 2; - case PR_WRITE_EXCLUSIVE_REG_ONLY: - return 3; - case PR_EXCLUSIVE_ACCESS_REG_ONLY: - return 4; - case PR_WRITE_EXCLUSIVE_ALL_REGS: - return 5; - case PR_EXCLUSIVE_ACCESS_ALL_REGS: - return 6; - default: - return 0; - } -} - -static int nvme_send_ns_head_pr_command(struct block_device *bdev, - struct nvme_command *c, u8 data[16]) -{ - struct nvme_ns_head *head = bdev->bd_disk->private_data; - int srcu_idx = srcu_read_lock(&head->srcu); - struct nvme_ns *ns = nvme_find_path(head); - int ret = -EWOULDBLOCK; - - if (ns) { - c->common.nsid = cpu_to_le32(ns->head->ns_id); - ret = nvme_submit_sync_cmd(ns->queue, c, data, 
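/*
 * Why the strlcpy() -> strscpy() conversions in this series are safe
 * drop-in replacements: strlcpy() returns strlen(src) and so always
 * walks the entire source string, even far past the destination size,
 * while strscpy() reads at most the buffer size, always NUL-terminates,
 * and reports truncation with -E2BIG. Hypothetical usage:
 */
static void copy_board_name(char *dst, size_t dst_sz, const char *src)
{
	ssize_t n = strscpy(dst, src, dst_sz);

	if (n == -E2BIG)
		pr_debug("board name truncated to %zu bytes\n", dst_sz - 1);
}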
16); - } - srcu_read_unlock(&head->srcu, srcu_idx); - return ret; -} - -static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, - u8 data[16]) -{ - c->common.nsid = cpu_to_le32(ns->head->ns_id); - return nvme_submit_sync_cmd(ns->queue, c, data, 16); -} - -static int nvme_sc_to_pr_err(int nvme_sc) -{ - if (nvme_is_path_error(nvme_sc)) - return PR_STS_PATH_FAILED; - - switch (nvme_sc) { - case NVME_SC_SUCCESS: - return PR_STS_SUCCESS; - case NVME_SC_RESERVATION_CONFLICT: - return PR_STS_RESERVATION_CONFLICT; - case NVME_SC_ONCS_NOT_SUPPORTED: - return -EOPNOTSUPP; - case NVME_SC_BAD_ATTRIBUTES: - case NVME_SC_INVALID_OPCODE: - case NVME_SC_INVALID_FIELD: - case NVME_SC_INVALID_NS: - return -EINVAL; - default: - return PR_STS_IOERR; - } -} - -static int nvme_pr_command(struct block_device *bdev, u32 cdw10, - u64 key, u64 sa_key, u8 op) -{ - struct nvme_command c = { }; - u8 data[16] = { 0, }; - int ret; - - put_unaligned_le64(key, &data[0]); - put_unaligned_le64(sa_key, &data[8]); - - c.common.opcode = op; - c.common.cdw10 = cpu_to_le32(cdw10); - - if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && - bdev->bd_disk->fops == &nvme_ns_head_ops) - ret = nvme_send_ns_head_pr_command(bdev, &c, data); - else - ret = nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, - data); - if (ret < 0) - return ret; - - return nvme_sc_to_pr_err(ret); -} - -static int nvme_pr_register(struct block_device *bdev, u64 old, - u64 new, unsigned flags) -{ - u32 cdw10; - - if (flags & ~PR_FL_IGNORE_KEY) - return -EOPNOTSUPP; - - cdw10 = old ? 2 : 0; - cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; - cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ - return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); -} - -static int nvme_pr_reserve(struct block_device *bdev, u64 key, - enum pr_type type, unsigned flags) -{ - u32 cdw10; - - if (flags & ~PR_FL_IGNORE_KEY) - return -EOPNOTSUPP; - - cdw10 = nvme_pr_type(type) << 8; - cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); -} - -static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, - enum pr_type type, bool abort) -{ - u32 cdw10 = nvme_pr_type(type) << 8 | (abort ? 2 : 1); - - return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); -} - -static int nvme_pr_clear(struct block_device *bdev, u64 key) -{ - u32 cdw10 = 1 | (key ? 0 : 1 << 3); - - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); -} - -static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) -{ - u32 cdw10 = nvme_pr_type(type) << 8 | (key ? 
0 : 1 << 3); - - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); -} - -const struct pr_ops nvme_pr_ops = { - .pr_register = nvme_pr_register, - .pr_reserve = nvme_pr_reserve, - .pr_release = nvme_pr_release, - .pr_preempt = nvme_pr_preempt, - .pr_clear = nvme_pr_clear, -}; - #ifdef CONFIG_BLK_SED_OPAL static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 9a98c14c552a..a69e1efb3299 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -19,6 +19,8 @@ #include <trace/events/block.h> +extern const struct pr_ops nvme_pr_ops; + extern unsigned int nvme_io_timeout; #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c new file mode 100644 index 000000000000..391b1465ebfd --- /dev/null +++ b/drivers/nvme/host/pr.c @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2015 Intel Corporation + * Keith Busch <kbusch@kernel.org> + */ +#include <linux/blkdev.h> +#include <linux/pr.h> +#include <asm/unaligned.h> + +#include "nvme.h" + +static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type) +{ + switch (type) { + case PR_WRITE_EXCLUSIVE: + return NVME_PR_WRITE_EXCLUSIVE; + case PR_EXCLUSIVE_ACCESS: + return NVME_PR_EXCLUSIVE_ACCESS; + case PR_WRITE_EXCLUSIVE_REG_ONLY: + return NVME_PR_WRITE_EXCLUSIVE_REG_ONLY; + case PR_EXCLUSIVE_ACCESS_REG_ONLY: + return NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY; + case PR_WRITE_EXCLUSIVE_ALL_REGS: + return NVME_PR_WRITE_EXCLUSIVE_ALL_REGS; + case PR_EXCLUSIVE_ACCESS_ALL_REGS: + return NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} + +static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type) +{ + switch (type) { + case NVME_PR_WRITE_EXCLUSIVE: + return PR_WRITE_EXCLUSIVE; + case NVME_PR_EXCLUSIVE_ACCESS: + return PR_EXCLUSIVE_ACCESS; + case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY: + return PR_WRITE_EXCLUSIVE_REG_ONLY; + case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY: + return PR_EXCLUSIVE_ACCESS_REG_ONLY; + case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS: + return PR_WRITE_EXCLUSIVE_ALL_REGS; + case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS: + return PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} + +static int nvme_send_ns_head_pr_command(struct block_device *bdev, + struct nvme_command *c, void *data, unsigned int data_len) +{ + struct nvme_ns_head *head = bdev->bd_disk->private_data; + int srcu_idx = srcu_read_lock(&head->srcu); + struct nvme_ns *ns = nvme_find_path(head); + int ret = -EWOULDBLOCK; + + if (ns) { + c->common.nsid = cpu_to_le32(ns->head->ns_id); + ret = nvme_submit_sync_cmd(ns->queue, c, data, data_len); + } + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + +static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, + void *data, unsigned int data_len) +{ + c->common.nsid = cpu_to_le32(ns->head->ns_id); + return nvme_submit_sync_cmd(ns->queue, c, data, data_len); +} + +static int nvme_sc_to_pr_err(int nvme_sc) +{ + if (nvme_is_path_error(nvme_sc)) + return PR_STS_PATH_FAILED; + + switch (nvme_sc) { + case NVME_SC_SUCCESS: + return PR_STS_SUCCESS; + case NVME_SC_RESERVATION_CONFLICT: + return PR_STS_RESERVATION_CONFLICT; + case NVME_SC_ONCS_NOT_SUPPORTED: + return -EOPNOTSUPP; + case NVME_SC_BAD_ATTRIBUTES: + case NVME_SC_INVALID_OPCODE: + case NVME_SC_INVALID_FIELD: + case NVME_SC_INVALID_NS: + return -EINVAL; + default: + return PR_STS_IOERR; + } +} + +static int nvme_send_pr_command(struct block_device 
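/*
 * For context, these reservation handlers are reached through the
 * generic block-layer ioctls declared in the <linux/pr.h> UAPI. A
 * user-space sketch (the device path and keys are made up):
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

static int register_and_reserve(const char *path)
{
	struct pr_registration reg = { .old_key = 0, .new_key = 0xabcd };
	struct pr_reservation rsv = { .key = 0xabcd,
				      .type = PR_WRITE_EXCLUSIVE };
	int fd = open(path, O_RDWR);		/* e.g. "/dev/nvme0n1" */

	if (fd < 0)
		return -1;
	if (ioctl(fd, IOC_PR_REGISTER, &reg) < 0 ||
	    ioctl(fd, IOC_PR_RESERVE, &rsv) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}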
*bdev, + struct nvme_command *c, void *data, unsigned int data_len) +{ + if (IS_ENABLED(CONFIG_NVME_MULTIPATH) && + bdev->bd_disk->fops == &nvme_ns_head_ops) + return nvme_send_ns_head_pr_command(bdev, c, data, data_len); + + return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data, + data_len); +} + +static int nvme_pr_command(struct block_device *bdev, u32 cdw10, + u64 key, u64 sa_key, u8 op) +{ + struct nvme_command c = { }; + u8 data[16] = { 0, }; + int ret; + + put_unaligned_le64(key, &data[0]); + put_unaligned_le64(sa_key, &data[8]); + + c.common.opcode = op; + c.common.cdw10 = cpu_to_le32(cdw10); + + ret = nvme_send_pr_command(bdev, &c, data, sizeof(data)); + if (ret < 0) + return ret; + + return nvme_sc_to_pr_err(ret); +} + +static int nvme_pr_register(struct block_device *bdev, u64 old, + u64 new, unsigned flags) +{ + u32 cdw10; + + if (flags & ~PR_FL_IGNORE_KEY) + return -EOPNOTSUPP; + + cdw10 = old ? 2 : 0; + cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; + cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); +} + +static int nvme_pr_reserve(struct block_device *bdev, u64 key, + enum pr_type type, unsigned flags) +{ + u32 cdw10; + + if (flags & ~PR_FL_IGNORE_KEY) + return -EOPNOTSUPP; + + cdw10 = nvme_pr_type_from_blk(type) << 8; + cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); +} + +static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, + enum pr_type type, bool abort) +{ + u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1); + + return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); +} + +static int nvme_pr_clear(struct block_device *bdev, u64 key) +{ + u32 cdw10 = 1 | (key ? 0 : 1 << 3); + + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); +} + +static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) +{ + u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3); + + return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); +} + +static int nvme_pr_resv_report(struct block_device *bdev, void *data, + u32 data_len, bool *eds) +{ + struct nvme_command c = { }; + int ret; + + c.common.opcode = nvme_cmd_resv_report; + c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len)); + c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT); + *eds = true; + +retry: + ret = nvme_send_pr_command(bdev, &c, data, data_len); + if (ret == NVME_SC_HOST_ID_INCONSIST && + c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) { + c.common.cdw11 = 0; + *eds = false; + goto retry; + } + + if (ret < 0) + return ret; + + return nvme_sc_to_pr_err(ret); +} + +static int nvme_pr_read_keys(struct block_device *bdev, + struct pr_keys *keys_info) +{ + u32 rse_len, num_keys = keys_info->num_keys; + struct nvme_reservation_status_ext *rse; + int ret, i; + bool eds; + + /* + * Assume we are using 128-bit host IDs and allocate a buffer large + * enough to get enough keys to fill the return keys buffer. 
+ */ + rse_len = struct_size(rse, regctl_eds, num_keys); + rse = kzalloc(rse_len, GFP_KERNEL); + if (!rse) + return -ENOMEM; + + ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); + if (ret) + goto free_rse; + + keys_info->generation = le32_to_cpu(rse->gen); + keys_info->num_keys = get_unaligned_le16(&rse->regctl); + + num_keys = min(num_keys, keys_info->num_keys); + for (i = 0; i < num_keys; i++) { + if (eds) { + keys_info->keys[i] = + le64_to_cpu(rse->regctl_eds[i].rkey); + } else { + struct nvme_reservation_status *rs; + + rs = (struct nvme_reservation_status *)rse; + keys_info->keys[i] = le64_to_cpu(rs->regctl_ds[i].rkey); + } + } + +free_rse: + kfree(rse); + return ret; +} + +static int nvme_pr_read_reservation(struct block_device *bdev, + struct pr_held_reservation *resv) +{ + struct nvme_reservation_status_ext tmp_rse, *rse; + int ret, i, num_regs; + u32 rse_len; + bool eds; + +get_num_regs: + /* + * Get the number of registrations so we know how big to allocate + * the response buffer. + */ + ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds); + if (ret) + return ret; + + num_regs = get_unaligned_le16(&tmp_rse.regctl); + if (!num_regs) { + resv->generation = le32_to_cpu(tmp_rse.gen); + return 0; + } + + rse_len = struct_size(rse, regctl_eds, num_regs); + rse = kzalloc(rse_len, GFP_KERNEL); + if (!rse) + return -ENOMEM; + + ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds); + if (ret) + goto free_rse; + + if (num_regs != get_unaligned_le16(&rse->regctl)) { + kfree(rse); + goto get_num_regs; + } + + resv->generation = le32_to_cpu(rse->gen); + resv->type = block_pr_type_from_nvme(rse->rtype); + + for (i = 0; i < num_regs; i++) { + if (eds) { + if (rse->regctl_eds[i].rcsts) { + resv->key = le64_to_cpu(rse->regctl_eds[i].rkey); + break; + } + } else { + struct nvme_reservation_status *rs; + + rs = (struct nvme_reservation_status *)rse; + if (rs->regctl_ds[i].rcsts) { + resv->key = le64_to_cpu(rs->regctl_ds[i].rkey); + break; + } + } + } + +free_rse: + kfree(rse); + return ret; +} + +const struct pr_ops nvme_pr_ops = { + .pr_register = nvme_pr_register, + .pr_reserve = nvme_pr_reserve, + .pr_release = nvme_pr_release, + .pr_preempt = nvme_pr_preempt, + .pr_clear = nvme_pr_clear, + .pr_read_keys = nvme_pr_read_keys, + .pr_read_reservation = nvme_pr_read_reservation, +}; diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 45788955c4e6..edcbf77852c3 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -2737,7 +2737,12 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) else if (status == 0) { switch (cqr->intrc) { case -EPERM: - error = BLK_STS_NEXUS; + /* + * DASD doesn't implement SCSI/NVMe reservations, but it + * implements a locking scheme similar to them. We + * return this error when we no longer have the lock. 
+ */ + error = BLK_STS_RESV_CONFLICT; break; case -ENOLINK: error = BLK_STS_TRANSPORT; diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 36c34ced0cc1..f39c9ec2e781 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -2305,8 +2305,10 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) TW_DISABLE_INTERRUPTS(tw_dev); /* Initialize the card */ - if (tw_reset_sequence(tw_dev)) + if (tw_reset_sequence(tw_dev)) { + retval = -EINVAL; goto out_release_mem_region; + } /* Set host specific parameters */ host->max_id = TW_MAX_UNITS; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 0704809d9d99..4962ce989113 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -334,7 +334,7 @@ config SGIWD93_SCSI config BLK_DEV_3W_XXXX_RAID tristate "3ware 5/6/7/8xxx ATA-RAID support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI help 3ware is the only hardware ATA-Raid product in Linux to date. This card is 2,4, or 8 channel master mode support only. @@ -381,7 +381,7 @@ config SCSI_3W_SAS config SCSI_ACARD tristate "ACARD SCSI support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI help This driver supports the ACARD SCSI host adapter. Support Chip <ATP870 ATP876 ATP880 ATP885> @@ -462,7 +462,7 @@ config SCSI_MVUMI config SCSI_ADVANSYS tristate "AdvanSys SCSI support" depends on SCSI - depends on ISA || EISA || PCI + depends on (ISA || EISA || PCI) && HAS_IOPORT depends on ISA_DMA_API || !ISA help This is a driver for all SCSI host adapters manufactured by @@ -503,7 +503,7 @@ config SCSI_HPTIOP config SCSI_BUSLOGIC tristate "BusLogic SCSI support" - depends on PCI && SCSI + depends on SCSI && PCI && HAS_IOPORT help This is support for BusLogic MultiMaster and FlashPoint SCSI Host Adapters. Consult the SCSI-HOWTO, available from @@ -518,7 +518,7 @@ config SCSI_BUSLOGIC config SCSI_FLASHPOINT bool "FlashPoint support" - depends on SCSI_BUSLOGIC && PCI + depends on SCSI_BUSLOGIC && PCI && HAS_IOPORT help This option allows you to add FlashPoint support to the BusLogic SCSI driver. The FlashPoint SCCB Manager code is @@ -632,7 +632,7 @@ config SCSI_SNIC_DEBUG_FS config SCSI_DMX3191D tristate "DMX3191D SCSI support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI select SCSI_SPI_ATTRS help This is support for Domex DMX3191D SCSI Host Adapters. @@ -646,7 +646,7 @@ config SCSI_FDOMAIN config SCSI_FDOMAIN_PCI tristate "Future Domain TMC-3260/AHA-2920A PCI SCSI support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI select SCSI_FDOMAIN help This is support for Future Domain's PCI SCSI host adapters (TMC-3260) @@ -699,7 +699,7 @@ config SCSI_GENERIC_NCR5380 config SCSI_IPS tristate "IBM ServeRAID support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI help This is support for the IBM ServeRAID hardware RAID controllers. See <http://www.developer.ibm.com/welcome/netfinity/serveraid.html> @@ -759,7 +759,7 @@ config SCSI_IBMVFC_TRACE config SCSI_INITIO tristate "Initio 9100U(W) support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI help This is support for the Initio 91XXU(W) SCSI host adapter. Please read the SCSI-HOWTO, available from @@ -770,7 +770,7 @@ config SCSI_INITIO config SCSI_INIA100 tristate "Initio INI-A100U2W support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI help This is support for the Initio INI-A100U2W SCSI host adapter. 
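The recurring "depends on HAS_IOPORT" additions in these Kconfig hunks all have the same motivation: the affected drivers use legacy I/O-port accessors, which exist only on platforms that provide an I/O port space, so the check moves to Kconfig instead of surfacing as a build failure. The accessors being guarded look like this (register offsets are illustrative):

	u8 status = inb(io_base + 1);	/* read a status port */

	outb(0x80, io_base);		/* issue a command */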
Please read the SCSI-HOWTO, available from @@ -782,6 +782,7 @@ config SCSI_INIA100 config SCSI_PPA tristate "IOMEGA parallel port (ppa - older drives)" depends on SCSI && PARPORT_PC + depends on HAS_IOPORT help This driver supports older versions of IOMEGA's parallel port ZIP drive (a 100 MB removable media device). @@ -1175,7 +1176,7 @@ config SCSI_SIM710 config SCSI_DC395x tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI select SCSI_SPI_ATTRS help This driver supports PCI SCSI host adapters based on the ASIC @@ -1207,7 +1208,7 @@ config SCSI_AM53C974 config SCSI_NSP32 tristate "Workbit NinjaSCSI-32Bi/UDE support" - depends on PCI && SCSI && !64BIT + depends on PCI && SCSI && !64BIT && HAS_IOPORT help This is support for the Workbit NinjaSCSI-32Bi/UDE PCI/Cardbus SCSI host adapter. Please read the SCSI-HOWTO, available from diff --git a/drivers/scsi/aic7xxx/Kconfig.aic79xx b/drivers/scsi/aic7xxx/Kconfig.aic79xx index a47dbd500e9a..4bc53eec4c83 100644 --- a/drivers/scsi/aic7xxx/Kconfig.aic79xx +++ b/drivers/scsi/aic7xxx/Kconfig.aic79xx @@ -5,7 +5,7 @@ # config SCSI_AIC79XX tristate "Adaptec AIC79xx U320 support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI select SCSI_SPI_ATTRS help This driver supports all of Adaptec's Ultra 320 PCI-X diff --git a/drivers/scsi/aic7xxx/Kconfig.aic7xxx b/drivers/scsi/aic7xxx/Kconfig.aic7xxx index 0cfd92ce750a..f0425145a5f4 100644 --- a/drivers/scsi/aic7xxx/Kconfig.aic7xxx +++ b/drivers/scsi/aic7xxx/Kconfig.aic7xxx @@ -5,7 +5,7 @@ # config SCSI_AIC7XXX tristate "Adaptec AIC7xxx Fast -> U160 support" - depends on (PCI || EISA) && SCSI + depends on (PCI || EISA) && HAS_IOPORT && SCSI select SCSI_SPI_ATTRS help This driver supports all of Adaptec's Fast through Ultra 160 PCI diff --git a/drivers/scsi/aic94xx/Kconfig b/drivers/scsi/aic94xx/Kconfig index 71931c371b1c..aaa8dadc6e1c 100644 --- a/drivers/scsi/aic94xx/Kconfig +++ b/drivers/scsi/aic94xx/Kconfig @@ -8,7 +8,7 @@ config SCSI_AIC94XX tristate "Adaptec AIC94xx SAS/SATA support" - depends on PCI + depends on PCI && HAS_IOPORT select SCSI_SAS_LIBSAS select FW_LOADER help diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index df18d9d2af53..773c84af784c 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -1134,7 +1134,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); rspnid->dap = s_id; - strlcpy(rspnid->spn, name, sizeof(rspnid->spn)); + strscpy(rspnid->spn, name, sizeof(rspnid->spn)); rspnid->spn_len = (u8) strlen(rspnid->spn); return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s); @@ -1155,7 +1155,7 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id, memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s)); rsnn_nn->node_name = node_name; - strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn)); + strscpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn)); rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn); return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s); diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index d2d396ca0e9a..5023c0ab4277 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -761,7 +761,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ - strlcpy(port_cfg->sym_name.symname, model, + 
strscpy(port_cfg->sym_name.symname, model, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, BFA_SYMNAME_MAXLEN); @@ -822,7 +822,7 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric) bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model); /* Model name/number */ - strlcpy(port_cfg->node_sym_name.symname, model, + strscpy(port_cfg->node_sym_name.symname, model, BFA_SYMNAME_MAXLEN); strlcat(port_cfg->node_sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR, diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index b12afcc4b189..008afd817087 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version); - strlcpy(hba_attr->driver_version, (char *)driver_info->version, + strscpy(hba_attr->driver_version, (char *)driver_info->version, sizeof(hba_attr->driver_version)); - strlcpy(hba_attr->os_name, driver_info->host_os_name, + strscpy(hba_attr->os_name, driver_info->host_os_name, sizeof(hba_attr->os_name)); /* @@ -2663,13 +2663,13 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr); hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size; - strlcpy(hba_attr->node_sym_name.symname, + strscpy(hba_attr->node_sym_name.symname, port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN); strcpy(hba_attr->vendor_info, "QLogic"); hba_attr->num_ports = cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc)); hba_attr->fabric_name = port->fabric->lps->pr_nwwn; - strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN); + strscpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN); } @@ -2736,19 +2736,19 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, /* * OS device Name */ - strlcpy(port_attr->os_device_name, driver_info->os_device_name, + strscpy(port_attr->os_device_name, driver_info->os_device_name, sizeof(port_attr->os_device_name)); /* * Host name */ - strlcpy(port_attr->host_name, driver_info->host_machine_name, + strscpy(port_attr->host_name, driver_info->host_machine_name, sizeof(port_attr->host_name)); port_attr->node_name = bfa_fcs_lport_get_nwwn(port); port_attr->port_name = bfa_fcs_lport_get_pwwn(port); - strlcpy(port_attr->port_sym_name.symname, + strscpy(port_attr->port_sym_name.symname, bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN); bfa_fcs_lport_get_attr(port, &lport_attr); port_attr->port_type = cpu_to_be32(lport_attr.port_type); @@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp, rsp_str[gmal_entry->len-1] = 0; /* copy IP Address to fabric */ - strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port), + strscpy(bfa_fcs_lport_get_fabric_ipaddr(port), gmal_entry->ip_addr, BFA_FCS_FABRIC_IPADDR_SZ); break; @@ -4667,7 +4667,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) * to that of the base port. */ - strlcpy(symbl, + strscpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name (bfa_fcs_get_base_port(port->fcs))), sizeof(symbl)); @@ -5194,7 +5194,7 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced) * For Vports, we append the vport's port symbolic name * to that of the base port. 
*/ - strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name + strscpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name (bfa_fcs_get_base_port(port->fcs))), sizeof(symbl)); diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 5740302d83ac..e1ed1424fddb 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -2788,7 +2788,7 @@ void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) { memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); - strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } void diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 4e3cef02f10f..c9745c0b4eee 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -330,7 +330,7 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, lp.eid = event; lp.log_type = BFA_PL_LOG_TYPE_STRING; lp.misc = misc; - strlcpy(lp.log_entry.string_log, log_str, + strscpy(lp.log_entry.string_log, log_str, BFA_PL_STRING_LOG_SZ); lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0'; bfa_plog_add(plog, &lp); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 529b73a83d69..62cb7a864fd5 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -965,19 +965,19 @@ bfad_start_ops(struct bfad_s *bfad) { /* Fill the driver_info info to fcs*/ memset(&driver_info, 0, sizeof(driver_info)); - strlcpy(driver_info.version, BFAD_DRIVER_VERSION, + strscpy(driver_info.version, BFAD_DRIVER_VERSION, sizeof(driver_info.version)); if (host_name) - strlcpy(driver_info.host_machine_name, host_name, + strscpy(driver_info.host_machine_name, host_name, sizeof(driver_info.host_machine_name)); if (os_name) - strlcpy(driver_info.host_os_name, os_name, + strscpy(driver_info.host_os_name, os_name, sizeof(driver_info.host_os_name)); if (os_patch) - strlcpy(driver_info.host_os_patch, os_patch, + strscpy(driver_info.host_os_patch, os_patch, sizeof(driver_info.host_os_patch)); - strlcpy(driver_info.os_device_name, bfad->pci_name, + strscpy(driver_info.os_device_name, bfad->pci_name, sizeof(driver_info.os_device_name)); /* FCS driver info init */ diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index 5a85401e9e2d..e96e4b6df265 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -834,7 +834,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr, char symname[BFA_SYMNAME_MAXLEN]; bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); - strlcpy(symname, port_attr.port_cfg.sym_name.symname, + strscpy(symname, port_attr.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); return sysfs_emit(buf, "%s\n", symname); } diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 79d4f7ee5bcb..520f9152f3bf 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -119,7 +119,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd) /* fill in driver attr info */ strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME); - strlcpy(iocmd->ioc_attr.driver_attr.driver_ver, + strscpy(iocmd->ioc_attr.driver_attr.driver_ver, BFAD_DRIVER_VERSION, BFA_VERSION_LEN); strcpy(iocmd->ioc_attr.driver_attr.fw_ver, iocmd->ioc_attr.adapter_attr.fw_ver); @@ -307,7 +307,7 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd) iocmd->attr.port_type = port_attr.port_type; iocmd->attr.loopback = port_attr.loopback; iocmd->attr.authfail = port_attr.authfail; - 
strlcpy(iocmd->attr.port_symname.symname, + strscpy(iocmd->attr.port_symname.symname, port_attr.port_cfg.sym_name.symname, sizeof(iocmd->attr.port_symname.symname)); diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index c335f7a188d2..a9d3d8562d3c 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -1046,7 +1046,7 @@ bfad_fc_host_init(struct bfad_im_port_s *im_port) /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(host)[7] = 1; - strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, + strscpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); sprintf(fc_host_symbolic_name(host), "%s", symname); diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 46b0bf237be1..a48d24af9ac3 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -711,7 +711,7 @@ static struct net_device *fcoe_if_to_netdev(const char *buffer) char ifname[IFNAMSIZ + 2]; if (buffer) { - strlcpy(ifname, buffer, IFNAMSIZ); + strscpy(ifname, buffer, IFNAMSIZ); cp = ifname + strlen(ifname); while (--cp >= ifname && *cp == '\n') *cp = '\0'; diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index 6fedc3b7d1ab..c4d9ed0d7d75 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -201,25 +201,21 @@ static int fnic_trace_debugfs_open(struct inode *inode, return -ENOMEM; if (*rdata_ptr == fc_trc_flag->fnic_trace) { - fnic_dbg_prt->buffer = vmalloc(array3_size(3, trace_max_pages, + fnic_dbg_prt->buffer = vzalloc(array3_size(3, trace_max_pages, PAGE_SIZE)); if (!fnic_dbg_prt->buffer) { kfree(fnic_dbg_prt); return -ENOMEM; } - memset((void *)fnic_dbg_prt->buffer, 0, - 3 * (trace_max_pages * PAGE_SIZE)); fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt); } else { fnic_dbg_prt->buffer = - vmalloc(array3_size(3, fnic_fc_trace_max_pages, + vzalloc(array3_size(3, fnic_fc_trace_max_pages, PAGE_SIZE)); if (!fnic_dbg_prt->buffer) { kfree(fnic_dbg_prt); return -ENOMEM; } - memset((void *)fnic_dbg_prt->buffer, 0, - 3 * (fnic_fc_trace_max_pages * PAGE_SIZE)); fnic_dbg_prt->buffer_len = fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr); } diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index fb7c52c119df..9e73e9cbbcfc 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -642,7 +642,7 @@ extern void hisi_sas_sata_done(struct sas_task *task, extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); -extern int hisi_sas_remove(struct platform_device *pdev); +extern void hisi_sas_remove(struct platform_device *pdev); extern int hisi_sas_slave_configure(struct scsi_device *sdev); extern int hisi_sas_slave_alloc(struct scsi_device *sdev); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 412431c901a7..8f22ece957bd 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -2560,7 +2560,7 @@ err_out_ha: } EXPORT_SYMBOL_GPL(hisi_sas_probe); -int hisi_sas_remove(struct platform_device *pdev) +void hisi_sas_remove(struct platform_device *pdev) { struct sas_ha_struct *sha = platform_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; @@ -2573,7 +2573,6 @@ int hisi_sas_remove(struct platform_device *pdev) hisi_sas_free(hisi_hba); scsi_host_put(shost); - return 
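/*
 * The hisi_sas conversion above follows the platform-bus move to
 * void-returning remove callbacks: an error returned from .remove() was
 * only logged and the device was unbound anyway, so drivers switch to
 * .remove_new and drop the return value. Minimal pattern (names are
 * illustrative):
 */
static void example_remove(struct platform_device *pdev)
{
	/* tear everything down; there is nothing useful to return */
}

static struct platform_driver example_driver = {
	.probe		= example_probe,	/* defined elsewhere */
	.remove_new	= example_remove,
	.driver		= { .name = "example" },
};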
0; } EXPORT_SYMBOL_GPL(hisi_sas_remove); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 0aa8c9c88535..94fbbceddc2e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1790,11 +1790,6 @@ static int hisi_sas_v1_probe(struct platform_device *pdev) return hisi_sas_probe(pdev, &hisi_sas_v1_hw); } -static int hisi_sas_v1_remove(struct platform_device *pdev) -{ - return hisi_sas_remove(pdev); -} - static const struct of_device_id sas_v1_of_match[] = { { .compatible = "hisilicon,hip05-sas-v1",}, {}, @@ -1810,7 +1805,7 @@ MODULE_DEVICE_TABLE(acpi, sas_v1_acpi_match); static struct platform_driver hisi_sas_v1_driver = { .probe = hisi_sas_v1_probe, - .remove = hisi_sas_v1_remove, + .remove_new = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v1_of_match, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index cd78e4c983aa..87d8e408ccd1 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -3619,11 +3619,6 @@ static int hisi_sas_v2_probe(struct platform_device *pdev) return hisi_sas_probe(pdev, &hisi_sas_v2_hw); } -static int hisi_sas_v2_remove(struct platform_device *pdev) -{ - return hisi_sas_remove(pdev); -} - static const struct of_device_id sas_v2_of_match[] = { { .compatible = "hisilicon,hip06-sas-v2",}, { .compatible = "hisilicon,hip07-sas-v2",}, @@ -3640,7 +3635,7 @@ MODULE_DEVICE_TABLE(acpi, sas_v2_acpi_match); static struct platform_driver hisi_sas_v2_driver = { .probe = hisi_sas_v2_probe, - .remove = hisi_sas_v2_remove, + .remove_new = hisi_sas_remove, .driver = { .name = DRV_NAME, .of_match_table = sas_v2_of_match, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 12d588454f5d..20e1607c6282 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -30,6 +30,7 @@ #define SATA_INITI_D2H_STORE_ADDR_LO 0x60 #define SATA_INITI_D2H_STORE_ADDR_HI 0x64 #define CFG_MAX_TAG 0x68 +#define TRANS_LOCK_ICT_TIME 0X70 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84 #define HGC_SAS_TXFAIL_RETRY_CTRL 0x88 #define HGC_GET_ITV_TIME 0x90 @@ -627,13 +628,15 @@ static void interrupt_enable_v3_hw(struct hisi_hba *hisi_hba) static void init_reg_v3_hw(struct hisi_hba *hisi_hba) { + struct pci_dev *pdev = hisi_hba->pci_dev; int i, j; /* Global registers init */ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, (u32)((1ULL << hisi_hba->queue_count) - 1)); - hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0); hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); + /* time / CLK_AHB = 2.5s / 2ns = 0x4A817C80 */ + hisi_sas_write32(hisi_hba, TRANS_LOCK_ICT_TIME, 0x4A817C80); hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1); hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); @@ -652,6 +655,9 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0); hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1); + if (pdev->revision < 0x30) + hisi_sas_write32(hisi_hba, SAS_AXI_USER3, 0); + interrupt_enable_v3_hw(hisi_hba); for (i = 0; i < hisi_hba->n_phy; i++) { enum sas_linkrate max; @@ -669,7 +675,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max); hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, prog_phy_link_rate); - hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 
0xffc00); hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); @@ -680,13 +685,18 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); - hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32); hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME, 0x30f4240); - /* used for 12G negotiate */ - hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); + /* set value through firmware for 920B and later version */ + if (pdev->revision < 0x30) { + hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32); + hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00); + /* used for 12G negotiate */ + hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); + } + /* get default FFE configuration for BIST */ for (j = 0; j < FFE_CFG_MAX; j++) { u32 val = hisi_sas_phy_read32(hisi_hba, i, @@ -2206,6 +2216,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type); u16 sipc_rx_err_type = le16_to_cpu(record->sipc_rx_err_type); u32 dw3 = le32_to_cpu(complete_hdr->dw3); + u32 dw0 = le32_to_cpu(complete_hdr->dw0); switch (task->task_proto) { case SAS_PROTOCOL_SSP: @@ -2215,8 +2226,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, * but I/O information has been written to the host memory, we examine * response IU. */ - if (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) && - (complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)) + if (!(dw0 & CMPLT_HDR_RSPNS_GOOD_MSK) && + (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)) return false; ts->residual = trans_tx_fail_type; @@ -2232,7 +2243,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - if ((complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) && + if ((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) && (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) { ts->stat = SAS_PROTO_RESPONSE; } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { @@ -2999,6 +3010,7 @@ static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = { HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO), HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI), HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG), + HISI_SAS_DEBUGFS_REG(TRANS_LOCK_ICT_TIME), HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL), HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL), HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME), diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f0bc8bbb3938..198edf03f929 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -441,6 +441,7 @@ struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *sht, int priv shost->cmd_per_lun = sht->cmd_per_lun; shost->no_write_same = sht->no_write_same; shost->host_tagset = sht->host_tagset; + shost->queuecommand_may_block = sht->queuecommand_may_block; if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler) shost->eh_deadline = -1; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index dc670304f181..adcac57aaee6 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -1198,37 +1198,37 @@ static void 
sas_print_parent_topology_bug(struct domain_device *child, sas_route_char(child, child_phy)); } +static bool sas_eeds_valid(struct domain_device *parent, + struct domain_device *child) +{ + struct sas_discovery *disc = &parent->port->disc; + + return (SAS_ADDR(disc->eeds_a) == SAS_ADDR(parent->sas_addr) || + SAS_ADDR(disc->eeds_a) == SAS_ADDR(child->sas_addr)) && + (SAS_ADDR(disc->eeds_b) == SAS_ADDR(parent->sas_addr) || + SAS_ADDR(disc->eeds_b) == SAS_ADDR(child->sas_addr)); +} + static int sas_check_eeds(struct domain_device *child, - struct ex_phy *parent_phy, - struct ex_phy *child_phy) + struct ex_phy *parent_phy, + struct ex_phy *child_phy) { int res = 0; struct domain_device *parent = child->parent; + struct sas_discovery *disc = &parent->port->disc; - if (SAS_ADDR(parent->port->disc.fanout_sas_addr) != 0) { + if (SAS_ADDR(disc->fanout_sas_addr) != 0) { res = -ENODEV; pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n", SAS_ADDR(parent->sas_addr), parent_phy->phy_id, SAS_ADDR(child->sas_addr), child_phy->phy_id, - SAS_ADDR(parent->port->disc.fanout_sas_addr)); - } else if (SAS_ADDR(parent->port->disc.eeds_a) == 0) { - memcpy(parent->port->disc.eeds_a, parent->sas_addr, - SAS_ADDR_SIZE); - memcpy(parent->port->disc.eeds_b, child->sas_addr, - SAS_ADDR_SIZE); - } else if (((SAS_ADDR(parent->port->disc.eeds_a) == - SAS_ADDR(parent->sas_addr)) || - (SAS_ADDR(parent->port->disc.eeds_a) == - SAS_ADDR(child->sas_addr))) - && - ((SAS_ADDR(parent->port->disc.eeds_b) == - SAS_ADDR(parent->sas_addr)) || - (SAS_ADDR(parent->port->disc.eeds_b) == - SAS_ADDR(child->sas_addr)))) - ; - else { + SAS_ADDR(disc->fanout_sas_addr)); + } else if (SAS_ADDR(disc->eeds_a) == 0) { + memcpy(disc->eeds_a, parent->sas_addr, SAS_ADDR_SIZE); + memcpy(disc->eeds_b, child->sas_addr, SAS_ADDR_SIZE); + } else if (!sas_eeds_valid(parent, child)) { res = -ENODEV; pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n", SAS_ADDR(parent->sas_addr), @@ -1240,11 +1240,56 @@ static int sas_check_eeds(struct domain_device *child, return res; } -/* Here we spill over 80 columns. It is intentional. 
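The condition that sas_check_eeds() used to open-code, and that the new sas_eeds_valid() helper above expresses, boils down to: both recorded edge-expander-device-set addresses must match one end of the parent/child link being checked. As a bare expression (variable names are illustrative):

	valid = (eeds_a == parent_addr || eeds_a == child_addr) &&
		(eeds_b == parent_addr || eeds_b == child_addr);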
- */ -static int sas_check_parent_topology(struct domain_device *child) +static int sas_check_edge_expander_topo(struct domain_device *child, + struct ex_phy *parent_phy) +{ + struct expander_device *child_ex = &child->ex_dev; + struct expander_device *parent_ex = &child->parent->ex_dev; + struct ex_phy *child_phy; + + child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; + + if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { + if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || + child_phy->routing_attr != TABLE_ROUTING) + goto error; + } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { + if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) + return sas_check_eeds(child, parent_phy, child_phy); + else if (child_phy->routing_attr != TABLE_ROUTING) + goto error; + } else if (parent_phy->routing_attr == TABLE_ROUTING) { + if (child_phy->routing_attr != SUBTRACTIVE_ROUTING && + (child_phy->routing_attr != TABLE_ROUTING || + !child_ex->t2t_supp || !parent_ex->t2t_supp)) + goto error; + } + + return 0; +error: + sas_print_parent_topology_bug(child, parent_phy, child_phy); + return -ENODEV; +} + +static int sas_check_fanout_expander_topo(struct domain_device *child, + struct ex_phy *parent_phy) { struct expander_device *child_ex = &child->ex_dev; + struct ex_phy *child_phy; + + child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; + + if (parent_phy->routing_attr == TABLE_ROUTING && + child_phy->routing_attr == SUBTRACTIVE_ROUTING) + return 0; + + sas_print_parent_topology_bug(child, parent_phy, child_phy); + + return -ENODEV; +} + +static int sas_check_parent_topology(struct domain_device *child) +{ struct expander_device *parent_ex; int i; int res = 0; @@ -1259,7 +1304,6 @@ static int sas_check_parent_topology(struct domain_device *child) for (i = 0; i < parent_ex->num_phys; i++) { struct ex_phy *parent_phy = &parent_ex->ex_phy[i]; - struct ex_phy *child_phy; if (parent_phy->phy_state == PHY_VACANT || parent_phy->phy_state == PHY_NOT_PRESENT) @@ -1268,40 +1312,14 @@ static int sas_check_parent_topology(struct domain_device *child) if (!sas_phy_match_dev_addr(child, parent_phy)) continue; - child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; - switch (child->parent->dev_type) { case SAS_EDGE_EXPANDER_DEVICE: - if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { - if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || - child_phy->routing_attr != TABLE_ROUTING) { - sas_print_parent_topology_bug(child, parent_phy, child_phy); - res = -ENODEV; - } - } else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) { - if (child_phy->routing_attr == SUBTRACTIVE_ROUTING) { - res = sas_check_eeds(child, parent_phy, child_phy); - } else if (child_phy->routing_attr != TABLE_ROUTING) { - sas_print_parent_topology_bug(child, parent_phy, child_phy); - res = -ENODEV; - } - } else if (parent_phy->routing_attr == TABLE_ROUTING) { - if (child_phy->routing_attr == SUBTRACTIVE_ROUTING || - (child_phy->routing_attr == TABLE_ROUTING && - child_ex->t2t_supp && parent_ex->t2t_supp)) { - /* All good */; - } else { - sas_print_parent_topology_bug(child, parent_phy, child_phy); - res = -ENODEV; - } - } + if (sas_check_edge_expander_topo(child, parent_phy)) + res = -ENODEV; break; case SAS_FANOUT_EXPANDER_DEVICE: - if (parent_phy->routing_attr != TABLE_ROUTING || - child_phy->routing_attr != SUBTRACTIVE_ROUTING) { - sas_print_parent_topology_bug(child, parent_phy, child_phy); + if (sas_check_fanout_expander_topo(child, parent_phy)) res = -ENODEV; - } break; default: break; diff --git 
a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 5e3a93d13a91..9a8963684369 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -429,6 +429,15 @@ struct lpfc_cgn_param { /* Max number of days of congestion data */ #define LPFC_MAX_CGN_DAYS 10 +struct lpfc_cgn_ts { + uint8_t month; + uint8_t day; + uint8_t year; + uint8_t hour; + uint8_t minute; + uint8_t second; +}; + /* Format of congestion buffer info * This structure defines memory thats allocated and registered with * the HBA firmware. When adding or removing fields from this structure @@ -442,6 +451,7 @@ struct lpfc_cgn_info { #define LPFC_CGN_INFO_V1 1 #define LPFC_CGN_INFO_V2 2 #define LPFC_CGN_INFO_V3 3 +#define LPFC_CGN_INFO_V4 4 uint8_t cgn_info_mode; /* 0=off 1=managed 2=monitor only */ uint8_t cgn_info_detect; uint8_t cgn_info_action; @@ -450,12 +460,7 @@ struct lpfc_cgn_info { uint8_t cgn_info_level2; /* Start Time */ - uint8_t cgn_info_month; - uint8_t cgn_info_day; - uint8_t cgn_info_year; - uint8_t cgn_info_hour; - uint8_t cgn_info_minute; - uint8_t cgn_info_second; + struct lpfc_cgn_ts base_time; /* minute / hours / daily indices */ uint8_t cgn_index_minute; @@ -496,45 +501,17 @@ struct lpfc_cgn_info { uint8_t cgn_stat_npm; /* Notifications per minute */ /* Start Time */ - uint8_t cgn_stat_month; - uint8_t cgn_stat_day; - uint8_t cgn_stat_year; - uint8_t cgn_stat_hour; - uint8_t cgn_stat_minute; - uint8_t cgn_pad2[2]; + struct lpfc_cgn_ts stat_start; /* Base time */ + uint8_t cgn_pad2; __le32 cgn_notification; __le32 cgn_peer_notification; __le32 link_integ_notification; __le32 delivery_notification; - - uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */ - uint8_t cgn_stat_cgn_day; - uint8_t cgn_stat_cgn_year; - uint8_t cgn_stat_cgn_hour; - uint8_t cgn_stat_cgn_min; - uint8_t cgn_stat_cgn_sec; - - uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */ - uint8_t cgn_stat_peer_day; - uint8_t cgn_stat_peer_year; - uint8_t cgn_stat_peer_hour; - uint8_t cgn_stat_peer_min; - uint8_t cgn_stat_peer_sec; - - uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */ - uint8_t cgn_stat_lnk_day; - uint8_t cgn_stat_lnk_year; - uint8_t cgn_stat_lnk_hour; - uint8_t cgn_stat_lnk_min; - uint8_t cgn_stat_lnk_sec; - - uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */ - uint8_t cgn_stat_del_day; - uint8_t cgn_stat_del_year; - uint8_t cgn_stat_del_hour; - uint8_t cgn_stat_del_min; - uint8_t cgn_stat_del_sec; + struct lpfc_cgn_ts stat_fpin; /* Last congestion notification FPIN */ + struct lpfc_cgn_ts stat_peer; /* Last peer congestion FPIN */ + struct lpfc_cgn_ts stat_lnk; /* Last link integrity FPIN */ + struct lpfc_cgn_ts stat_delivery; /* Last delivery notification FPIN */ ); __le32 cgn_info_crc; @@ -932,8 +909,6 @@ struct lpfc_hba { void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, struct lpfc_iocbq *); int (*lpfc_hba_down_post)(struct lpfc_hba *phba); - void (*lpfc_scsi_cmd_iocb_cmpl) - (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); /* MBOX interface function jump table entries */ int (*lpfc_sli_issue_mbox) @@ -1045,8 +1020,6 @@ struct lpfc_hba { * capability */ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ -#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */ -#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */ #define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */ #define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */ #define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT 
mbox */ @@ -1529,6 +1502,7 @@ struct lpfc_hba { uint64_t cmf_last_sync_bw; #define LPFC_CMF_BLK_SIZE 512 struct hrtimer cmf_timer; + struct hrtimer cmf_stats_timer; /* 1 minute stats timer */ atomic_t cmf_bw_wait; atomic_t cmf_busy; atomic_t cmf_stop_io; /* To block request and stop IO's */ @@ -1576,12 +1550,11 @@ struct lpfc_hba { atomic_t cgn_sync_alarm_cnt; /* Total alarm events for SYNC wqe */ atomic_t cgn_driver_evt_cnt; /* Total driver cgn events for fmw */ atomic_t cgn_latency_evt_cnt; - struct timespec64 cgn_daily_ts; atomic64_t cgn_latency_evt; /* Avg latency per minute */ unsigned long cgn_evt_timestamp; #define LPFC_CGN_TIMER_TO_MIN 60000 /* ms in a minute */ uint32_t cgn_evt_minute; -#define LPFC_SEC_MIN 60 +#define LPFC_SEC_MIN 60UL #define LPFC_MIN_HOUR 60 #define LPFC_HOUR_DAY 24 #define LPFC_MIN_DAY (LPFC_MIN_HOUR * LPFC_HOUR_DAY) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 3863a5341782..21c7ecd3ede5 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5858,8 +5858,8 @@ int lpfc_fabric_cgn_frequency = 100; /* 100 ms default */ module_param(lpfc_fabric_cgn_frequency, int, 0444); MODULE_PARM_DESC(lpfc_fabric_cgn_frequency, "Congestion signaling fabric freq"); -int lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ -module_param(lpfc_acqe_cgn_frequency, int, 0444); +unsigned char lpfc_acqe_cgn_frequency = 10; /* 10 sec default */ +module_param(lpfc_acqe_cgn_frequency, byte, 0444); MODULE_PARM_DESC(lpfc_acqe_cgn_frequency, "Congestion signaling ACQE freq"); int lpfc_use_cgn_signal = 1; /* 0 - only use FPINs, 1 - Use signals if avail */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b833b983e69d..d4e46a08f94d 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -134,7 +134,6 @@ void lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp); void lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb); -int lpfc_nlp_not_used(struct lpfc_nodelist *ndlp); struct lpfc_nodelist *lpfc_setup_disc_node(struct lpfc_vport *, uint32_t); void lpfc_disc_list_loopmap(struct lpfc_vport *); void lpfc_disc_start(struct lpfc_vport *); @@ -248,6 +247,7 @@ irqreturn_t lpfc_sli_sp_intr_handler(int, void *); irqreturn_t lpfc_sli_fp_intr_handler(int, void *); irqreturn_t lpfc_sli4_intr_handler(int, void *); irqreturn_t lpfc_sli4_hba_intr_handler(int, void *); +irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id); int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap, uint32_t len); @@ -664,7 +664,7 @@ extern int lpfc_enable_nvmet_cnt; extern unsigned long long lpfc_enable_nvmet[]; extern int lpfc_no_hba_reset_cnt; extern unsigned long lpfc_no_hba_reset[]; -extern int lpfc_acqe_cgn_frequency; +extern unsigned char lpfc_acqe_cgn_frequency; extern int lpfc_fabric_cgn_frequency; extern int lpfc_use_cgn_signal; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index f3bdcebe67f5..474834f313a7 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -287,7 +287,7 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) u32 ulp_status = get_job_ulpstatus(phba, ctiocbq); u32 ulp_word4 = get_job_word4(phba, ctiocbq); u32 did; - u32 mi_cmd; + u16 mi_cmd; did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp); if (ulp_status) { @@ -311,7 +311,7 @@ lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq) ct_req = 
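/*
 * The CmdRsp conversions in the lpfc CT hunks that follow all apply one
 * pattern: CmdRsp is a big-endian wire field (__be16), so it is
 * byte-swapped once at the point of use and compared against host-order
 * constants, rather than swapping each constant. This also keeps the
 * types honest for sparse (names below are illustrative):
 */
__be16 wire_cmd = ct_rsp->CommandResponse.bits.CmdRsp;

if (be16_to_cpu(wire_cmd) == SLI_CT_RESPONSE_FS_ACC)
	handle_accept();	/* hypothetical handler */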
(struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt; - mi_cmd = ct_req->CommandResponse.bits.CmdRsp; + mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "6442 : MI Cmd : x%x Not Supported\n", mi_cmd); lpfc_ct_reject_event(ndlp, ct_req, @@ -486,7 +486,7 @@ lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist) } static struct lpfc_dmabuf * -lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl, +lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl, uint32_t size, int *entries) { struct lpfc_dmabuf *mlist = NULL; @@ -507,8 +507,8 @@ lpfc_alloc_ct_rsp(struct lpfc_hba *phba, int cmdcode, struct ulp_bde64 *bpl, INIT_LIST_HEAD(&mp->list); - if (cmdcode == be16_to_cpu(SLI_CTNS_GID_FT) || - cmdcode == be16_to_cpu(SLI_CTNS_GFF_ID)) + if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT || + be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID) mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys)); else mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys)); @@ -671,7 +671,7 @@ lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; struct lpfc_dmabuf *outmp; int cnt = 0, status; - int cmdcode = ((struct lpfc_sli_ct_request *) inmp->virt)-> + __be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)-> CommandResponse.bits.CmdRsp; bpl++; /* Skip past ct request */ @@ -1043,8 +1043,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); - } else if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { @@ -1052,14 +1052,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, LOG_DISCOVERY, "0269 No NameServer Entries " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT no entry cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } else { @@ -1067,14 +1067,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, LOG_DISCOVERY, "0240 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } @@ -1085,14 +1085,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0241 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT rsp err2 cmd:x%x rsn:x%x 
exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } @@ -1247,8 +1247,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Good status, continue checking */ CTreq = (struct lpfc_sli_ct_request *)inp->virt; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4105 NameServer Rsp Data: x%x x%x " "x%x x%x sz x%x\n", @@ -1262,8 +1262,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); - } else if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { @@ -1271,7 +1271,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport, KERN_INFO, LOG_DISCOVERY, "4106 No NameServer Entries " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); @@ -1279,7 +1279,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT no entry cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } else { @@ -1287,7 +1287,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport, KERN_INFO, LOG_DISCOVERY, "4107 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); @@ -1295,7 +1295,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } @@ -1304,7 +1304,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4109 NameServer Rsp Error " "Data: x%x x%x x%x x%x\n", - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); @@ -1312,7 +1312,7 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x", - (uint32_t)CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } @@ -1391,8 +1391,8 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ", (fbits & FC4_FEATURE_TARGET) ? 
"Target" : " "); - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) { + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) { if ((fbits & FC4_FEATURE_INIT) && !(fbits & FC4_FEATURE_TARGET)) { lpfc_printf_vlog(vport, KERN_INFO, @@ -1631,7 +1631,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, "0209 CT Request completes, latt %d, " "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n", latt, ulp_status, - CTrsp->CommandResponse.bits.CmdRsp, + be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, @@ -1681,8 +1681,8 @@ lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RFT_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1702,8 +1702,8 @@ lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RNN_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1723,8 +1723,8 @@ lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RSPN_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1744,8 +1744,8 @@ lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RSNN_NN; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -1777,8 +1777,8 @@ lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, outp = cmdiocb->rsp_dmabuf; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; - if (CTrsp->CommandResponse.bits.CmdRsp == - be16_to_cpu(SLI_CT_RESPONSE_FS_ACC)) + if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == + SLI_CT_RESPONSE_FS_ACC) vport->ct_flags |= FC_CT_RFF_ID; } lpfc_cmpl_ct(phba, cmdiocb, rspiocb); @@ -2217,8 +2217,8 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTcmd = inp->virt; struct lpfc_sli_ct_request *CTrsp = outp->virt; - uint16_t fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; - uint16_t fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; + __be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; + __be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; struct lpfc_nodelist *ndlp, *free_ndlp = NULL; uint32_t latt, cmd, err; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); @@ -2278,7 +2278,7 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for a CT LS_RJT response */ cmd = be16_to_cpu(fdmi_cmd); - if (fdmi_rsp == 
cpu_to_be16(SLI_CT_RESPONSE_FS_RJT)) { + if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { /* FDMI rsp failed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "0220 FDMI cmd failed FS_RJT Data: x%x", cmd); @@ -3110,7 +3110,7 @@ lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr) } /* RHBA attribute jump table */ -int (*lpfc_fdmi_hba_action[]) +static int (*lpfc_fdmi_hba_action[]) (struct lpfc_vport *vport, void *attrbuf) = { /* Action routine Mask bit Attribute type */ lpfc_fdmi_hba_attr_wwnn, /* bit0 RHBA_NODENAME */ @@ -3134,7 +3134,7 @@ int (*lpfc_fdmi_hba_action[]) }; /* RPA / RPRT attribute jump table */ -int (*lpfc_fdmi_port_action[]) +static int (*lpfc_fdmi_port_action[]) (struct lpfc_vport *vport, void *attrbuf) = { /* Action routine Mask bit Attribute type */ lpfc_fdmi_port_attr_fc4type, /* bit0 RPRT_SUPPORT_FC4_TYPES */ @@ -3570,7 +3570,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *ctcmd = inp->virt; struct lpfc_sli_ct_request *ctrsp = outp->virt; - u16 rsp = ctrsp->CommandResponse.bits.CmdRsp; + __be16 rsp = ctrsp->CommandResponse.bits.CmdRsp; struct app_id_object *app; struct lpfc_nodelist *ndlp = cmdiocb->ndlp; u32 cmd, hash, bucket; @@ -3587,7 +3587,7 @@ lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto free_res; } /* Check for a CT LS_RJT response */ - if (rsp == be16_to_cpu(SLI_CT_RESPONSE_FS_RJT)) { + if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) { if (cmd != SLI_CTAS_DALLAPP_ID) lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY, "3306 VMID FS_RJT Data: x%x x%x x%x\n", @@ -3748,7 +3748,7 @@ lpfc_vmid_cmd(struct lpfc_vport *vport, rap->obj[0].entity_id_len = vmid->vmid_len; memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); size = RAPP_IDENT_OFFSET + - sizeof(struct lpfc_vmid_rapp_ident_list); + struct_size(rap, obj, be32_to_cpu(rap->no_of_objects)); retry = 1; break; @@ -3767,7 +3767,7 @@ lpfc_vmid_cmd(struct lpfc_vport *vport, dap->obj[0].entity_id_len = vmid->vmid_len; memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len); size = DAPP_IDENT_OFFSET + - sizeof(struct lpfc_vmid_dapp_ident_list); + struct_size(dap, obj, be32_to_cpu(dap->no_of_objects)); write_lock(&vport->vmid_lock); vmid->flag &= ~LPFC_VMID_REGISTERED; write_unlock(&vport->vmid_lock); diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index bdf34af4ef36..7f9b221e7c34 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -2259,11 +2259,15 @@ lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file) goto out; } spin_unlock_irq(&phba->hbalock); - debug = kmalloc(sizeof(*debug), GFP_KERNEL); + + if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE, + phba->cfg_ras_fwlog_buffsize, &size)) + goto out; + + debug = kzalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; - size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize; debug->buffer = vmalloc(size); if (!debug->buffer) goto free_debug; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 6a15f879e517..2bad9954c355 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -5205,14 +5205,9 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) * * This routine is the completion callback function to the Logout (LOGO) * Accept (ACC) Response ELS command. 
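The lpfc_vmid_cmd() and lpfc_debugfs_ras_log_open() hunks above share one theme: buffer sizes are now derived with the kernel's overflow-aware helpers instead of open-coded multiplication. A minimal sketch of both idioms, using a hypothetical obj_list type rather than the real lpfc structures:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct entry {
            u32 id;
    };

    struct obj_list {
            u32 count;
            struct entry obj[];     /* flexible array, was obj[1] in older code */
    };

    static size_t ras_buf_bytes(size_t unit, size_t nunits)
    {
            size_t bytes;

            /* refuse a size that would wrap, as the debugfs hunk now does */
            if (check_mul_overflow(unit, nunits, &bytes))
                    return 0;
            return bytes;
    }

    static struct obj_list *obj_list_alloc(u32 n)
    {
            struct obj_list *p;

            /* struct_size() is sizeof(*p) + n * sizeof(p->obj[0]), saturating
             * to SIZE_MAX on overflow so the allocation fails cleanly instead
             * of coming back undersized.
             */
            p = kzalloc(struct_size(p, obj, n), GFP_KERNEL);
            if (p)
                    p->count = n;
            return p;
    }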
This routine is invoked to indicate - * the completion of the LOGO process. It invokes the lpfc_nlp_not_used() to - * release the ndlp if it has the last reference remaining (reference count - * is 1). If succeeded (meaning ndlp released), it sets the iocb ndlp - * field to NULL to inform the following lpfc_els_free_iocb() routine no - * ndlp reference count needs to be decremented. Otherwise, the ndlp - * reference use-count shall be decremented by the lpfc_els_free_iocb() - * routine. Finally, the lpfc_els_free_iocb() is invoked to release the - * IOCB data structure. + * the completion of the LOGO process. If the node has transitioned to NPR, + * this routine unregisters the RPI if it is still registered. The + * lpfc_els_free_iocb() is invoked to release the IOCB data structure. **/ static void lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, @@ -5253,19 +5248,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (ndlp->nlp_last_elscmd == ELS_CMD_PLOGI)) goto out; - /* NPort Recovery mode or node is just allocated */ - if (!lpfc_nlp_not_used(ndlp)) { - /* A LOGO is completing and the node is in NPR state. - * Just unregister the RPI because the node is still - * required. - */ + if (ndlp->nlp_flag & NLP_RPI_REGISTERED) lpfc_unreg_rpi(vport, ndlp); - } else { - /* Indicate the node has already released, should - * not reference to it from within lpfc_els_free_iocb. - */ - cmdiocb->ndlp = NULL; - } + } out: /* @@ -5285,9 +5270,8 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * RPI (Remote Port Index) mailbox command to the @phba. It simply releases * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and * decrements the ndlp reference count held for this completion callback - * function. After that, it invokes the lpfc_nlp_not_used() to check - * whether there is only one reference left on the ndlp. If so, it will - * perform one more decrement and trigger the release of the ndlp. + * function. After that, it invokes lpfc_drop_node() to check + * whether it is appropriate to release the node. **/ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) @@ -5468,9 +5452,19 @@ out: ndlp->nlp_flag &= ~NLP_RELEASE_RPI; spin_unlock_irq(&ndlp->lock); } + lpfc_drop_node(vport, ndlp); + } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + /* Drop ndlp if there is no planned or outstanding + * issued PRLI. + * + * In cases when the ndlp is acting as both an initiator + * and target function, let our issued PRLI determine + * the final ndlp kref drop. + */ + lpfc_drop_node(vport, ndlp); } - - lpfc_drop_node(vport, ndlp); } /* Release the originating I/O reference. */ diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 5ba3a9ad9501..499849b58ee4 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -458,11 +458,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) if (ndlp->nlp_type & NLP_FABRIC) { spin_lock_irqsave(&ndlp->lock, iflags); - /* In massive vport configuration settings or when the FLOGI - * completes with a sequence timeout, it's possible - * dev_loss_tmo fired during node recovery. The driver has to - * account for this race to allow for recovery and keep - * the reference counting correct. + /* The driver has to account for a race where a fabric + * node is still in recovery when dev_loss_tmo expires.
When this + * happens, the driver has to allow node recovery. */ switch (ndlp->nlp_DID) { case Fabric_DID: @@ -489,6 +487,17 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) recovering = true; break; + default: + /* Ensure the nlp_DID at least has the correct prefix. + * The fabric domain controller's last three nibbles + * vary so we handle it in the default case. + */ + if (ndlp->nlp_DID & Fabric_DID_MASK) { + if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) + recovering = true; + } + break; } spin_unlock_irqrestore(&ndlp->lock, iflags); @@ -556,6 +565,9 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } + spin_lock_irqsave(&ndlp->lock, iflags); + ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; + spin_unlock_irqrestore(&ndlp->lock, iflags); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM event. @@ -565,9 +577,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) return fcf_inuse; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); @@ -4333,13 +4342,14 @@ out: /* If the node is not registered with the scsi or nvme * transport, remove the fabric node. The failed reg_login - * is terminal. + * is terminal and forces the removal of the last node + * reference. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { spin_lock_irq(&ndlp->lock); ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(&ndlp->lock); - lpfc_nlp_not_used(ndlp); + lpfc_nlp_put(ndlp); } if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { @@ -4497,14 +4507,6 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (vport->load_flag & FC_UNLOADING) return; - /* - * Disassociate any older association between this ndlp and rport - */ - if (ndlp->rport) { - rdata = ndlp->rport->dd_data; - rdata->pnode = NULL; - } - ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); if (!rport) { dev_printk(KERN_WARNING, &phba->pcidev->dev, @@ -4835,7 +4837,7 @@ lpfc_nlp_state_name(char *buffer, size_t size, int state) }; if (state < NLP_STE_MAX_STATE && states[state]) - strlcpy(buffer, states[state], size); + strscpy(buffer, states[state], size); else snprintf(buffer, size, "unknown (%d)", state); return buffer; @@ -6704,25 +6706,6 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) return ndlp ? kref_put(&ndlp->kref, lpfc_nlp_release) : 0; } -/* This routine free's the specified nodelist if it is not in use - * by any other discovery thread. This routine returns 1 if the - * ndlp has been freed. A return value of 0 indicates the ndlp is - * not yet been released. - */ -int -lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) -{ - lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node not used: did:x%x flg:x%x refcnt:x%x", - ndlp->nlp_DID, ndlp->nlp_flag, - kref_read(&ndlp->kref)); - - if (kref_read(&ndlp->kref) == 1) - if (lpfc_nlp_put(ndlp)) - return 1; - return 0; -} - /** * lpfc_fcf_inuse - Check if FCF can be unregistered. * @phba: Pointer to hba context object. 
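With lpfc_nlp_not_used() removed above, node teardown rests on plain kref accounting: every path takes its own reference and drops it unconditionally, and the final lpfc_nlp_put() triggers release. A sketch of that ownership model with a simplified node type (not the real lpfc_nodelist):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct node {
            struct kref kref;
            /* discovery state would live here */
    };

    static void node_release(struct kref *kref)
    {
            struct node *n = container_of(kref, struct node, kref);

            kfree(n);       /* runs exactly once, on the final put */
    }

    static struct node *node_get(struct node *n)
    {
            kref_get(&n->kref);
            return n;
    }

    static int node_put(struct node *n)
    {
            /* returns 1 if this put released the object */
            return kref_put(&n->kref, node_release);
    }

The removed helper peeked at kref_read() before deciding whether to put, which can race with a concurrent get; dropping references unconditionally, as lpfc_nlp_put() does, keeps the accounting correct.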
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 19b2d2754f32..663755842e4a 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -86,8 +86,8 @@ union CtRevisionId { union CtCommandResponse { /* Structure is in Big Endian format */ struct { - uint32_t CmdRsp:16; - uint32_t Size:16; + __be16 CmdRsp; + __be16 Size; } bits; uint32_t word; }; @@ -124,7 +124,7 @@ struct lpfc_sli_ct_request { #define LPFC_CT_PREAMBLE 20 /* Size of CTReq + 4 up to here */ union { - uint32_t PortID; + __be32 PortID; struct gid { uint8_t PortType; /* for GID_PT requests */ #define GID_PT_N_PORT 1 @@ -1408,19 +1408,19 @@ struct entity_id_object { }; struct app_id_object { - uint32_t port_id; - uint32_t app_id; + __be32 port_id; + __be32 app_id; struct entity_id_object obj; }; struct lpfc_vmid_rapp_ident_list { - uint32_t no_of_objects; - struct entity_id_object obj[1]; + __be32 no_of_objects; + struct entity_id_object obj[]; }; struct lpfc_vmid_dapp_ident_list { - uint32_t no_of_objects; - struct entity_id_object obj[1]; + __be32 no_of_objects; + struct entity_id_object obj[]; }; #define GALLAPPIA_ID_LAST 0x80 @@ -1512,7 +1512,7 @@ struct lpfc_fdmi_hba_ident { * Registered Port List Format */ struct lpfc_fdmi_reg_port_list { - uint32_t EntryCnt; + __be32 EntryCnt; struct lpfc_fdmi_port_entry pe; } __packed; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 58fa39c403a0..5d4f9f27084d 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -395,9 +395,6 @@ struct lpfc_cqe { #define CQE_STATUS_NEED_BUFF_ENTRY 0xf #define CQE_STATUS_DI_ERROR 0x16 -/* Used when mapping CQE status to IOCB */ -#define LPFC_IOCB_STATUS_MASK 0xf - /* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). 
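The lpfc_hw.h change above types the CT wire fields as __be16/__be32 so sparse can flag byte-order mistakes; the lpfc_ct.c hunks earlier then convert the wire value once and compare against a host-order constant. A small sketch of the pattern (field and constant names are illustrative stand-ins):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define FS_ACC 0x8002   /* host-order constant, standing in for SLI_CT_RESPONSE_FS_ACC */

    struct ct_rsp {
            __be16 cmd_rsp; /* big-endian on the wire, as in union CtCommandResponse */
            __be16 size;
    };

    static bool ct_rsp_accepted(const struct ct_rsp *rsp)
    {
            /* convert the wire field once; never byte-swap the constant side */
            return be16_to_cpu(rsp->cmd_rsp) == FS_ACC;
    }

Comparing "field == be16_to_cpu(CONST)" happens to work at the value level, but it hides the wire type from the checker; typing the field __be16 and converting it lets endian-checking builds flag any unconverted use.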
*/ #define CQE_HW_STATUS_NO_ERR 0x0 #define CQE_HW_STATUS_UNDERRUN 0x1 @@ -536,9 +533,9 @@ struct sli4_wcqe_xri_aborted { /* completion queue entry structure for rqe completion */ struct lpfc_rcqe { uint32_t word0; -#define lpfc_rcqe_bindex_SHIFT 16 -#define lpfc_rcqe_bindex_MASK 0x0000FFF -#define lpfc_rcqe_bindex_WORD word0 +#define lpfc_rcqe_iv_SHIFT 31 +#define lpfc_rcqe_iv_MASK 0x00000001 +#define lpfc_rcqe_iv_WORD word0 #define lpfc_rcqe_status_SHIFT 8 #define lpfc_rcqe_status_MASK 0x000000FF #define lpfc_rcqe_status_WORD word0 @@ -546,6 +543,7 @@ struct lpfc_rcqe { #define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ #define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ #define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ +#define FC_STATUS_RQ_DMA_FAILURE 0x14 /* DMA failure */ uint32_t word1; #define lpfc_rcqe_fcf_id_v1_SHIFT 0 #define lpfc_rcqe_fcf_id_v1_MASK 0x0000003F @@ -4813,8 +4811,8 @@ struct cmf_sync_wqe { #define cmf_sync_cqid_WORD word11 uint32_t read_bytes; uint32_t word13; -#define cmf_sync_period_SHIFT 16 -#define cmf_sync_period_MASK 0x0000ffff +#define cmf_sync_period_SHIFT 24 +#define cmf_sync_period_MASK 0x000000ff #define cmf_sync_period_WORD word13 uint32_t word14; uint32_t word15; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 867b4c788f08..3221a934066b 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -101,6 +101,7 @@ static struct scsi_transport_template *lpfc_vport_transport_template = NULL; static DEFINE_IDR(lpfc_hba_index); #define LPFC_NVMET_BUF_POST 254 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport); +static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts); /** * lpfc_config_port_prep - Perform lpfc initialization prior to config port @@ -1279,7 +1280,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) /* * lpfc_idle_stat_delay_work - idle_stat tracking * - * This routine tracks per-cq idle_stat and determines polling decisions. + * This routine tracks per-eq idle_stat and determines polling decisions. 
* * Return codes: * None @@ -1290,7 +1291,7 @@ lpfc_idle_stat_delay_work(struct work_struct *work) struct lpfc_hba *phba = container_of(to_delayed_work(work), struct lpfc_hba, idle_stat_delay_work); - struct lpfc_queue *cq; + struct lpfc_queue *eq; struct lpfc_sli4_hdw_queue *hdwq; struct lpfc_idle_stat *idle_stat; u32 i, idle_percent; @@ -1306,10 +1307,10 @@ lpfc_idle_stat_delay_work(struct work_struct *work) for_each_present_cpu(i) { hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; - cq = hdwq->io_cq; + eq = hdwq->hba_eq; - /* Skip if we've already handled this cq's primary CPU */ - if (cq->chann != i) + /* Skip if we've already handled this eq's primary CPU */ + if (eq->chann != i) continue; idle_stat = &phba->sli4_hba.idle_stat[i]; @@ -1333,9 +1334,9 @@ lpfc_idle_stat_delay_work(struct work_struct *work) idle_percent = 100 - idle_percent; if (idle_percent < 15) - cq->poll_mode = LPFC_QUEUE_WORK; + eq->poll_mode = LPFC_QUEUE_WORK; else - cq->poll_mode = LPFC_IRQ_POLL; + eq->poll_mode = LPFC_THREADED_IRQ; idle_stat->prev_idle = wall_idle; idle_stat->prev_wall = wall; @@ -3197,6 +3198,7 @@ lpfc_cmf_stop(struct lpfc_hba *phba) "6221 Stop CMF / Cancel Timer\n"); /* Cancel the CMF timer */ + hrtimer_cancel(&phba->cmf_stats_timer); hrtimer_cancel(&phba->cmf_timer); /* Zero CMF counters */ @@ -3283,7 +3285,10 @@ lpfc_cmf_start(struct lpfc_hba *phba) phba->cmf_timer_cnt = 0; hrtimer_start(&phba->cmf_timer, - ktime_set(0, LPFC_CMF_INTERVAL * 1000000), + ktime_set(0, LPFC_CMF_INTERVAL * NSEC_PER_MSEC), + HRTIMER_MODE_REL); + hrtimer_start(&phba->cmf_stats_timer, + ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC), HRTIMER_MODE_REL); /* Setup for latency check in IO cmpl routines */ ktime_get_real_ts64(&phba->cmf_latency); @@ -4357,6 +4362,7 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) struct lpfc_sli4_hdw_queue *qp; struct lpfc_io_buf *lpfc_cmd; int idx, cnt; + unsigned long iflags; qp = phba->sli4_hba.hdwq; cnt = 0; @@ -4371,12 +4377,13 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf) lpfc_cmd->hdwq_no = idx; lpfc_cmd->hdwq = qp; lpfc_cmd->cur_iocbq.cmd_cmpl = NULL; - spin_lock(&qp->io_buf_list_put_lock); + spin_lock_irqsave(&qp->io_buf_list_put_lock, iflags); list_add_tail(&lpfc_cmd->list, &qp->lpfc_io_buf_list_put); qp->put_io_bufs++; qp->total_io_bufs++; - spin_unlock(&qp->io_buf_list_put_lock); + spin_unlock_irqrestore(&qp->io_buf_list_put_lock, + iflags); } } return cnt; @@ -5593,81 +5600,74 @@ void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) { struct lpfc_cgn_info *cp; - struct tm broken; - struct timespec64 cur_time; - u32 cnt; u32 value; /* Make sure we have a congestion info buffer */ if (!phba->cgn_i) return; cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; - ktime_get_real_ts64(&cur_time); - time64_to_tm(cur_time.tv_sec, 0, &broken); /* Update congestion statistics */ switch (dtag) { case ELS_DTAG_LNK_INTEGRITY: - cnt = le32_to_cpu(cp->link_integ_notification); - cnt++; - cp->link_integ_notification = cpu_to_le32(cnt); - - cp->cgn_stat_lnk_month = broken.tm_mon + 1; - cp->cgn_stat_lnk_day = broken.tm_mday; - cp->cgn_stat_lnk_year = broken.tm_year - 100; - cp->cgn_stat_lnk_hour = broken.tm_hour; - cp->cgn_stat_lnk_min = broken.tm_min; - cp->cgn_stat_lnk_sec = broken.tm_sec; + le32_add_cpu(&cp->link_integ_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_lnk); break; case ELS_DTAG_DELIVERY: - cnt = le32_to_cpu(cp->delivery_notification); - cnt++; - cp->delivery_notification = cpu_to_le32(cnt); - - 
cp->cgn_stat_del_month = broken.tm_mon + 1; - cp->cgn_stat_del_day = broken.tm_mday; - cp->cgn_stat_del_year = broken.tm_year - 100; - cp->cgn_stat_del_hour = broken.tm_hour; - cp->cgn_stat_del_min = broken.tm_min; - cp->cgn_stat_del_sec = broken.tm_sec; + le32_add_cpu(&cp->delivery_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_delivery); break; case ELS_DTAG_PEER_CONGEST: - cnt = le32_to_cpu(cp->cgn_peer_notification); - cnt++; - cp->cgn_peer_notification = cpu_to_le32(cnt); - - cp->cgn_stat_peer_month = broken.tm_mon + 1; - cp->cgn_stat_peer_day = broken.tm_mday; - cp->cgn_stat_peer_year = broken.tm_year - 100; - cp->cgn_stat_peer_hour = broken.tm_hour; - cp->cgn_stat_peer_min = broken.tm_min; - cp->cgn_stat_peer_sec = broken.tm_sec; + le32_add_cpu(&cp->cgn_peer_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_peer); break; case ELS_DTAG_CONGESTION: - cnt = le32_to_cpu(cp->cgn_notification); - cnt++; - cp->cgn_notification = cpu_to_le32(cnt); - - cp->cgn_stat_cgn_month = broken.tm_mon + 1; - cp->cgn_stat_cgn_day = broken.tm_mday; - cp->cgn_stat_cgn_year = broken.tm_year - 100; - cp->cgn_stat_cgn_hour = broken.tm_hour; - cp->cgn_stat_cgn_min = broken.tm_min; - cp->cgn_stat_cgn_sec = broken.tm_sec; + le32_add_cpu(&cp->cgn_notification, 1); + lpfc_cgn_update_tstamp(phba, &cp->stat_fpin); } if (phba->cgn_fpin_frequency && phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency; cp->cgn_stat_npm = value; } + value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(value); } /** - * lpfc_cgn_save_evt_cnt - Save data into registered congestion buffer + * lpfc_cgn_update_tstamp - Update cmf timestamp * @phba: pointer to lpfc hba data structure. + * @ts: structure to write the timestamp to. + */ +void +lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts) +{ + struct timespec64 cur_time; + struct tm tm_val; + + ktime_get_real_ts64(&cur_time); + time64_to_tm(cur_time.tv_sec, 0, &tm_val); + + ts->month = tm_val.tm_mon + 1; + ts->day = tm_val.tm_mday; + ts->year = tm_val.tm_year - 100; + ts->hour = tm_val.tm_hour; + ts->minute = tm_val.tm_min; + ts->second = tm_val.tm_sec; + + lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, + "2646 Updated CMF timestamp : " + "%u/%u/%u %u:%u:%u\n", + ts->day, ts->month, + ts->year, ts->hour, + ts->minute, ts->second); +} + +/** + * lpfc_cmf_stats_timer - Save data into registered congestion buffer + * @timer: Timer cookie to access lpfc private data * * Save the congestion event data every minute. * On the hour collapse all the minute data into hour data. Every day @@ -5675,12 +5675,11 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag) * and fabric congestion event counters that will be saved out * to the registered congestion buffer every minute.
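The hunks around here move the one-minute statistics work out of lpfc_cmf_timer and into a dedicated self-rearming hrtimer, lpfc_cmf_stats_timer. The self-rearming shape, as a sketch with a hypothetical stats context rather than the lpfc_hba:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/types.h>

    #define STATS_PERIOD_SEC 60     /* one-minute cadence, as with LPFC_SEC_MIN above */

    struct stats_ctx {
            struct hrtimer timer;
            u64 minutes;            /* stand-in for the folded counters */
    };

    static enum hrtimer_restart stats_timer_fn(struct hrtimer *t)
    {
            struct stats_ctx *c = container_of(t, struct stats_ctx, timer);

            c->minutes++;           /* roll up this minute's statistics here */

            /* push the expiry forward from now and keep the timer armed */
            hrtimer_forward_now(t, ktime_set(STATS_PERIOD_SEC, 0));
            return HRTIMER_RESTART;
    }

    static void stats_start(struct stats_ctx *c)
    {
            hrtimer_init(&c->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            c->timer.function = stats_timer_fn;
            hrtimer_start(&c->timer, ktime_set(STATS_PERIOD_SEC, 0),
                          HRTIMER_MODE_REL);
    }

Returning HRTIMER_NORESTART instead, as lpfc_cmf_stats_timer does when no congestion buffer is registered, lets the callback cancel itself.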
*/ -static void -lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) +static enum hrtimer_restart +lpfc_cmf_stats_timer(struct hrtimer *timer) { + struct lpfc_hba *phba; struct lpfc_cgn_info *cp; - struct tm broken; - struct timespec64 cur_time; uint32_t i, index; uint16_t value, mvalue; uint64_t bps; @@ -5691,21 +5690,18 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) __le32 *lptr; __le16 *mptr; + phba = container_of(timer, struct lpfc_hba, cmf_stats_timer); /* Make sure we have a congestion info buffer */ if (!phba->cgn_i) - return; + return HRTIMER_NORESTART; cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; - if (time_before(jiffies, phba->cgn_evt_timestamp)) - return; phba->cgn_evt_timestamp = jiffies + msecs_to_jiffies(LPFC_CGN_TIMER_TO_MIN); phba->cgn_evt_minute++; /* We should get to this point in the routine on 1 minute intervals */ - - ktime_get_real_ts64(&cur_time); - time64_to_tm(cur_time.tv_sec, 0, &broken); + lpfc_cgn_update_tstamp(phba, &cp->base_time); if (phba->cgn_fpin_frequency && phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) { @@ -5858,31 +5854,6 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) index = 0; } - /* Anytime we overwrite daily index 0, after we wrap, - * we will be overwriting the oldest day, so we must - * update the congestion data start time for that day. - * That start time should have previously been saved after - * we wrote the last days worth of data. - */ - if ((phba->hba_flag & HBA_CGN_DAY_WRAP) && index == 0) { - time64_to_tm(phba->cgn_daily_ts.tv_sec, 0, &broken); - - cp->cgn_info_month = broken.tm_mon + 1; - cp->cgn_info_day = broken.tm_mday; - cp->cgn_info_year = broken.tm_year - 100; - cp->cgn_info_hour = broken.tm_hour; - cp->cgn_info_minute = broken.tm_min; - cp->cgn_info_second = broken.tm_sec; - - lpfc_printf_log - (phba, KERN_INFO, LOG_CGN_MGMT, - "2646 CGNInfo idx0 Start Time: " - "%d/%d/%d %d:%d:%d\n", - cp->cgn_info_day, cp->cgn_info_month, - cp->cgn_info_year, cp->cgn_info_hour, - cp->cgn_info_minute, cp->cgn_info_second); - } - dvalue = 0; wvalue = 0; lvalue = 0; @@ -5916,15 +5887,6 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) "2420 Congestion Info - daily (%d): " "%d %d %d %d %d\n", index, dvalue, wvalue, lvalue, mvalue, avalue); - - /* We just wrote LPFC_MAX_CGN_DAYS of data, - * so we are wrapped on any data after this. - * Save this as the start time for the next day. - */ - if (index == (LPFC_MAX_CGN_DAYS - 1)) { - phba->hba_flag |= HBA_CGN_DAY_WRAP; - ktime_get_real_ts64(&phba->cgn_daily_ts); - } } /* Use the frequency found in the last rcv'ed FPIN */ @@ -5935,6 +5897,10 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba) lvalue = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(lvalue); + + hrtimer_forward_now(timer, ktime_set(0, LPFC_SEC_MIN * NSEC_PER_SEC)); + + return HRTIMER_RESTART; } /** @@ -6065,13 +6031,6 @@ lpfc_cmf_timer(struct hrtimer *timer) if (ms && ms < LPFC_CMF_INTERVAL) { cnt = div_u64(total, ms); /* bytes per ms */ cnt *= LPFC_CMF_INTERVAL; /* what total should be */ - - /* If the timeout is scheduled to be shorter, - * this value may skew the data, so cap it at mbpi. 
- */ - if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi) - cnt = mbpi; - extra = cnt - total; } lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra); @@ -6141,34 +6100,6 @@ lpfc_cmf_timer(struct hrtimer *timer) } phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */ - /* Each minute save Fabric and Driver congestion information */ - lpfc_cgn_save_evt_cnt(phba); - - phba->hba_flag &= ~HBA_SHORT_CMF; - - /* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the - * minute, adjust our next timer interval, if needed, to ensure a - * 1 minute granularity when we get the next timer interrupt. - */ - if (time_after(jiffies + msecs_to_jiffies(LPFC_CMF_INTERVAL), - phba->cgn_evt_timestamp)) { - timer_interval = jiffies_to_msecs(phba->cgn_evt_timestamp - - jiffies); - if (timer_interval <= 0) - timer_interval = LPFC_CMF_INTERVAL; - else - phba->hba_flag |= HBA_SHORT_CMF; - - /* If we adjust timer_interval, max_bytes_per_interval - * needs to be adjusted as well. - */ - phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate * - timer_interval, 1000); - if (phba->cmf_active_mode == LPFC_CFG_MONITOR) - phba->cmf_max_bytes_per_interval = - phba->cmf_link_byte_count; - } - /* Since total_bytes has already been zeroed, it's okay to unblock * after max_bytes_per_interval is setup. */ @@ -8014,6 +7945,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* CMF congestion timer */ hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); phba->cmf_timer.function = lpfc_cmf_timer; + /* CMF 1 minute stats collection timer */ + hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + phba->cmf_stats_timer.function = lpfc_cmf_stats_timer; /* * Control structure for handling external multi-buffer mailbox @@ -13117,8 +13051,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) } eqhdl->irq = rc; - rc = request_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, 0, - name, eqhdl); + rc = request_threaded_irq(eqhdl->irq, + &lpfc_sli4_hba_intr_handler, + &lpfc_sli4_hba_intr_handler_th, + IRQF_ONESHOT, name, eqhdl); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " @@ -13521,6 +13457,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) struct pci_dev *pdev = phba->pcidev; lpfc_stop_hba_timers(phba); + hrtimer_cancel(&phba->cmf_stats_timer); hrtimer_cancel(&phba->cmf_timer); if (phba->pport) @@ -13645,8 +13582,6 @@ void lpfc_init_congestion_buf(struct lpfc_hba *phba) { struct lpfc_cgn_info *cp; - struct timespec64 cmpl_time; - struct tm broken; uint16_t size; uint32_t crc; @@ -13666,11 +13601,10 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba) atomic_set(&phba->cgn_latency_evt_cnt, 0); atomic64_set(&phba->cgn_latency_evt, 0); phba->cgn_evt_minute = 0; - phba->hba_flag &= ~HBA_CGN_DAY_WRAP; memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat)); cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ); - cp->cgn_info_version = LPFC_CGN_INFO_V3; + cp->cgn_info_version = LPFC_CGN_INFO_V4; /* cgn parameters */ cp->cgn_info_mode = phba->cgn_p.cgn_param_mode; @@ -13678,22 +13612,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba) cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1; cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2; - ktime_get_real_ts64(&cmpl_time); - time64_to_tm(cmpl_time.tv_sec, 0, &broken); - - cp->cgn_info_month = broken.tm_mon + 1; - cp->cgn_info_day = broken.tm_mday; - cp->cgn_info_year = broken.tm_year - 100; /* relative to 2000 */ - cp->cgn_info_hour = broken.tm_hour; - cp->cgn_info_minute =
broken.tm_min; - cp->cgn_info_second = broken.tm_sec; - - lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, - "2643 CGNInfo Init: Start Time " - "%d/%d/%d %d:%d:%d\n", - cp->cgn_info_day, cp->cgn_info_month, - cp->cgn_info_year, cp->cgn_info_hour, - cp->cgn_info_minute, cp->cgn_info_second); + lpfc_cgn_update_tstamp(phba, &cp->base_time); /* Fill in default LUN qdepth */ if (phba->pport) { @@ -13716,8 +13635,6 @@ void lpfc_init_congestion_stat(struct lpfc_hba *phba) { struct lpfc_cgn_info *cp; - struct timespec64 cmpl_time; - struct tm broken; uint32_t crc; lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT, @@ -13729,22 +13646,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba) cp = (struct lpfc_cgn_info *)phba->cgn_i->virt; memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat)); - ktime_get_real_ts64(&cmpl_time); - time64_to_tm(cmpl_time.tv_sec, 0, &broken); - - cp->cgn_stat_month = broken.tm_mon + 1; - cp->cgn_stat_day = broken.tm_mday; - cp->cgn_stat_year = broken.tm_year - 100; /* relative to 2000 */ - cp->cgn_stat_hour = broken.tm_hour; - cp->cgn_stat_minute = broken.tm_min; - - lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT, - "2647 CGNstat Init: Start Time " - "%d/%d/%d %d:%d\n", - cp->cgn_stat_day, cp->cgn_stat_month, - cp->cgn_stat_year, cp->cgn_stat_hour, - cp->cgn_stat_minute); - + lpfc_cgn_update_tstamp(phba, &cp->stat_start); crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED); cp->cgn_info_crc = cpu_to_le32(crc); } @@ -14743,10 +14645,10 @@ lpfc_write_firmware(const struct firmware *fw, void *context) INIT_LIST_HEAD(&dma_buffer_list); lpfc_decode_firmware_rev(phba, fwrev, 1); if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) { - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3023 Updating Firmware, Current Version:%s " - "New Version:%s\n", - fwrev, image->revision); + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3023 Updating Firmware, Current Version:%s " + "New Version:%s\n", + fwrev, image->revision); for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) { dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); @@ -14793,10 +14695,10 @@ lpfc_write_firmware(const struct firmware *fw, void *context) } rc = offset; } else - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3029 Skipped Firmware update, Current " - "Version:%s New Version:%s\n", - fwrev, image->revision); + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3029 Skipped Firmware update, Current " + "Version:%s New Version:%s\n", + fwrev, image->revision); release_out: list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) { @@ -14808,11 +14710,11 @@ release_out: release_firmware(fw); out: if (rc < 0) - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3062 Firmware update error, status %d.\n", rc); + lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI, + "3062 Firmware update error, status %d.\n", rc); else - lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "3024 Firmware update success: size %d.\n", rc); + lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI, + "3024 Firmware update success: size %d.\n", rc); } /** diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index b39cefcd8703..f896ec610433 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. 
The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2009 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * @@ -55,7 +55,7 @@ void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...); /* generate message by verbose log setting or severity */ #define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \ -{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \ +{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '5')) \ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } @@ -64,7 +64,7 @@ do { \ { uint32_t log_verbose = (phba)->pport ? \ (phba)->pport->cfg_log_verbose : \ (phba)->cfg_log_verbose; \ - if (((mask) & log_verbose) || (level[1] <= '4')) \ + if (((mask) & log_verbose) || (level[1] <= '5')) \ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ fmt, phba->brd_no, ##arg); \ } \ diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index adda70423c77..8db7cb99903d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -310,20 +310,20 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba, * for the LS request. **/ void -__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, +__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe) { struct nvmefc_ls_req *pnvme_lsreq; struct lpfc_dmabuf *buf_ptr; struct lpfc_nodelist *ndlp; - uint32_t status; + int status; pnvme_lsreq = cmdwqe->context_un.nvme_lsreq; ndlp = cmdwqe->ndlp; buf_ptr = cmdwqe->bpl_dmabuf; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x " @@ -343,14 +343,17 @@ __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport, kfree(buf_ptr); cmdwqe->bpl_dmabuf = NULL; } - if (pnvme_lsreq->done) + if (pnvme_lsreq->done) { + if (status != CQE_STATUS_SUCCESS) + status = -ENXIO; pnvme_lsreq->done(pnvme_lsreq, status); - else + } else { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6046 NVMEx cmpl without done call back? " "Data x%px DID %x Xri: %x status %x\n", pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, cmdwqe->sli4_xritag, status); + } if (ndlp) { lpfc_nlp_put(ndlp); cmdwqe->ndlp = NULL; @@ -367,7 +370,7 @@ lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, uint32_t status; struct lpfc_wcqe_complete *wcqe = &rspwqe->wcqe_cmpl; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); if (vport->localport) { lport = (struct lpfc_nvme_lport *)vport->localport->private; @@ -1040,7 +1043,7 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN; nCmd->transferred_length = nCmd->payload_length; } else { - lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK); + lpfc_ncmd->status = status; lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK); /* For NVME, the only failure path that results in an @@ -1893,38 +1896,38 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, pnvme_rport->port_id, pnvme_fcreq); - /* If the hba is getting reset, this flag is set. It is - * cleared when the reset is complete and rings reestablished. 
- */ - spin_lock_irqsave(&phba->hbalock, flags); - /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_IOQ_FLUSH) { - spin_unlock_irqrestore(&phba->hbalock, flags); - lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, - "6139 Driver in reset cleanup - flushing " - "NVME Req now. hba_flag x%x\n", - phba->hba_flag); - return; - } - lpfc_nbuf = freqpriv->nvme_buf; if (!lpfc_nbuf) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6140 NVME IO req has no matching lpfc nvme " "io buffer. Skipping abort req.\n"); return; } else if (!lpfc_nbuf->nvmeCmd) { - spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6141 lpfc NVME IO req has no nvme_fcreq " "io buffer. Skipping abort req.\n"); return; } - nvmereq_wqe = &lpfc_nbuf->cur_iocbq; /* Guard against IO completion being called at same time */ - spin_lock(&lpfc_nbuf->buf_lock); + spin_lock_irqsave(&lpfc_nbuf->buf_lock, flags); + + /* If the hba is getting reset, this flag is set. It is + * cleared when the reset is complete and rings reestablished. + */ + spin_lock(&phba->hbalock); + /* driver queued commands are in process of being flushed */ + if (phba->hba_flag & HBA_IOQ_FLUSH) { + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); + lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, + "6139 Driver in reset cleanup - flushing " + "NVME Req now. hba_flag x%x\n", + phba->hba_flag); + return; + } + + nvmereq_wqe = &lpfc_nbuf->cur_iocbq; /* * The lpfc_nbuf and the mapped nvme_fcreq in the driver's @@ -1971,8 +1974,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe, lpfc_nvme_abort_fcreq_cmpl); - spin_unlock(&lpfc_nbuf->buf_lock); - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); /* Make sure HBA is alive */ lpfc_issue_hb_tmo(phba); @@ -1998,8 +2001,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, return; out_unlock: - spin_unlock(&lpfc_nbuf->buf_lock); - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock(&phba->hbalock); + spin_unlock_irqrestore(&lpfc_nbuf->buf_lock, flags); return; } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 7517dd55fe91..dff4584d338b 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term * + * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
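The lpfc_nvme_fcp_abort() rework above is a lock-ordering fix: the per-buffer lock is now taken first and phba->hbalock nested inside it, with unlocks in strict reverse order on every exit path. A reduced sketch of the shape (names hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(buf_lock);       /* outer: per-I/O buffer state */
    static DEFINE_SPINLOCK(hba_lock);       /* inner: adapter-wide flags */
    static unsigned long hba_flags;
    #define HBA_FLUSHING 0x1

    static void abort_io(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&buf_lock, flags);    /* always taken first */
            spin_lock(&hba_lock);                   /* always nested second */

            if (hba_flags & HBA_FLUSHING) {
                    /* bail out, unlocking in exact reverse order */
                    spin_unlock(&hba_lock);
                    spin_unlock_irqrestore(&buf_lock, flags);
                    return;
            }

            /* ... issue the abort while both locks are held ... */

            spin_unlock(&hba_lock);
            spin_unlock_irqrestore(&buf_lock, flags);
    }

Acquiring the two locks in one global order on every path is what rules out AB-BA deadlock between the abort and completion paths.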
* @@ -300,7 +300,7 @@ __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp; uint32_t status, result; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) { @@ -350,7 +350,7 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, if (!phba->targetport) goto finish; - status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK; + status = bf_get(lpfc_wcqe_c_status, wcqe); result = wcqe->parameter; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index e989f130434e..a62e091894f6 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4026,7 +4026,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, struct lpfc_fast_path_event *fast_path_evt; struct Scsi_Host *shost; u32 logit = LOG_FCP; - u32 status, idx; + u32 idx; u32 lat; u8 wait_xb_clr = 0; @@ -4061,8 +4061,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, #endif shost = cmd->device->host; - status = bf_get(lpfc_wcqe_c_status, wcqe); - lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK); + lpfc_cmd->status = bf_get(lpfc_wcqe_c_status, wcqe); lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK); lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; @@ -4104,11 +4103,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, } #endif if (unlikely(lpfc_cmd->status)) { - if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && - (lpfc_cmd->result & IOERR_DRVR_MASK)) - lpfc_cmd->status = IOSTAT_DRIVER_REJECT; - else if (lpfc_cmd->status >= IOSTAT_CNT) - lpfc_cmd->status = IOSTAT_DEFAULT; if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR && !lpfc_cmd->fcp_rsp->rspStatus3 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) && @@ -4133,16 +4127,16 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, } switch (lpfc_cmd->status) { - case IOSTAT_SUCCESS: + case CQE_STATUS_SUCCESS: cmd->result = DID_OK << 16; break; - case IOSTAT_FCP_RSP_ERROR: + case CQE_STATUS_FCP_RSP_FAILURE: lpfc_handle_fcp_err(vport, lpfc_cmd, pwqeIn->wqe.fcp_iread.total_xfer_len - wcqe->total_data_placed); break; - case IOSTAT_NPORT_BSY: - case IOSTAT_FABRIC_BSY: + case CQE_STATUS_NPORT_BSY: + case CQE_STATUS_FABRIC_BSY: cmd->result = DID_TRANSPORT_DISRUPTED << 16; fast_path_evt = lpfc_alloc_fast_evt(phba); if (!fast_path_evt) @@ -4185,7 +4179,27 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, wcqe->total_data_placed, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); break; - case IOSTAT_REMOTE_STOP: + case CQE_STATUS_DI_ERROR: + if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) + lpfc_cmd->result = IOERR_RX_DMA_FAILED; + else + lpfc_cmd->result = IOERR_TX_DMA_FAILED; + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_BG, + "9048 DI Error xri x%x status x%x DI ext " + "status x%x data placed x%x\n", + lpfc_cmd->cur_iocbq.sli4_xritag, + lpfc_cmd->status, wcqe->parameter, + wcqe->total_data_placed); + if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { + /* BG enabled cmd. 
Parse BG error */ + lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); + break; + } + cmd->result = DID_ERROR << 16; + lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, + "9040 DI Error on unprotected cmd\n"); + break; + case CQE_STATUS_REMOTE_STOP: if (ndlp) { /* This I/O was aborted by the target, we don't * know the rxid and because we did not send the @@ -4196,7 +4210,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, 0, 0); } fallthrough; - case IOSTAT_LOCAL_REJECT: + case CQE_STATUS_LOCAL_REJECT: if (lpfc_cmd->result & IOERR_DRVR_MASK) lpfc_cmd->status = IOSTAT_DRIVER_REJECT; if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || @@ -4217,24 +4231,6 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, cmd->result = DID_TRANSPORT_DISRUPTED << 16; break; } - if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || - lpfc_cmd->result == IOERR_TX_DMA_FAILED) && - status == CQE_STATUS_DI_ERROR) { - if (scsi_get_prot_op(cmd) != - SCSI_PROT_NORMAL) { - /* - * This is a response for a BG enabled - * cmd. Parse BG error - */ - lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut); - break; - } else { - lpfc_printf_vlog(vport, KERN_WARNING, - LOG_BG, - "9040 non-zero BGSTAT " - "on unprotected cmd\n"); - } - } lpfc_printf_vlog(vport, KERN_WARNING, logit, "9036 Local Reject FCP cmd x%x failed" " <%d/%lld> " @@ -4253,10 +4249,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, lpfc_cmd->cur_iocbq.iocb.ulpIoTag); fallthrough; default: - if (lpfc_cmd->status >= IOSTAT_CNT) - lpfc_cmd->status = IOSTAT_DEFAULT; cmd->result = DID_ERROR << 16; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "9037 FCP Completion Error: xri %x " "status x%x result x%x [x%x] " "placed x%x\n", @@ -4273,7 +4267,8 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n", cmd->device->id, cmd->device->lun, cmd, cmd->result, *lp, *(lp + 3), - (u64)scsi_get_lba(cmd), + (cmd->device->sector_size) ? 
+ (u64)scsi_get_lba(cmd) : 0, cmd->retries, scsi_get_resid(cmd)); } @@ -5009,7 +5004,6 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) return -ENODEV; } phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; - phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl; return 0; } diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8693578888f1..58d10f8f75a7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -82,7 +82,8 @@ static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, int); static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, - struct lpfc_eqe *eqe); + struct lpfc_eqe *eqe, + enum lpfc_poll_mode poll_mode); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); @@ -629,7 +630,7 @@ lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) static int lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, - uint8_t rearm) + u8 rearm, enum lpfc_poll_mode poll_mode) { struct lpfc_eqe *eqe; int count = 0, consumed = 0; @@ -639,7 +640,7 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, eqe = lpfc_sli4_eq_get(eq); while (eqe) { - lpfc_sli4_hba_handle_eqe(phba, eq, eqe); + lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode); __lpfc_sli4_consume_eqe(phba, eq, eqe); consumed++; @@ -1931,7 +1932,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) unsigned long iflags; u32 ret_val; u32 atot, wtot, max; - u16 warn_sync_period = 0; + u8 warn_sync_period = 0; /* First address any alarm / warning activity */ atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0); @@ -7957,7 +7958,7 @@ out_rdf: * lpfc_init_idle_stat_hb - Initialize idle_stat tracking * @phba: pointer to lpfc hba data structure. * - * This routine initializes the per-cq idle_stat to dynamically dictate + * This routine initializes the per-eq idle_stat to dynamically dictate * polling decisions. 
* * Return codes: @@ -7967,16 +7968,16 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) { int i; struct lpfc_sli4_hdw_queue *hdwq; - struct lpfc_queue *cq; + struct lpfc_queue *eq; struct lpfc_idle_stat *idle_stat; u64 wall; for_each_present_cpu(i) { hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq]; - cq = hdwq->io_cq; + eq = hdwq->hba_eq; - /* Skip if we've already handled this cq's primary CPU */ - if (cq->chann != i) + /* Skip if we've already handled this eq's primary CPU */ + if (eq->chann != i) continue; idle_stat = &phba->sli4_hba.idle_stat[i]; @@ -7985,13 +7986,14 @@ static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba) idle_stat->prev_wall = wall; if (phba->nvmet_support || - phba->cmf_active_mode != LPFC_CFG_OFF) - cq->poll_mode = LPFC_QUEUE_WORK; + phba->cmf_active_mode != LPFC_CFG_OFF || + phba->intr_type != MSIX) + eq->poll_mode = LPFC_QUEUE_WORK; else - cq->poll_mode = LPFC_IRQ_POLL; + eq->poll_mode = LPFC_THREADED_IRQ; } - if (!phba->nvmet_support) + if (!phba->nvmet_support && phba->intr_type == MSIX) schedule_delayed_work(&phba->idle_stat_delay_work, msecs_to_jiffies(LPFC_IDLE_STAT_DELAY)); } @@ -9218,7 +9220,8 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) if (mbox_pending) /* process and rearm the EQ */ - lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); + lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_QUEUE_WORK); else /* Always clear and re-arm the EQ */ sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); @@ -11254,7 +11257,8 @@ inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq) * will be handled through a sched from polling timer * function which is currently triggered every 1msec. */ - lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM); + lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM, + LPFC_QUEUE_WORK); } /** @@ -14682,6 +14686,38 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; + case FC_STATUS_RQ_DMA_FAILURE: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2564 RQE DMA Error x%x, x%08x x%08x x%08x " + "x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + + /* If IV set, no further recovery */ + if (bf_get(lpfc_rcqe_iv, rcqe)) + break; + + /* recycle consumed resource */ + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + break; + } + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_in_buf_free(phba, &dma_buf->dbuf); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2565 Unexpected RQE Status x%x, w0-3 x%08x " + "x%08x x%08x x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + break; } out: return workposted; @@ -14803,7 +14839,6 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, * @cq: Pointer to CQ to be processed * @handler: Routine to process each cqe * @delay: Pointer to usdelay to set in case of rescheduling of the handler - * @poll_mode: Polling mode we were called from * * This routine processes completion queue entries in a CQ. While a valid * queue element is found, the handler is called. 
During processing checks @@ -14821,8 +14856,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, static bool __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_cqe *), unsigned long *delay, - enum lpfc_poll_mode poll_mode) + struct lpfc_cqe *), unsigned long *delay) { struct lpfc_cqe *cqe; bool workposted = false; @@ -14863,10 +14897,6 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, arm = false; } - /* Note: complete the irq_poll softirq before rearming CQ */ - if (poll_mode == LPFC_IRQ_POLL) - irq_poll_complete(&cq->iop); - /* Track the max number of CQEs processed in 1 EQ */ if (count > cq->CQ_max_cqe) cq->CQ_max_cqe = count; @@ -14916,17 +14946,17 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) case LPFC_MCQ: workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_mcqe, - &delay, LPFC_QUEUE_WORK); + &delay); break; case LPFC_WCQ: if (cq->subtype == LPFC_IO) workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, - &delay, LPFC_QUEUE_WORK); + &delay); else workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_sp_handle_cqe, - &delay, LPFC_QUEUE_WORK); + &delay); break; default: lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -15203,6 +15233,38 @@ drop: hrq->RQ_no_posted_buf++; /* Post more buffers if possible */ break; + case FC_STATUS_RQ_DMA_FAILURE: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2575 RQE DMA Error x%x, x%08x x%08x x%08x " + "x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + + /* If IV set, no further recovery */ + if (bf_get(lpfc_rcqe_iv, rcqe)) + break; + + /* recycle consumed resource */ + spin_lock_irqsave(&phba->hbalock, iflags); + lpfc_sli4_rq_release(hrq, drq); + dma_buf = lpfc_sli_rqbuf_get(phba, hrq); + if (!dma_buf) { + hrq->RQ_no_buf_found++; + spin_unlock_irqrestore(&phba->hbalock, iflags); + break; + } + hrq->RQ_rcv_buf++; + hrq->RQ_buf_posted--; + spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_rq_buf_free(phba, &dma_buf->hbuf); + break; + default: + lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, + "2576 Unexpected RQE Status x%x, w0-3 x%08x " + "x%08x x%08x x%08x\n", + status, rcqe->word0, rcqe->word1, + rcqe->word2, rcqe->word3); + break; } out: return workposted; @@ -15271,45 +15333,64 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, } /** - * lpfc_sli4_sched_cq_work - Schedules cq work - * @phba: Pointer to HBA context object. - * @cq: Pointer to CQ - * @cqid: CQ ID - * - * This routine checks the poll mode of the CQ corresponding to - * cq->chann, then either schedules a softirq or queue_work to complete - * cq work. + * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry + * @cq: Pointer to CQ to be processed * - * queue_work path is taken if in NVMET mode, or if poll_mode is in - * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken. + * This routine calls the cq processing routine with the handler for + * fast path CQEs. * + * The CQ routine returns two values: the first is the calling status, + * which indicates whether work was queued to the background discovery + * thread. If true, the routine should wakeup the discovery thread; + * the second is the delay parameter. If non-zero, rather than rearming + * the CQ and yet another interrupt, the CQ handler should be queued so + * that it is processed in a subsequent polling action. The value of + * the delay indicates when to reschedule it. 
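Concretely, the delay contract described here maps onto delayed work items: when the CQ handler reports backpressure, the re-poll is queued to the CQ's own CPU instead of re-arming the interrupt. A sketch, assuming a simplified cq carrying a delayed_work (mirroring sched_irqwork above):

    #include <linux/workqueue.h>

    struct cq {
            int chann;                      /* preferred CPU for this CQ */
            struct delayed_work repoll;     /* mirrors cq->sched_irqwork */
    };

    static unsigned long process_cq(struct cq *cq)
    {
            /* drain CQEs; return a delay in jiffies if entries remain */
            return 0;
    }

    static void cq_repoll_fn(struct work_struct *w)
    {
            struct cq *cq = container_of(to_delayed_work(w), struct cq, repoll);
            unsigned long delay = process_cq(cq);

            /* requeue on the CQ's CPU rather than taking another interrupt */
            if (delay)
                    queue_delayed_work_on(cq->chann, system_wq, &cq->repoll,
                                          delay);
    }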
**/ -static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, - struct lpfc_queue *cq, uint16_t cqid) +static void +__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq) { - int ret = 0; + struct lpfc_hba *phba = cq->phba; + unsigned long delay; + bool workposted = false; + int ret; - switch (cq->poll_mode) { - case LPFC_IRQ_POLL: - /* CGN mgmt is mutually exclusive from softirq processing */ - if (phba->cmf_active_mode == LPFC_CFG_OFF) { - irq_poll_sched(&cq->iop); - break; - } - fallthrough; - case LPFC_QUEUE_WORK: - default: + /* process and rearm the CQ */ + workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, + &delay); + + if (delay) { if (is_kdump_kernel()) - ret = queue_work(phba->wq, &cq->irqwork); + ret = queue_delayed_work(phba->wq, &cq->sched_irqwork, + delay); else - ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); + ret = queue_delayed_work_on(cq->chann, phba->wq, + &cq->sched_irqwork, delay); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0383 Cannot schedule queue work " - "for CQ eqcqid=%d, cqid=%d on CPU %d\n", - cqid, cq->queue_id, - raw_smp_processor_id()); + "0367 Cannot schedule queue work " + "for cqid=%d on CPU %d\n", + cq->queue_id, cq->chann); } + + /* wake up worker thread if there are works to be done */ + if (workposted) + lpfc_worker_wake_up(phba); +} + +/** + * lpfc_sli4_hba_process_cq - fast-path work handler when started by + * interrupt + * @work: pointer to work element + * + * translates from the work handler and calls the fast-path handler. + **/ +static void +lpfc_sli4_hba_process_cq(struct work_struct *work) +{ + struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); + + __lpfc_sli4_hba_process_cq(cq); } /** @@ -15317,6 +15398,7 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, * @phba: Pointer to HBA context object. * @eq: Pointer to the queue structure. * @eqe: Pointer to fast-path event queue entry. + * @poll_mode: poll_mode to execute processing the cq. * * This routine process a event queue entry from the fast-path event queue. * It will check the MajorCode and MinorCode to determine this is for a @@ -15327,11 +15409,12 @@ static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba, **/ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, - struct lpfc_eqe *eqe) + struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode) { struct lpfc_queue *cq = NULL; uint32_t qidx = eq->hdwq; uint16_t cqid, id; + int ret; if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, @@ -15391,70 +15474,25 @@ work_cq: else cq->isr_timestamp = 0; #endif - lpfc_sli4_sched_cq_work(phba, cq, cqid); -} - -/** - * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry - * @cq: Pointer to CQ to be processed - * @poll_mode: Enum lpfc_poll_state to determine poll mode - * - * This routine calls the cq processing routine with the handler for - * fast path CQEs. - * - * The CQ routine returns two values: the first is the calling status, - * which indicates whether work was queued to the background discovery - * thread. If true, the routine should wakeup the discovery thread; - * the second is the delay parameter. If non-zero, rather than rearming - * the CQ and yet another interrupt, the CQ handler should be queued so - * that it is processed in a subsequent polling action. The value of - * the delay indicates when to reschedule it. 
- **/ -static void -__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq, - enum lpfc_poll_mode poll_mode) -{ - struct lpfc_hba *phba = cq->phba; - unsigned long delay; - bool workposted = false; - int ret = 0; - - /* process and rearm the CQ */ - workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, - &delay, poll_mode); - if (delay) { + switch (poll_mode) { + case LPFC_THREADED_IRQ: + __lpfc_sli4_hba_process_cq(cq); + break; + case LPFC_QUEUE_WORK: + default: if (is_kdump_kernel()) - ret = queue_delayed_work(phba->wq, &cq->sched_irqwork, - delay); + ret = queue_work(phba->wq, &cq->irqwork); else - ret = queue_delayed_work_on(cq->chann, phba->wq, - &cq->sched_irqwork, delay); + ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork); if (!ret) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "0367 Cannot schedule queue work " - "for cqid=%d on CPU %d\n", - cq->queue_id, cq->chann); + "0383 Cannot schedule queue work " + "for CQ eqcqid=%d, cqid=%d on CPU %d\n", + cqid, cq->queue_id, + raw_smp_processor_id()); + break; } - - /* wake up worker thread if there are works to be done */ - if (workposted) - lpfc_worker_wake_up(phba); -} - -/** - * lpfc_sli4_hba_process_cq - fast-path work handler when started by - * interrupt - * @work: pointer to work element - * - * translates from the work handler and calls the fast-path handler. - **/ -static void -lpfc_sli4_hba_process_cq(struct work_struct *work) -{ - struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork); - - __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); } /** @@ -15469,7 +15507,7 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work) struct lpfc_queue *cq = container_of(to_delayed_work(work), struct lpfc_queue, sched_irqwork); - __lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK); + __lpfc_sli4_hba_process_cq(cq); } /** @@ -15495,8 +15533,9 @@ lpfc_sli4_dly_hba_process_cq(struct work_struct *work) * and returns for these events. This function is called without any lock * held. It gets the hbalock to access and update SLI data structures. * - * This function returns IRQ_HANDLED when interrupt is handled else it - * returns IRQ_NONE. + * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD + * when interrupt is scheduled to be handled from a threaded irq context, or + * else returns IRQ_NONE. 
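Returning IRQ_WAKE_THREAD is only meaningful when a thread function was registered for the vector; a minimal sketch of that pairing with a hypothetical demo device (not from this patch):

#include <linux/interrupt.h>

static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	/* Keep the hard handler cheap; hand the real work to the thread. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_irq_thread(int irq, void *dev_id)
{
	/* Process context: may sleep, take mutexes, allocate, etc. */
	return IRQ_HANDLED;
}

static int demo_setup_irq(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, demo_hardirq, demo_irq_thread,
				    0, "demo", dev_id);
}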
**/ irqreturn_t lpfc_sli4_hba_intr_handler(int irq, void *dev_id) @@ -15505,8 +15544,8 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) struct lpfc_hba_eq_hdl *hba_eq_hdl; struct lpfc_queue *fpeq; unsigned long iflag; - int ecount = 0; int hba_eqidx; + int ecount = 0; struct lpfc_eq_intr_info *eqi; /* Get the driver's phba structure from the dev_id */ @@ -15535,30 +15574,41 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) return IRQ_NONE; } - eqi = this_cpu_ptr(phba->sli4_hba.eq_info); - eqi->icnt++; - - fpeq->last_cpu = raw_smp_processor_id(); + switch (fpeq->poll_mode) { + case LPFC_THREADED_IRQ: + /* CGN mgmt is mutually exclusive from irq processing */ + if (phba->cmf_active_mode == LPFC_CFG_OFF) + return IRQ_WAKE_THREAD; + fallthrough; + case LPFC_QUEUE_WORK: + default: + eqi = this_cpu_ptr(phba->sli4_hba.eq_info); + eqi->icnt++; - if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && - fpeq->q_flag & HBA_EQ_DELAY_CHK && - phba->cfg_auto_imax && - fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && - phba->sli.sli_flag & LPFC_SLI_USE_EQDR) - lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); + fpeq->last_cpu = raw_smp_processor_id(); - /* process and rearm the EQ */ - ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && + fpeq->q_flag & HBA_EQ_DELAY_CHK && + phba->cfg_auto_imax && + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, + LPFC_MAX_AUTO_EQ_DELAY); - if (unlikely(ecount == 0)) { - fpeq->EQ_no_entry++; - if (phba->intr_type == MSIX) - /* MSI-X treated interrupt served as no EQ share INT */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0358 MSI-X interrupt with no EQE\n"); - else - /* Non MSI-X treated on interrupt as EQ share INT */ - return IRQ_NONE; + /* process and rearm the EQ */ + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_QUEUE_WORK); + + if (unlikely(ecount == 0)) { + fpeq->EQ_no_entry++; + if (phba->intr_type == MSIX) + /* MSI-X treated interrupt served as no EQ share INT */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0358 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X treated on interrupt as EQ share INT */ + return IRQ_NONE; + } } return IRQ_HANDLED; @@ -16115,13 +16165,69 @@ out: return status; } -static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget) +/** + * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within + * threaded irq context. 
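The eq_info counters touched above live in per-CPU storage, so the interrupt path can update them without locks; reduced to its essentials with hypothetical demo_* names:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct demo_eq_stats {
	u64 icnt;	/* interrupts seen on this CPU */
};

static struct demo_eq_stats __percpu *demo_stats;

static int demo_stats_init(void)
{
	demo_stats = alloc_percpu(struct demo_eq_stats);
	return demo_stats ? 0 : -ENOMEM;
}

static void demo_count_irq(void)
{
	/* No extra locking needed: each CPU only touches its own slot. */
	per_cpu_ptr(demo_stats, raw_smp_processor_id())->icnt++;
}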
+ * + * Returns + * IRQ_HANDLED - interrupt is handled + * IRQ_NONE - otherwise + **/ +irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id) { - struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop); + struct lpfc_hba *phba; + struct lpfc_hba_eq_hdl *hba_eq_hdl; + struct lpfc_queue *fpeq; + int ecount = 0; + int hba_eqidx; + struct lpfc_eq_intr_info *eqi; - __lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL); + /* Get the driver's phba structure from the dev_id */ + hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; + phba = hba_eq_hdl->phba; + hba_eqidx = hba_eq_hdl->idx; - return 1; + if (unlikely(!phba)) + return IRQ_NONE; + if (unlikely(!phba->sli4_hba.hdwq)) + return IRQ_NONE; + + /* Get to the EQ struct associated with this vector */ + fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; + if (unlikely(!fpeq)) + return IRQ_NONE; + + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id()); + eqi->icnt++; + + fpeq->last_cpu = raw_smp_processor_id(); + + if (eqi->icnt > LPFC_EQD_ISR_TRIGGER && + fpeq->q_flag & HBA_EQ_DELAY_CHK && + phba->cfg_auto_imax && + fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && + phba->sli.sli_flag & LPFC_SLI_USE_EQDR) + lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); + + /* process and rearm the EQ */ + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM, + LPFC_THREADED_IRQ); + + if (unlikely(ecount == 0)) { + fpeq->EQ_no_entry++; + if (phba->intr_type == MSIX) + /* MSI-X treated interrupt served as no EQ share INT */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3358 MSI-X interrupt with no EQE\n"); + else + /* Non MSI-X treated on interrupt as EQ share INT */ + return IRQ_NONE; + } + return IRQ_HANDLED; } /** @@ -16265,8 +16371,6 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, if (cq->queue_id > phba->sli4_hba.cq_max) phba->sli4_hba.cq_max = cq->queue_id; - - irq_poll_init(&cq->iop, LPFC_IRQ_POLL_WEIGHT, lpfc_cq_poll_hdler); out: mempool_free(mbox, phba->mbox_mem_pool); return status; @@ -20696,23 +20800,23 @@ lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) { switch (shdr_add_status_2) { case LPFC_ADD_STATUS_2_INCOMPAT_FLASH: - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "4199 Firmware write failed: " - "image incompatible with flash x%02x\n", - phba->sli4_hba.flash_id); + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4199 Firmware write failed: " + "image incompatible with flash x%02x\n", + phba->sli4_hba.flash_id); break; case LPFC_ADD_STATUS_2_INCORRECT_ASIC: - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "4200 Firmware write failed: " - "image incompatible with ASIC " - "architecture x%02x\n", - phba->sli4_hba.asic_rev); + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4200 Firmware write failed: " + "image incompatible with ASIC " + "architecture x%02x\n", + phba->sli4_hba.asic_rev); break; default: - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "4210 Firmware write failed: " - "add_status_2 x%02x\n", - shdr_add_status_2); + lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, + "4210 Firmware write failed: " + "add_status_2 x%02x\n", + shdr_add_status_2); break; } } else if (!shdr_status && !shdr_add_status) { @@ -20725,26 +20829,26 @@ lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status, switch (shdr_change_status) { case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3198 Firmware write complete: System " - "reboot 
required to instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3198 Firmware write complete: System " + "reboot required to instantiate\n"); break; case (LPFC_CHANGE_STATUS_FW_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3199 Firmware write complete: " - "Firmware reset required to " - "instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3199 Firmware write complete: " + "Firmware reset required to " + "instantiate\n"); break; case (LPFC_CHANGE_STATUS_PORT_MIGRATION): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3200 Firmware write complete: Port " - "Migration or PCI Reset required to " - "instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3200 Firmware write complete: Port " + "Migration or PCI Reset required to " + "instantiate\n"); break; case (LPFC_CHANGE_STATUS_PCI_RESET): - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "3201 Firmware write complete: PCI " - "Reset required to instantiate\n"); + lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI, + "3201 Firmware write complete: PCI " + "Reset required to instantiate\n"); break; default: break; diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 2a0864e6d7cd..2541a8fba093 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -140,7 +140,7 @@ struct lpfc_rqb { enum lpfc_poll_mode { LPFC_QUEUE_WORK, - LPFC_IRQ_POLL + LPFC_THREADED_IRQ, }; struct lpfc_idle_stat { @@ -279,8 +279,6 @@ struct lpfc_queue { struct list_head _poll_list; void **q_pgs; /* array to index entries per page */ -#define LPFC_IRQ_POLL_WEIGHT 256 - struct irq_poll iop; enum lpfc_poll_mode poll_mode; }; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index c97411b0992e..6f35491aed0f 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.11" +#define LPFC_DRIVER_VERSION "14.2.0.13" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid index 2adc2afd9f91..3f2ce1eb081c 100644 --- a/drivers/scsi/megaraid/Kconfig.megaraid +++ b/drivers/scsi/megaraid/Kconfig.megaraid @@ -1,13 +1,13 @@ # SPDX-License-Identifier: GPL-2.0-only config MEGARAID_NEWGEN bool "LSI Logic New Generation RAID Device Drivers" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI help LSI Logic RAID Device Drivers config MEGARAID_MM tristate "LSI Logic Management Module (New Driver)" - depends on PCI && SCSI && MEGARAID_NEWGEN + depends on PCI && HAS_IOPORT && SCSI && MEGARAID_NEWGEN help Management Module provides ioctl, sysfs support for LSI Logic RAID controllers. @@ -67,7 +67,7 @@ config MEGARAID_MAILBOX config MEGARAID_LEGACY tristate "LSI Logic Legacy MegaRAID Driver" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI help This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490 and 467 SCSI host adapters. 
This driver also supports all of the U320 diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 63bac3684c19..3554f6b07727 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1722,11 +1722,9 @@ struct megasas_sge_skinny { } __packed; union megasas_sgl { - - struct megasas_sge32 sge32[1]; - struct megasas_sge64 sge64[1]; - struct megasas_sge_skinny sge_skinny[1]; - + DECLARE_FLEX_ARRAY(struct megasas_sge32, sge32); + DECLARE_FLEX_ARRAY(struct megasas_sge64, sge64); + DECLARE_FLEX_ARRAY(struct megasas_sge_skinny, sge_skinny); } __attribute__ ((packed)); struct megasas_header { diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index dfe6b87fe288..0afb687402e1 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -1133,18 +1133,18 @@ struct mpi3mr_ioc { u32 chain_buf_count; struct dma_pool *chain_buf_pool; struct chain_element *chain_sgl_list; - void *chain_bitmap; + unsigned long *chain_bitmap; spinlock_t chain_buf_lock; struct mpi3mr_drv_cmd bsg_cmds; struct mpi3mr_drv_cmd host_tm_cmds; struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD]; struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD]; - void *devrem_bitmap; + unsigned long *devrem_bitmap; u16 dev_handle_bitmap_bits; - void *removepend_bitmap; + unsigned long *removepend_bitmap; struct list_head delayed_rmhs_list; - void *evtack_cmds_bitmap; + unsigned long *evtack_cmds_bitmap; struct list_head delayed_evtack_cmds_list; u32 ts_update_counter; diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 075fa67e95ee..5fa07d6ee5b8 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -402,6 +402,11 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc, memcpy((u8 *)cmdptr->reply, (u8 *)def_reply, mrioc->reply_sz); } + if (sense_buf && cmdptr->sensebuf) { + cmdptr->is_sense = 1; + memcpy(cmdptr->sensebuf, sense_buf, + MPI3MR_SENSE_BUF_SZ); + } if (cmdptr->is_waiting) { complete(&cmdptr->done); cmdptr->is_waiting = 0; @@ -1134,7 +1139,7 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, static int mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) { - void *removepend_bitmap; + unsigned long *removepend_bitmap; if (mrioc->facts.reply_sz > mrioc->reply_sz) { ioc_err(mrioc, diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index 4d84d5bd173f..82b55e955730 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -2058,7 +2058,7 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle) sas_expander = kzalloc(sizeof(struct mpi3mr_sas_node), GFP_KERNEL); if (!sas_expander) - return -1; + return -ENOMEM; sas_expander->handle = handle; sas_expander->num_phys = expander_pg0.num_phys; diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig index 79812b80743b..5ac7fd593b17 100644 --- a/drivers/scsi/mvsas/Kconfig +++ b/drivers/scsi/mvsas/Kconfig @@ -9,7 +9,7 @@ config SCSI_MVSAS tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support" - depends on PCI + depends on PCI && HAS_IOPORT select SCSI_SAS_LIBSAS select FW_LOADER help diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig index 9696b6b5591f..449bd85db7bb 100644 --- a/drivers/scsi/pcmcia/Kconfig +++ b/drivers/scsi/pcmcia/Kconfig @@ -12,6 +12,7 @@ if SCSI_LOWLEVEL_PCMCIA && SCSI && PCMCIA && m config PCMCIA_AHA152X tristate "Adaptec AHA152X PCMCIA 
support" + depends on HAS_IOPORT select SCSI_SPI_ATTRS help Say Y here if you intend to attach this type of PCMCIA SCSI host @@ -22,6 +23,7 @@ config PCMCIA_AHA152X config PCMCIA_FDOMAIN tristate "Future Domain PCMCIA support" + depends on HAS_IOPORT select SCSI_FDOMAIN help Say Y here if you intend to attach this type of PCMCIA SCSI host @@ -32,7 +34,7 @@ config PCMCIA_FDOMAIN config PCMCIA_NINJA_SCSI tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support" - depends on !64BIT || COMPILE_TEST + depends on (!64BIT || COMPILE_TEST) && HAS_IOPORT help If you intend to attach this type of PCMCIA SCSI host adapter to your computer, say Y here and read @@ -66,6 +68,7 @@ config PCMCIA_NINJA_SCSI config PCMCIA_QLOGIC tristate "Qlogic PCMCIA support" + depends on HAS_IOPORT help Say Y here if you intend to attach this type of PCMCIA SCSI host adapter to your computer. @@ -75,6 +78,7 @@ config PCMCIA_QLOGIC config PCMCIA_SYM53C500 tristate "Symbios 53c500 PCMCIA support" + depends on HAS_IOPORT help Say Y here if you have a New Media Bus Toaster or other PCMCIA SCSI adapter based on the Symbios 53c500 controller. diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 8b9490011e36..2e886c1d867d 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -43,7 +43,8 @@ #include "pm8001_chips.h" #include "pm80xx_hwi.h" -static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING; +static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING | + PM8001_EVENT_LOGGING | PM8001_INIT_LOGGING; module_param(logging_level, ulong, 0644); MODULE_PARM_DESC(logging_level, " bits for enabling logging info."); @@ -666,7 +667,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, * Currently we just set the fixed SAS address to our HBA, for manufacture, * it should read from the EEPROM */ -static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) +static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) { u8 i, j; u8 sas_add[8]; @@ -679,6 +680,12 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) struct pm8001_ioctl_payload payload; u16 deviceid; int rc; + unsigned long time_remaining; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + pm8001_dbg(pm8001_ha, FAIL, "controller is in fatal error state\n"); + return -EIO; + } pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); pm8001_ha->nvmd_completion = &completion; @@ -703,16 +710,23 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) payload.offset = 0; payload.func_specific = kzalloc(payload.rd_length, GFP_KERNEL); if (!payload.func_specific) { - pm8001_dbg(pm8001_ha, INIT, "mem alloc fail\n"); - return; + pm8001_dbg(pm8001_ha, FAIL, "mem alloc fail\n"); + return -ENOMEM; } rc = PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); if (rc) { kfree(payload.func_specific); - pm8001_dbg(pm8001_ha, INIT, "nvmd failed\n"); - return; + pm8001_dbg(pm8001_ha, FAIL, "nvmd failed\n"); + return -EIO; + } + time_remaining = wait_for_completion_timeout(&completion, + msecs_to_jiffies(60*1000)); // 1 min + if (!time_remaining) { + kfree(payload.func_specific); + pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n"); + return -EIO; } - wait_for_completion(&completion); + for (i = 0, j = 0; i <= 7; i++, j++) { if (pm8001_ha->chip_id == chip_8001) { @@ -751,6 +765,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) memcpy(pm8001_ha->sas_addr, &pm8001_ha->phy[0].dev_sas_addr, SAS_ADDR_SIZE); 
#endif + return 0; } /* @@ -1166,7 +1181,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev, pm80xx_set_thermal_config(pm8001_ha); } - pm8001_init_sas_add(pm8001_ha); + if (pm8001_init_sas_add(pm8001_ha)) + goto err_out_shost; /* phy setting support for motherboard controller */ rc = pm8001_configure_phy_settings(pm8001_ha); if (rc) diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index e5673c774f66..a5a31dfa4512 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -167,6 +167,17 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, pm8001_ha = sas_phy->ha->lldd_ha; phy = &pm8001_ha->phy[phy_id]; pm8001_ha->phy[phy_id].enable_completion = &completion; + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + /* + * If the controller is in fatal error state, + * we will not get a response from the controller + */ + pm8001_dbg(pm8001_ha, FAIL, + "Phy control failed due to fatal errors\n"); + return -EFAULT; + } + switch (func) { case PHY_FUNC_SET_LINK_RATE: rates = funcdata; @@ -908,6 +919,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); DECLARE_COMPLETION_ONSTACK(completion_setstate); + + if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) { + /* + * If the controller is in fatal error state, + * we will not get a response from the controller + */ + pm8001_dbg(pm8001_ha, FAIL, + "LUN reset failed due to fatal errors\n"); + return rc; + } + if (dev_is_sata(dev)) { struct sas_phy *phy = sas_get_local_phy(dev); sas_execute_internal_abort_dev(dev, 0, NULL); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index dc1f4d958e03..953572fc0d9e 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -71,6 +71,7 @@ #define PM8001_DEV_LOGGING 0x80 /* development message logging */ #define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */ #define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */ +#define PM8001_EVENT_LOGGING 0x400 /* HW event logging */ #define pm8001_info(HBA, fmt, ...) 
\ pr_info("%s:: %s %d: " fmt, \ diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 9584cadc4201..39a12ee94a72 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -3239,9 +3239,9 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) struct pm8001_port *port = &pm8001_ha->port[port_id]; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; unsigned long flags; - pm8001_dbg(pm8001_ha, DEVIO, - "port id %d, phy id %d link_rate %d portstate 0x%x\n", - port_id, phy_id, link_rate, portstate); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SATA_PHY_UP phyid:%#x port_id:%#x link_rate:%d portstate:%#x\n", + phy_id, port_id, link_rate, portstate); phy->port = port; port->port_id = port_id; @@ -3291,10 +3291,14 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) phy->phy_attached = 0; switch (portstate) { case PORT_VALID: + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_VALID\n", + phy_id, port_id); break; case PORT_INVALID: - pm8001_dbg(pm8001_ha, MSG, " PortInvalid portID %d\n", - port_id); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_INVALID\n", + phy_id, port_id); pm8001_dbg(pm8001_ha, MSG, " Last phy Down and port invalid\n"); if (port_sata) { @@ -3306,18 +3310,21 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) sas_phy_disconnected(&phy->sas_phy); break; case PORT_IN_RESET: - pm8001_dbg(pm8001_ha, MSG, " Port In Reset portID %d\n", - port_id); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_IN_RESET\n", + phy_id, port_id); break; case PORT_NOT_ESTABLISHED: - pm8001_dbg(pm8001_ha, MSG, - " Phy Down and PORT_NOT_ESTABLISHED\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_NOT_ESTABLISHED\n", + phy_id, port_id); port->port_attached = 0; break; case PORT_LOSTCOMM: - pm8001_dbg(pm8001_ha, MSG, " Phy Down and PORT_LOSTCOMM\n"); - pm8001_dbg(pm8001_ha, MSG, - " Last phy Down and port invalid\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate: PORT_LOSTCOMM\n", + phy_id, port_id); + pm8001_dbg(pm8001_ha, MSG, " Last phy Down and port invalid\n"); if (port_sata) { port->port_attached = 0; phy->phy_type = 0; @@ -3328,9 +3335,9 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) break; default: port->port_attached = 0; - pm8001_dbg(pm8001_ha, DEVIO, - " Phy Down and(default) = 0x%x\n", - portstate); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_DOWN phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); break; } @@ -3410,6 +3417,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); u16 eventType = (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); u8 status = @@ -3425,26 +3433,29 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) switch (eventType) { case HW_EVENT_SAS_PHY_UP: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SAS_PHY_UP phyid:%#x port_id:%#x\n", + phy_id, port_id); hw_event_sas_phy_up(pm8001_ha, piomb); break; case HW_EVENT_SATA_PHY_UP: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_PHY_UP\n"); hw_event_sata_phy_up(pm8001_ha, piomb); break; case 
HW_EVENT_SATA_SPINUP_HOLD: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_SATA_SPINUP_HOLD phyid:%#x port_id:%#x\n", + phy_id, port_id); sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD, GFP_ATOMIC); break; case HW_EVENT_PHY_DOWN: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n"); hw_event_phy_down(pm8001_ha, piomb); - phy->phy_attached = 0; phy->phy_state = PHY_LINK_DISABLE; break; case HW_EVENT_PORT_INVALID: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_INVALID phyid:%#x port_id:%#x\n", + phy_id, port_id); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, @@ -3463,7 +3474,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) GFP_ATOMIC); break; case HW_EVENT_PHY_ERROR: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PHY_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); sas_phy_disconnected(&phy->sas_phy); phy->phy_attached = 0; sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC); @@ -3477,34 +3490,39 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) GFP_ATOMIC); break; case HW_EVENT_LINK_ERR_INVALID_DWORD: - pm8001_dbg(pm8001_ha, MSG, - "HW_EVENT_LINK_ERR_INVALID_DWORD\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_INVALID_DWORD phyid:%#x port_id:%#x\n", + phy_id, port_id); pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); break; case HW_EVENT_LINK_ERR_DISPARITY_ERROR: - pm8001_dbg(pm8001_ha, MSG, - "HW_EVENT_LINK_ERR_DISPARITY_ERROR\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_DISPARITY_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_DISPARITY_ERROR, port_id, phy_id, 0, 0); break; case HW_EVENT_LINK_ERR_CODE_VIOLATION: - pm8001_dbg(pm8001_ha, MSG, - "HW_EVENT_LINK_ERR_CODE_VIOLATION\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_CODE_VIOLATION phyid:%#x port_id:%#x\n", + phy_id, port_id); pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_CODE_VIOLATION, port_id, phy_id, 0, 0); break; case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: - pm8001_dbg(pm8001_ha, MSG, - "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH phyid:%#x port_id:%#x\n", + phy_id, port_id); pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, port_id, phy_id, 0, 0); break; case HW_EVENT_MALFUNCTION: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_MALFUNCTION phyid:%#x\n", phy_id); break; case HW_EVENT_BROADCAST_SES: pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_SES\n"); @@ -3515,25 +3533,30 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) GFP_ATOMIC); break; case HW_EVENT_INBOUND_CRC_ERROR: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_INBOUND_CRC_ERROR phyid:%#x port_id:%#x\n", + phy_id, port_id); pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_INBOUND_CRC_ERROR, port_id, phy_id, 0, 0); break; case HW_EVENT_HARD_RESET_RECEIVED: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_HARD_RESET_RECEIVED phyid:%#x\n", phy_id); sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC); break; case HW_EVENT_ID_FRAME_TIMEOUT: - 
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_ID_FRAME_TIMEOUT phyid:%#x\n", phy_id); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, GFP_ATOMIC); break; case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: - pm8001_dbg(pm8001_ha, MSG, - "HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_LINK_ERR_PHY_RESET_FAILED phyid:%#x port_id:%#x\n", + phy_id, port_id); pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_PHY_RESET_FAILED, port_id, phy_id, 0, 0); @@ -3543,13 +3566,16 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) GFP_ATOMIC); break; case HW_EVENT_PORT_RESET_TIMER_TMO: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RESET_TIMER_TMO phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); if (!pm8001_ha->phy[phy_id].reset_completion) { pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, port_id, phy_id, 0, 0); } sas_phy_disconnected(sas_phy); phy->phy_attached = 0; + port->port_state = portstate; sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, GFP_ATOMIC); if (pm8001_ha->phy[phy_id].reset_completion) { @@ -3560,8 +3586,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) } break; case HW_EVENT_PORT_RECOVERY_TIMER_TMO: - pm8001_dbg(pm8001_ha, MSG, - "HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RECOVERY_TIMER_TMO phyid:%#x port_id:%#x\n", + phy_id, port_id); pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PORT_RECOVERY_TIMER_TMO, port_id, phy_id, 0, 0); @@ -3575,24 +3602,32 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) } break; case HW_EVENT_PORT_RECOVER: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RECOVER phyid:%#x port_id:%#x\n", + phy_id, port_id); hw_event_port_recover(pm8001_ha, piomb); break; case HW_EVENT_PORT_RESET_COMPLETE: - pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_COMPLETE\n"); + pm8001_dbg(pm8001_ha, EVENT, + "HW_EVENT_PORT_RESET_COMPLETE phyid:%#x port_id:%#x portstate:%#x\n", + phy_id, port_id, portstate); if (pm8001_ha->phy[phy_id].reset_completion) { pm8001_ha->phy[phy_id].port_reset_status = PORT_RESET_SUCCESS; complete(pm8001_ha->phy[phy_id].reset_completion); pm8001_ha->phy[phy_id].reset_completion = NULL; } + phy->phy_attached = 1; + phy->phy_state = PHY_STATE_LINK_UP_SPCV; + port->port_state = portstate; break; case EVENT_BROADCAST_ASYNCH_EVENT: pm8001_dbg(pm8001_ha, MSG, "EVENT_BROADCAST_ASYNCH_EVENT\n"); break; default: - pm8001_dbg(pm8001_ha, DEVIO, "Unknown event type 0x%x\n", - eventType); + pm8001_dbg(pm8001_ha, DEVIO, + "Unknown event portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status); break; } return 0; @@ -4726,6 +4761,9 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); + pm8001_dbg(pm8001_ha, INIT, + "register device req phy_id 0x%x port_id 0x%x\n", phy_id, + (port->port_id & 0xFF)); rc = pm8001_mpi_build_cmd(pm8001_ha, 0, opc, &payload, sizeof(payload), 0); if (rc) @@ -4815,7 +4853,7 @@ static void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.ppc_phyid = cpu_to_le32(((operation & 0xF) << 8) | (phyid & 0xFF)); - pm8001_dbg(pm8001_ha, INIT, + pm8001_dbg(pm8001_ha, 
DISC, " phy profile command for phy %x ,length is %d\n", le32_to_cpu(payload.ppc_phyid), length); for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) { diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 3b64de81ea0d..2a31ddc99dde 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -3041,9 +3041,8 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) * addresses of our queues */ if (!qedf->p_cpuq) { - status = -EINVAL; QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); - goto mem_alloc_failure; + return -EINVAL; } qedf->global_queues = kzalloc((sizeof(struct global_queue *) diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 802c373fd6d9..a584708d3056 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config SCSI_QLA_FC tristate "QLogic QLA2XXX Fibre Channel Support" - depends on PCI && SCSI + depends on PCI && HAS_IOPORT && SCSI depends on SCSI_FC_ATTRS depends on NVME_FC || !NVME_FC select FW_LOADER diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 70cfc94c3d43..b00222459607 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2750,6 +2750,7 @@ static void qla2x00_terminate_rport_io(struct fc_rport *rport) { fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + scsi_qla_host_t *vha; if (!fcport) return; @@ -2759,9 +2760,12 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) return; + vha = fcport->vha; if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) { qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16); + qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, + 0, WAIT_TARGET); return; } /* @@ -2786,6 +2790,15 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) qla2x00_port_logout(fcport->vha, fcport); } } + + /* check for any straggling io left behind */ + if (qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24, 0, WAIT_TARGET)) { + ql_log(ql_log_warn, vha, 0x300b, + "IO not return. Resetting. 
\n"); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + qla2x00_wait_for_chip_reset(vha); + } } static int diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index dba7bba788d7..19bb64bdd88b 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -283,6 +283,10 @@ qla2x00_process_els(struct bsg_job *bsg_job) if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); + if (!rport) { + rval = -ENOMEM; + goto done; + } fcport = *(fc_port_t **) rport->dd_data; host = rport_to_shost(rport); vha = shost_priv(host); @@ -2992,6 +2996,8 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); + if (!rport) + return ret; host = rport_to_shost(rport); vha = shost_priv(host); } else { diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 84aa3571be6d..d44c4d37b50b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -465,6 +465,15 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id) return res; } +struct tmf_arg { + struct qla_qpair *qpair; + struct fc_port *fcport; + struct scsi_qla_host *vha; + u64 lun; + u32 flags; + uint8_t modifier; +}; + struct els_logo_payload { uint8_t opcode; uint8_t rsvd[3]; @@ -544,6 +553,10 @@ struct srb_iocb { uint32_t data; struct completion comp; __le16 comp_status; + + uint8_t modifier; + uint8_t vp_index; + uint16_t loop_id; } tmf; struct { #define SRB_FXDISC_REQ_DMA_VALID BIT_0 @@ -647,6 +660,7 @@ struct srb_iocb { #define SRB_SA_UPDATE 25 #define SRB_ELS_CMD_HST_NOLOGIN 26 #define SRB_SA_REPLACE 27 +#define SRB_MARKER 28 struct qla_els_pt_arg { u8 els_opcode; @@ -689,7 +703,6 @@ typedef struct srb { struct iocb_resource iores; struct kref cmd_kref; /* need to migrate ref_count over to this */ void *priv; - wait_queue_head_t nvme_ls_waitq; struct fc_port *fcport; struct scsi_qla_host *vha; unsigned int start_timer:1; @@ -2528,6 +2541,7 @@ enum rscn_addr_format { typedef struct fc_port { struct list_head list; struct scsi_qla_host *vha; + struct list_head tmf_pending; unsigned int conf_compl_supported:1; unsigned int deleted:2; @@ -2548,6 +2562,8 @@ typedef struct fc_port { unsigned int do_prli_nvme:1; uint8_t nvme_flag; + uint8_t active_tmf; +#define MAX_ACTIVE_TMF 8 uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; @@ -3157,12 +3173,12 @@ struct ct_sns_gpnft_rsp { uint8_t vendor_unique; }; /* Assume the largest number of targets for the union */ - struct ct_sns_gpn_ft_data { + DECLARE_FLEX_ARRAY(struct ct_sns_gpn_ft_data { u8 control_byte; u8 port_id[3]; u32 reserved; u8 port_name[8]; - } entries[1]; + }, entries); }; /* CT command response */ @@ -5499,4 +5515,8 @@ struct ql_vnd_tgt_stats_resp { _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \ _fp->flags +#define TMF_NOT_READY(_fcport) \ + (!_fcport || IS_SESSION_DELETED(_fcport) || atomic_read(&_fcport->state) != FCS_ONLINE || \ + !_fcport->vha->hw->flags.fw_started) + #endif diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c index ec0e20255bd3..26e6b3e3af43 100644 --- a/drivers/scsi/qla2xxx/qla_edif.c +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -2361,8 +2361,8 @@ qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e) if (!sa_ctl) { ql_dbg(ql_dbg_edif, vha, 0x70e6, "sa_ctl allocation failed\n"); - rval = -ENOMEM; - goto done; + rval = -ENOMEM; + return rval; } fcport = sa_ctl->fcport; diff --git 
a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 391c8b3623a6..ba7831f24734 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -69,7 +69,7 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *); extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *); extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); +extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t); struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, enum qla_work_type); extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 4738f8935f7f..1cf9d200d563 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -3776,8 +3776,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE; rspsz = sizeof(struct ct_sns_gpnft_rsp) + - ((vha->hw->max_fibre_devices - 1) * - sizeof(struct ct_sns_gpn_ft_data)); + vha->hw->max_fibre_devices * + sizeof(struct ct_sns_gpn_ft_data); sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, rspsz, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 1a955c3ff3d6..c3dd8dd4f734 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1996,6 +1996,11 @@ qla2x00_tmf_iocb_timeout(void *data) int rc, h; unsigned long flags; + if (sp->type == SRB_MARKER) { + complete(&tmf->u.tmf.comp); + return; + } + rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); @@ -2013,24 +2018,131 @@ qla2x00_tmf_iocb_timeout(void *data) } } +static void qla_marker_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *tmf = &sp->u.iocb_cmd; + + if (res != QLA_SUCCESS) + ql_dbg(ql_dbg_taskm, sp->vha, 0x8004, + "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n", + sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags, + sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id); + + sp->u.iocb_cmd.u.tmf.data = res; + complete(&tmf->u.tmf.comp); +} + +#define START_SP_W_RETRIES(_sp, _rval) \ +{\ + int cnt = 5; \ + do { \ + _rval = qla2x00_start_sp(_sp); \ + if (_rval == EAGAIN) \ + msleep(1); \ + else \ + break; \ + cnt--; \ + } while (cnt); \ +} + +/** + * qla26xx_marker: send marker IOCB and wait for the completion of it. + * @arg: pointer to argument list. 
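START_SP_W_RETRIES() just above is a bounded-retry submit: up to five attempts, sleeping 1 ms whenever qla2x00_start_sp() reports EAGAIN (compared as a positive value, exactly as the macro does). The same loop written as a plain helper with a hypothetical submit callback:

#include <linux/delay.h>
#include <linux/errno.h>

static int demo_submit_with_retries(int (*submit)(void *), void *arg)
{
	int cnt = 5, rval;

	do {
		rval = submit(arg);
		if (rval != EAGAIN)	/* positive EAGAIN, per the macro */
			break;
		msleep(1);
	} while (--cnt);

	return rval;
}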
+ * It is assumed the caller will provide an fcport pointer and modifier + */ +static int +qla26xx_marker(struct tmf_arg *arg) +{ + struct scsi_qla_host *vha = arg->vha; + struct srb_iocb *tm_iocb; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; + + if (TMF_NOT_READY(arg->fcport)) { + ql_dbg(ql_dbg_taskm, vha, 0x8039, + "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", + fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, arg->qpair->id); + return QLA_SUSPENDED; + } + + /* ref: INIT */ + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_MARKER; + sp->name = "marker"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; + + tm_iocb = &sp->u.iocb_cmd; + init_completion(&tm_iocb->u.tmf.comp); + tm_iocb->u.tmf.modifier = arg->modifier; + tm_iocb->u.tmf.lun = arg->lun; + tm_iocb->u.tmf.loop_id = fcport->loop_id; + tm_iocb->u.tmf.vp_index = vha->vp_idx; + + START_SP_W_RETRIES(sp, rval); + + ql_dbg(ql_dbg_taskm, vha, 0x8006, + "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, sp->qpair->id, rval); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8031, + "Marker IOCB send failure (%x).\n", rval); + goto done_free_sp; + } + + wait_for_completion(&tm_iocb->u.tmf.comp); + rval = tm_iocb->u.tmf.data; + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8019, + "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, sp->qpair->id, rval); + } + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + static void qla2x00_tmf_sp_done(srb_t *sp, int res) { struct srb_iocb *tmf = &sp->u.iocb_cmd; + if (res) + tmf->u.tmf.data = res; complete(&tmf->u.tmf.comp); } -int -qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, - uint32_t tag) +static int +__qla2x00_async_tm_cmd(struct tmf_arg *arg) { - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = arg->vha; struct srb_iocb *tm_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; + + if (TMF_NOT_READY(arg->fcport)) { + ql_dbg(ql_dbg_taskm, vha, 0x8032, + "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", + fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, arg->qpair->id); + return QLA_SUSPENDED; + } + /* ref: INIT */ - sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); if (!sp) goto done; @@ -2043,15 +2155,16 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, tm_iocb = &sp->u.iocb_cmd; init_completion(&tm_iocb->u.tmf.comp); - tm_iocb->u.tmf.flags = flags; - tm_iocb->u.tmf.lun = lun; + tm_iocb->u.tmf.flags = arg->flags; + tm_iocb->u.tmf.lun = arg->lun; + + START_SP_W_RETRIES(sp, rval); ql_dbg(ql_dbg_taskm, vha, 0x802f, - "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", - sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->flags, arg->lun, sp->qpair->id, rval); - rval = qla2x00_start_sp(sp); if 
(rval != QLA_SUCCESS) goto done_free_sp; wait_for_completion(&tm_iocb->u.tmf.comp); @@ -2063,15 +2176,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, "TM IOCB failed (%x).\n", rval); } - if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { - flags = tm_iocb->u.tmf.flags; - lun = (uint16_t)tm_iocb->u.tmf.lun; - - /* Issue Marker IOCB */ - qla2x00_marker(vha, vha->hw->base_qpair, - fcport->loop_id, lun, - flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); - } + if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) + rval = qla26xx_marker(arg); done_free_sp: /* ref: INIT */ @@ -2080,6 +2186,115 @@ done: return rval; } +static void qla_put_tmf(fc_port_t *fcport) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + fcport->active_tmf--; + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} + +static +int qla_get_tmf(fc_port_t *fcport) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + int rc = 0; + LIST_HEAD(tmf_elem); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + list_add_tail(&tmf_elem, &fcport->tmf_pending); + + while (fcport->active_tmf >= MAX_ACTIVE_TMF) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + msleep(1); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (TMF_NOT_READY(fcport)) { + ql_log(ql_log_warn, vha, 0x802c, + "Unable to acquire TM resource due to disruption.\n"); + rc = EIO; + break; + } + if (fcport->active_tmf < MAX_ACTIVE_TMF && + list_is_first(&tmf_elem, &fcport->tmf_pending)) + break; + } + + list_del(&tmf_elem); + + if (!rc) + fcport->active_tmf++; + + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return rc; +} + +int +qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, + uint32_t tag) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_qpair *qpair; + struct tmf_arg a; + int i, rval = QLA_SUCCESS; + + if (TMF_NOT_READY(fcport)) + return QLA_SUSPENDED; + + a.vha = fcport->vha; + a.fcport = fcport; + a.lun = lun; + if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { + a.modifier = MK_SYNC_ID_LUN; + + if (qla_get_tmf(fcport)) + return QLA_FUNCTION_FAILED; + } else { + a.modifier = MK_SYNC_ID; + } + + if (vha->hw->mqenable) { + for (i = 0; i < vha->hw->num_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) + continue; + + if (TMF_NOT_READY(fcport)) { + ql_log(ql_log_warn, vha, 0x8026, + "Unable to send TM due to disruption.\n"); + rval = QLA_SUSPENDED; + break; + } + + a.qpair = qpair; + a.flags = flags|TCF_NOTMCMD_TO_TARGET; + rval = __qla2x00_async_tm_cmd(&a); + if (rval) + break; + } + } + + if (rval) + goto bailout; + + a.qpair = vha->hw->base_qpair; + a.flags = flags; + rval = __qla2x00_async_tm_cmd(&a); + +bailout: + if (a.modifier == MK_SYNC_ID_LUN) + qla_put_tmf(fcport); + + return rval; +} + int qla24xx_async_abort_command(srb_t *sp) { @@ -4861,7 +5076,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, if (use_tbl && ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) - strlcpy(ha->model_desc, + strscpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc)); } else { @@ -4869,14 +5084,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, if (use_tbl && ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) { - 
strlcpy(ha->model_number, + strscpy(ha->model_number, qla2x00_model_name[index * 2], sizeof(ha->model_number)); - strlcpy(ha->model_desc, + strscpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc)); } else { - strlcpy(ha->model_number, def, + strscpy(ha->model_number, def, sizeof(ha->model_number)); } } @@ -5291,6 +5506,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); + INIT_LIST_HEAD(&fcport->tmf_pending); INIT_LIST_HEAD(&fcport->sess_cmd_list); spin_lock_init(&fcport->sess_cmd_lock); @@ -5333,7 +5549,7 @@ static void qla_get_login_template(scsi_qla_host_t *vha) __be32 *q; memset(ha->init_cb, 0, ha->init_cb_size); - sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); + sz = min_t(int, sizeof(struct fc_els_csp), ha->init_cb_size); rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, ha->init_cb, sz); if (rval != QLA_SUCCESS) { @@ -6004,7 +6220,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) fc_port_t *fcport; uint16_t mb[MAILBOX_REGISTER_COUNT]; uint16_t loop_id; - LIST_HEAD(new_fcports); struct qla_hw_data *ha = vha->hw; int discovery_gen; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 7b42558a8839..0167e85ba058 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -109,11 +109,13 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state) { int old_val; uint8_t shiftbits, mask; + uint8_t port_dstate_str_sz; /* This will have to change when the max no. of states > 16 */ shiftbits = 4; mask = (1 << shiftbits) - 1; + port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *); fcport->disc_state = state; while (1) { old_val = atomic_read(&fcport->shadow_disc_state); @@ -121,7 +123,8 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state) old_val, (old_val << shiftbits) | state)) { ql_dbg(ql_dbg_disc, fcport->vha, 0x2134, "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n", - fcport->port_name, port_dstate_str[old_val & mask], + fcport->port_name, (old_val & mask) < port_dstate_str_sz ? 
+ port_dstate_str[old_val & mask] : "Unknown", port_dstate_str[state], fcport->d_id.b24); return; } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index b9b3e6f80ea9..a1675f056a5c 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -522,21 +522,25 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, return (QLA_FUNCTION_FAILED); } + mrk24 = (struct mrk_entry_24xx *)mrk; + mrk->entry_type = MARKER_TYPE; mrk->modifier = type; if (type != MK_SYNC_ALL) { if (IS_FWI2_CAPABLE(ha)) { - mrk24 = (struct mrk_entry_24xx *) mrk; mrk24->nport_handle = cpu_to_le16(loop_id); int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); mrk24->vp_index = vha->vp_idx; - mrk24->handle = make_handle(req->id, mrk24->handle); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16((uint16_t)lun); } } + + if (IS_FWI2_CAPABLE(ha)) + mrk24->handle = QLA_SKIP_HANDLE; + wmb(); qla2x00_start_iocbs(vha, req); @@ -603,7 +607,8 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type); /* No data transfer */ - if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { + if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE || + tot_dsds == 0) { cmd_pkt->byte_count = cpu_to_le32(0); return 0; } @@ -2541,7 +2546,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; struct srb_iocb *iocb = &sp->u.iocb_cmd; - struct req_que *req = vha->req; + struct req_que *req = sp->qpair->req; flags = iocb->u.tmf.flags; lun = iocb->u.tmf.lun; @@ -2557,7 +2562,8 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) tsk->port_id[2] = fcport->d_id.b.domain; tsk->vp_index = fcport->vha->vp_idx; - if (flags == TCF_LUN_RESET) { + if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET| + TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { int_to_scsilun(lun, &tsk->lun); host_to_fcp_swap((uint8_t *)&tsk->lun, sizeof(tsk->lun)); @@ -3852,9 +3858,9 @@ static int qla_get_iocbs_resource(struct srb *sp) case SRB_NACK_LOGO: case SRB_LOGOUT_CMD: case SRB_CTRL_VP: - push_it_through = true; - fallthrough; + case SRB_MARKER: default: + push_it_through = true; get_exch = false; } @@ -3870,6 +3876,19 @@ static int qla_get_iocbs_resource(struct srb *sp) return qla_get_fw_resources(sp->qpair, &sp->iores); } +static void +qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) +{ + mrk->entry_type = MARKER_TYPE; + mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier; + if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) { + mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id); + int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun); + host_to_fcp_swap(mrk->lun, sizeof(mrk->lun)); + mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index; + } +} + int qla2x00_start_sp(srb_t *sp) { @@ -3973,6 +3992,9 @@ qla2x00_start_sp(srb_t *sp) case SRB_SA_REPLACE: qla24xx_sa_replace_iocb(sp, pkt); break; + case SRB_MARKER: + qla_marker_iocb(sp, pkt); + break; default: break; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 245e3a5d81fd..656700f79325 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1862,9 +1862,9 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, } } -srb_t * -qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, - struct req_que *req, void *iocb) +static srb_t * 
+qla_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, + struct req_que *req, void *iocb, u16 *ret_index) { struct qla_hw_data *ha = vha->hw; sts_entry_t *pkt = iocb; @@ -1899,12 +1899,25 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, return NULL; } - req->outstanding_cmds[index] = NULL; - + *ret_index = index; qla_put_fw_resources(sp->qpair, &sp->iores); return sp; } +srb_t * +qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, + struct req_que *req, void *iocb) +{ + uint16_t index; + srb_t *sp; + + sp = qla_get_sp_from_handle(vha, func, req, iocb, &index); + if (sp) + req->outstanding_cmds[index] = NULL; + + return sp; +} + static void qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct mbx_entry *mbx) @@ -3237,13 +3250,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } - req->outstanding_cmds[handle] = NULL; cp = GET_CMD_SP(sp); if (cp == NULL) { ql_dbg(ql_dbg_io, vha, 0x3018, "Command already returned (0x%x/%p).\n", sts->handle, sp); + req->outstanding_cmds[handle] = NULL; return; } @@ -3514,6 +3527,9 @@ out: if (rsp->status_srb == NULL) sp->done(sp, res); + + /* for io's, clearing of outstanding_cmds[handle] means scsi_done was called */ + req->outstanding_cmds[handle] = NULL; } /** @@ -3590,6 +3606,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) uint16_t que = MSW(pkt->handle); struct req_que *req = NULL; int res = DID_ERROR << 16; + u16 index; ql_dbg(ql_dbg_async, vha, 0x502a, "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", @@ -3608,7 +3625,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) switch (pkt->entry_type) { case NOTIFY_ACK_TYPE: - case STATUS_TYPE: case STATUS_CONT_TYPE: case LOGINOUT_PORT_IOCB_TYPE: case CT_IOCB_TYPE: @@ -3628,6 +3644,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) case CTIO_TYPE7: case CTIO_CRC2: return 1; + case STATUS_TYPE: + sp = qla_get_sp_from_handle(vha, func, req, pkt, &index); + if (sp) { + sp->done(sp, res); + req->outstanding_cmds[index] = NULL; + return 0; + } + break; } fatal: ql_log(ql_log_warn, vha, 0x5030, @@ -3750,6 +3774,28 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha, return rc; } +static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mrk_entry_24xx *pkt) +{ + const char func[] = "MRK-IOCB"; + srb_t *sp; + int res = QLA_SUCCESS; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (pkt->entry_status) { + ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n"); + res = QLA_COMMAND_ERROR; + } + sp->u.iocb_cmd.u.tmf.data = res; + sp->done(sp, res); +} + /** * qla24xx_process_response_queue() - Process response queue entries. 
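With qla_get_sp_from_handle() above returning the slot index instead of clearing it, the caller now empties outstanding_cmds[] only after completion, so an empty slot reliably means the command finished. A toy illustration of that ownership rule (hypothetical demo_* types):

#include <linux/types.h>

#define DEMO_MAX_CMDS 128

struct demo_cmd {
	void (*done)(struct demo_cmd *cmd);
};

static struct demo_cmd *demo_outstanding[DEMO_MAX_CMDS];

static void demo_complete(u16 handle)
{
	struct demo_cmd *cmd = demo_outstanding[handle];

	if (!cmd)
		return;	/* slot already cleared: completion already ran */

	cmd->done(cmd);
	/* Clear only after done(): "empty slot" means "completion ran". */
	demo_outstanding[handle] = NULL;
}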
* @vha: SCSI driver HA context @@ -3866,9 +3912,7 @@ process_err: (struct nack_to_isp *)pkt); break; case MARKER_TYPE: - /* Do nothing in this case, this check is to prevent it - * from falling into default case - */ + qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt); break; case ABORT_IOCB_TYPE: qla24xx_abort_iocb_entry(vha, rsp->req, diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index f726eb8449c5..083f94e43fba 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -691,7 +691,7 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) struct qla_hw_data *ha = vha->hw; if (pci_is_pcie(ha->pdev)) - strlcpy(str, "PCIe iSA", str_len); + strscpy(str, "PCIe iSA", str_len); return str; } @@ -1850,21 +1850,21 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) phost_info = &preg_hsi->hsi; memset(preg_hsi, 0, sizeof(struct register_host_info)); phost_info->os_type = OS_TYPE_LINUX; - strlcpy(phost_info->sysname, p_sysid->sysname, + strscpy(phost_info->sysname, p_sysid->sysname, sizeof(phost_info->sysname)); - strlcpy(phost_info->nodename, p_sysid->nodename, + strscpy(phost_info->nodename, p_sysid->nodename, sizeof(phost_info->nodename)); if (!strcmp(phost_info->nodename, "(none)")) ha->mr.host_info_resend = true; - strlcpy(phost_info->release, p_sysid->release, + strscpy(phost_info->release, p_sysid->release, sizeof(phost_info->release)); - strlcpy(phost_info->version, p_sysid->version, + strscpy(phost_info->version, p_sysid->version, sizeof(phost_info->version)); - strlcpy(phost_info->machine, p_sysid->machine, + strscpy(phost_info->machine, p_sysid->machine, sizeof(phost_info->machine)); - strlcpy(phost_info->domainname, p_sysid->domainname, + strscpy(phost_info->domainname, p_sysid->domainname, sizeof(phost_info->domainname)); - strlcpy(phost_info->hostdriver, QLA2XXX_VERSION, + strscpy(phost_info->hostdriver, QLA2XXX_VERSION, sizeof(phost_info->hostdriver)); preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); ql_dbg(ql_dbg_init, vha, 0x0149, @@ -1909,9 +1909,9 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) if (fx_type == FXDISC_GET_CONFIG_INFO) { struct config_info_data *pinfo = (struct config_info_data *) fdisc->u.fxiocb.rsp_addr; - strlcpy(vha->hw->model_number, pinfo->model_num, + strscpy(vha->hw->model_number, pinfo->model_num, ARRAY_SIZE(vha->hw->model_number)); - strlcpy(vha->hw->model_desc, pinfo->model_description, + strscpy(vha->hw->model_desc, pinfo->model_description, ARRAY_SIZE(vha->hw->model_desc)); memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name, sizeof(vha->hw->mr.symbolic_name)); diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 648e8f798606..86e85f2f4782 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -360,7 +360,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, "qla2x00_start_sp failed = %d\n", rval); - wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; qla2x00_rel_sp(sp); @@ -652,7 +651,6 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, if (!sp) return -EBUSY; - init_waitqueue_head(&sp->nvme_ls_waitq); kref_init(&sp->cmd_kref); spin_lock_init(&priv->cmd_lock); sp->priv = priv; @@ -671,7 +669,6 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x212d, "qla2x00_start_nvme_mq 
failed = %d\n", rval); - wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; qla2xxx_rel_qpair_sp(sp->qpair, sp); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2fa695bf38b7..877e4f446709 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1079,43 +1079,6 @@ qc24_fail_command: } /* - * qla2x00_eh_wait_on_command - * Waits for the command to be returned by the Firmware for some - * max time. - * - * Input: - * cmd = Scsi Command to wait on. - * - * Return: - * Completed in time : QLA_SUCCESS - * Did not complete in time : QLA_FUNCTION_FAILED - */ -static int -qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) -{ -#define ABORT_POLLING_PERIOD 1000 -#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) - unsigned long wait_iter = ABORT_WAIT_ITER; - scsi_qla_host_t *vha = shost_priv(cmd->device->host); - struct qla_hw_data *ha = vha->hw; - srb_t *sp = scsi_cmd_priv(cmd); - int ret = QLA_SUCCESS; - - if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) { - ql_dbg(ql_dbg_taskm, vha, 0x8005, - "Return:eh_wait.\n"); - return ret; - } - - while (sp->type && wait_iter--) - msleep(ABORT_POLLING_PERIOD); - if (sp->type) - ret = QLA_FUNCTION_FAILED; - - return ret; -} - -/* * qla2x00_wait_for_hba_online * Wait till the HBA is online after going through * <= MAX_RETRIES_OF_ISP_ABORT or @@ -1365,6 +1328,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) return ret; } +#define ABORT_POLLING_PERIOD 1000 +#define ABORT_WAIT_ITER ((2 * 1000) / (ABORT_POLLING_PERIOD)) + /* * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED. */ @@ -1378,41 +1344,73 @@ __qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t, struct req_que *req = qpair->req; srb_t *sp; struct scsi_cmnd *cmd; + unsigned long wait_iter = ABORT_WAIT_ITER; + bool found; + struct qla_hw_data *ha = vha->hw; status = QLA_SUCCESS; - spin_lock_irqsave(qpair->qp_lock_ptr, flags); - for (cnt = 1; status == QLA_SUCCESS && - cnt < req->num_outstanding_cmds; cnt++) { - sp = req->outstanding_cmds[cnt]; - if (!sp) - continue; - if (sp->type != SRB_SCSI_CMD) - continue; - if (vha->vp_idx != sp->vha->vp_idx) - continue; - match = 0; - cmd = GET_CMD_SP(sp); - switch (type) { - case WAIT_HOST: - match = 1; - break; - case WAIT_TARGET: - match = cmd->device->id == t; - break; - case WAIT_LUN: - match = (cmd->device->id == t && - cmd->device->lun == l); - break; - } - if (!match) - continue; + while (wait_iter--) { + found = false; - spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - status = qla2x00_eh_wait_on_command(cmd); spin_lock_irqsave(qpair->qp_lock_ptr, flags); + for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (!sp) + continue; + if (sp->type != SRB_SCSI_CMD) + continue; + if (vha->vp_idx != sp->vha->vp_idx) + continue; + match = 0; + cmd = GET_CMD_SP(sp); + switch (type) { + case WAIT_HOST: + match = 1; + break; + case WAIT_TARGET: + if (sp->fcport) + match = sp->fcport->d_id.b24 == t; + else + match = 0; + break; + case WAIT_LUN: + if (sp->fcport) + match = (sp->fcport->d_id.b24 == t && + cmd->device->lun == l); + else + match = 0; + break; + } + if (!match) + continue; + + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (unlikely(pci_channel_offline(ha->pdev)) || + ha->flags.eeh_busy) { + ql_dbg(ql_dbg_taskm, vha, 0x8005, + "Return:eh_wait.\n"); + return status; + } + + /* + * SRB_SCSI_CMD is still in the outstanding_cmds array. + * it means scsi_done has not called. 
Wait for it to + * clear from outstanding_cmds. + */ + msleep(ABORT_POLLING_PERIOD); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + found = true; + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (!found) + break; } - spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (wait_iter == -1) + status = QLA_FUNCTION_FAILED; return status; } @@ -5090,7 +5088,8 @@ struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *sht, } INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); - sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no); + snprintf(vha->host_str, sizeof(vha->host_str), "%s_%lu", + QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, "Allocated the host=%p hw=%p vha=%p dev_name=%s", vha->host, vha->hw, vha, diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 42d69d89834f..e3771923b0d7 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -6,9 +6,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.02.08.200-k" +#define QLA2XXX_VERSION "10.02.08.400-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 2 #define QLA_DRIVER_PATCH_VER 8 -#define QLA_DRIVER_BETA_VER 200 +#define QLA_DRIVER_BETA_VER 400 diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index cd71074f3abe..249f1d7021d4 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -1611,8 +1611,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, goto exit_get_chap; } - strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); - strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); chap_table->cookie = cpu_to_le16(CHAP_VALID_COOKIE); exit_get_chap: @@ -1732,8 +1732,8 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username, goto exit_unlock_uni_chap; } - strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); - strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN); + strscpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN); + strscpy(username, chap_table->name, MAX_CHAP_NAME_LEN); rval = QLA_SUCCESS; diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index ee6d784c095c..b2a3988e1e15 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -798,9 +798,9 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx, continue; chap_rec->chap_tbl_idx = i; - strlcpy(chap_rec->username, chap_table->name, + strscpy(chap_rec->username, chap_table->name, ISCSI_CHAP_AUTH_NAME_MAX_LEN); - strlcpy(chap_rec->password, chap_table->secret, + strscpy(chap_rec->password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); chap_rec->password_length = chap_table->secret_len; @@ -6052,8 +6052,8 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, if (!(chap_table->flags & BIT_6)) /* Not BIDI */ continue; - strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); - strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + strscpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strscpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); ret = 0; break; } @@ -6281,8 +6281,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry, tddb->tpgt = sess->tpgt; tddb->port = conn->persistent_port; - strlcpy(tddb->iscsi_name, 
sess->targetname, ISCSI_NAME_SIZE); - strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); + strscpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE); + strscpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN); } static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry, @@ -7781,7 +7781,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess, goto exit_ddb_logout; } - strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname, + strscpy(flash_tddb->iscsi_name, fnode_sess->targetname, ISCSI_NAME_SIZE); if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4)) diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 09ef0b31dfc0..c4bf99a842f3 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -504,18 +504,22 @@ void scsi_attach_vpd(struct scsi_device *sdev) } /** - * scsi_report_opcode - Find out if a given command opcode is supported + * scsi_report_opcode - Find out if a given command is supported * @sdev: scsi device to query * @buffer: scratch buffer (must be at least 20 bytes long) * @len: length of buffer - * @opcode: opcode for command to look up - * - * Uses the REPORT SUPPORTED OPERATION CODES to look up the given - * opcode. Returns -EINVAL if RSOC fails, 0 if the command opcode is - * unsupported and 1 if the device claims to support the command. + * @opcode: opcode for the command to look up + * @sa: service action for the command to look up + * + * Uses the REPORT SUPPORTED OPERATION CODES to check support for the + * command identified with @opcode and @sa. If the command does not + * have a service action, @sa must be 0. Returns -EINVAL if RSOC fails, + * 0 if the command is not supported and 1 if the device claims to + * support the command. */ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, - unsigned int len, unsigned char opcode) + unsigned int len, unsigned char opcode, + unsigned short sa) { unsigned char cmd[16]; struct scsi_sense_hdr sshdr; @@ -539,8 +543,14 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, memset(cmd, 0, 16); cmd[0] = MAINTENANCE_IN; cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES; - cmd[2] = 1; /* One command format */ - cmd[3] = opcode; + if (!sa) { + cmd[2] = 1; /* One command format */ + cmd[3] = opcode; + } else { + cmd[2] = 3; /* One command format with service action */ + cmd[3] = opcode; + put_unaligned_be16(sa, &cmd[4]); + } put_unaligned_be32(request_len, &cmd[6]); memset(buffer, 0, len); @@ -560,6 +570,149 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, } EXPORT_SYMBOL(scsi_report_opcode); +#define SCSI_CDL_CHECK_BUF_LEN 64 + +static bool scsi_cdl_check_cmd(struct scsi_device *sdev, u8 opcode, u16 sa, + unsigned char *buf) +{ + int ret; + u8 cdlp; + + /* Check operation code */ + ret = scsi_report_opcode(sdev, buf, SCSI_CDL_CHECK_BUF_LEN, opcode, sa); + if (ret <= 0) + return false; + + if ((buf[1] & 0x03) != 0x03) + return false; + + /* See SPC-6, one command format of REPORT SUPPORTED OPERATION CODES */ + cdlp = (buf[1] & 0x18) >> 3; + if (buf[0] & 0x01) { + /* rwcdlp == 1 */ + switch (cdlp) { + case 0x01: + /* T2A page */ + return true; + case 0x02: + /* T2B page */ + return true; + } + } else { + /* rwcdlp == 0 */ + switch (cdlp) { + case 0x01: + /* A page */ + return true; + case 0x02: + /* B page */ + return true; + } + } + + return false; +} + +/** + * scsi_cdl_check - Check if a SCSI device supports Command Duration Limits + * @sdev: The device to check + */ +void 
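scsi_report_opcode() above now distinguishes a plain opcode from an opcode/service-action pair through the RSOC reporting-options field. A standalone sketch of the resulting CDB layout; the byte values follow SPC and the diff, while the helper itself is illustrative rather than the kernel API:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void build_rsoc_cdb(uint8_t cdb[16], uint8_t opcode, uint16_t sa,
			   uint32_t alloc_len)
{
	memset(cdb, 0, 16);
	cdb[0] = 0xa3;			/* MAINTENANCE IN */
	cdb[1] = 0x0c;			/* REPORT SUPPORTED OPERATION CODES */
	cdb[2] = sa ? 3 : 1;		/* one opcode, with/without SA */
	cdb[3] = opcode;
	if (sa) {
		cdb[4] = sa >> 8;	/* big-endian service action */
		cdb[5] = sa & 0xff;
	}
	cdb[6] = alloc_len >> 24;	/* big-endian allocation length */
	cdb[7] = alloc_len >> 16;
	cdb[8] = alloc_len >> 8;
	cdb[9] = alloc_len & 0xff;
}

int main(void)
{
	uint8_t cdb[16];

	build_rsoc_cdb(cdb, 0x88 /* READ_16 */, 0, 64);
	printf("reporting options: %u\n", cdb[2]);	/* 1: no SA */
	return 0;
}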
scsi_cdl_check(struct scsi_device *sdev) +{ + bool cdl_supported; + unsigned char *buf; + + buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL); + if (!buf) { + sdev->cdl_supported = 0; + return; + } + + /* Check support for READ_16, WRITE_16, READ_32 and WRITE_32 commands */ + cdl_supported = + scsi_cdl_check_cmd(sdev, READ_16, 0, buf) || + scsi_cdl_check_cmd(sdev, WRITE_16, 0, buf) || + scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, READ_32, buf) || + scsi_cdl_check_cmd(sdev, VARIABLE_LENGTH_CMD, WRITE_32, buf); + if (cdl_supported) { + /* + * We have CDL support: force the use of READ16/WRITE16. + * READ32 and WRITE32 will be used for devices that support + * the T10_PI_TYPE2_PROTECTION protection type. + */ + sdev->use_16_for_rw = 1; + sdev->use_10_for_rw = 0; + + sdev->cdl_supported = 1; + } else { + sdev->cdl_supported = 0; + } + + kfree(buf); +} + +/** + * scsi_cdl_enable - Enable or disable a SCSI device supports for Command + * Duration Limits + * @sdev: The target device + * @enable: the target state + */ +int scsi_cdl_enable(struct scsi_device *sdev, bool enable) +{ + struct scsi_mode_data data; + struct scsi_sense_hdr sshdr; + struct scsi_vpd *vpd; + bool is_ata = false; + char buf[64]; + int ret; + + if (!sdev->cdl_supported) + return -EOPNOTSUPP; + + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + if (vpd) + is_ata = true; + rcu_read_unlock(); + + /* + * For ATA devices, CDL needs to be enabled with a SET FEATURES command. + */ + if (is_ata) { + char *buf_data; + int len; + + ret = scsi_mode_sense(sdev, 0x08, 0x0a, 0xf2, buf, sizeof(buf), + 5 * HZ, 3, &data, NULL); + if (ret) + return -EINVAL; + + /* Enable CDL using the ATA feature page */ + len = min_t(size_t, sizeof(buf), + data.length - data.header_length - + data.block_descriptor_length); + buf_data = buf + data.header_length + + data.block_descriptor_length; + if (enable) + buf_data[4] = 0x02; + else + buf_data[4] = 0; + + ret = scsi_mode_select(sdev, 1, 0, buf_data, len, 5 * HZ, 3, + &data, &sshdr); + if (ret) { + if (scsi_sense_valid(&sshdr)) + scsi_print_sense_hdr(sdev, + dev_name(&sdev->sdev_gendev), &sshdr); + return ret; + } + } + + sdev->cdl_enable = enable; + + return 0; +} + /** * scsi_device_get - get an additional reference to a scsi_device * @sdev: device to get a reference to diff --git a/drivers/scsi/scsi_common.c b/drivers/scsi/scsi_common.c index 6e50e81a8216..9c14fdf61037 100644 --- a/drivers/scsi/scsi_common.c +++ b/drivers/scsi/scsi_common.c @@ -8,6 +8,7 @@ #include <linux/string.h> #include <linux/errno.h> #include <linux/module.h> +#include <uapi/linux/pr.h> #include <asm/unaligned.h> #include <scsi/scsi_common.h> @@ -63,6 +64,48 @@ const char *scsi_device_type(unsigned type) } EXPORT_SYMBOL(scsi_device_type); +enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type) +{ + switch (type) { + case SCSI_PR_WRITE_EXCLUSIVE: + return PR_WRITE_EXCLUSIVE; + case SCSI_PR_EXCLUSIVE_ACCESS: + return PR_EXCLUSIVE_ACCESS; + case SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY: + return PR_WRITE_EXCLUSIVE_REG_ONLY; + case SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY: + return PR_EXCLUSIVE_ACCESS_REG_ONLY; + case SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS: + return PR_WRITE_EXCLUSIVE_ALL_REGS; + case SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS: + return PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(scsi_pr_type_to_block); + +enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type) +{ + switch (type) { + case PR_WRITE_EXCLUSIVE: + return SCSI_PR_WRITE_EXCLUSIVE; + case PR_EXCLUSIVE_ACCESS: + return SCSI_PR_EXCLUSIVE_ACCESS; 
+ case PR_WRITE_EXCLUSIVE_REG_ONLY: + return SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY; + case PR_EXCLUSIVE_ACCESS_REG_ONLY: + return SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY; + case PR_WRITE_EXCLUSIVE_ALL_REGS: + return SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS; + case PR_EXCLUSIVE_ACCESS_ALL_REGS: + return SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS; + } + + return 0; +} +EXPORT_SYMBOL_GPL(block_pr_type_to_scsi); + /** * scsilun_to_int - convert a scsi_lun to an int * @scsilun: struct scsi_lun to be converted. @@ -176,8 +219,7 @@ bool scsi_normalize_sense(const u8 *sense_buffer, int sb_len, if (sb_len > 2) sshdr->sense_key = (sense_buffer[2] & 0xf); if (sb_len > 7) { - sb_len = (sb_len < (sense_buffer[7] + 8)) ? - sb_len : (sense_buffer[7] + 8); + sb_len = min(sb_len, sense_buffer[7] + 8); if (sb_len > 12) sshdr->asc = sense_buffer[12]; if (sb_len > 13) diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 3ec8bfd4090f..c67cdcdc3ba8 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -536,6 +536,7 @@ static inline void set_scsi_ml_byte(struct scsi_cmnd *cmd, u8 status) */ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) { + struct request *req = scsi_cmd_to_rq(scmd); struct scsi_device *sdev = scmd->device; struct scsi_sense_hdr sshdr; @@ -595,6 +596,22 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) if (sshdr.asc == 0x10) /* DIF */ return SUCCESS; + /* + * Check aborts due to command duration limit policy: + * ABORTED COMMAND additional sense code with the + * COMMAND TIMEOUT BEFORE PROCESSING or + * COMMAND TIMEOUT DURING PROCESSING or + * COMMAND TIMEOUT DURING PROCESSING DUE TO ERROR RECOVERY + * additional sense code qualifiers. + */ + if (sshdr.asc == 0x2e && + sshdr.ascq >= 0x01 && sshdr.ascq <= 0x03) { + set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT); + req->cmd_flags |= REQ_FAILFAST_DEV; + req->rq_flags |= RQF_QUIET; + return SUCCESS; + } + if (sshdr.asc == 0x44 && sdev->sdev_bflags & BLIST_RETRY_ITF) return ADD_TO_MLQUEUE; if (sshdr.asc == 0xc1 && sshdr.ascq == 0x01 && @@ -691,6 +708,14 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd) } return SUCCESS; + case COMPLETED: + if (sshdr.asc == 0x55 && sshdr.ascq == 0x0a) { + set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT); + req->cmd_flags |= REQ_FAILFAST_DEV; + req->rq_flags |= RQF_QUIET; + } + return SUCCESS; + default: return SUCCESS; } @@ -785,6 +810,14 @@ static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd) switch (get_status_byte(scmd)) { case SAM_STAT_GOOD: scsi_handle_queue_ramp_up(scmd->device); + if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd)) + /* + * If we have sense data, call scsi_check_sense() in + * order to set the correct SCSI ML byte (if any). + * No point in checking the return value, since the + * command has already completed successfully. 
+ */ + scsi_check_sense(scmd); fallthrough; case SAM_STAT_COMMAND_TERMINATED: return SUCCESS; @@ -1807,6 +1840,10 @@ bool scsi_noretry_cmd(struct scsi_cmnd *scmd) return !!(req->cmd_flags & REQ_FAILFAST_DRIVER); } + /* Never retry commands aborted due to a duration limit timeout */ + if (scsi_ml_byte(scmd->result) == SCSIML_STAT_DL_TIMEOUT) + return true; + if (!scsi_status_is_check_condition(scmd->result)) return false; @@ -1966,6 +2003,14 @@ enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd) if (scmd->cmnd[0] == REPORT_LUNS) scmd->device->sdev_target->expecting_lun_change = 0; scsi_handle_queue_ramp_up(scmd->device); + if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd)) + /* + * If we have sense data, call scsi_check_sense() in + * order to set the correct SCSI ML byte (if any). + * No point in checking the return value, since the + * command has already completed successfully. + */ + scsi_check_sense(scmd); fallthrough; case SAM_STAT_COMMAND_TERMINATED: return SUCCESS; @@ -2165,7 +2210,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q) * scsi_eh_get_sense), scmd->result is already * set, do not set DID_TIME_OUT. */ - if (!scmd->result) + if (!scmd->result && + !(scmd->flags & SCMD_FORCE_EH_SUCCESS)) scmd->result |= (DID_TIME_OUT << 16); SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd, diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0226c9279cef..ad9afae49544 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -122,11 +122,9 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs) WARN_ON_ONCE(true); } - if (msecs) { - blk_mq_requeue_request(rq, false); + blk_mq_requeue_request(rq, false); + if (!scsi_host_in_recovery(cmd->device->host)) blk_mq_delay_kick_requeue_list(rq->q, msecs); - } else - blk_mq_requeue_request(rq, true); } /** @@ -165,7 +163,8 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) */ cmd->result = 0; - blk_mq_requeue_request(scsi_cmd_to_rq(cmd), true); + blk_mq_requeue_request(scsi_cmd_to_rq(cmd), + !scsi_host_in_recovery(cmd->device->host)); } /** @@ -453,6 +452,7 @@ static void scsi_run_queue(struct request_queue *q) if (!list_empty(&sdev->host->starved_list)) scsi_starved_list_run(sdev->host); + blk_mq_kick_requeue_list(q); blk_mq_run_hw_queues(q, false); } @@ -503,6 +503,9 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) static void scsi_run_queue_async(struct scsi_device *sdev) { + if (scsi_host_in_recovery(sdev->host)) + return; + if (scsi_target(sdev)->single_lun || !list_empty(&sdev->host->starved_list)) { kblockd_schedule_work(&sdev->requeue_work); @@ -578,11 +581,6 @@ static bool scsi_end_request(struct request *req, blk_status_t error, return false; } -static inline u8 get_scsi_ml_byte(int result) -{ - return (result >> 8) & 0xff; -} - /** * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t * @result: scsi error code @@ -595,17 +593,19 @@ static blk_status_t scsi_result_to_blk_status(int result) * Check the scsi-ml byte first in case we converted a host or status * byte. 
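The error-handling hunks above route command-duration-limit aborts (ABORTED COMMAND with ASC 0x2e and ASCQ 0x01-0x03, or COMPLETED with ASC 0x55/ASCQ 0x0a) into the new midlayer status SCSIML_STAT_DL_TIMEOUT, which scsi_result_to_blk_status() below maps to BLK_STS_DURATION_LIMIT. A tiny sketch of how the midlayer byte is packed and tested; the constants match the diff, the surrounding program is illustrative:

#include <stdio.h>
#include <stdint.h>

#define SCSIML_STAT_DL_TIMEOUT	0x05	/* as added to scsi_priv.h */

static uint8_t scsi_ml_byte(int result)
{
	return (result >> 8) & 0xff;	/* bits 8-15 of the result word */
}

int main(void)
{
	int result = SCSIML_STAT_DL_TIMEOUT << 8;	/* as set_scsi_ml_byte() packs it */

	printf("ml byte %#x, CDL timeout: %d\n", scsi_ml_byte(result),
	       scsi_ml_byte(result) == SCSIML_STAT_DL_TIMEOUT);
	return 0;
}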
*/ - switch (get_scsi_ml_byte(result)) { + switch (scsi_ml_byte(result)) { case SCSIML_STAT_OK: break; case SCSIML_STAT_RESV_CONFLICT: - return BLK_STS_NEXUS; + return BLK_STS_RESV_CONFLICT; case SCSIML_STAT_NOSPC: return BLK_STS_NOSPC; case SCSIML_STAT_MED_ERROR: return BLK_STS_MEDIUM; case SCSIML_STAT_TGT_FAILURE: return BLK_STS_TARGET; + case SCSIML_STAT_DL_TIMEOUT: + return BLK_STS_DURATION_LIMIT; } switch (host_byte(result)) { @@ -803,6 +803,8 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) blk_stat = BLK_STS_ZONE_OPEN_RESOURCE; } break; + case COMPLETED: + fallthrough; default: action = ACTION_FAIL; break; @@ -1985,6 +1987,8 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) tag_set->flags = BLK_MQ_F_SHOULD_MERGE; tag_set->flags |= BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); + if (shost->queuecommand_may_block) + tag_set->flags |= BLK_MQ_F_BLOCKING; tag_set->driver_data = shost; if (shost->host_tagset) tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; @@ -2152,6 +2156,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); * @sdev: SCSI device to be queried * @dbd: set to prevent mode sense from returning block descriptors * @modepage: mode page being requested + * @subpage: sub-page of the mode page being requested * @buffer: request buffer (may not be smaller than eight bytes) * @len: length of request buffer. * @timeout: command timeout @@ -2163,7 +2168,7 @@ EXPORT_SYMBOL_GPL(scsi_mode_select); * Returns zero if successful, or a negative error number on failure */ int -scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, +scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) { @@ -2183,6 +2188,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, dbd = sdev->set_dbd_for_ms ? 8 : dbd; cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ cmd[2] = modepage; + cmd[3] = subpage; sshdr = exec_args.sshdr; @@ -2728,24 +2734,16 @@ void scsi_start_queue(struct scsi_device *sdev) blk_mq_unquiesce_queue(sdev->request_queue); } -static void scsi_stop_queue(struct scsi_device *sdev, bool nowait) +static void scsi_stop_queue(struct scsi_device *sdev) { /* * The atomic variable of ->queue_stopped covers that * blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue. * - * However, we still need to wait until quiesce is done - * in case that queue has been stopped. + * The caller needs to wait until quiesce is done. */ - if (!cmpxchg(&sdev->queue_stopped, 0, 1)) { - if (nowait) - blk_mq_quiesce_queue_nowait(sdev->request_queue); - else - blk_mq_quiesce_queue(sdev->request_queue); - } else { - if (!nowait) - blk_mq_wait_quiesce_done(sdev->request_queue->tag_set); - } + if (!cmpxchg(&sdev->queue_stopped, 0, 1)) + blk_mq_quiesce_queue_nowait(sdev->request_queue); } /** @@ -2772,19 +2770,19 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev) * request queue. */ if (!ret) - scsi_stop_queue(sdev, true); + scsi_stop_queue(sdev); return ret; } EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); /** - * scsi_internal_device_block - try to transition to the SDEV_BLOCK state + * scsi_device_block - try to transition to the SDEV_BLOCK state * @sdev: device to block + * @data: dummy argument, ignored * - * Pause SCSI command processing on the specified device and wait until all - * ongoing scsi_request_fn() / scsi_queue_rq() calls have finished. May sleep. 
- * - * Returns zero if successful or a negative error code upon failure. + * Pause SCSI command processing on the specified device. Callers must wait + * until all ongoing scsi_queue_rq() calls have finished after this function + * returns. * * Note: * This routine transitions the device to the SDEV_BLOCK state (which must be @@ -2792,17 +2790,26 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); * is paused until the device leaves the SDEV_BLOCK state. See also * scsi_internal_device_unblock(). */ -static int scsi_internal_device_block(struct scsi_device *sdev) +static void scsi_device_block(struct scsi_device *sdev, void *data) { int err; + enum scsi_device_state state; mutex_lock(&sdev->state_mutex); err = __scsi_internal_device_block_nowait(sdev); + state = sdev->sdev_state; if (err == 0) - scsi_stop_queue(sdev, false); + /* + * scsi_stop_queue() must be called with the state_mutex + * held. Otherwise a simultaneous scsi_start_queue() call + * might unquiesce the queue before we quiesce it. + */ + scsi_stop_queue(sdev); + mutex_unlock(&sdev->state_mutex); - return err; + WARN_ONCE(err, "%s: failed to block %s in state %d\n", + __func__, dev_name(&sdev->sdev_gendev), state); } /** @@ -2885,36 +2892,35 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev, return ret; } -static void -device_block(struct scsi_device *sdev, void *data) -{ - int ret; - - ret = scsi_internal_device_block(sdev); - - WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n", - dev_name(&sdev->sdev_gendev), ret); -} - static int target_block(struct device *dev, void *data) { if (scsi_is_target_device(dev)) starget_for_each_device(to_scsi_target(dev), NULL, - device_block); + scsi_device_block); return 0; } +/** + * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state + * @dev: a parent device of one or more scsi_target devices + * @shost: the Scsi_Host to which this device belongs + * + * Iterate over all children of @dev, which should be scsi_target devices, + * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for + * ongoing scsi_queue_rq() calls to finish. May sleep. + * + * Note: + * @dev must not itself be a scsi_target device. + */ void -scsi_target_block(struct device *dev) +scsi_block_targets(struct Scsi_Host *shost, struct device *dev) { - if (scsi_is_target_device(dev)) - starget_for_each_device(to_scsi_target(dev), NULL, - device_block); - else - device_for_each_child(dev, NULL, target_block); + WARN_ON_ONCE(scsi_is_target_device(dev)); + device_for_each_child(dev, NULL, target_block); + blk_mq_wait_quiesce_done(&shost->tag_set); } -EXPORT_SYMBOL_GPL(scsi_target_block); +EXPORT_SYMBOL_GPL(scsi_block_targets); static void device_unblock(struct scsi_device *sdev, void *data) @@ -2942,11 +2948,20 @@ scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) } EXPORT_SYMBOL_GPL(scsi_target_unblock); +/** + * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state + * @shost: device to block + * + * Pause SCSI command processing for all logical units associated with the SCSI + * host and wait until pending scsi_queue_rq() calls have finished. + * + * Returns zero if successful or a negative error code upon failure. 
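scsi_block_targets() above and the reworked scsi_host_block() just below share one pattern: quiesce every queue with the nowait variant, then call blk_mq_wait_quiesce_done() a single time on the host's tag set. A schematic sketch of why batching the wait helps; plain C with stand-in functions, not the blk-mq API:

/* Schematic only: flipping the quiesced flag is cheap, waiting for a
 * grace period is expensive and can be shared by all queues. */
struct queue { int quiesced; };

static void quiesce_nowait(struct queue *q)
{
	q->quiesced = 1;
}

static void wait_quiesce_done(void)
{
	/* stands in for one shared RCU-style grace-period wait */
}

static void block_all(struct queue *qs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		quiesce_nowait(&qs[i]);	/* n cheap operations */
	wait_quiesce_done();		/* one wait instead of n waits */
}

int main(void)
{
	struct queue qs[4] = { { 0 } };

	block_all(qs, 4);
	return 0;
}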
+ */ int scsi_host_block(struct Scsi_Host *shost) { struct scsi_device *sdev; - int ret = 0; + int ret; /* * Call scsi_internal_device_block_nowait so we can avoid @@ -2958,20 +2973,14 @@ scsi_host_block(struct Scsi_Host *shost) mutex_unlock(&sdev->state_mutex); if (ret) { scsi_device_put(sdev); - break; + return ret; } } - /* - * SCSI never enables blk-mq's BLK_MQ_F_BLOCKING flag so - * calling synchronize_rcu() once is enough. - */ - WARN_ON_ONCE(shost->tag_set.flags & BLK_MQ_F_BLOCKING); - - if (!ret) - synchronize_rcu(); + /* Wait for ongoing scsi_queue_rq() calls to finish. */ + blk_mq_wait_quiesce_done(&shost->tag_set); - return ret; + return 0; } EXPORT_SYMBOL_GPL(scsi_host_block); diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 96284a0e13fe..f42388ecb024 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -27,8 +27,14 @@ enum scsi_ml_status { SCSIML_STAT_NOSPC = 0x02, /* Space allocation on the dev failed */ SCSIML_STAT_MED_ERROR = 0x03, /* Medium error */ SCSIML_STAT_TGT_FAILURE = 0x04, /* Permanent target failure */ + SCSIML_STAT_DL_TIMEOUT = 0x05, /* Command Duration Limit timeout */ }; +static inline u8 scsi_ml_byte(int result) +{ + return (result >> 8) & 0xff; +} + /* * Scsi Error Handler Flags */ diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index d217be323cc6..aa13feb17c62 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1087,6 +1087,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, if (sdev->scsi_level >= SCSI_3) scsi_attach_vpd(sdev); + scsi_cdl_check(sdev); + sdev->max_queue_depth = sdev->queue_depth; WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth); sdev->sdev_bflags = *bflags; @@ -1624,6 +1626,7 @@ void scsi_rescan_device(struct device *dev) device_lock(dev); scsi_attach_vpd(sdev); + scsi_cdl_check(sdev); if (sdev->handler && sdev->handler->rescan) sdev->handler->rescan(sdev); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 603e8fcfcb8a..60317676e45f 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -670,6 +670,7 @@ sdev_rd_attr (scsi_level, "%d\n"); sdev_rd_attr (vendor, "%.8s\n"); sdev_rd_attr (model, "%.16s\n"); sdev_rd_attr (rev, "%.4s\n"); +sdev_rd_attr (cdl_supported, "%d\n"); static ssize_t sdev_show_device_busy(struct device *dev, struct device_attribute *attr, @@ -1221,6 +1222,33 @@ static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, sdev_show_queue_ramp_up_period, sdev_store_queue_ramp_up_period); +static ssize_t sdev_show_cdl_enable(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return sysfs_emit(buf, "%d\n", (int)sdev->cdl_enable); +} + +static ssize_t sdev_store_cdl_enable(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + bool v; + + if (kstrtobool(buf, &v)) + return -EINVAL; + + ret = scsi_cdl_enable(to_scsi_device(dev), v); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR(cdl_enable, S_IRUGO | S_IWUSR, + sdev_show_cdl_enable, sdev_store_cdl_enable); + static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -1300,6 +1328,8 @@ static struct attribute *scsi_sdev_attrs[] = { &dev_attr_preferred_path.attr, #endif &dev_attr_queue_ramp_up_period.attr, + &dev_attr_cdl_supported.attr, + &dev_attr_cdl_enable.attr, REF_EVT(media_change), REF_EVT(inquiry_change_reported), 
REF_EVT(capacity_change_reported), diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 64ff2629eaf9..b04075f19445 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -3451,7 +3451,7 @@ fc_remote_port_delete(struct fc_rport *rport) spin_unlock_irqrestore(shost->host_lock, flags); - scsi_target_block(&rport->dev); + scsi_block_targets(shost, &rport->dev); /* see if we need to kill io faster than waiting for device loss */ if ((rport->fast_io_fail_tmo != -1) && diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index b9b97300e3b3..e527ece12453 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1943,13 +1943,14 @@ static void __iscsi_block_session(struct work_struct *work) struct iscsi_cls_session *session = container_of(work, struct iscsi_cls_session, block_work); + struct Scsi_Host *shost = iscsi_session_to_shost(session); unsigned long flags; ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n"); spin_lock_irqsave(&session->lock, flags); session->state = ISCSI_SESSION_FAILED; spin_unlock_irqrestore(&session->lock, flags); - scsi_target_block(&session->dev); + scsi_block_targets(shost, &session->dev); ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n"); if (session->recovery_tmo >= 0) queue_delayed_work(session->workq, diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 74b99f2b0b74..d704c484a251 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1245,7 +1245,7 @@ int sas_read_port_mode_page(struct scsi_device *sdev) if (!buffer) return -ENOMEM; - error = scsi_mode_sense(sdev, 1, 0x19, buffer, BUF_SIZE, 30*HZ, 3, + error = scsi_mode_sense(sdev, 1, 0x19, 0, buffer, BUF_SIZE, 30*HZ, 3, &mode_data, NULL); if (error) diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index 87d0fb8dc503..64f6b22e8cc0 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -396,7 +396,7 @@ static void srp_reconnect_work(struct work_struct *work) } /* - * scsi_target_block() must have been called before this function is + * scsi_block_targets() must have been called before this function is * called to guarantee that no .queuecommand() calls are in progress. */ static void __rport_fail_io_fast(struct srp_rport *rport) @@ -480,7 +480,7 @@ static void __srp_start_tl_fail_timers(struct srp_rport *rport) srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), rport->state); - scsi_target_block(&shost->shost_gendev); + scsi_block_targets(shost, &shost->shost_gendev); if (fast_io_fail_tmo >= 0) queue_delayed_work(system_long_wq, &rport->fast_io_fail_work, @@ -548,7 +548,7 @@ int srp_reconnect_rport(struct srp_rport *rport) * later is ok though, scsi_internal_device_unblock_nowait() * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK. */ - scsi_target_block(&shost->shost_gendev); + scsi_block_targets(shost, &shost->shost_gendev); res = rport->state != SRP_RPORT_LOST ? 
i->f->reconnect(rport) : -ENODEV; pr_debug("%s (state %d): transport.reconnect() returned %d\n", dev_name(&shost->shost_gendev), rport->state, res); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index ab216976dbdc..68b12afa0721 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -67,6 +67,7 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_ioctl.h> #include <scsi/scsicam.h> +#include <scsi/scsi_common.h> #include "sd.h" #include "scsi_priv.h" @@ -183,7 +184,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr, return count; } - if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, + if (scsi_mode_sense(sdp, 0x08, 8, 0, buffer, sizeof(buffer), SD_TIMEOUT, sdkp->max_retries, &data, NULL)) return -EINVAL; len = min_t(size_t, sizeof(buffer), data.length - data.header_length - @@ -1041,13 +1042,14 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd) static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, - unsigned char flags) + unsigned char flags, unsigned int dld) { cmd->cmd_len = SD_EXT_CDB_SIZE; cmd->cmnd[0] = VARIABLE_LENGTH_CMD; cmd->cmnd[7] = 0x18; /* Additional CDB len */ cmd->cmnd[9] = write ? WRITE_32 : READ_32; cmd->cmnd[10] = flags; + cmd->cmnd[11] = dld & 0x07; put_unaligned_be64(lba, &cmd->cmnd[12]); put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */ put_unaligned_be32(nr_blocks, &cmd->cmnd[28]); @@ -1057,12 +1059,12 @@ static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, - unsigned char flags) + unsigned char flags, unsigned int dld) { cmd->cmd_len = 16; cmd->cmnd[0] = write ? WRITE_16 : READ_16; - cmd->cmnd[1] = flags; - cmd->cmnd[14] = 0; + cmd->cmnd[1] = flags | ((dld >> 2) & 0x01); + cmd->cmnd[14] = (dld & 0x03) << 6; cmd->cmnd[15] = 0; put_unaligned_be64(lba, &cmd->cmnd[2]); put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); @@ -1114,6 +1116,31 @@ static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, return BLK_STS_OK; } +/* + * Check if a command has a duration limit set. If it does, and the target + * device supports CDL and the feature is enabled, return the limit + * descriptor index to use. Return 0 (no limit) otherwise. + */ +static int sd_cdl_dld(struct scsi_disk *sdkp, struct scsi_cmnd *scmd) +{ + struct scsi_device *sdp = sdkp->device; + int hint; + + if (!sdp->cdl_supported || !sdp->cdl_enable) + return 0; + + /* + * Use "no limit" if the request ioprio does not specify a duration + * limit hint. + */ + hint = IOPRIO_PRIO_HINT(req_get_ioprio(scsi_cmd_to_rq(scmd))); + if (hint < IOPRIO_HINT_DEV_DURATION_LIMIT_1 || + hint > IOPRIO_HINT_DEV_DURATION_LIMIT_7) + return 0; + + return (hint - IOPRIO_HINT_DEV_DURATION_LIMIT_1) + 1; +} + static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) { struct request *rq = scsi_cmd_to_rq(cmd); @@ -1125,6 +1152,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) unsigned int mask = logical_to_sectors(sdp, 1) - 1; bool write = rq_data_dir(rq) == WRITE; unsigned char protect, fua; + unsigned int dld; blk_status_t ret; unsigned int dif; bool dix; @@ -1174,6 +1202,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) fua = rq->cmd_flags & REQ_FUA ? 
0x8 : 0; dix = scsi_prot_sg_count(cmd); dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type); + dld = sd_cdl_dld(sdkp, cmd); if (dif || dix) protect = sd_setup_protect_cmnd(cmd, dix, dif); @@ -1182,10 +1211,10 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) { ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks, - protect | fua); + protect | fua, dld); } else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) { ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks, - protect | fua); + protect | fua, dld); } else if ((nr_blocks > 0xff) || (lba > 0x1fffff) || sdp->use_10_for_rw || protect) { ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks, @@ -1690,26 +1719,6 @@ out_unlock: return ret; } -static char sd_pr_type(enum pr_type type) -{ - switch (type) { - case PR_WRITE_EXCLUSIVE: - return 0x01; - case PR_EXCLUSIVE_ACCESS: - return 0x03; - case PR_WRITE_EXCLUSIVE_REG_ONLY: - return 0x05; - case PR_EXCLUSIVE_ACCESS_REG_ONLY: - return 0x06; - case PR_WRITE_EXCLUSIVE_ALL_REGS: - return 0x07; - case PR_EXCLUSIVE_ACCESS_ALL_REGS: - return 0x08; - default: - return 0; - } -}; - static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result) { switch (host_byte(result)) { @@ -1740,8 +1749,97 @@ static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result) } } -static int sd_pr_command(struct block_device *bdev, u8 sa, - u64 key, u64 sa_key, u8 type, u8 flags) +static int sd_pr_in_command(struct block_device *bdev, u8 sa, + unsigned char *data, int data_len) +{ + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_device *sdev = sdkp->device; + struct scsi_sense_hdr sshdr; + u8 cmd[10] = { PERSISTENT_RESERVE_IN, sa }; + const struct scsi_exec_args exec_args = { + .sshdr = &sshdr, + }; + int result; + + put_unaligned_be16(data_len, &cmd[7]); + + result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, data, data_len, + SD_TIMEOUT, sdkp->max_retries, &exec_args); + if (scsi_status_is_check_condition(result) && + scsi_sense_valid(&sshdr)) { + sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); + scsi_print_sense_hdr(sdev, NULL, &sshdr); + } + + if (result <= 0) + return result; + + return sd_scsi_to_pr_err(&sshdr, result); +} + +static int sd_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) +{ + int result, i, data_offset, num_copy_keys; + u32 num_keys = keys_info->num_keys; + int data_len = num_keys * 8 + 8; + u8 *data; + + data = kzalloc(data_len, GFP_KERNEL); + if (!data) + return -ENOMEM; + + result = sd_pr_in_command(bdev, READ_KEYS, data, data_len); + if (result) + goto free_data; + + keys_info->generation = get_unaligned_be32(&data[0]); + keys_info->num_keys = get_unaligned_be32(&data[4]) / 8; + + data_offset = 8; + num_copy_keys = min(num_keys, keys_info->num_keys); + + for (i = 0; i < num_copy_keys; i++) { + keys_info->keys[i] = get_unaligned_be64(&data[data_offset]); + data_offset += 8; + } + +free_data: + kfree(data); + return result; +} + +static int sd_pr_read_reservation(struct block_device *bdev, + struct pr_held_reservation *rsv) +{ + struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); + struct scsi_device *sdev = sdkp->device; + u8 data[24] = { }; + int result, len; + + result = sd_pr_in_command(bdev, READ_RESERVATION, data, sizeof(data)); + if (result) + return result; + + len = get_unaligned_be32(&data[4]); + if (!len) + return 0; + + /* Make sure we have at least the key and type */ + if (len < 14) { + sdev_printk(KERN_INFO, sdev, + 
"READ RESERVATION failed due to short return buffer of %d bytes\n", + len); + return -EINVAL; + } + + rsv->generation = get_unaligned_be32(&data[0]); + rsv->key = get_unaligned_be64(&data[8]); + rsv->type = scsi_pr_type_to_block(data[21] & 0x0f); + return 0; +} + +static int sd_pr_out_command(struct block_device *bdev, u8 sa, u64 key, + u64 sa_key, enum scsi_pr_type type, u8 flags) { struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); struct scsi_device *sdev = sdkp->device; @@ -1783,7 +1881,7 @@ static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, { if (flags & ~PR_FL_IGNORE_KEY) return -EOPNOTSUPP; - return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, + return sd_pr_out_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00, old_key, new_key, 0, (1 << 0) /* APTPL */); } @@ -1793,24 +1891,26 @@ static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, { if (flags) return -EOPNOTSUPP; - return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0); + return sd_pr_out_command(bdev, 0x01, key, 0, + block_pr_type_to_scsi(type), 0); } static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0); + return sd_pr_out_command(bdev, 0x02, key, 0, + block_pr_type_to_scsi(type), 0); } static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort) { - return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key, - sd_pr_type(type), 0); + return sd_pr_out_command(bdev, abort ? 0x05 : 0x04, old_key, new_key, + block_pr_type_to_scsi(type), 0); } static int sd_pr_clear(struct block_device *bdev, u64 key) { - return sd_pr_command(bdev, 0x03, key, 0, 0, 0); + return sd_pr_out_command(bdev, 0x03, key, 0, 0, 0); } static const struct pr_ops sd_pr_ops = { @@ -1819,6 +1919,8 @@ static const struct pr_ops sd_pr_ops = { .pr_release = sd_pr_release, .pr_preempt = sd_pr_preempt, .pr_clear = sd_pr_clear, + .pr_read_keys = sd_pr_read_keys, + .pr_read_reservation = sd_pr_read_reservation, }; static void scsi_disk_free_disk(struct gendisk *disk) @@ -2608,9 +2710,8 @@ sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage, if (sdkp->device->use_10_for_ms && len < 8) len = 8; - return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len, - SD_TIMEOUT, sdkp->max_retries, data, - sshdr); + return scsi_mode_sense(sdkp->device, dbd, modepage, 0, buffer, len, + SD_TIMEOUT, sdkp->max_retries, data, sshdr); } /* @@ -2867,7 +2968,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) if (sdkp->protection_type == 0) return; - res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT, + res = scsi_mode_sense(sdp, 1, 0x0a, 0, buffer, 36, SD_TIMEOUT, sdkp->max_retries, &data, &sshdr); if (res < 0 || !data.header_length || @@ -3056,7 +3157,7 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) return; } - if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY, 0) < 0) { struct scsi_vpd *vpd; sdev->no_report_opcodes = 1; @@ -3072,10 +3173,10 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) rcu_read_unlock(); } - if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1) + if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16, 0) == 1) sdkp->ws16 = 1; - if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1) + if 
(scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME, 0) == 1) sdkp->ws10 = 1; } @@ -3087,9 +3188,9 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer) return; if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, - SECURITY_PROTOCOL_IN) == 1 && + SECURITY_PROTOCOL_IN, 0) == 1 && scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, - SECURITY_PROTOCOL_OUT) == 1) + SECURITY_PROTOCOL_OUT, 0) == 1) sdkp->security = 1; } diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 22801c24ea19..abbd08933ac7 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -889,7 +889,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) } max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks), - q->limits.max_segments << (PAGE_SHIFT - 9)); + q->limits.max_segments << PAGE_SECTORS_SHIFT); max_append = min_t(u32, max_append, queue_max_hw_sectors(q)); blk_queue_max_zone_append_sectors(q, max_append); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index dcb73787c29d..89fa046c7158 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -71,7 +71,7 @@ static int sg_proc_init(void); #define SG_ALLOW_DIO_DEF 0 -#define SG_MAX_DEVS 32768 +#define SG_MAX_DEVS (1 << MINORBITS) /* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30) however the type * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig index 973d240649ab..789460b0a342 100644 --- a/drivers/scsi/smartpqi/Kconfig +++ b/drivers/scsi/smartpqi/Kconfig @@ -1,7 +1,7 @@ # # Kernel configuration file for the SMARTPQI # -# Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries +# Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries # Copyright (c) 2017-2018 Microsemi Corporation # Copyright (c) 2016 Microsemi Corporation # Copyright (c) 2016 PMC-Sierra, Inc. diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index 228838eb3686..f960b5095d09 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * driver for Microchip PQI-based storage controllers - * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * @@ -1108,6 +1108,7 @@ struct pqi_scsi_dev { u8 volume_offline : 1; u8 rescan : 1; u8 ignore_device : 1; + u8 erase_in_progress : 1; bool aio_enabled; /* only valid for physical disks */ bool in_remove; bool device_offline; @@ -1147,7 +1148,7 @@ struct pqi_scsi_dev { struct pqi_stream_data stream_data[NUM_STREAMS_PER_LUN]; atomic_t scsi_cmds_outstanding[PQI_MAX_LUNS_PER_DEVICE]; - atomic_t raid_bypass_cnt; + unsigned int raid_bypass_cnt; }; /* VPD inquiry pages */ @@ -1357,6 +1358,7 @@ struct pqi_ctrl_info { u32 max_write_raid_5_6; u32 max_write_raid_1_10_2drive; u32 max_write_raid_1_10_3drive; + int numa_node; struct list_head scsi_device_list; spinlock_t scsi_device_list_lock; diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index f4e0aa262164..19af36e9a16d 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * driver for Microchip PQI-based storage controllers - * Copyright (c) 2019-2022 Microchip Technology Inc. 
and its subsidiaries + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * @@ -33,11 +33,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "2.1.20-035" +#define DRIVER_VERSION "2.1.22-040" #define DRIVER_MAJOR 2 #define DRIVER_MINOR 1 -#define DRIVER_RELEASE 20 -#define DRIVER_REVISION 35 +#define DRIVER_RELEASE 22 +#define DRIVER_REVISION 40 #define DRIVER_NAME "Microchip SmartPQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -519,6 +519,36 @@ static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info) writeb(status, ctrl_info->soft_reset_status); } +static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) +{ + bool io_high_prio; + int priority_class; + + io_high_prio = false; + + if (device->ncq_prio_enable) { + priority_class = + IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); + if (priority_class == IOPRIO_CLASS_RT) { + /* Set NCQ priority for read/write commands. */ + switch (scmd->cmnd[0]) { + case WRITE_16: + case READ_16: + case WRITE_12: + case READ_12: + case WRITE_10: + case READ_10: + case WRITE_6: + case READ_6: + io_high_prio = true; + break; + } + } + } + + return io_high_prio; +} + static int pqi_map_single(struct pci_dev *pci_dev, struct pqi_sg_descriptor *sg_descriptor, void *buffer, size_t buffer_length, enum dma_data_direction data_direction) @@ -578,10 +608,6 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, cdb = request->cdb; switch (cmd) { - case TEST_UNIT_READY: - request->data_direction = SOP_READ_FLAG; - cdb[0] = TEST_UNIT_READY; - break; case INQUIRY: request->data_direction = SOP_READ_FLAG; cdb[0] = INQUIRY; @@ -708,7 +734,8 @@ static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info * } } - pqi_reinit_io_request(io_request); + if (io_request) + pqi_reinit_io_request(io_request); return io_request; } @@ -1588,6 +1615,7 @@ no_buffer: #define PQI_DEVICE_NCQ_PRIO_SUPPORTED 0x01 #define PQI_DEVICE_PHY_MAP_SUPPORTED 0x10 +#define PQI_DEVICE_ERASE_IN_PROGRESS 0x10 static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, @@ -1636,6 +1664,8 @@ static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info, ((get_unaligned_le32(&id_phys->misc_drive_flags) >> 16) & PQI_DEVICE_NCQ_PRIO_SUPPORTED); + device->erase_in_progress = !!(get_unaligned_le16(&id_phys->extra_physical_drive_flags) & PQI_DEVICE_ERASE_IN_PROGRESS); + return 0; } @@ -1681,7 +1711,7 @@ out: /* * Prevent adding drive to OS for some corner cases such as a drive - * undergoing a sanitize operation. Some OSes will continue to poll + * undergoing a sanitize (erase) operation. Some OSes will continue to poll * the drive until the sanitize completes, which can take hours, * resulting in long bootup delays. Commands such as TUR, READ_CAP * are allowed, but READ/WRITE cause check condition. So the OS @@ -1689,73 +1719,9 @@ out: * Note: devices that have completed sanitize must be re-enabled * using the management utility. 
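The pqi_get_physical_device_info() hunk above latches an erase_in_progress bit reported by the firmware, and the rewrite just below uses it to retire the old synchronous probe, which issued a TEST UNIT READY per device and parsed the sense data for "sanitize in progress" (ASC 0x04, ASCQ 0x1b). A sketch of the flag extraction; the 0x10 mask matches PQI_DEVICE_ERASE_IN_PROGRESS in the diff, the helper is illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PQI_DEVICE_ERASE_IN_PROGRESS	0x10

static bool erase_in_progress(uint16_t extra_physical_drive_flags)
{
	/* latched once per rescan; replaces a TUR round trip per device */
	return extra_physical_drive_flags & PQI_DEVICE_ERASE_IN_PROGRESS;
}

int main(void)
{
	printf("erasing: %d\n", erase_in_progress(0x0010));
	return 0;
}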
*/ -static bool pqi_keep_device_offline(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device) +static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device) { - u8 scsi_status; - int rc; - enum dma_data_direction dir; - char *buffer; - int buffer_length = 64; - size_t sense_data_length; - struct scsi_sense_hdr sshdr; - struct pqi_raid_path_request request; - struct pqi_raid_error_info error_info; - bool offline = false; /* Assume keep online */ - - /* Do not check controllers. */ - if (pqi_is_hba_lunid(device->scsi3addr)) - return false; - - /* Do not check LVs. */ - if (pqi_is_logical_device(device)) - return false; - - buffer = kmalloc(buffer_length, GFP_KERNEL); - if (!buffer) - return false; /* Assume not offline */ - - /* Check for SANITIZE in progress using TUR */ - rc = pqi_build_raid_path_request(ctrl_info, &request, - TEST_UNIT_READY, RAID_CTLR_LUNID, buffer, - buffer_length, 0, &dir); - if (rc) - goto out; /* Assume not offline */ - - memcpy(request.lun_number, device->scsi3addr, sizeof(request.lun_number)); - - rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, &error_info); - - if (rc) - goto out; /* Assume not offline */ - - scsi_status = error_info.status; - sense_data_length = get_unaligned_le16(&error_info.sense_data_length); - if (sense_data_length == 0) - sense_data_length = - get_unaligned_le16(&error_info.response_data_length); - if (sense_data_length) { - if (sense_data_length > sizeof(error_info.data)) - sense_data_length = sizeof(error_info.data); - - /* - * Check for sanitize in progress: asc:0x04, ascq: 0x1b - */ - if (scsi_status == SAM_STAT_CHECK_CONDITION && - scsi_normalize_sense(error_info.data, - sense_data_length, &sshdr) && - sshdr.sense_key == NOT_READY && - sshdr.asc == 0x04 && - sshdr.ascq == 0x1b) { - device->device_offline = true; - offline = true; - goto out; /* Keep device offline */ - } - } - -out: - kfree(buffer); - return offline; + return device->erase_in_progress; } static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info, @@ -2499,10 +2465,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) if (!pqi_is_supported_device(device)) continue; - /* Do not present disks that the OS cannot fully probe */ - if (pqi_keep_device_offline(ctrl_info, device)) - continue; - /* Gather information about the device. */ rc = pqi_get_device_info(ctrl_info, device, id_phys); if (rc == -ENOMEM) { @@ -2525,6 +2487,10 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) continue; } + /* Do not present disks that the OS cannot fully probe. 
*/ + if (pqi_keep_device_offline(device)) + continue; + pqi_assign_bus_target_lun(device); if (device->is_physical_device) { @@ -5504,15 +5470,19 @@ static void pqi_raid_io_complete(struct pqi_io_request *io_request, pqi_scsi_done(scmd); } -static int pqi_raid_submit_scsi_cmd_with_io_request( - struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, +static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, - struct pqi_queue_group *queue_group) + struct pqi_queue_group *queue_group, bool io_high_prio) { int rc; size_t cdb_length; + struct pqi_io_request *io_request; struct pqi_raid_path_request *request; + io_request = pqi_alloc_io_request(ctrl_info, scmd); + if (!io_request) + return SCSI_MLQUEUE_HOST_BUSY; + io_request->io_complete_callback = pqi_raid_io_complete; io_request->scmd = scmd; @@ -5522,6 +5492,7 @@ static int pqi_raid_submit_scsi_cmd_with_io_request( request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO; put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length); request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE; + request->command_priority = io_high_prio; put_unaligned_le16(io_request->index, &request->request_id); request->error_index = request->request_id; memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); @@ -5587,14 +5558,11 @@ static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group) { - struct pqi_io_request *io_request; + bool io_high_prio; - io_request = pqi_alloc_io_request(ctrl_info, scmd); - if (!io_request) - return SCSI_MLQUEUE_HOST_BUSY; + io_high_prio = pqi_is_io_high_priority(device, scmd); - return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request, - device, scmd, queue_group); + return pqi_raid_submit_io(ctrl_info, device, scmd, queue_group, io_high_prio); } static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request) @@ -5639,44 +5607,13 @@ static void pqi_aio_io_complete(struct pqi_io_request *io_request, pqi_scsi_done(scmd); } -static inline bool pqi_is_io_high_priority(struct pqi_ctrl_info *ctrl_info, - struct pqi_scsi_dev *device, struct scsi_cmnd *scmd) -{ - bool io_high_prio; - int priority_class; - - io_high_prio = false; - - if (device->ncq_prio_enable) { - priority_class = - IOPRIO_PRIO_CLASS(req_get_ioprio(scsi_cmd_to_rq(scmd))); - if (priority_class == IOPRIO_CLASS_RT) { - /* Set NCQ priority for read/write commands. 
*/ - switch (scmd->cmnd[0]) { - case WRITE_16: - case READ_16: - case WRITE_12: - case READ_12: - case WRITE_10: - case READ_10: - case WRITE_6: - case READ_6: - io_high_prio = true; - break; - } - } - } - - return io_high_prio; -} - static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group) { bool io_high_prio; - io_high_prio = pqi_is_io_high_priority(ctrl_info, device, scmd); + io_high_prio = pqi_is_io_high_priority(device, scmd); return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle, scmd->cmnd, scmd->cmd_len, queue_group, NULL, @@ -5694,10 +5631,10 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, struct pqi_aio_path_request *request; struct pqi_scsi_dev *device; - device = scmd->device->hostdata; io_request = pqi_alloc_io_request(ctrl_info, scmd); if (!io_request) return SCSI_MLQUEUE_HOST_BUSY; + io_request->io_complete_callback = pqi_aio_io_complete; io_request->scmd = scmd; io_request->raid_bypass = raid_bypass; @@ -5712,6 +5649,7 @@ static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info, request->command_priority = io_high_prio; put_unaligned_le16(io_request->index, &request->request_id); request->error_index = request->request_id; + device = scmd->device->hostdata; if (!pqi_is_logical_device(device) && ctrl_info->multi_lun_device_supported) put_unaligned_le64(((scmd->device->lun) << 8), &request->lun_number); if (cdb_length > sizeof(request->cdb)) @@ -6052,7 +5990,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scm rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) { raid_bypassed = true; - atomic_inc(&device->raid_bypass_cnt); + device->raid_bypass_cnt++; } } if (!raid_bypassed) @@ -6903,7 +6841,7 @@ static ssize_t pqi_lockup_action_store(struct device *dev, char *action_name; char action_name_buffer[32]; - strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer)); + strscpy(action_name_buffer, buffer, sizeof(action_name_buffer)); action_name = strstrip(action_name_buffer); for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) { @@ -7288,7 +7226,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, struct scsi_device *sdev; struct pqi_scsi_dev *device; unsigned long flags; - int raid_bypass_cnt; + unsigned int raid_bypass_cnt; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -7304,7 +7242,7 @@ static ssize_t pqi_raid_bypass_cnt_show(struct device *dev, return -ENODEV; } - raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt); + raid_bypass_cnt = device->raid_bypass_cnt; spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -7366,8 +7304,7 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, return -ENODEV; } - if (!device->ncq_prio_support || - !device->is_physical_device) { + if (!device->ncq_prio_support) { spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return -EINVAL; } @@ -7379,6 +7316,18 @@ static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev, return strlen(buf); } +static ssize_t pqi_numa_node_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct scsi_device *sdev; + struct pqi_ctrl_info *ctrl_info; + + sdev = to_scsi_device(dev); + ctrl_info = shost_to_hba(sdev->host); + + return scnprintf(buffer, PAGE_SIZE, "%d\n", ctrl_info->numa_node); +} + static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL); static 
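raid_bypass_cnt, now a plain unsigned int sampled under the device-list spinlock instead of an atomic_t, stays visible as a read-only sysfs attribute, and the numa_node attribute added below sits next to it. A quick userspace reader (a sketch; "sdc" is a placeholder disk name and the exact path depends on your topology):

#include <stdio.h>

/* Read a single integer attribute; returns -1 on any failure. */
static long read_attr(const char *path)
{
	FILE *f = fopen(path, "r");
	long v = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &v) != 1)
		v = -1;
	fclose(f);
	return v;
}

int main(void)
{
	printf("raid_bypass_cnt=%ld\n",
	       read_attr("/sys/block/sdc/device/raid_bypass_cnt"));
	printf("numa_node=%ld\n",
	       read_attr("/sys/block/sdc/device/numa_node"));
	return 0;
}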
DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL); static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL); @@ -7388,6 +7337,7 @@ static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL); static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL); static DEVICE_ATTR(sas_ncq_prio_enable, 0644, pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store); +static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL); static struct attribute *pqi_sdev_attrs[] = { &dev_attr_lunid.attr, @@ -7398,6 +7348,7 @@ static struct attribute *pqi_sdev_attrs[] = { &dev_attr_raid_level.attr, &dev_attr_raid_bypass_cnt.attr, &dev_attr_sas_ncq_prio_enable.attr, + &dev_attr_numa_node.attr, NULL }; @@ -7716,8 +7667,8 @@ static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info, features_requested_iomem_addr + (le16_to_cpu(firmware_features->num_elements) * 2) + sizeof(__le16); - writew(PQI_FIRMWARE_FEATURE_MAXIMUM, - host_max_known_feature_iomem_addr); + writeb(PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF, host_max_known_feature_iomem_addr); + writeb((PQI_FIRMWARE_FEATURE_MAXIMUM & 0xFF00) >> 8, host_max_known_feature_iomem_addr + 1); } return pqi_config_table_update(ctrl_info, @@ -8560,7 +8511,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) ctrl_info->iomem_base = ioremap(pci_resource_start( ctrl_info->pci_dev, 0), - sizeof(struct pqi_ctrl_registers)); + pci_resource_len(ctrl_info->pci_dev, 0)); if (!ctrl_info->iomem_base) { dev_err(&ctrl_info->pci_dev->dev, "failed to map memory for controller registers\n"); @@ -9018,6 +8969,7 @@ static int pqi_pci_probe(struct pci_dev *pci_dev, "failed to allocate controller info block\n"); return -ENOMEM; } + ctrl_info->numa_node = node; ctrl_info->pci_dev = pci_dev; @@ -9929,6 +9881,18 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0804) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0805) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x0806) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1cf2, 0x5445) }, { @@ -9965,6 +9929,18 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x54da) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x54db) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1cf2, 0x54dc) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1cf2, 0x0b27) }, { @@ -10017,6 +9993,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1014, 0x0718) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x1e93, 0x1000) }, { @@ -10029,6 +10009,50 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1e93, 0x1005) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1001) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1002) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1003) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1004) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1005) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1006) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1007) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1008) + }, + { + 
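The firmware-features hunk above replaces one 16-bit writew() with two writeb() calls; the value still lands little-endian, low byte first, but each byte gets its own access (presumably to accommodate controllers that cannot take a 16-bit transaction at that offset; the diff itself does not say why). The equivalence, as a kernel-style sketch:

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: store a 16-bit value as two little-endian byte writes,
 * mirroring the writeb() pair in the hunk above. */
static void write_le16_bytewise(u16 val, void __iomem *addr)
{
	writeb(val & 0xFF, addr);		/* low byte at offset 0 */
	writeb((val & 0xFF00) >> 8, addr + 1);	/* high byte at offset 1 */
}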
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x1009) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1f51, 0x100a) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) }, { 0 } diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index 13e8c539010e..a981d0377948 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * driver for Microchip PQI-based storage controllers - * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * @@ -92,25 +92,23 @@ static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port, identify = &rphy->identify; identify->sas_address = pqi_sas_port->sas_address; + identify->phy_identifier = pqi_sas_port->device->phy_id; identify->initiator_port_protocols = SAS_PROTOCOL_ALL; identify->target_port_protocols = SAS_PROTOCOL_STP; - if (pqi_sas_port->device) { - identify->phy_identifier = pqi_sas_port->device->phy_id; - switch (pqi_sas_port->device->device_type) { - case SA_DEVICE_TYPE_SAS: - case SA_DEVICE_TYPE_SES: - case SA_DEVICE_TYPE_NVME: - identify->target_port_protocols = SAS_PROTOCOL_SSP; - break; - case SA_DEVICE_TYPE_EXPANDER_SMP: - identify->target_port_protocols = SAS_PROTOCOL_SMP; - break; - case SA_DEVICE_TYPE_SATA: - default: - break; - } + switch (pqi_sas_port->device->device_type) { + case SA_DEVICE_TYPE_SAS: + case SA_DEVICE_TYPE_SES: + case SA_DEVICE_TYPE_NVME: + identify->target_port_protocols = SAS_PROTOCOL_SSP; + break; + case SA_DEVICE_TYPE_EXPANDER_SMP: + identify->target_port_protocols = SAS_PROTOCOL_SMP; + break; + case SA_DEVICE_TYPE_SATA: + default: + break; } return sas_rphy_add(rphy); @@ -295,10 +293,12 @@ int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node, rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy); if (rc) - goto free_sas_port; + goto free_sas_rphy; return 0; +free_sas_rphy: + sas_rphy_free(rphy); free_sas_port: pqi_free_sas_port(pqi_sas_port); device->sas_port = NULL; diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c index 5811fb3c22a9..673437c7152b 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.c +++ b/drivers/scsi/smartpqi/smartpqi_sis.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * driver for Microchip PQI-based storage controllers - * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. * diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h index 9dcbae96a5c6..0c97626d87d4 100644 --- a/drivers/scsi/smartpqi/smartpqi_sis.h +++ b/drivers/scsi/smartpqi/smartpqi_sis.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * driver for Microchip PQI-based storage controllers - * Copyright (c) 2019-2022 Microchip Technology Inc. and its subsidiaries + * Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries * Copyright (c) 2016-2018 Microsemi Corporation * Copyright (c) 2016 PMC-Sierra, Inc. 
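The pqi_add_sas_device() error-path change above is the standard unwind-ordering fix: once sas_rphy_alloc() has succeeded, a later failure must free the rphy before tearing down the port, whereas the old code jumped straight to free_sas_port and leaked the rphy. The same shape in isolation (a sketch; all names below are placeholders, not driver symbols):

#include <stdio.h>

static int  get_port(void) { return 0; }	/* e.g. port allocation */
static int  get_rphy(void) { return 0; }	/* e.g. sas_rphy_alloc() */
static int  add_rphy(void) { return -1; }	/* e.g. sas_rphy_add(), failing */
static void put_rphy(void) { puts("put rphy"); }	/* e.g. sas_rphy_free() */
static void put_port(void) { puts("put port"); }

static int setup(void)
{
	int rc;

	rc = get_port();
	if (rc)
		return rc;
	rc = get_rphy();
	if (rc)
		goto err_put_port;
	rc = add_rphy();
	if (rc)
		goto err_put_rphy;	/* undo the newest acquisition first */
	return 0;

err_put_rphy:
	put_rphy();
err_put_port:
	put_port();
	return rc;
}

int main(void) { return setup() ? 1 : 0; }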
* diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 8fbf3c1b1311..3e2e5783924d 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -214,7 +214,7 @@ snic_tgt_del(struct work_struct *work) scsi_flush_work(shost); /* Block IOs on child devices, stops new IOs */ - scsi_target_block(&tgt->dev); + scsi_block_targets(shost, &tgt->dev); /* Cleanup IOs */ snic_tgt_scsi_abort_io(tgt); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index ce886c8c9dbe..07ef3db3d1a1 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -825,7 +825,7 @@ static int get_capabilities(struct scsi_cd *cd) scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); /* ask for mode page 0x2a */ - rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, + rc = scsi_mode_sense(cd->device, 0, 0x2a, 0, buffer, ms_len, SR_TIMEOUT, 3, &data, NULL); if (rc < 0 || data.length > ms_len || diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index ee36a9c15d9c..17491ba10439 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1286,7 +1286,7 @@ static struct Scsi_Host *sym_attach(const struct scsi_host_template *tpnt, int u /* * Edit its name. */ - strlcpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); + strscpy(np->s.chip_name, dev->chip.name, sizeof(np->s.chip_name)); sprintf(np->s.inst_name, "sym%d", np->s.unit); if ((SYM_CONF_DMA_ADDRESSING_MODE > 0) && (np->features & FE_DAC) && diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 58498da9869a..bd5633667d01 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -338,10 +338,8 @@ static int virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi) int result, inquiry_len, inq_result_len = 256; char *inq_result = kmalloc(inq_result_len, GFP_KERNEL); - if (!inq_result) { - kfree(inq_result); + if (!inq_result) return -ENOMEM; - } shost_for_each_device(sdev, shost) { inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 557516c642c3..5b90c22ee3dc 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -726,8 +726,8 @@ static int iscsi_add_notunderstood_response( } INIT_LIST_HEAD(&extra_response->er_list); - strlcpy(extra_response->key, key, sizeof(extra_response->key)); - strlcpy(extra_response->value, NOTUNDERSTOOD, + strscpy(extra_response->key, key, sizeof(extra_response->key)); + strscpy(extra_response->value, NOTUNDERSTOOD, sizeof(extra_response->value)); list_add_tail(&extra_response->er_list, diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 6231fa4ef5c6..91a75a4a7cc1 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -1375,7 +1375,7 @@ void iscsit_collect_login_stats( if (conn->param_list) intrname = iscsi_find_param_from_key(INITIATORNAME, conn->param_list); - strlcpy(ls->last_intr_fail_name, + strscpy(ls->last_intr_fail_name, (intrname ? 
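The strlcpy() to strscpy() conversions running through these hunks share one motivation: strlcpy() returns the full source length (so it must scan past a too-small destination to compute it), while strscpy() never reads beyond what it copies and returns -E2BIG on truncation, which makes overflow both impossible and detectable. Typical checked use, as a sketch with illustrative names:

#include <linux/string.h>
#include <linux/errno.h>

static int copy_label(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_size);

	if (ret == -E2BIG)
		return -ENAMETOOLONG;	/* truncated, but still NUL-terminated */
	return 0;			/* ret = characters copied, sans NUL */
}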
intrname->value : "Unknown"), sizeof(ls->last_intr_fail_name)); @@ -1414,7 +1414,7 @@ void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess) return; spin_lock_bh(&tiqn->sess_err_stats.lock); - strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name, + strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name, sess->sess_ops->InitiatorName, sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name)); tiqn->sess_err_stats.last_sess_failure_type = diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 74b67c346dfe..936e5ff1b209 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -649,7 +649,7 @@ static void dev_set_t10_wwn_model_alias(struct se_device *dev) * here without potentially breaking existing setups, so continue to * truncate one byte shorter than what can be carried in INQUIRY. */ - strlcpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN); + strscpy(dev->t10_wwn.model, configname, INQUIRY_MODEL_LEN); } static ssize_t emulate_model_alias_store(struct config_item *item, @@ -675,7 +675,7 @@ static ssize_t emulate_model_alias_store(struct config_item *item, if (flag) { dev_set_t10_wwn_model_alias(dev); } else { - strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod, + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, sizeof(dev->t10_wwn.model)); } da->emulate_model_alias = flag; @@ -1426,7 +1426,7 @@ static ssize_t target_wwn_vendor_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.vendor) != INQUIRY_VENDOR_LEN + 1); - strlcpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); + strscpy(dev->t10_wwn.vendor, stripped, sizeof(dev->t10_wwn.vendor)); pr_debug("Target_Core_ConfigFS: Set emulated T10 Vendor Identification:" " %s\n", dev->t10_wwn.vendor); @@ -1482,7 +1482,7 @@ static ssize_t target_wwn_product_id_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.model) != INQUIRY_MODEL_LEN + 1); - strlcpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); + strscpy(dev->t10_wwn.model, stripped, sizeof(dev->t10_wwn.model)); pr_debug("Target_Core_ConfigFS: Set emulated T10 Model Identification: %s\n", dev->t10_wwn.model); @@ -1538,7 +1538,7 @@ static ssize_t target_wwn_revision_store(struct config_item *item, } BUILD_BUG_ON(sizeof(dev->t10_wwn.revision) != INQUIRY_REVISION_LEN + 1); - strlcpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); + strscpy(dev->t10_wwn.revision, stripped, sizeof(dev->t10_wwn.revision)); pr_debug("Target_Core_ConfigFS: Set emulated T10 Revision: %s\n", dev->t10_wwn.revision); diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 90f3f4926172..b7ac60f4a219 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -789,10 +789,10 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) xcopy_lun->lun_tpg = &xcopy_pt_tpg; /* Preload the default INQUIRY const values */ - strlcpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor)); - strlcpy(dev->t10_wwn.model, dev->transport->inquiry_prod, + strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor)); + strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod, sizeof(dev->t10_wwn.model)); - strlcpy(dev->t10_wwn.revision, dev->transport->inquiry_rev, + strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev, sizeof(dev->t10_wwn.revision)); return dev; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 
ce0e000b74fc..4d447520bab8 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -896,7 +896,7 @@ static void fd_free_prot(struct se_device *dev) fd_dev->fd_prot_file = NULL; } -static struct sbc_ops fd_sbc_ops = { +static struct exec_cmd_ops fd_exec_cmd_ops = { .execute_rw = fd_execute_rw, .execute_sync_cache = fd_execute_sync_cache, .execute_write_same = fd_execute_write_same, @@ -906,7 +906,7 @@ static struct sbc_ops fd_sbc_ops = { static sense_reason_t fd_parse_cdb(struct se_cmd *cmd) { - return sbc_parse_cdb(cmd, &fd_sbc_ops); + return sbc_parse_cdb(cmd, &fd_exec_cmd_ops); } static const struct target_backend_ops fileio_ops = { diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 3c462d69daca..a3c5f3558a33 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -23,13 +23,16 @@ #include <linux/file.h> #include <linux/module.h> #include <linux/scatterlist.h> +#include <linux/pr.h> #include <scsi/scsi_proto.h> +#include <scsi/scsi_common.h> #include <asm/unaligned.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include "target_core_iblock.h" +#include "target_core_pr.h" #define IBLOCK_MAX_BIO_PER_TASK 32 /* max # of bios to submit at a time */ #define IBLOCK_BIO_POOL_SIZE 128 @@ -309,7 +312,7 @@ static sector_t iblock_get_blocks(struct se_device *dev) return blocks_long; } -static void iblock_complete_cmd(struct se_cmd *cmd) +static void iblock_complete_cmd(struct se_cmd *cmd, blk_status_t blk_status) { struct iblock_req *ibr = cmd->priv; u8 status; @@ -317,7 +320,9 @@ static void iblock_complete_cmd(struct se_cmd *cmd) if (!refcount_dec_and_test(&ibr->pending)) return; - if (atomic_read(&ibr->ib_bio_err_cnt)) + if (blk_status == BLK_STS_RESV_CONFLICT) + status = SAM_STAT_RESERVATION_CONFLICT; + else if (atomic_read(&ibr->ib_bio_err_cnt)) status = SAM_STAT_CHECK_CONDITION; else status = SAM_STAT_GOOD; @@ -330,6 +335,7 @@ static void iblock_bio_done(struct bio *bio) { struct se_cmd *cmd = bio->bi_private; struct iblock_req *ibr = cmd->priv; + blk_status_t blk_status = bio->bi_status; if (bio->bi_status) { pr_err("bio error: %p, err: %d\n", bio, bio->bi_status); @@ -342,7 +348,7 @@ static void iblock_bio_done(struct bio *bio) bio_put(bio); - iblock_complete_cmd(cmd); + iblock_complete_cmd(cmd, blk_status); } static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, @@ -758,7 +764,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, if (!sgl_nents) { refcount_set(&ibr->pending, 1); - iblock_complete_cmd(cmd); + iblock_complete_cmd(cmd, BLK_STS_OK); return 0; } @@ -816,7 +822,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, } iblock_submit_bios(&list); - iblock_complete_cmd(cmd); + iblock_complete_cmd(cmd, BLK_STS_OK); return 0; fail_put_bios: @@ -828,6 +834,258 @@ fail: return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } +static sense_reason_t iblock_execute_pr_out(struct se_cmd *cmd, u8 sa, u64 key, + u64 sa_key, u8 type, bool aptpl) +{ + struct se_device *dev = cmd->se_dev; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bdev = ib_dev->ibd_bd; + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; + int ret; + + if (!ops) { + pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + switch (sa) { + case PRO_REGISTER: + case 
PRO_REGISTER_AND_IGNORE_EXISTING_KEY: + if (!ops->pr_register) { + pr_err("block device does not support pr_register.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + /* The block layer pr ops always enables aptpl */ + if (!aptpl) + pr_info("APTPL not set by initiator, but will be used.\n"); + + ret = ops->pr_register(bdev, key, sa_key, + sa == PRO_REGISTER ? 0 : PR_FL_IGNORE_KEY); + break; + case PRO_RESERVE: + if (!ops->pr_reserve) { + pr_err("block_device does not support pr_reserve.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + ret = ops->pr_reserve(bdev, key, scsi_pr_type_to_block(type), 0); + break; + case PRO_CLEAR: + if (!ops->pr_clear) { + pr_err("block_device does not support pr_clear.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + ret = ops->pr_clear(bdev, key); + break; + case PRO_PREEMPT: + case PRO_PREEMPT_AND_ABORT: + if (!ops->pr_preempt) { + pr_err("block_device does not support pr_preempt.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + ret = ops->pr_preempt(bdev, key, sa_key, + scsi_pr_type_to_block(type), + sa == PRO_PREEMPT ? false : true); + break; + case PRO_RELEASE: + if (!ops->pr_release) { + pr_err("block_device does not support pr_release.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + ret = ops->pr_release(bdev, key, scsi_pr_type_to_block(type)); + break; + default: + pr_err("Unknown PERSISTENT_RESERVE_OUT SA: 0x%02x\n", sa); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (!ret) + return TCM_NO_SENSE; + else if (ret == PR_STS_RESERVATION_CONFLICT) + return TCM_RESERVATION_CONFLICT; + else + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +} + +static void iblock_pr_report_caps(unsigned char *param_data) +{ + u16 len = 8; + + put_unaligned_be16(len, &param_data[0]); + /* + * When using the pr_ops passthrough method we only support exporting + * the device through one target port because from the backend module + * level we can't see the target port config. As a result we only + * support registration directly from the I_T nexus the cmd is sent + * through and do not set ATP_C here. + * + * The block layer pr_ops do not support passing in initiators so + * we don't set SIP_C here. + */ + /* PTPL_C: Persistence across Target Power Loss bit */ + param_data[2] |= 0x01; + /* + * We are filling in the PERSISTENT RESERVATION TYPE MASK below, so + * set the TMV: Task Mask Valid bit. + */ + param_data[3] |= 0x80; + /* + * Change ALLOW COMMANDs to 0x20 or 0x40 later from Table 166 + */ + param_data[3] |= 0x10; /* ALLOW COMMANDs field 001b */ + /* + * PTPL_A: Persistence across Target Power Loss Active bit. The block + * layer pr ops always enables this so report it active. + */ + param_data[3] |= 0x01; + /* + * Setup the PERSISTENT RESERVATION TYPE MASK from Table 212 spc4r37.
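The PR-OUT handler above funnels SCSI service actions into the block layer's pr_ops, the same interface userspace reaches through the persistent-reservation ioctls in <linux/pr.h>. For orientation, driving that interface directly looks roughly like this (a sketch: /dev/sdX is a placeholder and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pr.h>

int main(void)
{
	struct pr_registration reg = { .old_key = 0, .new_key = 0xabc123 };
	struct pr_reservation rsv = { .key = 0xabc123,
				      .type = PR_WRITE_EXCLUSIVE };
	int fd = open("/dev/sdX", O_RDWR);	/* placeholder device */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The same paths ops->pr_register()/ops->pr_reserve() service for
	 * PRO_REGISTER and PRO_RESERVE in the handler above. */
	if (ioctl(fd, IOC_PR_REGISTER, &reg) || ioctl(fd, IOC_PR_RESERVE, &rsv))
		perror("pr ioctl");
	close(fd);
	return 0;
}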
+ */ + param_data[4] |= 0x80; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + param_data[4] |= 0x40; /* PR_TYPE_EXCLUSIVE_ACCESS_REGONLY */ + param_data[4] |= 0x20; /* PR_TYPE_WRITE_EXCLUSIVE_REGONLY */ + param_data[4] |= 0x08; /* PR_TYPE_EXCLUSIVE_ACCESS */ + param_data[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ + param_data[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ +} + +static sense_reason_t iblock_pr_read_keys(struct se_cmd *cmd, + unsigned char *param_data) +{ + struct se_device *dev = cmd->se_dev; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bdev = ib_dev->ibd_bd; + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; + int i, len, paths, data_offset; + struct pr_keys *keys; + sense_reason_t ret; + + if (!ops) { + pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (!ops->pr_read_keys) { + pr_err("Block device does not support read_keys.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + /* + * We don't know what's under us, but dm-multipath will register every + * path with the same key, so start off with enough space for 16 paths, + * which is not a lot of memory and should normally be enough. + */ + paths = 16; +retry: + len = 8 * paths; + keys = kzalloc(sizeof(*keys) + len, GFP_KERNEL); + if (!keys) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + keys->num_keys = paths; + if (!ops->pr_read_keys(bdev, keys)) { + if (keys->num_keys > paths) { + kfree(keys); + paths *= 2; + goto retry; + } + } else { + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + goto free_keys; + } + + ret = TCM_NO_SENSE; + + put_unaligned_be32(keys->generation, &param_data[0]); + if (!keys->num_keys) { + put_unaligned_be32(0, &param_data[4]); + goto free_keys; + } + + put_unaligned_be32(8 * keys->num_keys, &param_data[4]); + + data_offset = 8; + for (i = 0; i < keys->num_keys; i++) { + if (data_offset + 8 > cmd->data_length) + break; + + put_unaligned_be64(keys->keys[i], &param_data[data_offset]); + data_offset += 8; + } + +free_keys: + kfree(keys); + return ret; +} + +static sense_reason_t iblock_pr_read_reservation(struct se_cmd *cmd, + unsigned char *param_data) +{ + struct se_device *dev = cmd->se_dev; + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + struct block_device *bdev = ib_dev->ibd_bd; + const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; + struct pr_held_reservation rsv = { }; + + if (!ops) { + pr_err("Block device does not support pr_ops but iblock device has been configured for PR passthrough.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (!ops->pr_read_reservation) { + pr_err("Block device does not support pr_read_reservation.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (ops->pr_read_reservation(bdev, &rsv)) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + put_unaligned_be32(rsv.generation, &param_data[0]); + if (!block_pr_type_to_scsi(rsv.type)) { + put_unaligned_be32(0, &param_data[4]); + return TCM_NO_SENSE; + } + + put_unaligned_be32(16, &param_data[4]); + + if (cmd->data_length < 16) + return TCM_NO_SENSE; + put_unaligned_be64(rsv.key, &param_data[8]); + + if (cmd->data_length < 22) + return TCM_NO_SENSE; + param_data[21] = block_pr_type_to_scsi(rsv.type); + + return TCM_NO_SENSE; +} + +static sense_reason_t iblock_execute_pr_in(struct se_cmd *cmd, u8 sa, + unsigned char *param_data) +{ + sense_reason_t ret = TCM_NO_SENSE; + + switch (sa) { + case PRI_REPORT_CAPABILITIES: + iblock_pr_report_caps(param_data); + break; + case PRI_READ_KEYS: + ret = iblock_pr_read_keys(cmd,
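iblock_pr_read_keys() above sizes its buffer for 16 keys and doubles on overflow, relying on pr_read_keys() reporting the true key count even when the buffer was too small. The same guess, detect, double, retry idiom in isolation (a self-contained sketch; fake_read_keys() stands in for the real callback):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for ops->pr_read_keys(): fills at most *n slots, then always
 * writes the true number of keys back through *n (pretend there are 40). */
static int fake_read_keys(unsigned long *buf, size_t *n)
{
	const size_t total = 40;
	size_t fill = *n < total ? *n : total;

	for (size_t i = 0; i < fill; i++)
		buf[i] = 0x1000 + i;
	*n = total;
	return 0;
}

int main(void)
{
	size_t cap = 16;	/* optimistic first guess, as in the hunk above */

	for (;;) {
		unsigned long *buf = calloc(cap, sizeof(*buf));
		size_t n = cap;

		if (!buf || fake_read_keys(buf, &n)) {
			free(buf);
			return 1;
		}
		if (n <= cap) {		/* everything fit */
			printf("got %zu keys\n", n);
			free(buf);
			return 0;
		}
		free(buf);		/* too small: double and retry */
		cap *= 2;
	}
}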
param_data); + break; + case PRI_READ_RESERVATION: + ret = iblock_pr_read_reservation(cmd, param_data); + break; + default: + pr_err("Unknown PERSISTENT_RESERVE_IN SA: 0x%02x\n", sa); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + return ret; +} + static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); @@ -868,17 +1126,19 @@ static unsigned int iblock_get_io_opt(struct se_device *dev) return bdev_io_opt(bd); } -static struct sbc_ops iblock_sbc_ops = { +static struct exec_cmd_ops iblock_exec_cmd_ops = { .execute_rw = iblock_execute_rw, .execute_sync_cache = iblock_execute_sync_cache, .execute_write_same = iblock_execute_write_same, .execute_unmap = iblock_execute_unmap, + .execute_pr_out = iblock_execute_pr_out, + .execute_pr_in = iblock_execute_pr_in, }; static sense_reason_t iblock_parse_cdb(struct se_cmd *cmd) { - return sbc_parse_cdb(cmd, &iblock_sbc_ops); + return sbc_parse_cdb(cmd, &iblock_exec_cmd_ops); } static bool iblock_get_write_cache(struct se_device *dev) @@ -889,6 +1149,7 @@ static bool iblock_get_write_cache(struct se_device *dev) static const struct target_backend_ops iblock_ops = { .name = "iblock", .inquiry_prod = "IBLOCK", + .transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR, .inquiry_rev = IBLOCK_VERSION, .owner = THIS_MODULE, .attach_hba = iblock_attach_hba, diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d19ec4e6a4c0..49d9167bb263 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -3538,6 +3538,37 @@ out_put_pr_reg: return ret; } +static sense_reason_t +target_try_pr_out_pt(struct se_cmd *cmd, u8 sa, u64 res_key, u64 sa_res_key, + u8 type, bool aptpl, bool all_tg_pt, bool spec_i_pt) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + + if (!cmd->se_sess || !cmd->se_lun) { + pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } + + if (!ops->execute_pr_out) { + pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + switch (sa) { + case PRO_REGISTER_AND_MOVE: + case PRO_REPLACE_LOST_RESERVATION: + pr_err("SPC-3 PR: PRO_REGISTER_AND_MOVE and PRO_REPLACE_LOST_RESERVATION are not supported by PR passthrough.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (spec_i_pt || all_tg_pt) { + pr_err("SPC-3 PR: SPEC_I_PT and ALL_TG_PT are not supported by PR passthrough.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + return ops->execute_pr_out(cmd, sa, res_key, sa_res_key, type, aptpl); +} + /* * See spc4r17 section 6.14 Table 170 */ @@ -3641,6 +3672,12 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) return TCM_PARAMETER_LIST_LENGTH_ERROR; } + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) { + ret = target_try_pr_out_pt(cmd, sa, res_key, sa_res_key, type, + aptpl, all_tg_pt, spec_i_pt); + goto done; + } + /* * (core_scsi3_emulate_pro_* function parameters * are defined by spc4r17 Table 174: @@ -3682,6 +3719,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) return TCM_INVALID_CDB_FIELD; } +done: if (!ret) target_complete_cmd(cmd, SAM_STAT_GOOD); return ret; @@ -4039,9 +4077,42 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) return 0; } +static sense_reason_t target_try_pr_in_pt(struct se_cmd *cmd, u8 sa) +{ + struct exec_cmd_ops *ops = cmd->protocol_data; + unsigned char *buf; + sense_reason_t ret; + + if (cmd->data_length < 8) { + pr_err("PRIN SA SCSI 
Data Length: %u too small\n", + cmd->data_length); + return TCM_INVALID_CDB_FIELD; + } + + if (!ops->execute_pr_in) { + pr_err("SPC-3 PR: Device has been configured for PR passthrough but it's not supported by the backend.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (sa == PRI_READ_FULL_STATUS) { + pr_err("SPC-3 PR: PRI_READ_FULL_STATUS is not supported by PR passthrough.\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + buf = transport_kmap_data_sg(cmd); + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + ret = ops->execute_pr_in(cmd, sa, buf); + + transport_kunmap_data_sg(cmd); + return ret; +} + sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *cmd) { + u8 sa = cmd->t_task_cdb[1] & 0x1f; sense_reason_t ret; /* @@ -4060,7 +4131,12 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd) return TCM_RESERVATION_CONFLICT; } - switch (cmd->t_task_cdb[1] & 0x1f) { + if (cmd->se_dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) { + ret = target_try_pr_in_pt(cmd, sa); + goto done; + } + + switch (sa) { case PRI_READ_KEYS: ret = core_scsi3_pri_read_keys(cmd); break; @@ -4079,6 +4155,7 @@ target_scsi3_emulate_pr_in(struct se_cmd *cmd) return TCM_INVALID_CDB_FIELD; } +done: if (!ret) target_complete_cmd(cmd, SAM_STAT_GOOD); return ret; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 6648c1c90e19..6f67cc09c2b5 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -643,14 +643,14 @@ static void rd_free_prot(struct se_device *dev) rd_release_prot_space(rd_dev); } -static struct sbc_ops rd_sbc_ops = { +static struct exec_cmd_ops rd_exec_cmd_ops = { .execute_rw = rd_execute_rw, }; static sense_reason_t rd_parse_cdb(struct se_cmd *cmd) { - return sbc_parse_cdb(cmd, &rd_sbc_ops); + return sbc_parse_cdb(cmd, &rd_exec_cmd_ops); } static const struct target_backend_ops rd_mcp_ops = { diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 7536ca797606..6a02561cc20c 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -192,7 +192,7 @@ EXPORT_SYMBOL(sbc_get_write_same_sectors); static sense_reason_t sbc_execute_write_same_unmap(struct se_cmd *cmd) { - struct sbc_ops *ops = cmd->protocol_data; + struct exec_cmd_ops *ops = cmd->protocol_data; sector_t nolb = sbc_get_write_same_sectors(cmd); sense_reason_t ret; @@ -271,7 +271,8 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb) } static sense_reason_t -sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) +sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, + struct exec_cmd_ops *ops) { struct se_device *dev = cmd->se_dev; sector_t end_lba = dev->transport->get_blocks(dev) + 1; @@ -340,7 +341,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *op static sense_reason_t sbc_execute_rw(struct se_cmd *cmd) { - struct sbc_ops *ops = cmd->protocol_data; + struct exec_cmd_ops *ops = cmd->protocol_data; return ops->execute_rw(cmd, cmd->t_data_sg, cmd->t_data_nents, cmd->data_direction); @@ -566,7 +567,7 @@ out: static sense_reason_t sbc_compare_and_write(struct se_cmd *cmd) { - struct sbc_ops *ops = cmd->protocol_data; + struct exec_cmd_ops *ops = cmd->protocol_data; struct se_device *dev = cmd->se_dev; sense_reason_t ret; int rc; @@ -764,7 +765,7 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) } sense_reason_t -sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) 
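Both the new passthrough branch and the existing emulation switch key off the PERSISTENT RESERVE IN service action, which SPC carries in the low five bits of CDB byte 1, hence the cdb[1] & 0x1f above. Decoded standalone (a sketch; the enum values follow SPC-4):

#include <stdio.h>

/* PERSISTENT RESERVE IN service action codes, per SPC-4. */
enum {
	PRI_READ_KEYS		= 0x00,
	PRI_READ_RESERVATION	= 0x01,
	PRI_REPORT_CAPABILITIES	= 0x02,
	PRI_READ_FULL_STATUS	= 0x03,
};

int main(void)
{
	/* 0x5e = PERSISTENT RESERVE IN; the service action lives in byte 1. */
	unsigned char cdb[10] = { 0x5e, PRI_REPORT_CAPABILITIES };
	unsigned char sa = cdb[1] & 0x1f;	/* bits 4..0 of byte 1 */

	printf("service action: 0x%02x\n", sa);
	return sa == PRI_REPORT_CAPABILITIES ? 0 : 1;
}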
+sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops) { struct se_device *dev = cmd->se_dev; unsigned char *cdb = cmd->t_task_cdb; @@ -1076,7 +1077,7 @@ EXPORT_SYMBOL(sbc_get_device_type); static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd) { - struct sbc_ops *ops = cmd->protocol_data; + struct exec_cmd_ops *ops = cmd->protocol_data; struct se_device *dev = cmd->se_dev; unsigned char *buf, *ptr = NULL; sector_t lba; diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 89c0d56294cc..50290abc07bc 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -1424,9 +1424,10 @@ static struct target_opcode_descriptor tcm_opcode_write_verify16 = { .update_usage_bits = set_dpofua_usage_bits, }; -static bool tcm_is_ws_enabled(struct se_cmd *cmd) +static bool tcm_is_ws_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { - struct sbc_ops *ops = cmd->protocol_data; + struct exec_cmd_ops *ops = cmd->protocol_data; struct se_device *dev = cmd->se_dev; return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) || @@ -1451,7 +1452,8 @@ static struct target_opcode_descriptor tcm_opcode_write_same32 = { .update_usage_bits = set_dpofua_usage_bits32, }; -static bool tcm_is_caw_enabled(struct se_cmd *cmd) +static bool tcm_is_caw_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1491,7 +1493,8 @@ static struct target_opcode_descriptor tcm_opcode_read_capacity16 = { 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, }; -static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd) +static bool tcm_is_rep_ref_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1502,7 +1505,6 @@ static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd) } spin_unlock(&dev->t10_alua.lba_map_lock); return true; - } static struct target_opcode_descriptor tcm_opcode_read_report_refferals = { @@ -1537,9 +1539,10 @@ static struct target_opcode_descriptor tcm_opcode_sync_cache16 = { 0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK}, }; -static bool tcm_is_unmap_enabled(struct se_cmd *cmd) +static bool tcm_is_unmap_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { - struct sbc_ops *ops = cmd->protocol_data; + struct exec_cmd_ops *ops = cmd->protocol_data; struct se_device *dev = cmd->se_dev; return ops->execute_unmap && dev->dev_attrib.emulate_tpu; @@ -1659,11 +1662,46 @@ static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = { 0xff, SCSI_CONTROL_MASK}, }; -static bool tcm_is_pr_enabled(struct se_cmd *cmd) +static bool tcm_is_pr_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; - return dev->dev_attrib.emulate_pr; + if (!dev->dev_attrib.emulate_pr) + return false; + + if (!(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)) + return true; + + switch (descr->opcode) { + case RESERVE: + case RESERVE_10: + case RELEASE: + case RELEASE_10: + /* + * The pr_ops which are used by the backend modules don't + * support these commands. + */ + return false; + case PERSISTENT_RESERVE_OUT: + switch (descr->service_action) { + case PRO_REGISTER_AND_MOVE: + case PRO_REPLACE_LOST_RESERVATION: + /* + * The backend modules don't have access to ports and + * I_T nexuses so they can't handle these types of + * requests.
+ */ + return false; + } + break; + case PERSISTENT_RESERVE_IN: + if (descr->service_action == PRI_READ_FULL_STATUS) + return false; + break; + } + + return true; } static struct target_opcode_descriptor tcm_opcode_pri_read_caps = { @@ -1788,20 +1826,13 @@ static struct target_opcode_descriptor tcm_opcode_pro_register_move = { .enabled = tcm_is_pr_enabled, }; -static bool tcm_is_scsi2_reservations_enabled(struct se_cmd *cmd) -{ - struct se_device *dev = cmd->se_dev; - - return dev->dev_attrib.emulate_pr; -} - static struct target_opcode_descriptor tcm_opcode_release = { .support = SCSI_SUPPORT_FULL, .opcode = RELEASE, .cdb_size = 6, .usage_bits = {RELEASE, 0x00, 0x00, 0x00, 0x00, SCSI_CONTROL_MASK}, - .enabled = tcm_is_scsi2_reservations_enabled, + .enabled = tcm_is_pr_enabled, }; static struct target_opcode_descriptor tcm_opcode_release10 = { @@ -1811,7 +1842,7 @@ static struct target_opcode_descriptor tcm_opcode_release10 = { .usage_bits = {RELEASE_10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, SCSI_CONTROL_MASK}, - .enabled = tcm_is_scsi2_reservations_enabled, + .enabled = tcm_is_pr_enabled, }; static struct target_opcode_descriptor tcm_opcode_reserve = { @@ -1820,7 +1851,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve = { .cdb_size = 6, .usage_bits = {RESERVE, 0x00, 0x00, 0x00, 0x00, SCSI_CONTROL_MASK}, - .enabled = tcm_is_scsi2_reservations_enabled, + .enabled = tcm_is_pr_enabled, }; static struct target_opcode_descriptor tcm_opcode_reserve10 = { @@ -1830,7 +1861,7 @@ static struct target_opcode_descriptor tcm_opcode_reserve10 = { .usage_bits = {RESERVE_10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, SCSI_CONTROL_MASK}, - .enabled = tcm_is_scsi2_reservations_enabled, + .enabled = tcm_is_pr_enabled, }; static struct target_opcode_descriptor tcm_opcode_request_sense = { @@ -1849,7 +1880,8 @@ static struct target_opcode_descriptor tcm_opcode_inquiry = { 0xff, SCSI_CONTROL_MASK}, }; -static bool tcm_is_3pc_enabled(struct se_cmd *cmd) +static bool tcm_is_3pc_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1910,8 +1942,8 @@ static struct target_opcode_descriptor tcm_opcode_report_target_pgs = { 0xff, 0xff, 0x00, SCSI_CONTROL_MASK}, }; - -static bool spc_rsoc_enabled(struct se_cmd *cmd) +static bool spc_rsoc_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -1931,7 +1963,8 @@ static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = { .enabled = spc_rsoc_enabled, }; -static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd) +static bool tcm_is_set_tpg_enabled(struct target_opcode_descriptor *descr, + struct se_cmd *cmd) { struct t10_alua_tg_pt_gp *l_tg_pt_gp; struct se_lun *l_lun = cmd->se_lun; @@ -2118,7 +2151,7 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) if (descr->serv_action_valid) return TCM_INVALID_CDB_FIELD; - if (!descr->enabled || descr->enabled(cmd)) + if (!descr->enabled || descr->enabled(descr, cmd)) *opcode = descr; break; case 0x2: @@ -2132,7 +2165,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode) */ if (descr->serv_action_valid && descr->service_action == requested_sa) { - if (!descr->enabled || descr->enabled(cmd)) + if (!descr->enabled || descr->enabled(descr, + cmd)) *opcode = descr; } else if (!descr->serv_action_valid) return TCM_INVALID_CDB_FIELD; @@ -2145,7 +2179,8 @@ spc_rsoc_get_descr(struct se_cmd *cmd, struct 
target_opcode_descriptor **opcode) * be returned in the one_command parameter data format. */ if (descr->service_action == requested_sa) - if (!descr->enabled || descr->enabled(cmd)) + if (!descr->enabled || descr->enabled(descr, + cmd)) *opcode = descr; break; } @@ -2202,7 +2237,7 @@ spc_emulate_report_supp_op_codes(struct se_cmd *cmd) for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) { descr = tcm_supported_opcodes[i]; - if (descr->enabled && !descr->enabled(cmd)) + if (descr->enabled && !descr->enabled(descr, cmd)) continue; response_length += spc_rsoc_encode_command_descriptor( @@ -2231,12 +2266,22 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) struct se_device *dev = cmd->se_dev; unsigned char *cdb = cmd->t_task_cdb; - if (!dev->dev_attrib.emulate_pr && - ((cdb[0] == PERSISTENT_RESERVE_IN) || - (cdb[0] == PERSISTENT_RESERVE_OUT) || - (cdb[0] == RELEASE || cdb[0] == RELEASE_10) || - (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) { - return TCM_UNSUPPORTED_SCSI_OPCODE; + switch (cdb[0]) { + case RESERVE: + case RESERVE_10: + case RELEASE: + case RELEASE_10: + if (!dev->dev_attrib.emulate_pr) + return TCM_UNSUPPORTED_SCSI_OPCODE; + + if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR) + return TCM_UNSUPPORTED_SCSI_OPCODE; + break; + case PERSISTENT_RESERVE_IN: + case PERSISTENT_RESERVE_OUT: + if (!dev->dev_attrib.emulate_pr) + return TCM_UNSUPPORTED_SCSI_OPCODE; + break; } switch (cdb[0]) { diff --git a/drivers/ufs/core/ufs-fault-injection.c b/drivers/ufs/core/ufs-fault-injection.c index 7ac7c4e7ff83..5b1184aac585 100644 --- a/drivers/ufs/core/ufs-fault-injection.c +++ b/drivers/ufs/core/ufs-fault-injection.c @@ -54,7 +54,7 @@ static int ufs_fault_set(const char *val, const struct kernel_param *kp) if (!setup_fault_attr(attr, (char *)val)) return -EINVAL; - strlcpy(kp->arg, val, FAULT_INJ_STR_SIZE); + strscpy(kp->arg, val, FAULT_INJ_STR_SIZE); return 0; } diff --git a/drivers/ufs/core/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c index 4c6a872b7a7c..101d7082446f 100644 --- a/drivers/ufs/core/ufs-hwmon.c +++ b/drivers/ufs/core/ufs-hwmon.c @@ -146,7 +146,7 @@ static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types t return 0; } -static const struct hwmon_channel_info *ufs_hwmon_info[] = { +static const struct hwmon_channel_info *const ufs_hwmon_info[] = { HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT), NULL }; diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 51b3c6ae781d..6fb0e007af63 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -12,21 +12,26 @@ #include <linux/module.h> #include <linux/platform_device.h> #include "ufshcd-priv.h" +#include <linux/delay.h> +#include <scsi/scsi_cmnd.h> +#include <linux/bitfield.h> +#include <linux/iopoll.h> #define MAX_QUEUE_SUP GENMASK(7, 0) #define UFS_MCQ_MIN_RW_QUEUES 2 #define UFS_MCQ_MIN_READ_QUEUES 0 -#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1 #define UFS_MCQ_MIN_POLL_QUEUES 0 #define QUEUE_EN_OFFSET 31 #define QUEUE_ID_OFFSET 16 -#define MAX_DEV_CMD_ENTRIES 2 #define MCQ_CFG_MAC_MASK GENMASK(16, 8) #define MCQ_QCFG_SIZE 0x40 #define MCQ_ENTRY_SIZE_IN_DWORD 8 #define CQE_UCD_BA GENMASK_ULL(63, 7) +/* Max mcq register polling time in microseconds */ +#define MCQ_POLL_US 500000 + static int rw_queue_count_set(const char *val, const struct kernel_param *kp) { return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES, @@ -108,8 +113,7 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, u32 utag = 
blk_mq_unique_tag(req); u32 hwq = blk_mq_unique_tag_to_hwq(utag); - /* uhq[0] is used to serve device commands */ - return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET]; + return &hba->uhq[hwq]; } /** @@ -153,8 +157,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) /* maxq is 0 based value */ hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1; - tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues + rw_queues; + tot_queues = read_queues + poll_queues + rw_queues; if (hba_maxq < tot_queues) { dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n", @@ -162,7 +165,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) return -EOPNOTSUPP; } - rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES; + rem = hba_maxq; if (rw_queues) { hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues; @@ -188,7 +191,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) for (i = 0; i < HCTX_MAX_TYPES; i++) host->nr_hw_queues += hba->nr_queues[i]; - hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES; + hba->nr_hw_queues = host->nr_hw_queues; return 0; } @@ -270,44 +273,57 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba, } static void ufshcd_mcq_process_cqe(struct ufs_hba *hba, - struct ufs_hw_queue *hwq) + struct ufs_hw_queue *hwq) { struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq); int tag = ufshcd_mcq_get_tag(hba, hwq, cqe); - ufshcd_compl_one_cqe(hba, tag, cqe); + if (cqe->command_desc_base_addr) { + ufshcd_compl_one_cqe(hba, tag, cqe); + /* After processing the cqe, mark it as an empty (invalid) entry */ + cqe->command_desc_base_addr = 0; + } } -unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba, - struct ufs_hw_queue *hwq) +void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba, + struct ufs_hw_queue *hwq) { - unsigned long completed_reqs = 0; + unsigned long flags; + u32 entries = hwq->max_entries; - ufshcd_mcq_update_cq_tail_slot(hwq); - while (!ufshcd_mcq_is_cq_empty(hwq)) { + spin_lock_irqsave(&hwq->cq_lock, flags); + while (entries > 0) { ufshcd_mcq_process_cqe(hba, hwq); ufshcd_mcq_inc_cq_head_slot(hwq); - completed_reqs++; + entries--; } - if (completed_reqs) - ufshcd_mcq_update_cq_head(hwq); - - return completed_reqs; + ufshcd_mcq_update_cq_tail_slot(hwq); + hwq->cq_head_slot = hwq->cq_tail_slot; + spin_unlock_irqrestore(&hwq->cq_lock, flags); } -EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock); unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, struct ufs_hw_queue *hwq) { - unsigned long completed_reqs, flags; + unsigned long completed_reqs = 0; + unsigned long flags; spin_lock_irqsave(&hwq->cq_lock, flags); - completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq); + ufshcd_mcq_update_cq_tail_slot(hwq); + while (!ufshcd_mcq_is_cq_empty(hwq)) { + ufshcd_mcq_process_cqe(hba, hwq); + ufshcd_mcq_inc_cq_head_slot(hwq); + completed_reqs++; + } + + if (completed_reqs) + ufshcd_mcq_update_cq_head(hwq); spin_unlock_irqrestore(&hwq->cq_lock, flags); return completed_reqs; } +EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock); void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) { @@ -420,13 +436,243 @@ int ufshcd_mcq_init(struct ufs_hba *hba) hwq->max_entries = hba->nutrs; spin_lock_init(&hwq->sq_lock); spin_lock_init(&hwq->cq_lock); + mutex_init(&hwq->sq_mutex); } /* The very first HW queue serves device commands */ hba->dev_cmd_queue = &hba->uhq[0]; - /* Give dev_cmd_queue the minimal number of entries */ - hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES; host->host_tagset = 1; return 0; } + +static int
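The polling rework above folds the old ufshcd_mcq_poll_cqe_nolock() into ufshcd_mcq_poll_cqe_lock(), so the tail snapshot, the drain loop, and the single head write-back all happen under one cq_lock hold. Reduced to a generic locked ring consumer, the shape is roughly this (a sketch; the structures are illustrative, not UFS types):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative ring: head/tail indices over a power-of-two-sized array. */
struct ring {
	pthread_mutex_t lock;
	unsigned int head, tail, mask;	/* mask = size - 1 */
	int *slots;
};

static size_t drain(struct ring *r, void (*process)(int))
{
	size_t done = 0;
	unsigned int tail;

	pthread_mutex_lock(&r->lock);
	tail = r->tail;			/* snapshot the producer index once */
	while (r->head != tail) {
		process(r->slots[r->head]);
		r->head = (r->head + 1) & r->mask;
		done++;
	}
	/* a real driver would publish the new head to the hardware here */
	pthread_mutex_unlock(&r->lock);
	return done;
}

static void show(int v) { printf("%d\n", v); }

int main(void)
{
	int slots[4] = { 1, 2, 3, 0 };
	struct ring r = { PTHREAD_MUTEX_INITIALIZER, 0, 3, 3, slots };

	return drain(&r, show) == 3 ? 0 : 1;
}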
ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq) +{ + void __iomem *reg; + u32 id = hwq->id, val; + int err; + + if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) + return -ETIMEDOUT; + + writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC); + reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS; + err = read_poll_timeout(readl, val, val & SQ_STS, 20, + MCQ_POLL_US, false, reg); + if (err) + dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n", + __func__, id, err); + return err; +} + +static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq) +{ + void __iomem *reg; + u32 id = hwq->id, val; + int err; + + if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) + return -ETIMEDOUT; + + writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC); + reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS; + err = read_poll_timeout(readl, val, !(val & SQ_STS), 20, + MCQ_POLL_US, false, reg); + if (err) + dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n", + __func__, id, err); + return err; +} + +/** + * ufshcd_mcq_sq_cleanup - Clean up submission queue resources + * associated with the pending command. + * @hba - per adapter instance. + * @task_tag - The command's task tag. + * + * Returns 0 for success; error code otherwise. + */ +int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) +{ + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + struct scsi_cmnd *cmd = lrbp->cmd; + struct ufs_hw_queue *hwq; + void __iomem *reg, *opr_sqd_base; + u32 nexus, id, val; + int err; + + if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) + return -ETIMEDOUT; + + if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) { + if (!cmd) + return -EINVAL; + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + } else { + hwq = hba->dev_cmd_queue; + } + + id = hwq->id; + + mutex_lock(&hwq->sq_mutex); + + /* stop the SQ fetching before working on it */ + err = ufshcd_mcq_sq_stop(hba, hwq); + if (err) + goto unlock; + + /* SQCTI = EXT_IID, IID, LUN, Task Tag */ + nexus = lrbp->lun << 8 | task_tag; + opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id); + writel(nexus, opr_sqd_base + REG_SQCTI); + + /* SQRTCy.ICU = 1 */ + writel(SQ_ICU, opr_sqd_base + REG_SQRTC); + + /* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */ + reg = opr_sqd_base + REG_SQRTS; + err = read_poll_timeout(readl, val, val & SQ_CUS, 20, + MCQ_POLL_US, false, reg); + if (err) + dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n", + __func__, id, task_tag, + FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg))); + + if (ufshcd_mcq_sq_start(hba, hwq)) + err = -ETIMEDOUT; + +unlock: + mutex_unlock(&hwq->sq_mutex); + return err; +} + +/** + * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry. + * Write the sqe's Command Type to 0xF. The host controller will not + * fetch any sqe with Command Type = 0xF. + * + * @utrd - UTP Transfer Request Descriptor to be nullified. + */ +static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd) +{ + u32 dword_0; + + dword_0 = le32_to_cpu(utrd->header.dword_0); + dword_0 &= ~UPIU_COMMAND_TYPE_MASK; + dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF); + utrd->header.dword_0 = cpu_to_le32(dword_0); +} + +/** + * ufshcd_mcq_sqe_search - Search for the command in the submission queue + * If the command is in the submission queue and not issued to the device yet, + * nullify the sqe so the host controller will skip fetching the sqe. + * + * @hba - per adapter instance. + * @hwq - Hardware Queue to be searched. + * @task_tag - The command's task tag. 
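ufshcd_mcq_sq_stop() and ufshcd_mcq_sq_start() above are each one handshake: write the run/stop request to SQRTC, then poll SQRTS until the controller acknowledges or MCQ_POLL_US expires. The <linux/iopoll.h> helper that carries the bounded poll is worth isolating (a sketch; the register and bit are placeholders):

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>

/* Sketch of read_poll_timeout(op, val, cond, sleep_us, timeout_us,
 * sleep_before_read, op_args...): re-reads the register every 20us and
 * returns -ETIMEDOUT if the condition never holds within 500000us. */
static int wait_for_ack(void __iomem *status_reg)
{
	u32 val;

	return read_poll_timeout(readl, val, val & BIT(0), 20,
				 500000, false, status_reg);
}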
+ * + * Returns true if the SQE containing the command is present in the SQ + * (not fetched by the controller); returns false if the SQE is not in the SQ. + */ +static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba, + struct ufs_hw_queue *hwq, int task_tag) +{ + struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; + struct utp_transfer_req_desc *utrd; + u32 mask = hwq->max_entries - 1; + __le64 cmd_desc_base_addr; + bool ret = false; + u64 addr, match; + u32 sq_head_slot; + + if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC) + return true; + + mutex_lock(&hwq->sq_mutex); + + ufshcd_mcq_sq_stop(hba, hwq); + sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq); + if (sq_head_slot == hwq->sq_tail_slot) + goto out; + + cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr; + addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA; + + while (sq_head_slot != hwq->sq_tail_slot) { + utrd = hwq->sqe_base_addr + + sq_head_slot * sizeof(struct utp_transfer_req_desc); + match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA; + if (addr == match) { + ufshcd_mcq_nullify_sqe(utrd); + ret = true; + goto out; + } + sq_head_slot = (sq_head_slot + 1) & mask; + } + +out: + ufshcd_mcq_sq_start(hba, hwq); + mutex_unlock(&hwq->sq_mutex); + return ret; +} + +/** + * ufshcd_mcq_abort - Abort the command in MCQ. + * @cmd - The command to be aborted. + * + * Returns SUCCESS or FAILED error codes + */ +int ufshcd_mcq_abort(struct scsi_cmnd *cmd) +{ + struct Scsi_Host *host = cmd->device->host; + struct ufs_hba *hba = shost_priv(host); + int tag = scsi_cmd_to_rq(cmd)->tag; + struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct ufs_hw_queue *hwq; + int err = FAILED; + + if (!ufshcd_cmd_inflight(lrbp->cmd)) { + dev_err(hba->dev, + "%s: skip abort. cmd at tag %d already completed.\n", + __func__, tag); + goto out; + } + + /* Skip task abort in case previous aborts failed and report failure */ + if (lrbp->req_abort_skip) { + dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n", + __func__, tag); + goto out; + } + + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); + + if (ufshcd_mcq_sqe_search(hba, hwq, tag)) { + /* + * Failure. The command should not have been stuck in the SQ + * for so long that it ended up being aborted. + */ + dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n", + __func__, hwq->id, tag); + goto out; + } + + /* + * The command is not in the submission queue, and it is not + * in the completion queue either. Query the device to see if + * the command is being processed in the device. + */ + if (ufshcd_try_to_abort_task(hba, tag)) { + dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err); + lrbp->req_abort_skip = true; + goto out; + } + + err = SUCCESS; + if (ufshcd_cmd_inflight(lrbp->cmd)) + ufshcd_release_scsi_cmd(hba, lrbp); + +out: + return err; +} diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 883f0e44b54e..6c72075750dd 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -168,7 +168,7 @@ static ssize_t auto_hibern8_show(struct device *dev, } pm_runtime_get_sync(hba->dev); - ufshcd_hold(hba, false); + ufshcd_hold(hba); ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); ufshcd_release(hba); pm_runtime_put_sync(hba->dev); @@ -298,6 +298,37 @@ out: return res < 0 ?
res : count; } +static ssize_t wb_flush_threshold_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%u\n", hba->vps->wb_flush_threshold); +} + +static ssize_t wb_flush_threshold_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + unsigned int wb_flush_threshold; + + if (kstrtouint(buf, 0, &wb_flush_threshold)) + return -EINVAL; + + /* The range of values for wb_flush_threshold is (0,10] */ + if (wb_flush_threshold > UFS_WB_BUF_REMAIN_PERCENT(100) || + wb_flush_threshold == 0) { + dev_err(dev, "The value of wb_flush_threshold is invalid!\n"); + return -EINVAL; + } + + hba->vps->wb_flush_threshold = wb_flush_threshold; + + return count; +} + static DEVICE_ATTR_RW(rpm_lvl); static DEVICE_ATTR_RO(rpm_target_dev_state); static DEVICE_ATTR_RO(rpm_target_link_state); @@ -307,6 +338,7 @@ static DEVICE_ATTR_RO(spm_target_link_state); static DEVICE_ATTR_RW(auto_hibern8); static DEVICE_ATTR_RW(wb_on); static DEVICE_ATTR_RW(enable_wb_buf_flush); +static DEVICE_ATTR_RW(wb_flush_threshold); static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_rpm_lvl.attr, @@ -318,6 +350,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = { &dev_attr_auto_hibern8.attr, &dev_attr_wb_on.attr, &dev_attr_enable_wb_buf_flush.attr, + &dev_attr_wb_flush_threshold.attr, NULL }; diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c index 198360fe5e8e..f2c4422cab86 100644 --- a/drivers/ufs/core/ufshcd-crypto.c +++ b/drivers/ufs/core/ufshcd-crypto.c @@ -24,7 +24,7 @@ static int ufshcd_program_key(struct ufs_hba *hba, u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg); int err = 0; - ufshcd_hold(hba, false); + ufshcd_hold(hba); if (hba->vops && hba->vops->program_key) { err = hba->vops->program_key(hba, cfg, slot); diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index d53b93c21a0c..9566a95aeed9 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -71,22 +71,24 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds); void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba); u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i); void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); -unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba, - struct ufs_hw_queue *hwq); struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req); unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, struct ufs_hw_queue *hwq); +void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba, + struct ufs_hw_queue *hwq); +bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd); +int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag); +int ufshcd_mcq_abort(struct scsi_cmnd *cmd); +int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag); +void ufshcd_release_scsi_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp); -#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1 #define SD_ASCII_STD true #define SD_RAW false int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, bool ascii); -int ufshcd_hold(struct ufs_hba *hba, bool async); -void ufshcd_release(struct ufs_hba *hba); - int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, @@ -366,10 +368,11 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8 static 
inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q) __must_hold(&q->sq_lock) { - u32 mask = q->max_entries - 1; u32 val; - q->sq_tail_slot = (q->sq_tail_slot + 1) & mask; + q->sq_tail_slot++; + if (q->sq_tail_slot == q->max_entries) + q->sq_tail_slot = 0; val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc); writel(val, q->mcq_sq_tail); } @@ -404,4 +407,12 @@ static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q) return cqe + q->cq_head_slot; } + +static inline u32 ufshcd_mcq_get_sq_head_slot(struct ufs_hw_queue *q) +{ + u32 val = readl(q->mcq_sq_head); + + return val / sizeof(struct utp_transfer_req_desc); +} + #endif /* _UFSHCD_PRIV_H_ */ diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index e7e79f515e14..983fae84d9e8 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -98,7 +98,7 @@ /* Polling time to wait for fDeviceInit */ #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ -/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */ +/* UFSHC 4.0 compliant HC support this mode. */ static bool use_mcq_mode = true; static bool is_mcq_supported(struct ufs_hba *hba) @@ -106,23 +106,7 @@ static bool is_mcq_supported(struct ufs_hba *hba) return hba->mcq_sup && use_mcq_mode; } -static int param_set_mcq_mode(const char *val, const struct kernel_param *kp) -{ - int ret; - - ret = param_set_bool(val, kp); - if (ret) - return ret; - - return 0; -} - -static const struct kernel_param_ops mcq_mode_ops = { - .set = param_set_mcq_mode, - .get = param_get_bool, -}; - -module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644); +module_param(use_mcq_mode, bool, 0644); MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default"); #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ @@ -173,7 +157,6 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs); enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, - UFSHCD_NUM_RESERVED = 1, UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED, UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED, }; @@ -301,7 +284,6 @@ static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on); static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on); static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg); -static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag); static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba, bool enable); static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba); @@ -1205,7 +1187,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, bool timeout = false, do_last_check = false; ktime_t start; - ufshcd_hold(hba, false); + ufshcd_hold(hba); spin_lock_irqsave(hba->host->host_lock, flags); /* * Wait for all the outstanding tasks/transfer requests. 
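The ufshcd-priv.h hunk above changes the submission-queue ring arithmetic in two ways: ufshcd_inc_sq_tail() now wraps the tail slot by comparing against max_entries instead of masking with max_entries - 1 (correct even when the queue depth is not a power of two), and the new ufshcd_mcq_get_sq_head_slot() converts the byte offset that the controller reports in the SQ head register into a slot index. A minimal stand-alone sketch of that arithmetic, for illustration only; SLOT_COUNT and SQE_SIZE are made-up stand-ins for hwq->max_entries and sizeof(struct utp_transfer_req_desc), not values taken from the driver:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SLOT_COUNT 40u	/* deliberately not a power of two */
#define SQE_SIZE   32u	/* hypothetical SQ entry size in bytes */

/* Wrap by comparison, as ufshcd_inc_sq_tail() now does: valid for any depth. */
static uint32_t inc_tail(uint32_t tail)
{
	if (++tail == SLOT_COUNT)
		tail = 0;
	return tail;
}

/* The SQ head register holds a byte offset; divide by the entry size to get
 * a slot index, mirroring ufshcd_mcq_get_sq_head_slot(). */
static uint32_t head_slot_from_reg(uint32_t reg_bytes)
{
	return reg_bytes / SQE_SIZE;
}

int main(void)
{
	/* Mask-based wrap, (39 + 1) & (SLOT_COUNT - 1), would give 32 here
	 * because 40 is not a power of two; the comparison gives 0. */
	assert(inc_tail(SLOT_COUNT - 1) == 0);
	assert(head_slot_from_reg(3 * SQE_SIZE) == 3);
	printf("slot arithmetic OK\n");
	return 0;
}

Note that ufshcd_mcq_sqe_search() earlier in this patch still advances its cursor with the mask-based wrap, so that loop continues to rely on max_entries being a power of two; only the shared tail-increment helper drops that assumption.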
@@ -1326,7 +1308,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us) } /* let's not get into low power until clock scaling is completed */ - ufshcd_hold(hba, false); + ufshcd_hold(hba); out: return ret; @@ -1656,7 +1638,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev, goto out; ufshcd_rpm_get_sync(hba); - ufshcd_hold(hba, false); + ufshcd_hold(hba); hba->clk_scaling.is_enabled = value; @@ -1739,7 +1721,7 @@ static void ufshcd_ungate_work(struct work_struct *work) spin_lock_irqsave(hba->host->host_lock, flags); if (hba->clk_gating.state == CLKS_ON) { spin_unlock_irqrestore(hba->host->host_lock, flags); - goto unblock_reqs; + return; } spin_unlock_irqrestore(hba->host->host_lock, flags); @@ -1762,25 +1744,21 @@ static void ufshcd_ungate_work(struct work_struct *work) } hba->clk_gating.is_suspended = false; } -unblock_reqs: - ufshcd_scsi_unblock_requests(hba); } /** * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release. * Also, exit from hibern8 mode and set the link as active. * @hba: per adapter instance - * @async: This indicates whether caller should ungate clocks asynchronously. */ -int ufshcd_hold(struct ufs_hba *hba, bool async) +void ufshcd_hold(struct ufs_hba *hba) { - int rc = 0; bool flush_result; unsigned long flags; if (!ufshcd_is_clkgating_allowed(hba) || !hba->clk_gating.is_initialized) - goto out; + return; spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_gating.active_reqs++; @@ -1797,15 +1775,10 @@ start: */ if (ufshcd_can_hibern8_during_gating(hba) && ufshcd_is_link_hibern8(hba)) { - if (async) { - rc = -EAGAIN; - hba->clk_gating.active_reqs--; - break; - } spin_unlock_irqrestore(hba->host->host_lock, flags); flush_result = flush_work(&hba->clk_gating.ungate_work); if (hba->clk_gating.is_suspended && !flush_result) - goto out; + return; spin_lock_irqsave(hba->host->host_lock, flags); goto start; } @@ -1827,21 +1800,14 @@ start: hba->clk_gating.state = REQ_CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - if (queue_work(hba->clk_gating.clk_gating_workq, - &hba->clk_gating.ungate_work)) - ufshcd_scsi_block_requests(hba); + queue_work(hba->clk_gating.clk_gating_workq, + &hba->clk_gating.ungate_work); /* * fall through to check if we should wait for this * work to be done or not. */ fallthrough; case REQ_CLKS_ON: - if (async) { - rc = -EAGAIN; - hba->clk_gating.active_reqs--; - break; - } - spin_unlock_irqrestore(hba->host->host_lock, flags); flush_work(&hba->clk_gating.ungate_work); /* Make sure state is CLKS_ON before returning */ @@ -1853,8 +1819,6 @@ start: break; } spin_unlock_irqrestore(hba->host->host_lock, flags); -out: - return rc; } EXPORT_SYMBOL_GPL(ufshcd_hold); @@ -2086,7 +2050,7 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba) ufshcd_remove_clk_gating_sysfs(hba); /* Ungate the clock if necessary. 
*/ - ufshcd_hold(hba, false); + ufshcd_hold(hba); hba->clk_gating.is_initialized = false; ufshcd_release(hba); @@ -2336,18 +2300,20 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) /* Read crypto capabilities */ err = ufshcd_hba_init_crypto_capabilities(hba); - if (err) + if (err) { dev_err(hba->dev, "crypto setup failed\n"); + return err; + } hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); if (!hba->mcq_sup) - return err; + return 0; hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT, hba->mcq_capabilities); - return err; + return 0; } /** @@ -2482,7 +2448,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD) return 0; - ufshcd_hold(hba, false); + ufshcd_hold(hba); mutex_lock(&hba->uic_cmd_mutex); ufshcd_add_delay_before_dme_cmd(hba); @@ -2533,7 +2499,7 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int * 11b to indicate Dword granularity. A value of '3' * indicates 4 bytes, '7' indicates 8 bytes, etc." */ - WARN_ONCE(len > 256 * 1024, "len = %#x\n", len); + WARN_ONCE(len > SZ_256K, "len = %#x\n", len); prd->size = cpu_to_le32(len - 1); prd->addr = cpu_to_le64(sg->dma_address); prd->reserved = 0; @@ -2885,12 +2851,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); - /* - * Allows the UFS error handler to wait for prior ufshcd_queuecommand() - * calls. - */ - rcu_read_lock(); - switch (hba->ufshcd_state) { case UFSHCD_STATE_OPERATIONAL: break; @@ -2936,16 +2896,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hba->req_abort_count = 0; - err = ufshcd_hold(hba, true); - if (err) { - err = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - WARN_ON(ufshcd_is_clkgating_allowed(hba) && - (hba->clk_gating.state != CLKS_ON)); + ufshcd_hold(hba); lrbp = &hba->lrb[tag]; - WARN_ON(lrbp->cmd); lrbp->cmd = cmd; lrbp->task_tag = tag; lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun); @@ -2961,7 +2914,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_map_sg(hba, lrbp); if (err) { - lrbp->cmd = NULL; ufshcd_release(hba); goto out; } @@ -2972,8 +2924,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) ufshcd_send_command(hba, tag, hwq); out: - rcu_read_unlock(); - if (ufs_trigger_eh()) { unsigned long flags; @@ -2999,13 +2949,50 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, } /* - * Clear all the requests from the controller for which a bit has been set in - * @mask and wait until the controller confirms that these requests have been - * cleared. + * Check with the block layer if the command is inflight + * @cmd: command to check. + * + * Returns true if command is inflight; false if not. + */ +bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd) +{ + struct request *rq; + + if (!cmd) + return false; + + rq = scsi_cmd_to_rq(cmd); + if (!blk_mq_request_started(rq)) + return false; + + return true; +} + +/* + * Clear the pending command in the controller and wait until + * the controller confirms that the command has been cleared. + * @hba: per adapter instance + * @task_tag: The tag number of the command to be cleared. 
*/ -static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask) +static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag) { + u32 mask = 1U << task_tag; unsigned long flags; + int err; + + if (is_mcq_enabled(hba)) { + /* + * MCQ mode. Clean up the MCQ resources similar to + * what the ufshcd_utrl_clear() does for SDB mode. + */ + err = ufshcd_mcq_sq_cleanup(hba, task_tag); + if (err) { + dev_err(hba->dev, "%s: failed tag=%d. err=%d\n", + __func__, task_tag, err); + return err; + } + return 0; + } /* clear outstanding transaction before retry */ spin_lock_irqsave(hba->host->host_lock, flags); @@ -3099,14 +3086,23 @@ retry: * not trigger any race conditions. */ hba->dev_cmd.complete = NULL; - err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe); + err = ufshcd_get_tr_ocs(lrbp, NULL); if (!err) err = ufshcd_dev_cmd_completion(hba, lrbp); } else { err = -ETIMEDOUT; dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", __func__, lrbp->task_tag); - if (ufshcd_clear_cmds(hba, 1U << lrbp->task_tag) == 0) { + + /* MCQ mode */ + if (is_mcq_enabled(hba)) { + err = ufshcd_clear_cmd(hba, lrbp->task_tag); + hba->dev_cmd.complete = NULL; + return err; + } + + /* SDB mode */ + if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) { /* successfully cleared the command, retry if needed */ err = -EAGAIN; /* @@ -3180,13 +3176,12 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, down_read(&hba->clk_scaling_lock); lrbp = &hba->lrb[tag]; - WARN_ON(lrbp->cmd); + lrbp->cmd = NULL; err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag); if (unlikely(err)) goto out; hba->dev_cmd.complete = &wait; - hba->dev_cmd.cqe = NULL; ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); @@ -3267,7 +3262,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, BUG_ON(!hba); - ufshcd_hold(hba, false); + ufshcd_hold(hba); mutex_lock(&hba->dev_cmd.lock); ufshcd_init_query(hba, &request, &response, opcode, idn, index, selector); @@ -3341,7 +3336,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, return -EINVAL; } - ufshcd_hold(hba, false); + ufshcd_hold(hba); mutex_lock(&hba->dev_cmd.lock); ufshcd_init_query(hba, &request, &response, opcode, idn, index, @@ -3437,7 +3432,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, return -EINVAL; } - ufshcd_hold(hba, false); + ufshcd_hold(hba); mutex_lock(&hba->dev_cmd.lock); ufshcd_init_query(hba, &request, &response, opcode, idn, index, @@ -3779,7 +3774,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) /* * Allocate memory for UTP Transfer descriptors - * UFSHCI requires 1024 byte alignment of UTRD + * UFSHCI requires 1KB alignment of UTRD */ utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs); hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev, @@ -3787,7 +3782,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) &hba->utrdl_dma_addr, GFP_KERNEL); if (!hba->utrdl_base_addr || - WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) { + WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) { dev_err(hba->dev, "Transfer Descriptor Memory allocation failed\n"); goto out; @@ -3803,7 +3798,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) goto skip_utmrdl; /* * Allocate memory for UTP Task Management descriptors - * UFSHCI requires 1024 byte alignment of UTMRD + * UFSHCI requires 1KB alignment of UTMRD */ utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs; hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev, @@ -3811,7 +3806,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) 
&hba->utmrdl_dma_addr, GFP_KERNEL); if (!hba->utmrdl_base_addr || - WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) { + WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) { dev_err(hba->dev, "Task Management Descriptor Memory allocation failed\n"); goto out; @@ -3868,10 +3863,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) /* Configure UTRD with command descriptor base address */ cmd_desc_element_addr = (cmd_desc_dma_addr + (cmd_desc_size * i)); - utrdlp[i].command_desc_base_addr_lo = - cpu_to_le32(lower_32_bits(cmd_desc_element_addr)); - utrdlp[i].command_desc_base_addr_hi = - cpu_to_le32(upper_32_bits(cmd_desc_element_addr)); + utrdlp[i].command_desc_base_addr = + cpu_to_le64(cmd_desc_element_addr); /* Response upiu and prdt offset should be in double words */ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) { @@ -4255,7 +4248,7 @@ int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) uic_cmd.command = UIC_CMD_DME_SET; uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); uic_cmd.argument3 = mode; - ufshcd_hold(hba, false); + ufshcd_hold(hba); ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); ufshcd_release(hba); @@ -4362,7 +4355,7 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) if (update && !pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) { ufshcd_rpm_get_sync(hba); - ufshcd_hold(hba, false); + ufshcd_hold(hba); ufshcd_auto_hibern8_enable(hba); ufshcd_release(hba); ufshcd_rpm_put_sync(hba); @@ -4955,7 +4948,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba) int err = 0; int retries; - ufshcd_hold(hba, false); + ufshcd_hold(hba); mutex_lock(&hba->dev_cmd.lock); for (retries = NOP_OUT_RETRIES; retries > 0; retries--) { err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, @@ -5148,7 +5141,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev) blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT) - blk_queue_update_dma_alignment(q, 4096 - 1); + blk_queue_update_dma_alignment(q, SZ_4K - 1); /* * Block runtime-pm until all consumers are added. * Refer ufshcd_setup_links(). @@ -5416,13 +5409,12 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) } /* Release the resources allocated for processing a SCSI command. */ -static void ufshcd_release_scsi_cmd(struct ufs_hba *hba, - struct ufshcd_lrb *lrbp) +void ufshcd_release_scsi_cmd(struct ufs_hba *hba, + struct ufshcd_lrb *lrbp) { struct scsi_cmnd *cmd = lrbp->cmd; scsi_dma_unmap(cmd); - lrbp->cmd = NULL; /* Mark the command as completed. 
*/ ufshcd_release(hba); ufshcd_clk_scaling_update_busy(hba); } @@ -5438,6 +5430,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, { struct ufshcd_lrb *lrbp; struct scsi_cmnd *cmd; + enum utp_ocs ocs; lrbp = &hba->lrb[task_tag]; lrbp->compl_time_stamp = ktime_get(); @@ -5453,8 +5446,11 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag, } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { if (hba->dev_cmd.complete) { - hba->dev_cmd.cqe = cqe; - ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP); + if (cqe) { + ocs = le32_to_cpu(cqe->status) & MASK_OCS; + lrbp->utr_descriptor_ptr->header.dword_2 = + cpu_to_le32(ocs); + } complete(hba->dev_cmd.complete); ufshcd_clk_scaling_update_busy(hba); } @@ -5507,7 +5503,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) struct ufs_hw_queue *hwq; if (is_mcq_enabled(hba)) { - hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET]; + hwq = &hba->uhq[queue_num]; return ufshcd_mcq_poll_cqe_lock(hba, hwq); } @@ -5532,6 +5528,57 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) } /** + * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is + * invoked from the error handler context or ufshcd_host_reset_and_restore() + * to complete the pending transfers and free the resources associated with + * the scsi command. + * + * @hba: per adapter instance + * @force_compl: This flag is set to true when invoked + * from ufshcd_host_reset_and_restore() in which case it requires special + * handling because the host controller has been reset by ufshcd_hba_stop(). + */ +static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba, + bool force_compl) +{ + struct ufs_hw_queue *hwq; + struct ufshcd_lrb *lrbp; + struct scsi_cmnd *cmd; + unsigned long flags; + u32 hwq_num, utag; + int tag; + + for (tag = 0; tag < hba->nutrs; tag++) { + lrbp = &hba->lrb[tag]; + cmd = lrbp->cmd; + if (!ufshcd_cmd_inflight(cmd) || + test_bit(SCMD_STATE_COMPLETE, &cmd->state)) + continue; + + utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd)); + hwq_num = blk_mq_unique_tag_to_hwq(utag); + hwq = &hba->uhq[hwq_num]; + + if (force_compl) { + ufshcd_mcq_compl_all_cqes_lock(hba, hwq); + /* + * For those cmds of which the cqes are not present + * in the cq, complete them explicitly. 
+ */ + if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) { + spin_lock_irqsave(&hwq->cq_lock, flags); + set_host_byte(cmd, DID_REQUEUE); + ufshcd_release_scsi_cmd(hba, lrbp); + scsi_done(cmd); + spin_unlock_irqrestore(&hwq->cq_lock, flags); + } + } else { + ufshcd_mcq_poll_cqe_lock(hba, hwq); + } + } +} + +/** * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance * @@ -6095,9 +6142,13 @@ out: } /* Complete requests that have door-bell cleared */ -static void ufshcd_complete_requests(struct ufs_hba *hba) +static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl) { - ufshcd_transfer_req_compl(hba); + if (is_mcq_enabled(hba)) + ufshcd_mcq_compl_pending_transfer(hba, force_compl); + else + ufshcd_transfer_req_compl(hba); + ufshcd_tmc_handler(hba); } @@ -6241,22 +6292,22 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba) ufshcd_setup_vreg(hba, true); ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq); ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2); - ufshcd_hold(hba, false); + ufshcd_hold(hba); if (!ufshcd_is_clkgating_allowed(hba)) ufshcd_setup_clocks(hba, true); ufshcd_release(hba); pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM; ufshcd_vops_resume(hba, pm_op); } else { - ufshcd_hold(hba, false); + ufshcd_hold(hba); if (ufshcd_is_clkscaling_supported(hba) && hba->clk_scaling.is_enabled) ufshcd_suspend_clkscaling(hba); ufshcd_clk_scaling_allow(hba, false); } ufshcd_scsi_block_requests(hba); - /* Drain ufshcd_queuecommand() */ - synchronize_rcu(); + /* Wait for ongoing ufshcd_queuecommand() calls to finish. */ + blk_mq_wait_quiesce_done(&hba->host->tag_set); cancel_work_sync(&hba->eeh_work); } @@ -6338,18 +6389,36 @@ static bool ufshcd_abort_all(struct ufs_hba *hba) bool needs_reset = false; int tag, ret; - /* Clear pending transfer requests */ - for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { - ret = ufshcd_try_to_abort_task(hba, tag); - dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, - hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, - ret ? "failed" : "succeeded"); - if (ret) { - needs_reset = true; - goto out; + if (is_mcq_enabled(hba)) { + struct ufshcd_lrb *lrbp; + int tag; + + for (tag = 0; tag < hba->nutrs; tag++) { + lrbp = &hba->lrb[tag]; + if (!ufshcd_cmd_inflight(lrbp->cmd)) + continue; + ret = ufshcd_try_to_abort_task(hba, tag); + dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, + hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, + ret ? "failed" : "succeeded"); + if (ret) { + needs_reset = true; + goto out; + } + } + } else { + /* Clear pending transfer requests */ + for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) { + ret = ufshcd_try_to_abort_task(hba, tag); + dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag, + hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1, + ret ? 
"failed" : "succeeded"); + if (ret) { + needs_reset = true; + goto out; + } } } - /* Clear pending task management requests */ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) { if (ufshcd_clear_tm_cmd(hba, tag)) { @@ -6360,7 +6429,7 @@ static bool ufshcd_abort_all(struct ufs_hba *hba) out: /* Complete the requests that are cleared by s/w */ - ufshcd_complete_requests(hba); + ufshcd_complete_requests(hba, false); return needs_reset; } @@ -6400,7 +6469,7 @@ static void ufshcd_err_handler(struct work_struct *work) spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_err_handling_prepare(hba); /* Complete requests that have door-bell cleared by h/w */ - ufshcd_complete_requests(hba); + ufshcd_complete_requests(hba, false); spin_lock_irqsave(hba->host->host_lock, flags); again: needs_restore = false; @@ -6771,7 +6840,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba) ufshcd_mcq_write_cqis(hba, events, i); if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS) - ufshcd_mcq_poll_cqe_nolock(hba, hwq); + ufshcd_mcq_poll_cqe_lock(hba, hwq); } return IRQ_HANDLED; @@ -6901,7 +6970,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, return PTR_ERR(req); req->end_io_data = &wait; - ufshcd_hold(hba, false); + ufshcd_hold(hba); spin_lock_irqsave(host->host_lock, flags); @@ -7037,7 +7106,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, down_read(&hba->clk_scaling_lock); lrbp = &hba->lrb[tag]; - WARN_ON(lrbp->cmd); lrbp->cmd = NULL; lrbp->task_tag = tag; lrbp->lun = 0; @@ -7138,7 +7206,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, cmd_type = DEV_CMD_TYPE_NOP; fallthrough; case UPIU_TRANSACTION_QUERY_REQ: - ufshcd_hold(hba, false); + ufshcd_hold(hba); mutex_lock(&hba->dev_cmd.lock); err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu, desc_buff, buff_len, @@ -7204,12 +7272,11 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r u16 ehs_len; /* Protects use of hba->reserved_slot. 
*/ - ufshcd_hold(hba, false); + ufshcd_hold(hba); mutex_lock(&hba->dev_cmd.lock); down_read(&hba->clk_scaling_lock); lrbp = &hba->lrb[tag]; - WARN_ON(lrbp->cmd); lrbp->cmd = NULL; lrbp->task_tag = tag; lrbp->lun = UFS_UPIU_RPMB_WLUN; @@ -7281,7 +7348,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) unsigned long flags, pending_reqs = 0, not_cleared = 0; struct Scsi_Host *host; struct ufs_hba *hba; - u32 pos; + struct ufs_hw_queue *hwq; + struct ufshcd_lrb *lrbp; + u32 pos, not_cleared_mask = 0; int err; u8 resp = 0xF, lun; @@ -7296,6 +7365,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) goto out; } + if (is_mcq_enabled(hba)) { + for (pos = 0; pos < hba->nutrs; pos++) { + lrbp = &hba->lrb[pos]; + if (ufshcd_cmd_inflight(lrbp->cmd) && + lrbp->lun == lun) { + ufshcd_clear_cmd(hba, pos); + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); + ufshcd_mcq_poll_cqe_lock(hba, hwq); + } + } + err = 0; + goto out; + } + /* clear the commands that were pending for corresponding LUN */ spin_lock_irqsave(&hba->outstanding_lock, flags); for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) @@ -7304,17 +7387,20 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) hba->outstanding_reqs &= ~pending_reqs; spin_unlock_irqrestore(&hba->outstanding_lock, flags); - if (ufshcd_clear_cmds(hba, pending_reqs) < 0) { - spin_lock_irqsave(&hba->outstanding_lock, flags); - not_cleared = pending_reqs & - ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - hba->outstanding_reqs |= not_cleared; - spin_unlock_irqrestore(&hba->outstanding_lock, flags); + for_each_set_bit(pos, &pending_reqs, hba->nutrs) { + if (ufshcd_clear_cmd(hba, pos) < 0) { + spin_lock_irqsave(&hba->outstanding_lock, flags); + not_cleared = 1U << pos & + ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + hba->outstanding_reqs |= not_cleared; + not_cleared_mask |= not_cleared; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); - dev_err(hba->dev, "%s: failed to clear requests %#lx\n", - __func__, not_cleared); + dev_err(hba->dev, "%s: failed to clear request %d\n", + __func__, pos); + } } - __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared); + __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask); out: hba->req_abort_count = 0; @@ -7352,7 +7438,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap) * * Returns zero on success, non-zero on failure */ -static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) +int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) { struct ufshcd_lrb *lrbp = &hba->lrb[tag]; int err = 0; @@ -7375,6 +7461,20 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) */ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n", __func__, tag); + if (is_mcq_enabled(hba)) { + /* MCQ mode */ + if (ufshcd_cmd_inflight(lrbp->cmd)) { + /* sleep for max. 200us same delay as in SDB mode */ + usleep_range(100, 200); + continue; + } + /* command completed already */ + dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n", + __func__, tag); + goto out; + } + + /* Single Doorbell Mode */ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); if (reg & (1 << tag)) { /* sleep for max. 
200us to stabilize */ @@ -7411,7 +7511,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) goto out; } - err = ufshcd_clear_cmds(hba, 1U << tag); + err = ufshcd_clear_cmd(hba, tag); if (err) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); @@ -7439,14 +7539,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) WARN_ONCE(tag < 0, "Invalid tag %d\n", tag); - ufshcd_hold(hba, false); - reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); - /* If command is already aborted/completed, return FAILED. */ - if (!(test_bit(tag, &hba->outstanding_reqs))) { - dev_err(hba->dev, - "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", - __func__, tag, hba->outstanding_reqs, reg); - goto release; + ufshcd_hold(hba); + + if (!is_mcq_enabled(hba)) { + reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + if (!test_bit(tag, &hba->outstanding_reqs)) { + /* If command is already aborted/completed, return FAILED. */ + dev_err(hba->dev, + "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n", + __func__, tag, hba->outstanding_reqs, reg); + goto release; + } } /* Print Transfer Request of aborted task */ @@ -7471,7 +7574,8 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) } hba->req_abort_count++; - if (!(reg & (1 << tag))) { + if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) { + /* only execute this code in single doorbell mode */ dev_err(hba->dev, "%s: cmd was completed, but without a notifying intr, tag = %d", __func__, tag); @@ -7497,6 +7601,12 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) goto release; } + if (is_mcq_enabled(hba)) { + /* MCQ mode. Branch off to handle abort for mcq mode */ + err = ufshcd_mcq_abort(cmd); + goto release; + } + /* Skip task abort in case previous aborts failed and report failure */ if (lrbp->req_abort_skip) { dev_err(hba->dev, "%s: skipping abort\n", __func__); @@ -7552,7 +7662,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET); ufshcd_hba_stop(hba); hba->silence_err_logs = true; - ufshcd_complete_requests(hba); + ufshcd_complete_requests(hba, true); hba->silence_err_logs = false; /* scale up clocks to max frequency before full reinitialization */ @@ -8502,11 +8612,15 @@ err: static void ufshcd_config_mcq(struct ufs_hba *hba) { int ret; + u32 intrs; ret = ufshcd_mcq_vops_config_esi(hba); dev_info(hba->dev, "ESI %sconfigured\n", ret ? 
"is not " : ""); - ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS); + intrs = UFSHCD_ENABLE_MCQ_INTRS; + if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR) + intrs &= ~MCQ_CQ_EVENT_STATUS; + ufshcd_enable_intr(hba, intrs); ufshcd_mcq_make_queues_operational(hba); ufshcd_mcq_config_mac(hba, hba->nutrs); @@ -8774,7 +8888,7 @@ static const struct scsi_host_template ufshcd_driver_template = { .cmd_per_lun = UFSHCD_CMD_PER_LUN, .can_queue = UFSHCD_CAN_QUEUE, .max_segment_size = PRDT_DATA_BYTE_COUNT_MAX, - .max_sectors = (1 << 20) / SECTOR_SIZE, /* 1 MiB */ + .max_sectors = SZ_1M / SECTOR_SIZE, .max_host_blocked = 1, .track_queue_depth = 1, .skip_settle_delay = 1, @@ -9184,7 +9298,8 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev, }; return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL, - /*bufflen=*/0, /*timeout=*/HZ, /*retries=*/0, &args); + /*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0, + &args); } /** @@ -9430,7 +9545,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) * If we can't transition into any of the low power modes * just gate the clocks. */ - ufshcd_hold(hba, false); + ufshcd_hold(hba); hba->clk_gating.is_suspended = true; if (ufshcd_is_clkscaling_supported(hba)) @@ -9775,28 +9890,6 @@ out: } #endif -static void ufshcd_wl_shutdown(struct device *dev) -{ - struct scsi_device *sdev = to_scsi_device(dev); - struct ufs_hba *hba; - - hba = shost_priv(sdev->host); - - down(&hba->host_sem); - hba->shutting_down = true; - up(&hba->host_sem); - - /* Turn on everything while shutting down */ - ufshcd_rpm_get_sync(hba); - scsi_device_quiesce(sdev); - shost_for_each_device(sdev, hba->host) { - if (sdev == hba->ufs_device_wlun) - continue; - scsi_device_quiesce(sdev); - } - __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); -} - /** * ufshcd_suspend - helper function for suspend operations * @hba: per adapter instance @@ -9981,25 +10074,34 @@ int ufshcd_runtime_resume(struct device *dev) EXPORT_SYMBOL(ufshcd_runtime_resume); #endif /* CONFIG_PM */ -/** - * ufshcd_shutdown - shutdown routine - * @hba: per adapter instance - * - * This function would turn off both UFS device and UFS hba - * regulators. It would also disable clocks. - * - * Returns 0 always to allow force shutdown even in case of errors. - */ -int ufshcd_shutdown(struct ufs_hba *hba) +static void ufshcd_wl_shutdown(struct device *dev) { + struct scsi_device *sdev = to_scsi_device(dev); + struct ufs_hba *hba = shost_priv(sdev->host); + + down(&hba->host_sem); + hba->shutting_down = true; + up(&hba->host_sem); + + /* Turn on everything while shutting down */ + ufshcd_rpm_get_sync(hba); + scsi_device_quiesce(sdev); + shost_for_each_device(sdev, hba->host) { + if (sdev == hba->ufs_device_wlun) + continue; + scsi_device_quiesce(sdev); + } + __ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM); + + /* + * Next, turn off the UFS controller and the UFS regulators. Disable + * clocks. 
+ */ if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) ufshcd_suspend(hba); hba->is_powered = false; - /* allow force shutdown even in case of errors */ - return 0; } -EXPORT_SYMBOL(ufshcd_shutdown); /** * ufshcd_remove - de-allocate SCSI host and host memory space @@ -10226,6 +10328,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) host->max_channel = UFSHCD_MAX_CHANNEL; host->unique_id = host->host_no; host->max_cmd_len = UFS_CDB_SIZE; + host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING); hba->max_pwr_info.is_valid = false; diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c index a46a7666c891..255f8b38d0c2 100644 --- a/drivers/ufs/core/ufshpb.c +++ b/drivers/ufs/core/ufshpb.c @@ -30,7 +30,7 @@ static struct kmem_cache *ufshpb_mctx_cache; static mempool_t *ufshpb_mctx_pool; static mempool_t *ufshpb_page_pool; /* A cache size of 2MB can cache ppn in the 1GB range. */ -static unsigned int ufshpb_host_map_kbytes = 2048; +static unsigned int ufshpb_host_map_kbytes = SZ_2K; static int tot_active_srgn_pages; static struct workqueue_struct *ufshpb_wq; @@ -2461,7 +2461,7 @@ static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba) init_success = !ufshpb_check_hpb_reset_query(hba); - pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE; if (pool_size > tot_active_srgn_pages) { mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages); mempool_resize(ufshpb_page_pool, tot_active_srgn_pages); @@ -2527,7 +2527,7 @@ static int ufshpb_init_mem_wq(struct ufs_hba *hba) return -ENOMEM; } - pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * 1024) / PAGE_SIZE; + pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE; dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n", __func__, __LINE__, ufshpb_host_map_kbytes, pool_size); diff --git a/drivers/ufs/core/ufshpb.h b/drivers/ufs/core/ufshpb.h index 0d6e6004d783..b428bbdd2799 100644 --- a/drivers/ufs/core/ufshpb.h +++ b/drivers/ufs/core/ufshpb.h @@ -25,7 +25,7 @@ /* hpb map & entries macro */ #define HPB_RGN_SIZE_UNIT 512 -#define HPB_ENTRY_BLOCK_SIZE 4096 +#define HPB_ENTRY_BLOCK_SIZE SZ_4K #define HPB_ENTRY_SIZE 0x8 #define PINNED_NOT_SET U32_MAX diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig index 8793e3433580..16624ba08050 100644 --- a/drivers/ufs/host/Kconfig +++ b/drivers/ufs/host/Kconfig @@ -59,7 +59,7 @@ config SCSI_UFS_QCOM depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM depends on GENERIC_MSI_IRQ depends on RESET_CONTROLLER - select QCOM_SCM if SCSI_UFS_CRYPTO + select QCOM_INLINE_CRYPTO_ENGINE if SCSI_UFS_CRYPTO help This selects the QCOM specific additions to UFSHCD platform driver. 
UFS host on QCOM needs some vendor specific configuration before diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile index d7c5bf7fa512..4573aead02eb 100644 --- a/drivers/ufs/host/Makefile +++ b/drivers/ufs/host/Makefile @@ -3,9 +3,7 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o -obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o -ufs_qcom-y += ufs-qcom.o -ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o +obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c index e05c0ae64eea..26761425a76c 100644 --- a/drivers/ufs/host/cdns-pltfrm.c +++ b/drivers/ufs/host/cdns-pltfrm.c @@ -328,7 +328,6 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { static struct platform_driver cdns_ufs_pltfrm_driver = { .probe = cdns_ufs_pltfrm_probe, .remove = cdns_ufs_pltfrm_remove, - .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "cdns-ufshcd", .pm = &cdns_ufs_dev_pm_ops, diff --git a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c index 92b8ad4b58fe..f96fe5855841 100644 --- a/drivers/ufs/host/tc-dwc-g210-pci.c +++ b/drivers/ufs/host/tc-dwc-g210-pci.c @@ -33,15 +33,6 @@ static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = { }; /** - * tc_dwc_g210_pci_shutdown - main function to put the controller in reset state - * @pdev: pointer to PCI device handle - */ -static void tc_dwc_g210_pci_shutdown(struct pci_dev *pdev) -{ - ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); -} - -/** * tc_dwc_g210_pci_remove - de-allocate PCI/SCSI host and host memory space * data structure memory * @pdev: pointer to PCI handle @@ -137,7 +128,6 @@ static struct pci_driver tc_dwc_g210_pci_driver = { .id_table = tc_dwc_g210_pci_tbl, .probe = tc_dwc_g210_pci_probe, .remove = tc_dwc_g210_pci_remove, - .shutdown = tc_dwc_g210_pci_shutdown, .driver = { .pm = &tc_dwc_g210_pci_pm_ops }, diff --git a/drivers/ufs/host/tc-dwc-g210-pltfrm.c b/drivers/ufs/host/tc-dwc-g210-pltfrm.c index f15a84d0c176..4d5389dd9585 100644 --- a/drivers/ufs/host/tc-dwc-g210-pltfrm.c +++ b/drivers/ufs/host/tc-dwc-g210-pltfrm.c @@ -92,7 +92,6 @@ static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = { static struct platform_driver tc_dwc_g210_pltfm_driver = { .probe = tc_dwc_g210_pltfm_probe, .remove = tc_dwc_g210_pltfm_remove, - .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "tc-dwc-g210-pltfm", .pm = &tc_dwc_g210_pltfm_pm_ops, diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 0bf5390739e1..3396e0388512 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -1306,7 +1306,7 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba, * (ufshcd_async_scan()). Note: this callback may also be called * from other functions than ufshcd_init(). 
*/ - hba->host->max_segment_size = 4096; + hba->host->max_segment_size = SZ_4K; if (ufs->drv_data->pre_hce_enable) { ret = ufs->drv_data->pre_hce_enable(ufs); @@ -1757,7 +1757,6 @@ static const struct dev_pm_ops exynos_ufs_pm_ops = { static struct platform_driver exynos_ufs_pltform = { .probe = exynos_ufs_probe, .remove = exynos_ufs_remove, - .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "exynos-ufshc", .pm = &exynos_ufs_pm_ops, diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c index 4c423eba8aa9..5b3060cd0ab8 100644 --- a/drivers/ufs/host/ufs-hisi.c +++ b/drivers/ufs/host/ufs-hisi.c @@ -335,29 +335,29 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) /* PA_TxSkip */ ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0); /*PA_PWRModeUserData0 = 8191, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), SZ_8K - 1); /*PA_PWRModeUserData1 = 65535, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), SZ_64K - 1); /*PA_PWRModeUserData2 = 32767, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1); /*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191); + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), SZ_8K - 1); /*DME_TC0ReplayTimeOutVal = 65535, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535); + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), SZ_64K - 1); /*DME_AFC0ReqTimeOutVal = 32767, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767); + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1); /*PA_PWRModeUserData3 = 8191, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), SZ_8K - 1); /*PA_PWRModeUserData4 = 65535, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), SZ_64K - 1); /*PA_PWRModeUserData5 = 32767, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767); + ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1); /*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191); + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), SZ_8K - 1); /*DME_TC1ReplayTimeOutVal = 65535, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535); + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), SZ_64K - 1); /*DME_AFC1ReqTimeOutVal = 32767, default is 0*/ - ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767); + ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1); } static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba, @@ -593,7 +593,6 @@ static const struct dev_pm_ops ufs_hisi_pm_ops = { static struct platform_driver ufs_hisi_pltform = { .probe = ufs_hisi_probe, .remove = ufs_hisi_remove, - .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "ufshcd-hisi", .pm = &ufs_hisi_pm_ops, diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c index 73e217260390..e68b05976f9e 100644 --- a/drivers/ufs/host/ufs-mediatek.c +++ b/drivers/ufs/host/ufs-mediatek.c @@ -410,9 +410,6 @@ static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, usleep_range(100, 200); } while (ktime_before(time_checked, timeout)); - if (val == state) - return 0; - return -ETIMEDOUT; } @@ -901,6 +898,8 @@ static int ufs_mtk_init(struct ufs_hba *hba) hba->caps |= UFSHCD_CAP_CLK_SCALING; hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; + hba->quirks |= 
UFSHCD_QUIRK_MCQ_BROKEN_INTR; + hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); if (host->caps & UFS_MTK_CAP_DISABLE_AH8) @@ -1650,7 +1649,6 @@ static const struct dev_pm_ops ufs_mtk_pm_ops = { static struct platform_driver ufs_mtk_pltform = { .probe = ufs_mtk_probe, .remove = ufs_mtk_remove, - .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "ufshcd-mtk", .pm = &ufs_mtk_pm_ops, diff --git a/drivers/ufs/host/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c deleted file mode 100644 index 453978877ae9..000000000000 --- a/drivers/ufs/host/ufs-qcom-ice.c +++ /dev/null @@ -1,244 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Qualcomm ICE (Inline Crypto Engine) support. - * - * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. - * Copyright 2019 Google LLC - */ - -#include <linux/delay.h> -#include <linux/platform_device.h> -#include <linux/firmware/qcom/qcom_scm.h> - -#include "ufs-qcom.h" - -#define AES_256_XTS_KEY_SIZE 64 - -/* QCOM ICE registers */ - -#define QCOM_ICE_REG_CONTROL 0x0000 -#define QCOM_ICE_REG_RESET 0x0004 -#define QCOM_ICE_REG_VERSION 0x0008 -#define QCOM_ICE_REG_FUSE_SETTING 0x0010 -#define QCOM_ICE_REG_PARAMETERS_1 0x0014 -#define QCOM_ICE_REG_PARAMETERS_2 0x0018 -#define QCOM_ICE_REG_PARAMETERS_3 0x001C -#define QCOM_ICE_REG_PARAMETERS_4 0x0020 -#define QCOM_ICE_REG_PARAMETERS_5 0x0024 - -/* QCOM ICE v3.X only */ -#define QCOM_ICE_GENERAL_ERR_STTS 0x0040 -#define QCOM_ICE_INVALID_CCFG_ERR_STTS 0x0030 -#define QCOM_ICE_GENERAL_ERR_MASK 0x0044 - -/* QCOM ICE v2.X only */ -#define QCOM_ICE_REG_NON_SEC_IRQ_STTS 0x0040 -#define QCOM_ICE_REG_NON_SEC_IRQ_MASK 0x0044 - -#define QCOM_ICE_REG_NON_SEC_IRQ_CLR 0x0048 -#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME1 0x0050 -#define QCOM_ICE_REG_STREAM1_ERROR_SYNDROME2 0x0054 -#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME1 0x0058 -#define QCOM_ICE_REG_STREAM2_ERROR_SYNDROME2 0x005C -#define QCOM_ICE_REG_STREAM1_BIST_ERROR_VEC 0x0060 -#define QCOM_ICE_REG_STREAM2_BIST_ERROR_VEC 0x0064 -#define QCOM_ICE_REG_STREAM1_BIST_FINISH_VEC 0x0068 -#define QCOM_ICE_REG_STREAM2_BIST_FINISH_VEC 0x006C -#define QCOM_ICE_REG_BIST_STATUS 0x0070 -#define QCOM_ICE_REG_BYPASS_STATUS 0x0074 -#define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 -#define QCOM_ICE_REG_ENDIAN_SWAP 0x1004 -#define QCOM_ICE_REG_TEST_BUS_CONTROL 0x1010 -#define QCOM_ICE_REG_TEST_BUS_REG 0x1014 - -/* BIST ("built-in self-test"?) status flags */ -#define QCOM_ICE_BIST_STATUS_MASK 0xF0000000 - -#define QCOM_ICE_FUSE_SETTING_MASK 0x1 -#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK 0x2 -#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK 0x4 - -#define qcom_ice_writel(host, val, reg) \ - writel((val), (host)->ice_mmio + (reg)) -#define qcom_ice_readl(host, reg) \ - readl((host)->ice_mmio + (reg)) - -static bool qcom_ice_supported(struct ufs_qcom_host *host) -{ - struct device *dev = host->hba->dev; - u32 regval = qcom_ice_readl(host, QCOM_ICE_REG_VERSION); - int major = regval >> 24; - int minor = (regval >> 16) & 0xFF; - int step = regval & 0xFFFF; - - /* For now this driver only supports ICE version 3. */ - if (major != 3) { - dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n", - major, minor, step); - return false; - } - - dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n", - major, minor, step); - - /* If fuses are blown, ICE might not work in the standard way. 
*/ - regval = qcom_ice_readl(host, QCOM_ICE_REG_FUSE_SETTING); - if (regval & (QCOM_ICE_FUSE_SETTING_MASK | - QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK | - QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) { - dev_warn(dev, "Fuses are blown; ICE is unusable!\n"); - return false; - } - return true; -} - -int ufs_qcom_ice_init(struct ufs_qcom_host *host) -{ - struct ufs_hba *hba = host->hba; - struct device *dev = hba->dev; - struct platform_device *pdev = to_platform_device(dev); - struct resource *res; - int err; - - if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) & - MASK_CRYPTO_SUPPORT)) - return 0; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"); - if (!res) { - dev_warn(dev, "ICE registers not found\n"); - goto disable; - } - - if (!qcom_scm_ice_available()) { - dev_warn(dev, "ICE SCM interface not found\n"); - goto disable; - } - - host->ice_mmio = devm_ioremap_resource(dev, res); - if (IS_ERR(host->ice_mmio)) { - err = PTR_ERR(host->ice_mmio); - return err; - } - - if (!qcom_ice_supported(host)) - goto disable; - - return 0; - -disable: - dev_warn(dev, "Disabling inline encryption support\n"); - hba->caps &= ~UFSHCD_CAP_CRYPTO; - return 0; -} - -static void qcom_ice_low_power_mode_enable(struct ufs_qcom_host *host) -{ - u32 regval; - - regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL); - /* - * Enable low power mode sequence - * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0 - */ - regval |= 0x7000; - qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); -} - -static void qcom_ice_optimization_enable(struct ufs_qcom_host *host) -{ - u32 regval; - - /* ICE Optimizations Enable Sequence */ - regval = qcom_ice_readl(host, QCOM_ICE_REG_ADVANCED_CONTROL); - regval |= 0xD807100; - /* ICE HPG requires delay before writing */ - udelay(5); - qcom_ice_writel(host, regval, QCOM_ICE_REG_ADVANCED_CONTROL); - udelay(5); -} - -int ufs_qcom_ice_enable(struct ufs_qcom_host *host) -{ - if (!(host->hba->caps & UFSHCD_CAP_CRYPTO)) - return 0; - qcom_ice_low_power_mode_enable(host); - qcom_ice_optimization_enable(host); - return ufs_qcom_ice_resume(host); -} - -/* Poll until all BIST bits are reset */ -static int qcom_ice_wait_bist_status(struct ufs_qcom_host *host) -{ - int count; - u32 reg; - - for (count = 0; count < 100; count++) { - reg = qcom_ice_readl(host, QCOM_ICE_REG_BIST_STATUS); - if (!(reg & QCOM_ICE_BIST_STATUS_MASK)) - break; - udelay(50); - } - if (reg) - return -ETIMEDOUT; - return 0; -} - -int ufs_qcom_ice_resume(struct ufs_qcom_host *host) -{ - int err; - - if (!(host->hba->caps & UFSHCD_CAP_CRYPTO)) - return 0; - - err = qcom_ice_wait_bist_status(host); - if (err) { - dev_err(host->hba->dev, "BIST status error (%d)\n", err); - return err; - } - return 0; -} - -/* - * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires - * vendor-specific SCM calls for this; it doesn't support the standard way. - */ -int ufs_qcom_ice_program_key(struct ufs_hba *hba, - const union ufs_crypto_cfg_entry *cfg, int slot) -{ - union ufs_crypto_cap_entry cap; - union { - u8 bytes[AES_256_XTS_KEY_SIZE]; - u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)]; - } key; - int i; - int err; - - if (!(cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE)) - return qcom_scm_ice_invalidate_key(slot); - - /* Only AES-256-XTS has been tested so far. 
*/ - cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; - if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || - cap.key_size != UFS_CRYPTO_KEY_SIZE_256) { - dev_err_ratelimited(hba->dev, - "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n", - cap.algorithm_id, cap.key_size); - return -EINVAL; - } - - memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE); - - /* - * The SCM call byte-swaps the 32-bit words of the key. So we have to - * do the same, in order for the final key be correct. - */ - for (i = 0; i < ARRAY_SIZE(key.words); i++) - __cpu_to_be32s(&key.words[i]); - - err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE, - QCOM_SCM_ICE_CIPHER_AES_256_XTS, - cfg->data_unit_size); - memzero_explicit(&key, sizeof(key)); - return err; -} diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 82d02e7f3b4f..8d6fd4c3324f 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -15,6 +15,8 @@ #include <linux/reset-controller.h> #include <linux/devfreq.h> +#include <soc/qcom/ice.h> + #include <ufs/ufshcd.h> #include "ufshcd-pltfrm.h" #include <ufs/unipro.h> @@ -55,6 +57,100 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd) return container_of(rcd, struct ufs_qcom_host, rcdev); } +#ifdef CONFIG_SCSI_UFS_CRYPTO + +static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) +{ + if (host->hba->caps & UFSHCD_CAP_CRYPTO) + qcom_ice_enable(host->ice); +} + +static int ufs_qcom_ice_init(struct ufs_qcom_host *host) +{ + struct ufs_hba *hba = host->hba; + struct device *dev = hba->dev; + struct qcom_ice *ice; + + ice = of_qcom_ice_get(dev); + if (ice == ERR_PTR(-EOPNOTSUPP)) { + dev_warn(dev, "Disabling inline encryption support\n"); + ice = NULL; + } + + if (IS_ERR_OR_NULL(ice)) + return PTR_ERR_OR_ZERO(ice); + + host->ice = ice; + hba->caps |= UFSHCD_CAP_CRYPTO; + + return 0; +} + +static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) +{ + if (host->hba->caps & UFSHCD_CAP_CRYPTO) + return qcom_ice_resume(host->ice); + + return 0; +} + +static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) +{ + if (host->hba->caps & UFSHCD_CAP_CRYPTO) + return qcom_ice_suspend(host->ice); + + return 0; +} + +static int ufs_qcom_ice_program_key(struct ufs_hba *hba, + const union ufs_crypto_cfg_entry *cfg, + int slot) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + union ufs_crypto_cap_entry cap; + bool config_enable = + cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE; + + /* Only AES-256-XTS has been tested so far. 
*/ + cap = hba->crypto_cap_array[cfg->crypto_cap_idx]; + if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS || + cap.key_size != UFS_CRYPTO_KEY_SIZE_256) + return -EINVAL; + + if (config_enable) + return qcom_ice_program_key(host->ice, + QCOM_ICE_CRYPTO_ALG_AES_XTS, + QCOM_ICE_CRYPTO_KEY_SIZE_256, + cfg->crypto_key, + cfg->data_unit_size, slot); + else + return qcom_ice_evict_key(host->ice, slot); +} + +#else + +#define ufs_qcom_ice_program_key NULL + +static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host) +{ +} + +static int ufs_qcom_ice_init(struct ufs_qcom_host *host) +{ + return 0; +} + +static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) +{ + return 0; +} + +static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host) +{ + return 0; +} +#endif + static int ufs_qcom_host_clk_get(struct device *dev, const char *name, struct clk **clk_out, bool optional) { @@ -607,7 +703,7 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, ufs_qcom_disable_lane_clks(host); } - return 0; + return ufs_qcom_ice_suspend(host); } static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) @@ -853,7 +949,6 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba) hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING; hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND; hba->caps |= UFSHCD_CAP_WB_EN; - hba->caps |= UFSHCD_CAP_CRYPTO; hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE; hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; @@ -1556,7 +1651,7 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba) struct ufs_hw_queue *hwq = &hba->uhq[id]; ufshcd_mcq_write_cqis(hba, 0x1, id); - ufshcd_mcq_poll_cqe_nolock(hba, hwq); + ufshcd_mcq_poll_cqe_lock(hba, hwq); return IRQ_HANDLED; } @@ -1723,7 +1818,6 @@ static const struct dev_pm_ops ufs_qcom_pm_ops = { static struct platform_driver ufs_qcom_pltform = { .probe = ufs_qcom_probe, .remove = ufs_qcom_remove, - .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "ufshcd-qcom", .pm = &ufs_qcom_pm_ops, diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 39e774254fb2..6289ad5a42d0 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -7,6 +7,7 @@ #include <linux/reset-controller.h> #include <linux/reset.h> +#include <soc/qcom/ice.h> #include <ufs/ufshcd.h> #define MAX_UFS_QCOM_HOSTS 1 @@ -205,12 +206,13 @@ struct ufs_qcom_host { struct clk *tx_l1_sync_clk; bool is_lane_clks_enabled; +#ifdef CONFIG_SCSI_UFS_CRYPTO + struct qcom_ice *ice; +#endif + void __iomem *dev_ref_clk_ctrl_mmio; bool is_dev_ref_clk_enabled; struct ufs_hw_version hw_ver; -#ifdef CONFIG_SCSI_UFS_CRYPTO - void __iomem *ice_mmio; -#endif u32 dev_ref_clk_en_mask; @@ -248,28 +250,4 @@ static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host) return host->caps & UFS_QCOM_CAP_QUNIPRO; } -/* ufs-qcom-ice.c */ - -#ifdef CONFIG_SCSI_UFS_CRYPTO -int ufs_qcom_ice_init(struct ufs_qcom_host *host); -int ufs_qcom_ice_enable(struct ufs_qcom_host *host); -int ufs_qcom_ice_resume(struct ufs_qcom_host *host); -int ufs_qcom_ice_program_key(struct ufs_hba *hba, - const union ufs_crypto_cfg_entry *cfg, int slot); -#else -static inline int ufs_qcom_ice_init(struct ufs_qcom_host *host) -{ - return 0; -} -static inline int ufs_qcom_ice_enable(struct ufs_qcom_host *host) -{ - return 0; -} -static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host) -{ - return 0; -} -#define ufs_qcom_ice_program_key NULL -#endif /* !CONFIG_SCSI_UFS_CRYPTO */ - #endif /* UFS_QCOM_H_ */ diff --git 
a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c index 051f3f40d92c..2bad75dd6d58 100644 --- a/drivers/ufs/host/ufs-sprd.c +++ b/drivers/ufs/host/ufs-sprd.c @@ -444,7 +444,6 @@ static const struct dev_pm_ops ufs_sprd_pm_ops = { static struct platform_driver ufs_sprd_pltform = { .probe = ufs_sprd_probe, .remove = ufs_sprd_remove, - .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "ufshcd-sprd", .pm = &ufs_sprd_pm_ops, diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c index 9c911787f84c..cf3987773051 100644 --- a/drivers/ufs/host/ufshcd-pci.c +++ b/drivers/ufs/host/ufshcd-pci.c @@ -505,15 +505,6 @@ static int ufshcd_pci_restore(struct device *dev) #endif /** - * ufshcd_pci_shutdown - main function to put the controller in reset state - * @pdev: pointer to PCI device handle - */ -static void ufshcd_pci_shutdown(struct pci_dev *pdev) -{ - ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev)); -} - -/** * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space * data structure memory * @pdev: pointer to PCI handle @@ -608,6 +599,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops }, { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops }, { } /* terminate list */ }; @@ -618,7 +610,6 @@ static struct pci_driver ufshcd_pci_driver = { .id_table = ufshcd_pci_tbl, .probe = ufshcd_pci_probe, .remove = ufshcd_pci_remove, - .shutdown = ufshcd_pci_shutdown, .driver = { .pm = &ufshcd_pci_pm_ops }, diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c index 5739ff007828..0b7430033047 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.c +++ b/drivers/ufs/host/ufshcd-pltfrm.c @@ -190,12 +190,6 @@ out: return err; } -void ufshcd_pltfrm_shutdown(struct platform_device *pdev) -{ - ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev)); -} -EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown); - static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba) { struct device *dev = hba->dev; diff --git a/drivers/ufs/host/ufshcd-pltfrm.h b/drivers/ufs/host/ufshcd-pltfrm.h index 2e4ba2bfbcad..2df108f4ac13 100644 --- a/drivers/ufs/host/ufshcd-pltfrm.h +++ b/drivers/ufs/host/ufshcd-pltfrm.h @@ -31,7 +31,6 @@ int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *dev_param, void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param); int ufshcd_pltfrm_init(struct platform_device *pdev, const struct ufs_hba_variant_ops *vops); -void ufshcd_pltfrm_shutdown(struct platform_device *pdev); int ufshcd_populate_vreg(struct device *dev, const char *name, struct ufs_vreg **out_vreg); diff --git a/include/linux/ata.h b/include/linux/ata.h index c224dbddb9b2..792e10a09787 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -322,15 +322,21 @@ enum { ATA_LOG_SATA_NCQ = 0x10, ATA_LOG_NCQ_NON_DATA = 0x12, ATA_LOG_NCQ_SEND_RECV = 0x13, + ATA_LOG_CDL = 0x18, + ATA_LOG_CDL_SIZE = ATA_SECT_SIZE, ATA_LOG_IDENTIFY_DEVICE = 0x30, + ATA_LOG_SENSE_NCQ = 0x0F, + ATA_LOG_SENSE_NCQ_SIZE = ATA_SECT_SIZE * 2, ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47, /* Identify device log pages: */ + ATA_LOG_SUPPORTED_CAPABILITIES = 0x03, + ATA_LOG_CURRENT_SETTINGS = 0x04, ATA_LOG_SECURITY = 0x06, ATA_LOG_SATA_SETTINGS = 0x08, ATA_LOG_ZONED_INFORMATION = 0x09, - /* Identify device SATA settings log:*/ + /* Identify device SATA 
settings log: */ ATA_LOG_DEVSLP_OFFSET = 0x30, ATA_LOG_DEVSLP_SIZE = 0x08, ATA_LOG_DEVSLP_MDAT = 0x00, @@ -415,6 +421,8 @@ enum { SETFEATURES_SATA_ENABLE = 0x10, /* Enable use of SATA feature */ SETFEATURES_SATA_DISABLE = 0x90, /* Disable use of SATA feature */ + SETFEATURES_CDL = 0x0d, /* Enable/disable cmd duration limits */ + /* SETFEATURE Sector counts for SATA features */ SATA_FPDMA_OFFSET = 0x01, /* FPDMA non-zero buffer offsets */ SATA_FPDMA_AA = 0x02, /* FPDMA Setup FIS Auto-Activate */ @@ -425,6 +433,7 @@ enum { SATA_DEVSLP = 0x09, /* Device Sleep */ SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */ + SETFEATURE_SENSE_DATA_SUCC_NCQ = 0xC4, /* Sense Data for successful NCQ commands */ /* feature values for SET_MAX */ ATA_SET_MAX_ADDR = 0x00, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 752a54e3284b..0bad62cca3d0 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -103,7 +103,7 @@ typedef u16 blk_short_t; #define BLK_STS_NOSPC ((__force blk_status_t)3) #define BLK_STS_TRANSPORT ((__force blk_status_t)4) #define BLK_STS_TARGET ((__force blk_status_t)5) -#define BLK_STS_NEXUS ((__force blk_status_t)6) +#define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6) #define BLK_STS_MEDIUM ((__force blk_status_t)7) #define BLK_STS_PROTECTION ((__force blk_status_t)8) #define BLK_STS_RESOURCE ((__force blk_status_t)9) @@ -173,6 +173,12 @@ typedef u16 blk_short_t; */ #define BLK_STS_OFFLINE ((__force blk_status_t)17) +/* + * BLK_STS_DURATION_LIMIT is returned from the driver when the target device + * aborted the command because it exceeded one of its Command Duration Limits. + */ +#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18) + /** * blk_path_error - returns true if error may be path related * @error: status the request was completed with @@ -191,7 +197,7 @@ static inline bool blk_path_error(blk_status_t error) case BLK_STS_NOTSUPP: case BLK_STS_NOSPC: case BLK_STS_TARGET: - case BLK_STS_NEXUS: + case BLK_STS_RESV_CONFLICT: case BLK_STS_MEDIUM: case BLK_STS_PROTECTION: return false; diff --git a/include/linux/libata.h b/include/linux/libata.h index b19c6028221d..820f7a3a2749 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -94,17 +94,19 @@ enum { ATA_DFLAG_DMADIR = (1 << 10), /* device requires DMADIR */ ATA_DFLAG_NCQ_SEND_RECV = (1 << 11), /* device supports NCQ SEND and RECV */ ATA_DFLAG_NCQ_PRIO = (1 << 12), /* device supports NCQ priority */ - ATA_DFLAG_CFG_MASK = (1 << 13) - 1, - - ATA_DFLAG_PIO = (1 << 13), /* device limited to PIO mode */ - ATA_DFLAG_NCQ_OFF = (1 << 14), /* device limited to non-NCQ mode */ - ATA_DFLAG_SLEEPING = (1 << 15), /* device is sleeping */ - ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */ - ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */ - ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */ - ATA_DFLAG_INIT_MASK = (1 << 19) - 1, - - ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 19), /* Priority cmds sent to dev */ + ATA_DFLAG_CDL = (1 << 13), /* supports cmd duration limits */ + ATA_DFLAG_CFG_MASK = (1 << 14) - 1, + + ATA_DFLAG_PIO = (1 << 14), /* device limited to PIO mode */ + ATA_DFLAG_NCQ_OFF = (1 << 15), /* device limited to non-NCQ mode */ + ATA_DFLAG_SLEEPING = (1 << 16), /* device is sleeping */ + ATA_DFLAG_DUBIOUS_XFER = (1 << 17), /* data transfer not verified */ + ATA_DFLAG_NO_UNLOAD = (1 << 18), /* device doesn't support unload */ + ATA_DFLAG_UNLOCK_HPA = (1 << 19), /* unlock HPA */ + ATA_DFLAG_INIT_MASK = (1 << 20) - 1, + + 
ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */ + ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits are enabled */ ATA_DFLAG_DETACH = (1 << 24), ATA_DFLAG_DETACHED = (1 << 25), ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */ @@ -115,7 +117,8 @@ enum { ATA_DFLAG_FEATURES_MASK = (ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \ ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \ - ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA), + ATA_DFLAG_NCQ_PRIO | ATA_DFLAG_FUA | \ + ATA_DFLAG_CDL), ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ @@ -206,10 +209,12 @@ enum { ATA_QCFLAG_CLEAR_EXCL = (1 << 5), /* clear excl_link on completion */ ATA_QCFLAG_QUIET = (1 << 6), /* don't report device error */ ATA_QCFLAG_RETRY = (1 << 7), /* retry after failure */ + ATA_QCFLAG_HAS_CDL = (1 << 8), /* qc has a CDL descriptor set */ ATA_QCFLAG_EH = (1 << 16), /* cmd aborted and owned by EH */ ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ + ATA_QCFLAG_EH_SUCCESS_CMD = (1 << 19), /* EH should fetch sense for this successful cmd */ /* host set flags */ ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */ @@ -308,8 +313,10 @@ enum { ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, ATA_EH_ENABLE_LINK = (1 << 3), ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ + ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */ - ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, + ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK | + ATA_EH_GET_SUCCESS_SENSE, ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | ATA_EH_ENABLE_LINK, @@ -709,6 +716,9 @@ struct ata_device { /* Concurrent positioning ranges */ struct ata_cpr_log *cpr_log; + /* Command Duration Limits log support */ + u8 cdl[ATA_LOG_CDL_SIZE]; + /* error history */ int spdn_cnt; /* ering is CLEAR_END, read comment above CLEAR_END */ @@ -860,6 +870,7 @@ struct ata_port { struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ #endif /* owned by EH */ + u8 *ncq_sense_buf; u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; }; @@ -1178,6 +1189,7 @@ extern int sata_link_hardreset(struct ata_link *link, bool *online, int (*check_ready)(struct ata_link *)); extern int sata_link_resume(struct ata_link *link, const unsigned long *params, unsigned long deadline); +extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link); extern void ata_eh_analyze_ncq_error(struct ata_link *link); #else static inline const unsigned long * @@ -1215,6 +1227,10 @@ static inline int sata_link_resume(struct ata_link *link, { return -EOPNOTSUPP; } +static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link) +{ + return -EOPNOTSUPP; +} static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { } #endif extern int sata_link_debounce(struct ata_link *link, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 779507ac750b..182b6d614eb1 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -759,20 +759,55 @@ enum { NVME_LBART_ATTRIB_HIDE = 1 << 1, }; +enum nvme_pr_type { + NVME_PR_WRITE_EXCLUSIVE = 1, + NVME_PR_EXCLUSIVE_ACCESS = 2, + NVME_PR_WRITE_EXCLUSIVE_REG_ONLY = 3, + NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY = 4, + NVME_PR_WRITE_EXCLUSIVE_ALL_REGS = 5, + NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS = 6, +}; + +enum nvme_eds { + NVME_EXTENDED_DATA_STRUCT = 0x1, +}; + +struct nvme_registered_ctrl { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64
hostid; + __le64 rkey; +}; + struct nvme_reservation_status { __le32 gen; __u8 rtype; __u8 regctl[2]; __u8 resv5[2]; __u8 ptpls; - __u8 resv10[13]; - struct { - __le16 cntlid; - __u8 rcsts; - __u8 resv3[5]; - __le64 hostid; - __le64 rkey; - } regctl_ds[]; + __u8 resv10[14]; + struct nvme_registered_ctrl regctl_ds[]; +}; + +struct nvme_registered_ctrl_ext { + __le16 cntlid; + __u8 rcsts; + __u8 rsvd3[5]; + __le64 rkey; + __u8 hostid[16]; + __u8 rsvd32[32]; +}; + +struct nvme_reservation_status_ext { + __le32 gen; + __u8 rtype; + __u8 regctl[2]; + __u8 resv5[2]; + __u8 ptpls; + __u8 resv10[14]; + __u8 rsvd24[40]; + struct nvme_registered_ctrl_ext regctl_eds[]; }; enum nvme_async_event_type { diff --git a/include/linux/pr.h b/include/linux/pr.h index 94ceec713afe..3003daec28a5 100644 --- a/include/linux/pr.h +++ b/include/linux/pr.h @@ -4,6 +4,18 @@ #include <uapi/linux/pr.h> +struct pr_keys { + u32 generation; + u32 num_keys; + u64 keys[]; +}; + +struct pr_held_reservation { + u64 key; + u32 generation; + enum pr_type type; +}; + struct pr_ops { int (*pr_register)(struct block_device *bdev, u64 old_key, u64 new_key, u32 flags); @@ -14,6 +26,19 @@ struct pr_ops { int (*pr_preempt)(struct block_device *bdev, u64 old_key, u64 new_key, enum pr_type type, bool abort); int (*pr_clear)(struct block_device *bdev, u64 key); + /* + * pr_read_keys - Read the registered keys and return them in the + * pr_keys->keys array. The keys array will have been allocated at the + * end of the pr_keys struct, and pr_keys->num_keys must be set to the + * number of keys the array can hold. If there are more than can fit + * in the array, success will still be returned and pr_keys->num_keys + * will reflect the total number of keys the device contains, so the + * caller can retry with a larger array. + */ + int (*pr_read_keys)(struct block_device *bdev, + struct pr_keys *keys_info); + int (*pr_read_reservation)(struct block_device *bdev, + struct pr_held_reservation *rsv); }; #endif /* LINUX_PR_H */ diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index c2cb5f69635c..526def14e7fb 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -52,6 +52,11 @@ struct scsi_pointer { #define SCMD_TAGGED (1 << 0) #define SCMD_INITIALIZED (1 << 1) #define SCMD_LAST (1 << 2) +/* + * libata uses SCSI EH to fetch sense data for successful commands. + * SCSI EH should not overwrite scmd->result when SCMD_FORCE_EH_SUCCESS is set. 
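/*
 * Illustrative sketch (not from this series): a pr_read_keys() caller
 * honoring the retry contract documented in the pr_ops hunk above. The
 * function name and the initial 16-key guess are made up for this example.
 */
#include <linux/blkdev.h>
#include <linux/overflow.h>
#include <linux/pr.h>
#include <linux/slab.h>

static struct pr_keys *example_read_all_keys(struct block_device *bdev)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	u32 num_keys = 16;	/* initial guess, grown on demand */
	struct pr_keys *keys;

	for (;;) {
		keys = kzalloc(struct_size(keys, keys, num_keys), GFP_KERNEL);
		if (!keys)
			return NULL;
		keys->num_keys = num_keys;

		if (ops->pr_read_keys(bdev, keys)) {
			kfree(keys);
			return NULL;
		}
		/* on success, num_keys holds the device's total key count */
		if (keys->num_keys <= num_keys)
			return keys;

		num_keys = keys->num_keys;
		kfree(keys);
	}
}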
+ */ +#define SCMD_FORCE_EH_SUCCESS (1 << 3) #define SCMD_FAIL_IF_RECOVERING (1 << 4) /* flags preserved across unprep / reprep */ #define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED | SCMD_FAIL_IF_RECOVERING) diff --git a/include/scsi/scsi_common.h b/include/scsi/scsi_common.h index 5b567b43e1b1..fb58715fac86 100644 --- a/include/scsi/scsi_common.h +++ b/include/scsi/scsi_common.h @@ -7,8 +7,21 @@ #define _SCSI_COMMON_H_ #include <linux/types.h> +#include <uapi/linux/pr.h> #include <scsi/scsi_proto.h> +enum scsi_pr_type { + SCSI_PR_WRITE_EXCLUSIVE = 0x01, + SCSI_PR_EXCLUSIVE_ACCESS = 0x03, + SCSI_PR_WRITE_EXCLUSIVE_REG_ONLY = 0x05, + SCSI_PR_EXCLUSIVE_ACCESS_REG_ONLY = 0x06, + SCSI_PR_WRITE_EXCLUSIVE_ALL_REGS = 0x07, + SCSI_PR_EXCLUSIVE_ACCESS_ALL_REGS = 0x08, +}; + +enum scsi_pr_type block_pr_type_to_scsi(enum pr_type type); +enum pr_type scsi_pr_type_to_block(enum scsi_pr_type type); + static inline unsigned scsi_varlen_cdb_length(const void *hdr) { diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index f10a008e5bfa..75b2235b99e2 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -218,6 +218,9 @@ struct scsi_device { unsigned silence_suspend:1; /* Do not print runtime PM related messages */ unsigned no_vpd_size:1; /* No VPD size reported in header */ + unsigned cdl_supported:1; /* Command duration limits supported */ + unsigned cdl_enable:1; /* Enable/disable Command duration limits */ + unsigned int queue_stopped; /* request queue is quiesced */ bool offline_already; /* Device offline message logged */ @@ -364,6 +367,8 @@ extern int scsi_register_device_handler(struct scsi_device_handler *scsi_dh); extern void scsi_remove_device(struct scsi_device *); extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh); void scsi_attach_vpd(struct scsi_device *sdev); +void scsi_cdl_check(struct scsi_device *sdev); +int scsi_cdl_enable(struct scsi_device *sdev, bool enable); extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); extern int __must_check scsi_device_get(struct scsi_device *); @@ -421,10 +426,10 @@ extern int scsi_track_queue_full(struct scsi_device *, int); extern int scsi_set_medium_removal(struct scsi_device *, char); -extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, - unsigned char *buffer, int len, int timeout, - int retries, struct scsi_mode_data *data, - struct scsi_sense_hdr *); +int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, + int subpage, unsigned char *buffer, int len, int timeout, + int retries, struct scsi_mode_data *data, + struct scsi_sense_hdr *); extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, unsigned char *buffer, int len, int timeout, int retries, struct scsi_mode_data *data, @@ -433,8 +438,9 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, struct scsi_sense_hdr *sshdr); extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf, int buf_len); -extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, - unsigned int len, unsigned char opcode); +int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer, + unsigned int len, unsigned char opcode, + unsigned short sa); extern int scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state); extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, @@ -450,7 +456,7 @@ extern void scsi_scan_target(struct device *parent, unsigned int channel, 
unsigned int id, u64 lun, enum scsi_scan_mode rescan); extern void scsi_target_reap(struct scsi_target *); -extern void scsi_target_block(struct device *); +void scsi_block_targets(struct Scsi_Host *shost, struct device *dev); extern void scsi_target_unblock(struct device *, enum scsi_device_state); extern void scsi_remove_target(struct device *); extern const char *scsi_device_state_name(enum scsi_device_state); diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 0f29799efa02..70b7475dcf56 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -458,6 +458,9 @@ struct scsi_host_template { /* True if the host uses host-wide tagspace */ unsigned host_tagset:1; + /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */ + unsigned queuecommand_may_block:1; + /* * Countdown for host blocking with no commands outstanding. */ @@ -653,6 +656,9 @@ struct Scsi_Host { /* True if the host uses host-wide tagspace */ unsigned host_tagset:1; + /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */ + unsigned queuecommand_may_block:1; + /* Host responded with short (<36 bytes) INQUIRY result */ unsigned short_inquiry:1; diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index fbe5bdfe4d6e..07d65c1f59db 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -151,6 +151,11 @@ #define ZO_FINISH_ZONE 0x02 #define ZO_OPEN_ZONE 0x03 #define ZO_RESET_WRITE_POINTER 0x04 +/* values for PR in service action */ +#define READ_KEYS 0x00 +#define READ_RESERVATION 0x01 +#define REPORT_CAPABILITES 0x02 +#define READ_FULL_STATUS 0x03 /* values for variable length command */ #define XDREAD_32 0x03 #define XDWRITE_32 0x04 diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index a3c193df25b3..739df993aa5e 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -62,13 +62,17 @@ struct target_backend_ops { struct configfs_attribute **tb_dev_action_attrs; }; -struct sbc_ops { +struct exec_cmd_ops { sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *, u32, enum dma_data_direction); sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); sense_reason_t (*execute_write_same)(struct se_cmd *cmd); sense_reason_t (*execute_unmap)(struct se_cmd *cmd, sector_t lba, sector_t nolb); + sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key, + u64 sa_key, u8 type, bool aptpl); + sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa, + unsigned char *param_data); }; int transport_backend_register(const struct target_backend_ops *); @@ -86,7 +90,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *); sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *); -sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops); +sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops); u32 sbc_get_device_rev(struct se_device *dev); u32 sbc_get_device_type(struct se_device *dev); sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 5f8e96f1516f..159567359bbb 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -880,7 +880,8 @@ struct target_opcode_descriptor { u8 specific_timeout; u16 nominal_timeout; u16 recommended_timeout; - bool (*enabled)(struct se_cmd *cmd); + bool (*enabled)(struct 
target_opcode_descriptor *descr, + struct se_cmd *cmd); void (*update_usage_bits)(u8 *usage_bits, struct se_device *dev); u8 usage_bits[]; diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index a2c7befd451a..8e2d9b1b0e77 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -269,9 +269,14 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, __field( unsigned int, prot_sglen ) __field( unsigned char, prot_op ) __dynamic_array(unsigned char, cmnd, cmd->cmd_len) + __field( u8, sense_key ) + __field( u8, asc ) + __field( u8, ascq ) ), TP_fast_assign( + struct scsi_sense_hdr sshdr; + __entry->host_no = cmd->device->host->host_no; __entry->channel = cmd->device->channel; __entry->id = cmd->device->id; @@ -285,11 +290,22 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, __entry->prot_sglen = scsi_prot_sg_count(cmd); __entry->prot_op = scsi_get_prot_op(cmd); memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); + if (cmd->sense_buffer && SCSI_SENSE_VALID(cmd) && + scsi_command_normalize_sense(cmd, &sshdr)) { + __entry->sense_key = sshdr.sense_key; + __entry->asc = sshdr.asc; + __entry->ascq = sshdr.ascq; + } else { + __entry->sense_key = 0; + __entry->asc = 0; + __entry->ascq = 0; + } ), TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u " \ "prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s) " \ - "result=(driver=%s host=%s message=%s status=%s)", + "result=(driver=%s host=%s message=%s status=%s) " + "sense=(key=%#x asc=%#x ascq=%#x)", __entry->host_no, __entry->channel, __entry->id, __entry->lun, __entry->data_sglen, __entry->prot_sglen, show_prot_op_name(__entry->prot_op), __entry->driver_tag, @@ -299,7 +315,8 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, "DRIVER_OK", show_hostbyte_name(((__entry->result) >> 16) & 0xff), "COMMAND_COMPLETE", - show_statusbyte_name(__entry->result & 0xff)) + show_statusbyte_name(__entry->result & 0xff), + __entry->sense_key, __entry->asc, __entry->ascq) ); DEFINE_EVENT(scsi_cmd_done_timeout_template, scsi_dispatch_cmd_done, diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h index f70f2596a6bf..99440b2e8c35 100644 --- a/include/uapi/linux/ioprio.h +++ b/include/uapi/linux/ioprio.h @@ -2,22 +2,23 @@ #ifndef _UAPI_LINUX_IOPRIO_H #define _UAPI_LINUX_IOPRIO_H +#include <linux/stddef.h> +#include <linux/types.h> + /* * Gives us 8 prio classes with 13-bits of data for each class */ #define IOPRIO_CLASS_SHIFT 13 -#define IOPRIO_CLASS_MASK 0x07 +#define IOPRIO_NR_CLASSES 8 +#define IOPRIO_CLASS_MASK (IOPRIO_NR_CLASSES - 1) #define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) #define IOPRIO_PRIO_CLASS(ioprio) \ (((ioprio) >> IOPRIO_CLASS_SHIFT) & IOPRIO_CLASS_MASK) #define IOPRIO_PRIO_DATA(ioprio) ((ioprio) & IOPRIO_PRIO_MASK) -#define IOPRIO_PRIO_VALUE(class, data) \ - ((((class) & IOPRIO_CLASS_MASK) << IOPRIO_CLASS_SHIFT) | \ - ((data) & IOPRIO_PRIO_MASK)) /* - * These are the io priority groups as implemented by the BFQ and mq-deadline + * These are the io priority classes as implemented by the BFQ and mq-deadline * schedulers. RT is the realtime class, it always gets premium service. For * ATA disks supporting NCQ IO priority, RT class IOs will be processed using * high priority NCQ commands. BE is the best-effort scheduling class, the @@ -25,18 +26,30 @@ * served when no one else is using the disk. 
*/ enum { - IOPRIO_CLASS_NONE, - IOPRIO_CLASS_RT, - IOPRIO_CLASS_BE, - IOPRIO_CLASS_IDLE, + IOPRIO_CLASS_NONE = 0, + IOPRIO_CLASS_RT = 1, + IOPRIO_CLASS_BE = 2, + IOPRIO_CLASS_IDLE = 3, + + /* Special class to indicate an invalid ioprio value */ + IOPRIO_CLASS_INVALID = 7, }; /* - * The RT and BE priority classes both support up to 8 priority levels. + * The RT and BE priority classes both support up to 8 priority levels that + * can be specified using the lower 3-bits of the priority data. */ -#define IOPRIO_NR_LEVELS 8 -#define IOPRIO_BE_NR IOPRIO_NR_LEVELS +#define IOPRIO_LEVEL_NR_BITS 3 +#define IOPRIO_NR_LEVELS (1 << IOPRIO_LEVEL_NR_BITS) +#define IOPRIO_LEVEL_MASK (IOPRIO_NR_LEVELS - 1) +#define IOPRIO_PRIO_LEVEL(ioprio) ((ioprio) & IOPRIO_LEVEL_MASK) + +#define IOPRIO_BE_NR IOPRIO_NR_LEVELS +/* + * Possible values for the "which" argument of the ioprio_get() and + * ioprio_set() system calls (see "man ioprio_set"). + */ enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP, @@ -44,9 +57,70 @@ enum { }; /* - * Fallback BE priority level. + * Fallback BE class priority level. */ #define IOPRIO_NORM 4 #define IOPRIO_BE_NORM IOPRIO_NORM +/* + * The 10 bits between the priority class and the priority level are used to + * optionally define I/O hints for any combination of I/O priority class and + * level. Depending on the kernel configuration, I/O scheduler being used and + * the target I/O device being used, hints can influence how I/Os are processed + * without affecting the I/O scheduling ordering defined by the I/O priority + * class and level. + */ +#define IOPRIO_HINT_SHIFT IOPRIO_LEVEL_NR_BITS +#define IOPRIO_HINT_NR_BITS 10 +#define IOPRIO_NR_HINTS (1 << IOPRIO_HINT_NR_BITS) +#define IOPRIO_HINT_MASK (IOPRIO_NR_HINTS - 1) +#define IOPRIO_PRIO_HINT(ioprio) \ + (((ioprio) >> IOPRIO_HINT_SHIFT) & IOPRIO_HINT_MASK) + +/* + * I/O hints. + */ +enum { + /* No hint */ + IOPRIO_HINT_NONE = 0, + + /* + * Device command duration limits: indicate to the device a desired + * duration limit for the commands that will be used to process an I/O. + * These will currently only be effective for SCSI and ATA devices that + * support the command duration limits feature. If this feature is + * enabled, then the commands issued to the device to process an I/O with + * one of these hints set will have the duration limit index (dld field) + * set to the value of the hint. + */ + IOPRIO_HINT_DEV_DURATION_LIMIT_1 = 1, + IOPRIO_HINT_DEV_DURATION_LIMIT_2 = 2, + IOPRIO_HINT_DEV_DURATION_LIMIT_3 = 3, + IOPRIO_HINT_DEV_DURATION_LIMIT_4 = 4, + IOPRIO_HINT_DEV_DURATION_LIMIT_5 = 5, + IOPRIO_HINT_DEV_DURATION_LIMIT_6 = 6, + IOPRIO_HINT_DEV_DURATION_LIMIT_7 = 7, +}; + +#define IOPRIO_BAD_VALUE(val, max) ((val) < 0 || (val) >= (max)) + +/* + * Return an I/O priority value based on a class, a level and a hint. 
+ */ +static __always_inline __u16 ioprio_value(int class, int level, int hint) +{ + if (IOPRIO_BAD_VALUE(class, IOPRIO_NR_CLASSES) || + IOPRIO_BAD_VALUE(level, IOPRIO_NR_LEVELS) || + IOPRIO_BAD_VALUE(hint, IOPRIO_NR_HINTS)) + return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT; + + return (class << IOPRIO_CLASS_SHIFT) | + (hint << IOPRIO_HINT_SHIFT) | level; +} + +#define IOPRIO_PRIO_VALUE(class, level) \ + ioprio_value(class, level, IOPRIO_HINT_NONE) +#define IOPRIO_PRIO_VALUE_HINT(class, level, hint) \ + ioprio_value(class, level, hint) + #endif /* _UAPI_LINUX_IOPRIO_H */ diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index df1d04f7a542..6dc11fa0ebb1 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -225,7 +225,6 @@ struct ufs_dev_cmd { struct mutex lock; struct completion *complete; struct ufs_query query; - struct cq_entry *cqe; }; /** @@ -611,6 +610,19 @@ enum ufshcd_quirks { * to reinit the device after switching to maximum gear. */ UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19, + + /* + * Some hosts raise an interrupt (per queue) in addition to + * CQES (traditional) when ESI is disabled. + * Enabling this quirk disables CQES and uses the per-queue interrupt. + */ + UFSHCD_QUIRK_MCQ_BROKEN_INTR = 1 << 20, + + /* + * Some hosts do not implement the SQ Run Time Command (SQRTC) register + * and thus need this quirk to skip the related flow. + */ + UFSHCD_QUIRK_MCQ_BROKEN_RTC = 1 << 21, }; enum ufshcd_caps { @@ -1087,6 +1099,7 @@ struct ufs_hba { * @cq_tail_slot: current slot to which CQ tail pointer is pointing * @cq_head_slot: current slot to which CQ head pointer is pointing * @cq_lock: Synchronize between multiple polling instances + * @sq_mutex: prevent concurrent access to the submission queue */ struct ufs_hw_queue { void __iomem *mcq_sq_head; @@ -1105,6 +1118,8 @@ struct ufs_hw_queue { u32 cq_tail_slot; u32 cq_head_slot; spinlock_t cq_lock; + /* prevent concurrent access to submission queue */ + struct mutex sq_mutex; }; static inline bool is_mcq_enabled(struct ufs_hba *hba) @@ -1240,7 +1255,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val); void ufshcd_hba_stop(struct ufs_hba *hba); void ufshcd_schedule_eh_work(struct ufs_hba *hba); void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); -unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba, +unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, struct ufs_hw_queue *hwq); void ufshcd_mcq_enable_esi(struct ufs_hba *hba); void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg); @@ -1277,7 +1292,6 @@ extern int ufshcd_system_freeze(struct device *dev); extern int ufshcd_system_thaw(struct device *dev); extern int ufshcd_system_restore(struct device *dev); #endif -extern int ufshcd_shutdown(struct ufs_hba *hba); extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba, int agreed_gear, @@ -1358,7 +1372,7 @@ void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, u8 **buf, bool ascii); -int ufshcd_hold(struct ufs_hba *hba, bool async); +void ufshcd_hold(struct ufs_hba *hba); void ufshcd_release(struct ufs_hba *hba); void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value); diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index 11424bb03814..146fbea76d98 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -99,6 +99,9 @@ enum { enum { REG_SQHP = 0x0, REG_SQTP = 0x4, + REG_SQRTC = 0x8, + REG_SQCTI = 0xC, + REG_SQRTS = 0x10, }; enum { @@ -111,12 +114,26 @@ enum { REG_CQIE = 0x4, };
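/*
 * Illustrative sketch (not from this series): a userspace caller using
 * the new hint macros from the ioprio.h hunk above to request command
 * duration limit descriptor 1 for its subsequent I/O. Assumes uapi
 * headers from this series are installed; the function name is made up,
 * and the raw syscall is used because glibc provides no ioprio_set()
 * wrapper.
 */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/ioprio.h>

static int example_set_cdl_hint(void)
{
	int prio = IOPRIO_PRIO_VALUE_HINT(IOPRIO_CLASS_BE, 0,
					  IOPRIO_HINT_DEV_DURATION_LIMIT_1);

	/* which = IOPRIO_WHO_PROCESS, who = 0 (the calling process) */
	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio);
}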
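/*
 * Illustrative sketch (not from this series): stopping an MCQ submission
 * queue through the SQRTC/SQRTS registers added above, using the arming
 * values defined in the enums just below. The sqd_base argument and the
 * 1000us timeout are made up for this example; the real helper lives in
 * ufshcd's MCQ code.
 */
#include <linux/io.h>
#include <linux/iopoll.h>

static int example_mcq_sq_stop(void __iomem *sqd_base)
{
	u32 val;

	writel(SQ_STOP, sqd_base + REG_SQRTC);
	/* poll SQRTS until the controller reports the queue stopped */
	return read_poll_timeout(readl, val, val & SQ_STS, 20, 1000, false,
				 sqd_base + REG_SQRTS);
}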
+enum { + SQ_START = 0x0, + SQ_STOP = 0x1, + SQ_ICU = 0x2, +}; + +enum { + SQ_STS = 0x1, + SQ_CUS = 0x2, +}; + +#define SQ_ICU_ERR_CODE_MASK GENMASK(7, 4) +#define UPIU_COMMAND_TYPE_MASK GENMASK(31, 28) #define UFS_MASK(mask, offset) ((mask) << (offset)) /* UFS Version 08h */ #define MINOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 0) #define MAJOR_VERSION_NUM_MASK UFS_MASK(0xFFFF, 16) +#define UFSHCD_NUM_RESERVED 1 /* * Controller UFSHCI version * - 2.x and newer use the following scheme: @@ -453,7 +470,7 @@ enum { }; /* The maximum length of the data byte count field in the PRDT is 256KB */ -#define PRDT_DATA_BYTE_COUNT_MAX (256 * 1024) +#define PRDT_DATA_BYTE_COUNT_MAX SZ_256K /* The granularity of the data byte count field in the PRDT is 32-bit */ #define PRDT_DATA_BYTE_COUNT_PAD 4 @@ -503,8 +520,7 @@ struct request_desc_header { /** * struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD) * @header: UTRD header DW-0 to DW-3 - * @command_desc_base_addr_lo: UCD base address low DW-4 - * @command_desc_base_addr_hi: UCD base address high DW-5 + * @command_desc_base_addr: UCD base address DW 4-5 * @response_upiu_length: response UPIU length DW-6 * @response_upiu_offset: response UPIU offset DW-6 * @prd_table_length: Physical region descriptor length DW-7 @@ -516,8 +532,7 @@ struct utp_transfer_req_desc { struct request_desc_header header; /* DW 4-5*/ - __le32 command_desc_base_addr_lo; - __le32 command_desc_base_addr_hi; + __le64 command_desc_base_addr; /* DW 6 */ __le16 response_upiu_length; |