author     David S. Miller <davem@davemloft.net>    2015-08-01 08:52:20 +0200
committer  David S. Miller <davem@davemloft.net>    2015-08-01 08:52:20 +0200
commit     5510b3c2a173921374ec847848fb20b98e1c698a
tree       c9e185281ef17280ce0dc30be7923124874736b0
parent     Merge branch 'ipv6-auto-flow-labels'
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
arch/s390/net/bpf_jit_comp.c
drivers/net/ethernet/ti/netcp_ethss.c
net/bridge/br_multicast.c
net/ipv4/ip_fragment.c
All four conflicts were cases of simple overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
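
"Simple overlapping changes" here means both branches modified the same region of a file for independent reasons, so each resolution amounts to keeping both edits rather than rewriting any logic. A minimal, hypothetical C sketch of what such a conflict looks like before resolution (the function names and branch label are illustrative only and are not taken from the four conflicted files):

    <<<<<<< HEAD
            err = setup_offload_features(dev);   /* hypothetical change from the 'net-next' side */
    =======
            err = check_skb_headroom(skb);       /* hypothetical change from the 'net' side */
    >>>>>>> net

In a case like this the resolved file simply keeps both lines, one after the other, in the order each side intended.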
417 files changed, 5237 insertions, 2445 deletions
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt index c03eec116872..3443e0f838df 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt +++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt @@ -35,3 +35,6 @@ the PCIe specification. NOTE: this only applies to the SMMU itself, not masters connected upstream of the SMMU. + +- hisilicon,broken-prefetch-cmd + : Avoid sending CMD_PREFETCH_* commands to the SMMU. diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 5d0376b8f202..211e7785f4d2 100644 --- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -17,7 +17,6 @@ Required properties: "fsl,imx6sx-usdhc" Optional properties: -- fsl,cd-controller : Indicate to use controller internal card detection - fsl,wp-controller : Indicate to use controller internal write protection - fsl,delay-line : Specify the number of delay cells for override mode. This is used to set the clock delay for DLL(Delay Line) on override mode @@ -35,7 +34,6 @@ esdhc@70004000 { compatible = "fsl,imx51-esdhc"; reg = <0x70004000 0x4000>; interrupts = <1>; - fsl,cd-controller; fsl,wp-controller; }; diff --git a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt index 829bd26d17f8..519e97c8f1b8 100644 --- a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt +++ b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt @@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC Required properties: - compatible : "mediatek,mt8173-max98090" - mediatek,audio-codec: the phandle of the MAX98090 audio codec +- mediatek,platform: the phandle of MT8173 ASoC platform Example: sound { compatible = "mediatek,mt8173-max98090"; mediatek,audio-codec = <&max98090>; + mediatek,platform = <&afe>; }; diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt index 61e98c976bd4..f205ce9e31dd 100644 --- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt +++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt @@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS Required properties: - compatible : "mediatek,mt8173-rt5650-rt5676" - mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs +- mediatek,platform: the phandle of MT8173 ASoC platform Example: sound { compatible = "mediatek,mt8173-rt5650-rt5676"; mediatek,audio-codec = <&rt5650 &rt5676>; + mediatek,platform = <&afe>; }; diff --git a/Documentation/devicetree/bindings/spi/spi-ath79.txt b/Documentation/devicetree/bindings/spi/spi-ath79.txt index f1ad9c367532..9c696fa66f81 100644 --- a/Documentation/devicetree/bindings/spi/spi-ath79.txt +++ b/Documentation/devicetree/bindings/spi/spi-ath79.txt @@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller Required properties: - compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback. - reg: Base address and size of the controllers memory area -- clocks: phandle to the AHB clock. +- clocks: phandle of the AHB clock. - clock-names: has to be "ahb". - #address-cells: <1>, as required by generic SPI binding. - #size-cells: <0>, also as required by generic SPI binding. @@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding. 
Example: - spi@1F000000 { + spi@1f000000 { compatible = "qca,ar9132-spi", "qca,ar7100-spi"; - reg = <0x1F000000 0x10>; + reg = <0x1f000000 0x10>; clocks = <&pll 2>; clock-names = "ahb"; diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904 index 014f112e2a14..57fffe33ebfc 100644 --- a/Documentation/hwmon/nct7904 +++ b/Documentation/hwmon/nct7904 @@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree, temp[2-9]_input CPU temperatures (1/1000 degree, 0.125 degree resolution) -fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode +pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode Setting SmartFan mode is supported only if it has been previously configured by BIOS (or configuration EEPROM) -fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode +pwm[1-4] R/O in SmartFan mode, R/W in manual control mode The driver checks sensor control registers and does not export the sensors that are not enabled. Anyway, a sensor that is enabled may actually be not diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py index 949de191fcdc..cda56df9b8a7 100755 --- a/Documentation/target/tcm_mod_builder.py +++ b/Documentation/target/tcm_mod_builder.py @@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += "#include <linux/string.h>\n" buf += "#include <linux/configfs.h>\n" buf += "#include <linux/ctype.h>\n" - buf += "#include <asm/unaligned.h>\n\n" + buf += "#include <asm/unaligned.h>\n" + buf += "#include <scsi/scsi_proto.h>\n\n" buf += "#include <target/target_core_base.h>\n" buf += "#include <target/target_core_fabric.h>\n" buf += "#include <target/target_core_fabric_configfs.h>\n" @@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += " }\n" buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" - buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n" - buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n" + + if proto_ident == "FC": + buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n" + elif proto_ident == "SAS": + buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n" + elif proto_ident == "iSCSI": + buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n" + buf += " if (ret < 0) {\n" buf += " kfree(tpg);\n" buf += " return NULL;\n" @@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" buf += " .module = THIS_MODULE,\n" - buf += " .name = " + fabric_mod_name + ",\n" + buf += " .name = \"" + fabric_mod_name + "\",\n" buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" @@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" buf += "\n" - buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" + buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n" buf += "};\n\n" buf += "static int __init " + fabric_mod_name + "_init(void)\n" buf += "{\n" - buf += " return target_register_template(" + fabric_mod_name + "_ops);\n" + buf += " return 
target_register_template(&" + fabric_mod_name + "_ops);\n" buf += "};\n\n" buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" buf += "{\n" - buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n" + buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n" buf += "};\n\n" buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" diff --git a/MAINTAINERS b/MAINTAINERS index 5f5c897820f6..98ede02a96f2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5899,7 +5899,6 @@ S: Supported F: Documentation/s390/kvm.txt F: arch/s390/include/asm/kvm* F: arch/s390/kvm/ -F: drivers/s390/kvm/ KERNEL VIRTUAL MACHINE (KVM) FOR ARM M: Christoffer Dall <christoffer.dall@linaro.org> @@ -6848,6 +6847,12 @@ T: git git://linuxtv.org/anttip/media_tree.git S: Maintained F: drivers/media/usb/msi2500/ +MSYSTEMS DISKONCHIP G3 MTD DRIVER +M: Robert Jarzmik <robert.jarzmik@free.fr> +L: linux-mtd@lists.infradead.org +S: Maintained +F: drivers/mtd/devices/docg3* + MT9M032 APTINA SENSOR DRIVER M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> L: linux-media@vger.kernel.org @@ -10912,6 +10917,15 @@ F: drivers/block/virtio_blk.c F: include/linux/virtio_*.h F: include/uapi/linux/virtio_*.h +VIRTIO DRIVERS FOR S390 +M: Christian Borntraeger <borntraeger@de.ibm.com> +M: Cornelia Huck <cornelia.huck@de.ibm.com> +L: linux-s390@vger.kernel.org +L: virtualization@lists.linux-foundation.org +L: kvm@vger.kernel.org +S: Supported +F: drivers/s390/virtio/ + VIRTIO GPU DRIVER M: David Airlie <airlied@linux.ie> M: Gerd Hoffmann <kraxel@redhat.com> @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 2 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts index dd45e6971bc3..9351296356dc 100644 --- a/arch/arm/boot/dts/imx25-pdk.dts +++ b/arch/arm/boot/dts/imx25-pdk.dts @@ -10,6 +10,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include <dt-bindings/input/input.h> #include "imx25.dtsi" @@ -114,8 +115,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio2 1 0>; - wp-gpios = <&gpio2 0 0>; + cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts index 93d3ea12328c..0f3fe29b816e 100644 --- a/arch/arm/boot/dts/imx51-apf51dev.dts +++ b/arch/arm/boot/dts/imx51-apf51dev.dts @@ -98,7 +98,7 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>; bus-width = <4>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts index e9337ad52f59..3bc18835fb4b 100644 --- a/arch/arm/boot/dts/imx53-ard.dts +++ b/arch/arm/boot/dts/imx53-ard.dts @@ -103,8 +103,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio1 1 0>; - wp-gpios = <&gpio1 9 0>; + cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts index d0e0f57eb432..53f40885c530 100644 --- a/arch/arm/boot/dts/imx53-m53evk.dts +++ b/arch/arm/boot/dts/imx53-m53evk.dts @@ -124,8 +124,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio1 1 0>; - wp-gpios = <&gpio1 9 0>; + cd-gpios = <&gpio1 1 
GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi index ab4ba39f2ed9..b0d5542ac829 100644 --- a/arch/arm/boot/dts/imx53-qsb-common.dtsi +++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi @@ -147,8 +147,8 @@ &esdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc3>; - cd-gpios = <&gpio3 11 0>; - wp-gpios = <&gpio3 12 0>; + cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>; bus-width = <8>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts index 1d325576bcc0..fc89ce1e5763 100644 --- a/arch/arm/boot/dts/imx53-smd.dts +++ b/arch/arm/boot/dts/imx53-smd.dts @@ -41,8 +41,8 @@ &esdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; - cd-gpios = <&gpio3 13 0>; - wp-gpios = <&gpio4 11 0>; + cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi index 4f1f0e2868bf..e03373a58760 100644 --- a/arch/arm/boot/dts/imx53-tqma53.dtsi +++ b/arch/arm/boot/dts/imx53-tqma53.dtsi @@ -41,8 +41,8 @@ pinctrl-0 = <&pinctrl_esdhc2>, <&pinctrl_esdhc2_cdwp>; vmmc-supply = <®_3p3v>; - wp-gpios = <&gpio1 2 0>; - cd-gpios = <&gpio1 4 0>; + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi index 704bd72cbfec..d3e50b22064f 100644 --- a/arch/arm/boot/dts/imx53-tx53.dtsi +++ b/arch/arm/boot/dts/imx53-tx53.dtsi @@ -183,7 +183,7 @@ }; &esdhc1 { - cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>; fsl,wp-controller; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc1>; @@ -191,7 +191,7 @@ }; &esdhc2 { - cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>; fsl,wp-controller; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc2>; diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts index c17d3ad6dba5..fc51b87ad208 100644 --- a/arch/arm/boot/dts/imx53-voipac-bsb.dts +++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts @@ -119,8 +119,8 @@ &esdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_esdhc2>; - cd-gpios = <&gpio3 25 0>; - wp-gpios = <&gpio2 19 0>; + cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts index 43cb3fd76be7..5111f5170d53 100644 --- a/arch/arm/boot/dts/imx6dl-riotboard.dts +++ b/arch/arm/boot/dts/imx6dl-riotboard.dts @@ -305,8 +305,8 @@ &usdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio1 4 0>; - wp-gpios = <&gpio1 2 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -314,8 +314,8 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 0>; - wp-gpios = <&gpio7 1 0>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts index 78df05e9d1ce..d6515f7a56c4 100644 --- a/arch/arm/boot/dts/imx6q-arm2.dts +++ b/arch/arm/boot/dts/imx6q-arm2.dts @@ -11,6 +11,7 @@ */ 
/dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include "imx6q.dtsi" / { @@ -196,8 +197,8 @@ }; &usdhc3 { - cd-gpios = <&gpio6 11 0>; - wp-gpios = <&gpio6 14 0>; + cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3 diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts index 703539cf36d3..00bd63e63d0c 100644 --- a/arch/arm/boot/dts/imx6q-gk802.dts +++ b/arch/arm/boot/dts/imx6q-gk802.dts @@ -7,6 +7,7 @@ */ /dts-v1/; +#include <dt-bindings/gpio/gpio.h> #include "imx6q.dtsi" / { @@ -161,7 +162,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <4>; - cd-gpios = <&gpio6 11 0>; + cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts index a43abfa21e33..5645d52850a7 100644 --- a/arch/arm/boot/dts/imx6q-tbs2910.dts +++ b/arch/arm/boot/dts/imx6q-tbs2910.dts @@ -251,7 +251,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; - cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -260,7 +260,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <4>; - cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi index e6d9195a1da7..f4d6ae564ead 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi @@ -173,7 +173,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>; status = "okay"; }; @@ -181,7 +181,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi index 1d85de2befb3..a47a0399a172 100644 --- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi +++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi @@ -392,7 +392,7 @@ &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; - cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; no-1-8-v; status = "okay"; }; @@ -400,7 +400,7 @@ &usdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; no-1-8-v; status = "okay"; diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi index 59e5d15e3ec4..ff41f83551de 100644 --- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi @@ -258,6 +258,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio1 4 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi index 2c253d6d20bd..45e7c39e80d5 100644 --- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi +++ 
b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi @@ -1,3 +1,5 @@ +#include <dt-bindings/gpio/gpio.h> + / { regulators { compatible = "simple-bus"; @@ -181,7 +183,7 @@ &usdhc2 { /* module slot */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio2 2 0>; + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi index b5756c21ea1d..4493f6e99330 100644 --- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi @@ -318,7 +318,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi index 86f03c1b147c..a857d1294609 100644 --- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi @@ -324,7 +324,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi index 4a8d97f47759..1afe3385e2d2 100644 --- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi +++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi @@ -417,7 +417,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi index 62a82f3eba88..6dd0b764e036 100644 --- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi +++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi @@ -299,6 +299,6 @@ &pinctrl_hummingboard_usdhc2 >; vmmc-supply = <®_3p3v>; - cd-gpios = <&gpio1 4 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi index 3af16dfe417b..d7fe6672d00c 100644 --- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi +++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi @@ -453,7 +453,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 0>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -461,7 +461,7 @@ &usdhc4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; - cd-gpios = <&gpio2 6 0>; + cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi index 1ce6133b67f5..9e6ecd99b472 100644 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi @@ -409,8 +409,8 @@ &usdhc2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; - cd-gpios = <&gpio1 4 0>; - wp-gpios = <&gpio1 2 0>; + cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; status = "disabled"; }; @@ -418,7 +418,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3 &pinctrl_usdhc3_cdwp>; - cd-gpios = <&gpio1 27 0>; - wp-gpios = <&gpio1 29 0>; + cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi index 
488a640796ac..3373fd958e95 100644 --- a/arch/arm/boot/dts/imx6qdl-rex.dtsi +++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi @@ -342,7 +342,7 @@ pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -351,6 +351,6 @@ pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <4>; cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; - wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi index 3b24b12651b2..e329ca5c3322 100644 --- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi @@ -467,8 +467,8 @@ pinctrl-0 = <&pinctrl_usdhc3>; pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = <&pinctrl_usdhc3_200mhz>; - cd-gpios = <&gpio6 15 0>; - wp-gpios = <&gpio1 13 0>; + cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi index e00c44f6a0df..782379320517 100644 --- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi @@ -448,8 +448,8 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio7 0 0>; - wp-gpios = <&gpio7 1 0>; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>; vmmc-supply = <®_3p3v>; status = "okay"; }; @@ -457,7 +457,7 @@ &usdhc4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; - cd-gpios = <&gpio2 6 0>; + cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; vmmc-supply = <®_3p3v>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi index a626e6dd8022..944eb81cb2b8 100644 --- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi +++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi @@ -562,8 +562,8 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <8>; - cd-gpios = <&gpio2 2 0>; - wp-gpios = <&gpio2 3 0>; + cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -571,8 +571,8 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; bus-width = <8>; - cd-gpios = <&gpio2 0 0>; - wp-gpios = <&gpio2 1 0>; + cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi index f02b80b41d4f..da08de324e9e 100644 --- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi +++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi @@ -680,7 +680,7 @@ pinctrl-0 = <&pinctrl_usdhc1>; bus-width = <4>; no-1-8-v; - cd-gpios = <&gpio7 2 0>; + cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>; fsl,wp-controller; status = "okay"; }; @@ -690,7 +690,7 @@ pinctrl-0 = <&pinctrl_usdhc2>; bus-width = <4>; no-1-8-v; - cd-gpios = <&gpio7 3 0>; + cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>; fsl,wp-controller; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi index 5fb091675582..9e096d811bed 100644 --- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi +++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi @@ -9,6 +9,8 @@ * */ +#include <dt-bindings/gpio/gpio.h> + / { regulators { compatible = "simple-bus"; @@ -250,13 +252,13 @@ &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; - cd-gpios = <&gpio1 2 0>; + cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>; 
status = "okay"; }; &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - cd-gpios = <&gpio3 9 0>; + cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts index 945887d3fdb3..b84dff2e94ea 100644 --- a/arch/arm/boot/dts/imx6sl-evk.dts +++ b/arch/arm/boot/dts/imx6sl-evk.dts @@ -617,8 +617,8 @@ pinctrl-1 = <&pinctrl_usdhc1_100mhz>; pinctrl-2 = <&pinctrl_usdhc1_200mhz>; bus-width = <8>; - cd-gpios = <&gpio4 7 0>; - wp-gpios = <&gpio4 6 0>; + cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -627,8 +627,8 @@ pinctrl-0 = <&pinctrl_usdhc2>; pinctrl-1 = <&pinctrl_usdhc2_100mhz>; pinctrl-2 = <&pinctrl_usdhc2_200mhz>; - cd-gpios = <&gpio5 0 0>; - wp-gpios = <&gpio4 29 0>; + cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>; status = "okay"; }; @@ -637,6 +637,6 @@ pinctrl-0 = <&pinctrl_usdhc3>; pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = <&pinctrl_usdhc3_200mhz>; - cd-gpios = <&gpio3 22 0>; + cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index e3c0b63c2205..115f3fd78971 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts @@ -49,7 +49,7 @@ pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = <&pinctrl_usdhc3_200mhz>; bus-width = <8>; - cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; keep-power-in-suspend; enable-sdio-wakeup; @@ -61,7 +61,7 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; bus-width = <8>; - cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>; no-1-8-v; keep-power-in-suspend; enable-sdio-wakup; diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index cef04cef3a80..ac88c3467078 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi @@ -293,7 +293,7 @@ pinctrl-1 = <&pinctrl_usdhc3_100mhz>; pinctrl-2 = <&pinctrl_usdhc3_200mhz>; bus-width = <8>; - cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; keep-power-in-suspend; enable-sdio-wakeup; @@ -304,7 +304,7 @@ &usdhc4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc4>; - cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>; + cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>; wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts index 4d1a4b977d84..fdd1d7c9a5cc 100644 --- a/arch/arm/boot/dts/imx7d-sdb.dts +++ b/arch/arm/boot/dts/imx7d-sdb.dts @@ -234,8 +234,8 @@ &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; - cd-gpios = <&gpio5 0 0>; - wp-gpios = <&gpio5 1 0>; + cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>; + wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>; enable-sdio-wakeup; keep-power-in-suspend; status = "okay"; diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 9d4aa18f2a82..e8ca6eaedd02 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -122,12 +122,12 @@ static int __init uefi_init(void) /* Show what we know for posterity */ c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), - sizeof(vendor)); + sizeof(vendor) * sizeof(efi_char16_t)); if (c16) { for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) vendor[i] = c16[i]; vendor[i] = 
'\0'; - early_memunmap(c16, sizeof(vendor)); + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); } pr_info("EFI v%u.%.02u by %s\n", diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c index 23b1a97fae7a..52c179bec0cc 100644 --- a/arch/avr32/mach-at32ap/clock.c +++ b/arch/avr32/mach-at32ap/clock.c @@ -80,6 +80,9 @@ int clk_enable(struct clk *clk) { unsigned long flags; + if (!clk) + return 0; + spin_lock_irqsave(&clk_lock, flags); __clk_enable(clk); spin_unlock_irqrestore(&clk_lock, flags); @@ -106,6 +109,9 @@ void clk_disable(struct clk *clk) { unsigned long flags; + if (IS_ERR_OR_NULL(clk)) + return; + spin_lock_irqsave(&clk_lock, flags); __clk_disable(clk); spin_unlock_irqrestore(&clk_lock, flags); @@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk) unsigned long flags; unsigned long rate; + if (!clk) + return 0; + spin_lock_irqsave(&clk_lock, flags); rate = clk->get_rate(clk); spin_unlock_irqrestore(&clk_lock, flags); @@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate) { unsigned long flags, actual_rate; + if (!clk) + return 0; + if (!clk->set_rate) return -ENOSYS; @@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate) unsigned long flags; long ret; + if (!clk) + return 0; + if (!clk->set_rate) return -ENOSYS; @@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent) unsigned long flags; int ret; + if (!clk) + return 0; + if (!clk->set_parent) return -ENOSYS; @@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent); struct clk *clk_get_parent(struct clk *clk) { - return clk->parent; + return !clk ? NULL : clk->parent; } EXPORT_SYMBOL(clk_get_parent); diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h index 0c3f25ee3381..f8de767ce2bc 100644 --- a/arch/m32r/include/asm/io.h +++ b/arch/m32r/include/asm/io.h @@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr) #define iowrite16 writew #define iowrite32 writel +#define ioread16be(addr) be16_to_cpu(readw(addr)) +#define ioread32be(addr) be32_to_cpu(readl(addr)) +#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr)) +#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr)) + #define mmiowb() #define flush_write_buffers() do { } while (0) /* M32R_FIXME */ diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index bff5e3b6d822..8ba32436effe 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu) union cache_topology ct; enum cache_type ctype; + if (!test_facility(34)) + return -EOPNOTSUPP; if (!this_cpu_ci) return -EINVAL; ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 99c9ff87e018..6b755d125783 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void) void __init free_initrd_mem(unsigned long begin, unsigned long end) { - free_bootmem(__pa(begin), end - begin); + free_bootmem_late(__pa(begin), end - begin); } static int __init setup_initrd(char *str) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index bb187a6a877c..5a1844765a7a 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -205,7 +205,6 @@ sysexit_from_sys_call: movl RDX(%rsp), %edx /* arg3 */ movl RSI(%rsp), %ecx /* arg4 */ movl RDI(%rsp), %r8d /* arg5 */ - movl %ebp, %r9d /* arg6 */ .endm .macro auditsys_exit exit @@ -236,6 +235,7 @@ 
sysexit_from_sys_call: sysenter_auditsys: auditsys_entry_common + movl %ebp, %r9d /* reload 6th syscall arg */ jmp sysenter_dispatch sysexit_audit: @@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat) * 32-bit zero extended: */ ASM_STAC -1: movl (%r8), %ebp +1: movl (%r8), %r9d _ASM_EXTABLE(1b, ia32_badarg) ASM_CLAC orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) @@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat) cstar_do_call: /* 32-bit syscall -> 64-bit C ABI argument conversion */ movl %edi, %r8d /* arg5 */ - movl %ebp, %r9d /* arg6 */ + /* r9 already loaded */ /* arg6 */ xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ movl %ebx, %edi /* arg1 */ movl %edx, %edx /* arg3 (zero extension) */ @@ -358,7 +358,6 @@ cstar_dispatch: call *ia32_sys_call_table(, %rax, 8) movq %rax, RAX(%rsp) 1: - movl RCX(%rsp), %ebp DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) @@ -392,7 +391,9 @@ sysretl_from_sys_call: #ifdef CONFIG_AUDITSYSCALL cstar_auditsys: + movl %r9d, R9(%rsp) /* register to be clobbered by call */ auditsys_entry_common + movl R9(%rsp), %r9d /* reload 6th syscall arg */ jmp cstar_dispatch sysretl_audit: @@ -404,14 +405,16 @@ cstar_tracesys: testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) jz cstar_auditsys #endif + xchgl %r9d, %ebp SAVE_EXTRA_REGS xorl %eax, %eax /* Do not leak kernel information */ movq %rax, R11(%rsp) movq %rax, R10(%rsp) - movq %rax, R9(%rsp) + movq %r9, R9(%rsp) movq %rax, R8(%rsp) movq %rsp, %rdi /* &pt_regs -> arg1 */ call syscall_trace_enter + movl R9(%rsp), %r9d /* Reload arg registers from stack. (see sysenter_tracesys) */ movl RCX(%rsp), %ecx @@ -421,6 +424,7 @@ cstar_tracesys: movl %eax, %eax /* zero extension */ RESTORE_EXTRA_REGS + xchgl %ebp, %r9d jmp cstar_do_call END(entry_SYSCALL_compat) diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index a4ae82eb82aa..cd54147cb365 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h @@ -354,7 +354,7 @@ struct kvm_xcrs { struct kvm_sync_regs { }; -#define KVM_QUIRK_LINT0_REENABLED (1 << 0) -#define KVM_QUIRK_CD_NW_CLEARED (1 << 1) +#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) +#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) #endif /* _ASM_X86_KVM_H */ diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c index 188076161c1b..63eb68b73589 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c @@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event) return 0; /* + * Getting up-to-date values requires an SMP IPI which is not + * possible if we're being called in interrupt context. Return + * the cached values instead. + */ + if (unlikely(in_interrupt())) + goto out; + + /* * Notice that we don't perform the reading of an RMID * atomically, because we can't hold a spin lock across the * IPIs. 
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 0b39173dd971..1e173f6285c7 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s) setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + setup_clear_cpu_cap(X86_FEATURE_XSAVEC); setup_clear_cpu_cap(X86_FEATURE_XSAVES); setup_clear_cpu_cap(X86_FEATURE_AVX); setup_clear_cpu_cap(X86_FEATURE_AVX2); + setup_clear_cpu_cap(X86_FEATURE_AVX512F); + setup_clear_cpu_cap(X86_FEATURE_AVX512PF); + setup_clear_cpu_cap(X86_FEATURE_AVX512ER); + setup_clear_cpu_cap(X86_FEATURE_AVX512CD); + setup_clear_cpu_cap(X86_FEATURE_MPX); return 1; } diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 954e98a8c2e3..2a5ca97c263b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) for (i = 0; i < APIC_LVT_NUM; i++) apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); apic_update_lvtt(apic); - if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) apic_set_reg(apic, APIC_LVT0, SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index de1d2d8062e2..dc0a84a6f309 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c @@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; } +static u8 mtrr_disabled_type(void) +{ + /* + * Intel SDM 11.11.2.2: all MTRRs are disabled when + * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC + * memory type is applied to all of physical memory. + */ + return MTRR_TYPE_UNCACHABLE; +} + /* * Three terms are used in the following code: * - segment, it indicates the address segments covered by fixed MTRRs. @@ -434,6 +444,8 @@ struct mtrr_iter { /* output fields. */ int mem_type; + /* mtrr is completely disabled? */ + bool mtrr_disabled; /* [start, end) is not fully covered in MTRRs? */ bool partial_map; @@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter) static void mtrr_lookup_start(struct mtrr_iter *iter) { if (!mtrr_is_enabled(iter->mtrr_state)) { - iter->partial_map = true; + iter->mtrr_disabled = true; return; } @@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter, iter->mtrr_state = mtrr_state; iter->start = start; iter->end = end; + iter->mtrr_disabled = false; iter->partial_map = false; iter->fixed = false; iter->range = NULL; @@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) return MTRR_TYPE_WRBACK; } - /* It is not covered by MTRRs. */ - if (iter.partial_map) { - /* - * We just check one page, partially covered by MTRRs is - * impossible. - */ - WARN_ON(type != -1); - type = mtrr_default_type(mtrr_state); - } + if (iter.mtrr_disabled) + return mtrr_disabled_type(); + + /* + * We just check one page, partially covered by MTRRs is + * impossible. + */ + WARN_ON(iter.partial_map); + + /* not contained in any MTRRs. 
*/ + if (type == -1) + return mtrr_default_type(mtrr_state); + return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); @@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, return false; } + if (iter.mtrr_disabled) + return true; + if (!iter.partial_map) return true; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bbc678a66b18..8e0c0844c6b9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) * does not do it - this results in some delay at * reboot */ - if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) cr0 &= ~(X86_CR0_CD | X86_CR0_NW); svm->vmcb->save.cr0 = cr0; mark_dirty(svm->vmcb, VMCB_CR); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5b4e9384717a..83b7b5cd75d5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) if (kvm_read_cr0(vcpu) & X86_CR0_CD) { ipat = VMX_EPT_IPAT_BIT; - cache = MTRR_TYPE_UNCACHABLE; + if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) + cache = MTRR_TYPE_WRBACK; + else + cache = MTRR_TYPE_UNCACHABLE; goto exit; } diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index edc8cdcd786b..0ca2f3e4803c 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu, return kvm_register_write(vcpu, reg, val); } +static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) +{ + return !(kvm->arch.disabled_quirks & quirk); +} + void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); void kvm_set_pending_timer(struct kvm_vcpu *vcpu); diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index cc5ccc415cc0..b9c78f3bcd67 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, !PageReserved(pfn_to_page(start_pfn + i))) return 1; - WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); - return 0; } @@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, pgprot_t prot; int retval; void __iomem *ret_addr; - int ram_region; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; @@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, /* * Don't allow anybody to remap normal RAM that we're using.. 
*/ - /* First check if whole region can be identified as RAM or not */ - ram_region = region_is_ram(phys_addr, size); - if (ram_region > 0) { - WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", - (unsigned long int)phys_addr, - (unsigned long int)last_addr); + pfn = phys_addr >> PAGE_SHIFT; + last_pfn = last_addr >> PAGE_SHIFT; + if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, + __ioremap_check_ram) == 1) { + WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", + &phys_addr, &last_addr); return NULL; } - /* If could not be identified(-1), check page by page */ - if (ram_region < 0) { - pfn = phys_addr >> PAGE_SHIFT; - last_pfn = last_addr >> PAGE_SHIFT; - if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, - __ioremap_check_ram) == 1) - return NULL; - } /* * Mappings have to be page-aligned */ diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 9d518d693b4b..844b06d67df4 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_MPX) + return "[mpx]"; + return NULL; +} diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 7a657f58bbea..db1b0bc5017c 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -20,20 +20,6 @@ #define CREATE_TRACE_POINTS #include <asm/trace/mpx.h> -static const char *mpx_mapping_name(struct vm_area_struct *vma) -{ - return "[mpx]"; -} - -static struct vm_operations_struct mpx_vma_ops = { - .name = mpx_mapping_name, -}; - -static int is_mpx_vma(struct vm_area_struct *vma) -{ - return (vma->vm_ops == &mpx_vma_ops); -} - static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) { if (is_64bit_mm(mm)) @@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm) /* * This is really a simplified "vm_mmap". it only handles MPX * bounds tables (the bounds directory is user-allocated). - * - * Later on, we use the vma->vm_ops to uniquely identify these - * VMAs. */ static unsigned long mpx_mmap(unsigned long len) { @@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len) ret = -ENOMEM; goto out; } - vma->vm_ops = &mpx_vma_ops; if (vm_flags & VM_LOCKED) { up_write(&mm->mmap_sem); @@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm, * so stop immediately and return an error. This * probably results in a SIGSEGV. */ - if (!is_mpx_vma(vma)) + if (!(vma->vm_flags & VM_MPX)) return -EINVAL; len = min(vma->vm_end, end) - addr; @@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm, * lots of tables even though we have no actual table * entries in use. 
*/ - while (next && is_mpx_vma(next)) + while (next && (next->vm_flags & VM_MPX)) next = next->vm_next; - while (prev && is_mpx_vma(prev)) + while (prev && (prev->vm_flags & VM_MPX)) prev = prev->vm_prev; /* * We know 'start' and 'end' lie within an area controlled diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 3250f2371aea..90b924acd982 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -117,7 +117,7 @@ static void flush_tlb_func(void *info) } else { unsigned long addr; unsigned long nr_pages = - f->flush_end - f->flush_start / PAGE_SIZE; + (f->flush_end - f->flush_start) / PAGE_SIZE; addr = f->flush_start; while (addr < f->flush_end) { __flush_tlb_single(addr); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index c08000b850ef..ec5214f39aa8 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ offsetof(struct bpf_array, map.max_entries)); EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ -#define OFFSET1 44 /* number of bytes to jump */ +#define OFFSET1 47 /* number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; @@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog) */ EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 33 +#define OFFSET2 36 EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ /* prog = array->prog[index]; */ - EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */ - EMIT1(offsetof(struct bpf_array, prog)); + EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */ + offsetof(struct bpf_array, prog)); EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ /* if (prog == NULL) diff --git a/block/bio.c b/block/bio.c index 2a00d349cd68..d6e5ba3399f0 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio); * Allocates and returns a new bio which represents @sectors from the start of * @bio, and updates @bio to represent the remaining sectors. * - * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's - * responsibility to ensure that @bio is not freed before the split. + * Unless this is a discard request the newly allocated bio will point + * to @bio's bi_io_vec; it is the caller's responsibility to ensure that + * @bio is not freed before the split. */ struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs) @@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors, BUG_ON(sectors <= 0); BUG_ON(sectors >= bio_sectors(bio)); - split = bio_clone_fast(bio, gfp, bs); + /* + * Discards need a mutable bio_vec to accommodate the payload + * required by the DSM TRIM and UNMAP commands. 
+ */ + if (bio->bi_rw & REQ_DISCARD) + split = bio_clone_bioset(bio, gfp, bs); + else + split = bio_clone_fast(bio, gfp, bs); + if (!split) return NULL; @@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) bio->bi_css = blkcg_css; return 0; } +EXPORT_SYMBOL_GPL(bio_associate_blkcg); /** * bio_associate_current - associate a bio with %current @@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio) bio->bi_css = task_get_css(current, blkio_cgrp_id); return 0; } +EXPORT_SYMBOL_GPL(bio_associate_current); /** * bio_disassociate_task - undo bio_associate_current() diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 9da02c021ebe..d6283b3f5db5 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, return -EINVAL; disk = get_gendisk(MKDEV(major, minor), &part); - if (!disk || part) + if (!disk) return -EINVAL; + if (part) { + put_disk(disk); + return -EINVAL; + } rcu_read_lock(); spin_lock_irq(disk->queue->queue_lock); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 717afcdb5f4a..88dbbb115285 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) dev_warn(&device->dev, "Failed to change power state to %s\n", acpi_power_state_string(state)); } else { - device->power.state = state; + device->power.state = target_state; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] transitioned to %s\n", device->pnp.bus_id, diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e83fc3d0da9c..db5d9f79a247 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) + dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, + dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) dev->max_sectors = ATA_MAX_SECTORS_LBA48; @@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, + /* + * Causes silent data corruption with higher max sects. 
+ * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com + */ + { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, + /* Devices we expect to fail diagnostics */ /* Devices where NCQ should be avoided */ @@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | ATA_HORKAGE_FIRMWARE_WARN }, - /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ + /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ @@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, + /* devices that don't properly handle TRIM commands */ + { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, + /* * As defined, the DRAT (Deterministic Read After Trim) and RZAT * (Return Zero After Trim) flags in the ATA Command Set are @@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) else /* In the ancient relic department - skip all of this */ return 0; - err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); + /* On some disks, this command causes spin-up, so we need longer timeout */ + err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 7ccc084bf1df..85aa76116a30 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap) ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA; } + } else if (vendor == 0x11ab && devid == 0x4140) { + /* Marvell 4140 quirks */ + ata_for_each_link(link, ap, EDGE) { + /* port 4 is for SEMB device and it doesn't like SRST */ + if (link->pmp == 4) + link->flags |= ATA_LFLAG_DISABLED; + } } } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3131adcc1f87..641a61a59e89 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[14] = (lowest_aligned >> 8) & 0x3f; rbuf[15] = lowest_aligned; - if (ata_id_has_trim(args->id)) { + if (ata_id_has_trim(args->id) && + !(dev->horkage & ATA_HORKAGE_NOTRIM)) { rbuf[14] |= 0x80; /* LBPME */ if (ata_id_has_zero_after_trim(args->id) && diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index d6c37bcd416d..e2d94972962d 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev, if (!ata_id_has_trim(ata_dev->id)) mode = "unsupported"; + else if (ata_dev->horkage & 
ATA_HORKAGE_NOTRIM) + mode = "forced_unsupported"; else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) mode = "forced_unqueued"; else if (ata_fpdma_dsm_supported(ata_dev)) diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 69de41a87b74..3177b245d2bd 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) while ((entry = llist_del_all(&cq->list)) != NULL) { entry = llist_reverse_order(entry); do { + struct request_queue *q = NULL; + cmd = container_of(entry, struct nullb_cmd, ll_list); entry = entry->next; + if (cmd->rq) + q = cmd->rq->q; end_cmd(cmd); - if (cmd->rq) { - struct request_queue *q = cmd->rq->q; - - if (!q->mq_ops && blk_queue_stopped(q)) { - spin_lock(q->queue_lock); - if (blk_queue_stopped(q)) - blk_start_queue(q); - spin_unlock(q->queue_lock); - } + if (q && !q->mq_ops && blk_queue_stopped(q)) { + spin_lock(q->queue_lock); + if (blk_queue_stopped(q)) + blk_start_queue(q); + spin_unlock(q->queue_lock); } } while (entry); } diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 26063afb3eba..7a3c30c4336f 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy) int ret = 0; /* Some related CPUs might not be present (physically hotplugged) */ - for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { + for_each_cpu(j, policy->real_cpus) { if (j == policy->kobj_cpu) continue; @@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy) unsigned int j; /* Some related CPUs might not be present (physically hotplugged) */ - for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { + for_each_cpu(j, policy->real_cpus) { if (j == policy->kobj_cpu) continue; @@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) goto err_free_cpumask; + if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL)) + goto err_free_rcpumask; + ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, "cpufreq"); if (ret) { pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); - goto err_free_rcpumask; + goto err_free_real_cpus; } INIT_LIST_HEAD(&policy->policy_list); @@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev) return policy; +err_free_real_cpus: + free_cpumask_var(policy->real_cpus); err_free_rcpumask: free_cpumask_var(policy->related_cpus); err_free_cpumask: @@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify) write_unlock_irqrestore(&cpufreq_driver_lock, flags); cpufreq_policy_put_kobj(policy, notify); + free_cpumask_var(policy->real_cpus); free_cpumask_var(policy->related_cpus); free_cpumask_var(policy->cpus); kfree(policy); @@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) pr_debug("adding CPU %u\n", cpu); - /* - * Only possible if 'cpu' wasn't physically present earlier and we are - * here from subsys_interface add callback. A hotplug notifier will - * follow and we will handle it like logical CPU hotplug then. For now, - * just create the sysfs link. - */ - if (cpu_is_offline(cpu)) - return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); + if (cpu_is_offline(cpu)) { + /* + * Only possible if we are here from the subsys_interface add + * callback. 
A hotplug notifier will follow and we will handle + * it as CPU online then. For now, just create the sysfs link, + * unless there is no policy or the link is already present. + */ + policy = per_cpu(cpufreq_cpu_data, cpu); + return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus) + ? add_cpu_dev_symlink(policy, cpu) : 0; + } if (!down_read_trylock(&cpufreq_rwsem)) return 0; @@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) /* related cpus should atleast have policy->cpus */ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); + /* Remember which CPUs have been present at the policy creation time. */ + if (!recover_policy) + cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask); + /* * affected cpus must always be the one, which are online. We aren't * managing offline cpus here. @@ -1420,8 +1433,7 @@ nomem_out: return ret; } -static int __cpufreq_remove_dev_prepare(struct device *dev, - struct subsys_interface *sif) +static int __cpufreq_remove_dev_prepare(struct device *dev) { unsigned int cpu = dev->id; int ret = 0; @@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); - if (ret) { + if (ret) pr_err("%s: Failed to stop governor\n", __func__); - return ret; - } } down_write(&policy->rwsem); @@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev, return ret; } -static int __cpufreq_remove_dev_finish(struct device *dev, - struct subsys_interface *sif) +static int __cpufreq_remove_dev_finish(struct device *dev) { unsigned int cpu = dev->id; int ret; @@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev, /* If cpu is last user of policy, free policy */ if (has_target()) { ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); - if (ret) { + if (ret) pr_err("%s: Failed to exit governor\n", __func__); - return ret; - } } /* @@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev, if (cpufreq_driver->exit) cpufreq_driver->exit(policy); - /* Free the policy only if the driver is getting removed. */ - if (sif) - cpufreq_policy_free(policy, true); - return 0; } @@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev, static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) { unsigned int cpu = dev->id; - int ret; - - /* - * Only possible if 'cpu' is getting physically removed now. A hotplug - * notifier should have already been called and we just need to remove - * link or free policy here. - */ - if (cpu_is_offline(cpu)) { - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); - struct cpumask mask; + struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); - if (!policy) - return 0; + if (!policy) + return 0; - cpumask_copy(&mask, policy->related_cpus); - cpumask_clear_cpu(cpu, &mask); + if (cpu_online(cpu)) { + __cpufreq_remove_dev_prepare(dev); + __cpufreq_remove_dev_finish(dev); + } - /* - * Free policy only if all policy->related_cpus are removed - * physically. 
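As an illustration only, not part of the patch and not cpufreq code, the bookkeeping that policy->real_cpus introduces can be sketched in plain C: each physically present CPU sets its bit when its sysfs link is created, a departing CPU clears its bit, and the policy object is freed only once the mask is empty. All names below are invented for the sketch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model: one bit per CPU that is registered with the policy. */
struct toy_policy {
        uint64_t real_cpus;     /* stands in for policy->real_cpus */
};

static void toy_add_cpu(struct toy_policy *p, unsigned int cpu)
{
        uint64_t bit = 1ULL << cpu;

        /* Create the per-CPU link only once: test-and-set semantics. */
        if (!(p->real_cpus & bit)) {
                p->real_cpus |= bit;
                printf("cpu%u: create symlink\n", cpu);
        }
}

static bool toy_remove_cpu(struct toy_policy *p, unsigned int cpu)
{
        p->real_cpus &= ~(1ULL << cpu);

        /* Free the policy only when no registered CPU is left. */
        if (p->real_cpus == 0) {
                printf("cpu%u was the last one, free the policy\n", cpu);
                return true;
        }
        printf("cpu%u: remove symlink, keep the policy\n", cpu);
        return false;
}

int main(void)
{
        struct toy_policy *p = calloc(1, sizeof(*p));

        toy_add_cpu(p, 0);
        toy_add_cpu(p, 1);
        toy_remove_cpu(p, 0);
        if (toy_remove_cpu(p, 1))
                free(p);
        return 0;
}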
- */ - if (cpumask_intersects(&mask, cpu_present_mask)) { - remove_cpu_dev_symlink(policy, cpu); - return 0; - } + cpumask_clear_cpu(cpu, policy->real_cpus); + if (cpumask_empty(policy->real_cpus)) { cpufreq_policy_free(policy, true); return 0; } - ret = __cpufreq_remove_dev_prepare(dev, sif); + if (cpu != policy->kobj_cpu) { + remove_cpu_dev_symlink(policy, cpu); + } else { + /* + * The CPU owning the policy object is going away. Move it to + * another suitable CPU. + */ + unsigned int new_cpu = cpumask_first(policy->real_cpus); + struct device *new_dev = get_cpu_device(new_cpu); + + dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu); - if (!ret) - ret = __cpufreq_remove_dev_finish(dev, sif); + sysfs_remove_link(&new_dev->kobj, "cpufreq"); + policy->kobj_cpu = new_cpu; + WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj)); + } - return ret; + return 0; } static void handle_update(struct work_struct *work) @@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, break; case CPU_DOWN_PREPARE: - __cpufreq_remove_dev_prepare(dev, NULL); + __cpufreq_remove_dev_prepare(dev); break; case CPU_POST_DEAD: - __cpufreq_remove_dev_finish(dev, NULL); + __cpufreq_remove_dev_finish(dev); break; case CPU_DOWN_FAILED: diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 15ada47bb720..fcb929ec5304 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = { .get_max = core_get_max_pstate, .get_min = core_get_min_pstate, .get_turbo = knl_get_turbo_pstate, + .get_scaling = core_get_scaling, .set = core_set_pstate, }, }; diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 4fd9961d552e..d42537425438 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p, return ret; } -static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) +static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, + int len) { struct cper_mem_err_compact cmem; + /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */ + if (len == sizeof(struct cper_sec_mem_err_old) && + (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) { + pr_err(FW_WARN "valid bits set for fields beyond structure\n"); + return; + } if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); if (mem->validation_bits & CPER_MEM_VALID_PA) @@ -405,8 +412,10 @@ static void cper_estatus_print_section( } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); printk("%s""section_type: memory error\n", newpfx); - if (gdata->error_data_length >= sizeof(*mem_err)) - cper_print_mem(newpfx, mem_err); + if (gdata->error_data_length >= + sizeof(struct cper_sec_mem_err_old)) + cper_print_mem(newpfx, mem_err, + gdata->error_data_length); else goto err_section_too_small; } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 01657830b470..31b00f91cfcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1614,6 +1614,9 @@ struct amdgpu_uvd { #define AMDGPU_MAX_VCE_HANDLES 16 #define AMDGPU_VCE_FIRMWARE_OFFSET 256 +#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) +#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) + struct 
amdgpu_vce { struct amdgpu_bo *vcpu_bo; uint64_t gpu_addr; @@ -1626,6 +1629,7 @@ struct amdgpu_vce { const struct firmware *fw; /* VCE firmware */ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; + unsigned harvest_config; }; /* @@ -1862,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); +struct amdgpu_ip_block_status { + bool valid; + bool sw; + bool hw; +}; + struct amdgpu_device { struct device *dev; struct drm_device *ddev; @@ -2004,7 +2014,7 @@ struct amdgpu_device { const struct amdgpu_ip_block_version *ip_blocks; int num_ip_blocks; - bool *ip_block_enabled; + struct amdgpu_ip_block_status *ip_block_status; struct mutex mn_lock; DECLARE_HASHTABLE(mn_hash, 7); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d79009b65867..99f158e1baff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev) return -EINVAL; } - adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); - if (adev->ip_block_enabled == NULL) + adev->ip_block_status = kcalloc(adev->num_ip_blocks, + sizeof(struct amdgpu_ip_block_status), GFP_KERNEL); + if (adev->ip_block_status == NULL) return -ENOMEM; if (adev->ip_blocks == NULL) { @@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev) for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_ERROR("disabled ip block: %d\n", i); - adev->ip_block_enabled[i] = false; + adev->ip_block_status[i].valid = false; } else { if (adev->ip_blocks[i].funcs->early_init) { r = adev->ip_blocks[i].funcs->early_init((void *)adev); if (r == -ENOENT) - adev->ip_block_enabled[i] = false; + adev->ip_block_status[i].valid = false; else if (r) return r; else - adev->ip_block_enabled[i] = true; + adev->ip_block_status[i].valid = true; } else { - adev->ip_block_enabled[i] = true; + adev->ip_block_status[i].valid = true; } } } @@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; r = adev->ip_blocks[i].funcs->sw_init((void *)adev); if (r) return r; + adev->ip_block_status[i].sw = true; /* need to do gmc hw init early so we can allocate gpu mem */ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { r = amdgpu_vram_scratch_init(adev); @@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev) r = amdgpu_wb_init(adev); if (r) return r; + adev->ip_block_status[i].hw = true; } } for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].sw) continue; /* gmc hw init is done early */ if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) @@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev) r = adev->ip_blocks[i].funcs->hw_init((void *)adev); if (r) return r; + adev->ip_block_status[i].hw = true; } return 0; @@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev) int i = 0, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; /* enable clockgating to save power */ r = 
adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, @@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev) int i, r; for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].hw) continue; if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { amdgpu_wb_fini(adev); @@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev) return r; r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); /* XXX handle errors */ + adev->ip_block_status[i].hw = false; } for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].sw) continue; r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); /* XXX handle errors */ - adev->ip_block_enabled[i] = false; + adev->ip_block_status[i].sw = false; + adev->ip_block_status[i].valid = false; } return 0; @@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev) int i, r; for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; /* ungate blocks so that suspend can properly shut them down */ r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, @@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_enabled[i]) + if (!adev->ip_block_status[i].valid) continue; r = adev->ip_blocks[i].funcs->resume(adev); if (r) @@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); - kfree(adev->ip_block_enabled); - adev->ip_block_enabled = NULL; + kfree(adev->ip_block_status); + adev->ip_block_status = NULL; adev->accel_working = false; /* free i2c buses */ amdgpu_i2c_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ae43b58c9733..4afc507820c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -449,7 +449,7 @@ out: * vital here, so they are not reported back to userspace. 
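The ip_block_status change above replaces a single enabled flag with per-stage flags so that teardown only undoes the stages that actually ran. A minimal standalone sketch of that pattern, with invented names and the separate sw/hw init passes collapsed into one loop for brevity:

#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 3

struct block_status {
        bool valid;     /* block is enabled at all */
        bool sw;        /* sw_init completed */
        bool hw;        /* hw_init completed */
};

static struct block_status status[NBLOCKS];

/* Pretend block 2 fails hw_init so teardown has something partial to undo. */
static int sw_init(int i)  { (void)i; return 0; }
static int hw_init(int i)  { return i == 2 ? -1 : 0; }
static void hw_fini(int i) { printf("hw_fini block %d\n", i); }
static void sw_fini(int i) { printf("sw_fini block %d\n", i); }

static int init_all(void)
{
        for (int i = 0; i < NBLOCKS; i++) {
                status[i].valid = true;
                if (sw_init(i))
                        return -1;
                status[i].sw = true;
                if (hw_init(i))
                        return -1;
                status[i].hw = true;
        }
        return 0;
}

static void fini_all(void)
{
        /* Walk backwards and only undo the stages that were reached. */
        for (int i = NBLOCKS - 1; i >= 0; i--) {
                if (status[i].hw)
                        hw_fini(i);
                if (status[i].sw)
                        sw_fini(i);
                status[i].hw = status[i].sw = status[i].valid = false;
        }
}

int main(void)
{
        if (init_all())
                fprintf(stderr, "init failed, rolling back\n");
        fini_all();
        return 0;
}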
*/ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va) + struct amdgpu_bo_va *bo_va, uint32_t operation) { struct ttm_validate_buffer tv, *entry; struct amdgpu_bo_list_entry *vm_bos; @@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, if (r) goto error_unlock; - r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); + + if (operation == AMDGPU_VA_OP_MAP) + r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); error_unlock: mutex_unlock(&bo_va->vm->mutex); @@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, } if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) - amdgpu_gem_va_update_vm(adev, bo_va); + amdgpu_gem_va_update_vm(adev, bo_va, args->operation); drm_gem_object_unreference_unlocked(gobj); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 52dff75aac6f..bc0fac618a3f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, if (vm) { /* do context switch */ amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); - } - if (vm && ring->funcs->emit_gds_switch) - amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, - ib->gds_base, ib->gds_size, - ib->gws_base, ib->gws_size, - ib->oa_base, ib->oa_size); + if (ring->funcs->emit_gds_switch) + amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, + ib->gds_base, ib->gds_size, + ib->gws_base, ib->gws_size, + ib->oa_base, ib->oa_size); - if (ring->funcs->emit_hdp_flush) - amdgpu_ring_emit_hdp_flush(ring); + if (ring->funcs->emit_hdp_flush) + amdgpu_ring_emit_hdp_flush(ring); + } old_ctx = ring->current_ctx; for (i = 0; i < num_ibs; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 5533434c7a8f..9736892bcdf9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file for (i = 0; i < adev->num_ip_blocks; i++) { if (adev->ip_blocks[i].type == type && - adev->ip_block_enabled[i]) { + adev->ip_block_status[i].valid) { ip.hw_ip_version_major = adev->ip_blocks[i].major; ip.hw_ip_version_minor = adev->ip_blocks[i].minor; ip.capabilities_flags = 0; @@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file for (i = 0; i < adev->num_ip_blocks; i++) if (adev->ip_blocks[i].type == type && - adev->ip_block_enabled[i] && + adev->ip_block_status[i].valid && count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) count++; @@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file return n ? -EFAULT : 0; } case AMDGPU_INFO_DEV_INFO: { - struct drm_amdgpu_info_device dev_info; + struct drm_amdgpu_info_device dev_info = {}; struct amdgpu_cu_info cu_info; dev_info.device_id = dev->pdev->device; @@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); dev_info.vram_type = adev->mc.vram_type; dev_info.vram_bit_width = adev->mc.vram_width; + dev_info.vce_harvest_config = adev->vce.harvest_config; return copy_to_user(out, &dev_info, min((size_t)size, sizeof(dev_info))) ? 
-EFAULT : 0; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 1a2d419cbf16..ace870afc7d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev) amdgpu_free_extended_power_table(adev); } +#define ixSMUSVI_NB_CURRENTVID 0xD8230044 +#define CURRENT_NB_VID_MASK 0xff000000 +#define CURRENT_NB_VID__SHIFT 24 +#define ixSMUSVI_GFX_CURRENTVID 0xD8230048 +#define CURRENT_GFX_VID_MASK 0xff000000 +#define CURRENT_GFX_VID__SHIFT 24 + static void cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, struct seq_file *m) { + struct cz_power_info *pi = cz_get_pi(adev); struct amdgpu_clock_voltage_dependency_table *table = &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 current_index = - (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - u32 sclk, tmp; - u16 vddc; - - if (current_index >= NUM_SCLK_LEVELS) { - seq_printf(m, "invalid dpm profile %d\n", current_index); + struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct amdgpu_vce_clock_voltage_dependency_table *vce_table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX), + TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); + u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); + u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), + TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); + u32 sclk, vclk, dclk, ecclk, tmp; + u16 vddnb, vddgfx; + + if (sclk_index >= NUM_SCLK_LEVELS) { + seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index); } else { - sclk = table->entries[current_index].clk; - tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; - vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); - seq_printf(m, "power level %d sclk: %u vddc: %u\n", - current_index, sclk, vddc); + sclk = table->entries[sclk_index].clk; + seq_printf(m, "%u sclk: %u\n", sclk_index, sclk); + } + + tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) & + CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; + vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); + tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) & + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; + vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); + seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx); + + seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); + if (!pi->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "invalid uvd dpm level %d\n", uvd_index); + } else { + vclk = uvd_table->entries[uvd_index].vclk; + dclk = uvd_table->entries[uvd_index].dclk; + seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk); + } + } + + seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? 
"dis" : "en"); + if (!pi->vce_power_gated) { + if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "invalid vce dpm level %d\n", vce_index); + } else { + ecclk = vce_table->entries[vce_index].ecclk; + seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 6e77964f1b64..e70a26f587a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v10_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v10_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7f7abb0e0be5..dcb402ee048a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v11_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v11_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 2c188fb9fd22..2db6ab0a543d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring, * sheduling on the ring. This function schedules the IB * on the gfx ring for execution by the GPU. 
*/ -static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, +static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { bool need_ctx_switch = ring->current_ctx != ib->ctx; @@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, u32 next_rptr = ring->wptr + 5; /* drop the CE preamble IB for the same context */ - if ((ring->type == AMDGPU_RING_TYPE_GFX) && - (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && - !need_ctx_switch) + if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch) return; - if (ring->type == AMDGPU_RING_TYPE_COMPUTE) - control |= INDIRECT_BUFFER_VALID; - - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) + if (need_ctx_switch) next_rptr += 2; next_rptr += 4; @@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { + if (need_ctx_switch) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } @@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, control); } +static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 header, control = 0; + u32 next_rptr = ring->wptr + 5; + + control |= INDIRECT_BUFFER_VALID; + next_rptr += 4; + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); + amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, next_rptr); + + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + + control |= ib->length_dw | + (ib->vm ? 
(ib->vm->ids[ring->idx].id << 24) : 0); + + amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); + amdgpu_ring_write(ring, control); +} + /** * gfx_v7_0_ring_test_ib - basic ring IB test * @@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .get_wptr = gfx_v7_0_ring_get_wptr_gfx, .set_wptr = gfx_v7_0_ring_set_wptr_gfx, .parse_cs = NULL, - .emit_ib = gfx_v7_0_ring_emit_ib, + .emit_ib = gfx_v7_0_ring_emit_ib_gfx, .emit_fence = gfx_v7_0_ring_emit_fence_gfx, .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, @@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { .get_wptr = gfx_v7_0_ring_get_wptr_compute, .set_wptr = gfx_v7_0_ring_set_wptr_compute, .parse_cs = NULL, - .emit_ib = gfx_v7_0_ring_emit_ib, + .emit_ib = gfx_v7_0_ring_emit_ib_compute, .emit_fence = gfx_v7_0_ring_emit_fence_compute, .emit_semaphore = gfx_v7_0_ring_emit_semaphore, .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 1c7c992dea37..9e1d4ddbf475 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x20); /* poll interval */ } -static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, +static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, struct amdgpu_ib *ib) { bool need_ctx_switch = ring->current_ctx != ib->ctx; @@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, u32 next_rptr = ring->wptr + 5; /* drop the CE preamble IB for the same context */ - if ((ring->type == AMDGPU_RING_TYPE_GFX) && - (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && - !need_ctx_switch) + if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch) return; - if (ring->type == AMDGPU_RING_TYPE_COMPUTE) - control |= INDIRECT_BUFFER_VALID; - - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) + if (need_ctx_switch) next_rptr += 2; next_rptr += 4; @@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ - if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { + if (need_ctx_switch) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, 0); } @@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, control); } +static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, + struct amdgpu_ib *ib) +{ + u32 header, control = 0; + u32 next_rptr = ring->wptr + 5; + + control |= INDIRECT_BUFFER_VALID; + + next_rptr += 4; + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); + amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); + amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); + amdgpu_ring_write(ring, next_rptr); + + header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + + control |= ib->length_dw | + (ib->vm ? 
(ib->vm->ids[ring->idx].id << 24) : 0); + + amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, +#ifdef __BIG_ENDIAN + (2 << 0) | +#endif + (ib->gpu_addr & 0xFFFFFFFC)); + amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); + amdgpu_ring_write(ring, control); +} + static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) { @@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .get_wptr = gfx_v8_0_ring_get_wptr_gfx, .set_wptr = gfx_v8_0_ring_set_wptr_gfx, .parse_cs = NULL, - .emit_ib = gfx_v8_0_ring_emit_ib, + .emit_ib = gfx_v8_0_ring_emit_ib_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx, .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, @@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .get_wptr = gfx_v8_0_ring_get_wptr_compute, .set_wptr = gfx_v8_0_ring_set_wptr_compute, .parse_cs = NULL, - .emit_ib = gfx_v8_0_ring_emit_ib, + .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, .emit_semaphore = gfx_v8_0_ring_emit_semaphore, .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index d62c4002e39c..d1064ca3670e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -35,6 +35,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" #include "gca/gfx_8_0_d.h" +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 @@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (idx = 0; idx < 2; ++idx) { + + if (adev->vce.harvest_config & (1 << idx)) + continue; + if(idx == 0) WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); @@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev) return 0; } +#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074 +#define VCE_HARVEST_FUSE_MACRO__SHIFT 27 +#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000 + +static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) +{ + u32 tmp; + unsigned ret; + + if (adev->flags & AMDGPU_IS_APU) + tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & + VCE_HARVEST_FUSE_MACRO__MASK) >> + VCE_HARVEST_FUSE_MACRO__SHIFT; + else + tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) & + CC_HARVEST_FUSES__VCE_DISABLE_MASK) >> + CC_HARVEST_FUSES__VCE_DISABLE__SHIFT; + + switch (tmp) { + case 1: + ret = AMDGPU_VCE_HARVEST_VCE0; + break; + case 2: + ret = AMDGPU_VCE_HARVEST_VCE1; + break; + case 3: + ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; + break; + default: + ret = 0; + } + + return ret; +} + static int vce_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); + + if ((adev->vce.harvest_config & + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) == + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) + return -ENOENT; + vce_v3_0_set_ring_funcs(adev); vce_v3_0_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index f69b92535505..5ae5c6923128 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -355,6 +355,7 @@ int 
atmel_hlcdc_crtc_create(struct drm_device *dev) planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); + drm_crtc_vblank_reset(&crtc->base); dc->crtc = &crtc->base; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 60b0c13d7ff5..6fad1f9648f3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) pm_runtime_enable(dev->dev); - ret = atmel_hlcdc_dc_modeset_init(dev); + ret = drm_vblank_init(dev, 1); if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); + dev_err(dev->dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } - drm_mode_config_reset(dev); - - ret = drm_vblank_init(dev, 1); + ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + dev_err(dev->dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } + drm_mode_config_reset(dev); + pm_runtime_get_sync(dev->dev); ret = drm_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 357bd04a173b..fed748311b92 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - connector->status = connector_status_unknown; - + list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->funcs->reset) connector->funcs->reset(connector); - } } EXPORT_SYMBOL(drm_mode_config_reset); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5f27290201e0..fd1de451c8c6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); #define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) #define I915_READ64_2x32(lower_reg, upper_reg) ({ \ - u32 upper = I915_READ(upper_reg); \ - u32 lower = I915_READ(lower_reg); \ - u32 tmp = I915_READ(upper_reg); \ - if (upper != tmp) { \ - upper = tmp; \ - lower = I915_READ(lower_reg); \ - WARN_ON(I915_READ(upper_reg) != upper); \ - } \ - (u64)upper << 32 | lower; }) + u32 upper, lower, tmp; \ + tmp = I915_READ(upper_reg); \ + do { \ + upper = tmp; \ + lower = I915_READ(lower_reg); \ + tmp = I915_READ(upper_reg); \ + } while (upper != tmp); \ + (u64)upper << 32 | lower; }) #define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) #define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 56b52a4767d4..31e8269e6e3d 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1923,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma, vma->vm->insert_entries(vma->vm, pages, vma->node.start, cache_level, pte_flags); + + /* Note the inconsistency here is due to absence of the + * aliasing ppgtt on gen4 and earlier. Though we always + * request PIN_USER for execbuffer (translated to LOCAL_BIND), + * without the appgtt, we cannot honour that request and so + * must substitute it with a global binding. 
Since we do this + * behind the upper layers back, we need to explicitly set + * the bound flag ourselves. + */ + vma->bound |= GLOBAL_BIND; + } if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 633bd1fcab69..d19c9db5e18c 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, } /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ - args->phys_swizzle_mode = args->swizzle_mode; + if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) + args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN; + else + args->phys_swizzle_mode = args->swizzle_mode; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) args->swizzle_mode = I915_BIT_6_SWIZZLE_9; if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index a6d8a3ee7750..260389acfb77 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_reg_read *reg = data; struct register_whitelist const *entry = whitelist; + unsigned size; + u64 offset; int i, ret = 0; for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (entry->offset == reg->offset && + if (entry->offset == (reg->offset & -entry->size) && (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) break; } @@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev, if (i == ARRAY_SIZE(whitelist)) return -EINVAL; + /* We use the low bits to encode extra flags as the register should + * be naturally aligned (and those that are not so aligned merely + * limit the available flags for that register). 
+ */ + offset = entry->offset; + size = entry->size; + size |= reg->offset ^ offset; + intel_runtime_pm_get(dev_priv); - switch (entry->size) { + switch (size) { + case 8 | 1: + reg->val = I915_READ64_2x32(offset, offset+4); + break; case 8: - reg->val = I915_READ64(reg->offset); + reg->val = I915_READ64(offset); break; case 4: - reg->val = I915_READ(reg->offset); + reg->val = I915_READ(offset); break; case 2: - reg->val = I915_READ16(reg->offset); + reg->val = I915_READ16(offset); break; case 1: - reg->val = I915_READ8(reg->offset); + reg->val = I915_READ8(offset); break; default: - MISSING_CASE(entry->size); ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 0d1dbb737933..247a424445f7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane, uint32_t op_mode = 0; uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; - enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb); + enum mdp4_frame_format frame_type; if (!(crtc && fb)) { DBG("%s: disabled!", mdp4_plane->name); return 0; } + frame_type = mdp4_get_frame_format(fb); + /* src values are in Q16 fixed point, convert to integer: */ src_x = src_x >> 16; src_y = src_y >> 16; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 206f758f7d64..e253db5de5aa 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) { + int i; struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); + int nplanes = mdp5_kms->dev->mode_config.num_total_plane; + + for (i = 0; i < nplanes; i++) { + struct drm_plane *plane = state->planes[i]; + struct drm_plane_state *plane_state = state->plane_states[i]; + + if (!plane) + continue; + + mdp5_plane_complete_commit(plane, plane_state); + } + mdp5_disable(mdp5_kms); } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index e0eb24587c84..e79ac09b7216 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane, struct drm_mode_object *obj); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); void mdp5_plane_complete_flip(struct drm_plane *plane); +void mdp5_plane_complete_commit(struct drm_plane *plane, + struct drm_plane_state *state); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); struct drm_plane *mdp5_plane_init(struct drm_device *dev, enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 57b8f56ae9d0..22275568ab8b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -31,8 +31,6 @@ struct mdp5_plane { uint32_t nformats; uint32_t formats[32]; - - bool enabled; }; #define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) @@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state) return state->fb && state->crtc; } -static int mdp5_plane_disable(struct drm_plane *plane) -{ - struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); - 
struct mdp5_kms *mdp5_kms = get_kms(plane); - enum mdp5_pipe pipe = mdp5_plane->pipe; - - DBG("%s: disable", mdp5_plane->name); - - if (mdp5_kms) { - /* Release the memory we requested earlier from the SMP: */ - mdp5_smp_release(mdp5_kms->smp, pipe); - } - - return 0; -} - static void mdp5_plane_destroy(struct drm_plane *plane) { struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); @@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, if (!plane_enabled(state)) { to_mdp5_plane_state(state)->pending = true; - mdp5_plane_disable(plane); } else if (to_mdp5_plane_state(state)->mode_changed) { int ret; to_mdp5_plane_state(state)->pending = true; @@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) return mdp5_plane->flush_mask; } +/* called after vsync in thread context */ +void mdp5_plane_complete_commit(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct mdp5_kms *mdp5_kms = get_kms(plane); + struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); + enum mdp5_pipe pipe = mdp5_plane->pipe; + + if (!plane_enabled(plane->state)) { + DBG("%s: free SMP", mdp5_plane->name); + mdp5_smp_release(mdp5_kms->smp, pipe); + } +} + /* initialize plane */ struct drm_plane *mdp5_plane_init(struct drm_device *dev, enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c index 16702aecf0df..64a27d86f2f5 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c @@ -34,22 +34,44 @@ * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). * * For each block that can be dynamically allocated, it can be either - * free, or pending/in-use by a client. The updates happen in three steps: + * free: + * The block is free. + * + * pending: + * The block is allocated to some client and not free. + * + * configured: + * The block is allocated to some client, and assigned to that + * client in MDP5_MDP_SMP_ALLOC registers. + * + * inuse: + * The block is being actively used by a client. + * + * The updates happen in the following steps: * * 1) mdp5_smp_request(): * When plane scanout is setup, calculate required number of - * blocks needed per client, and request. Blocks not inuse or - * pending by any other client are added to client's pending - * set. + * blocks needed per client, and request. Blocks neither inuse nor + * configured nor pending by any other client are added to client's + * pending set. + * For shrinking, blocks in pending but not in configured can be freed + * directly, but those already in configured will be freed later by + * mdp5_smp_commit. * * 2) mdp5_smp_configure(): * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers * are configured for the union(pending, inuse) + * Current pending is copied to configured. + * It is assumed that mdp5_smp_request and mdp5_smp_configure not run + * concurrently for the same pipe. * * 3) mdp5_smp_commit(): - * After next vblank, copy pending -> inuse. Optionally update + * After next vblank, copy configured -> inuse. Optionally update * MDP5_SMP_ALLOC registers if there are newly unused blocks * + * 4) mdp5_smp_release(): + * Must be called after the pipe is disabled and no longer uses any SMB + * * On the next vblank after changes have been committed to hw, the * client's pending blocks become it's in-use blocks (and no-longer * in-use blocks become available to other clients). 
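The comment above describes four per-client states for SMP blocks: free, pending, configured and inuse. A standalone toy model of that flow, one bitmask per state and invented names throughout, may make the request/configure/commit/release steps easier to follow:

#include <stdint.h>
#include <stdio.h>

/* One bit per SMP block; a real driver would use larger bitmaps. */
struct toy_client {
        uint32_t pending;       /* requested, not yet written to ALLOC regs */
        uint32_t configured;    /* written to ALLOC regs, not yet live */
        uint32_t inuse;         /* live since the last vblank */
};

static uint32_t global_used;    /* blocks owned by any client */

/* step 1: grab free blocks for this client */
static void request(struct toy_client *c, uint32_t want)
{
        uint32_t grant = want & ~global_used;

        global_used |= grant;
        c->pending |= grant;
}

/* step 2: before FLUSH, program ALLOC registers for union(inuse, pending) */
static void configure(struct toy_client *c)
{
        c->configured = c->pending;
        printf("program ALLOC for 0x%x\n", c->inuse | c->configured);
}

/* step 3: after vblank, configured becomes inuse; shrunk blocks go back */
static void commit(struct toy_client *c)
{
        uint32_t released = c->inuse & ~c->configured;

        global_used &= ~released;
        c->inuse = c->configured;
}

/* step 4: pipe disabled, return everything to the pool */
static void release(struct toy_client *c)
{
        global_used &= ~(c->pending | c->configured | c->inuse);
        c->pending = c->configured = c->inuse = 0;
}

int main(void)
{
        struct toy_client c = { 0 };

        request(&c, 0x7);       /* want blocks 0-2 */
        configure(&c);
        commit(&c);
        c.pending = 0x3;        /* shrink to blocks 0-1 */
        configure(&c);
        commit(&c);             /* block 2 returns to the pool */
        release(&c);
        printf("pool in use: 0x%x\n", global_used);
        return 0;
}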
@@ -77,6 +99,9 @@ struct mdp5_smp { struct mdp5_client_smp_state client_state[MAX_CLIENTS]; }; +static void update_smp_state(struct mdp5_smp *smp, + u32 cid, mdp5_smp_state_t *assigned); + static inline struct mdp5_kms *get_kms(struct mdp5_smp *smp) { @@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp, for (i = cur_nblks; i > nblks; i--) { int blk = find_first_bit(ps->pending, cnt); clear_bit(blk, ps->pending); - /* don't clear in global smp_state until _commit() */ + + /* clear in global smp_state if not in configured + * otherwise until _commit() + */ + if (!test_bit(blk, ps->configured)) + clear_bit(blk, smp->state); } } @@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid /* Release SMP blocks for all clients of the pipe */ void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) { - int i, nblks; + int i; + unsigned long flags; + int cnt = smp->blk_cnt; + + for (i = 0; i < pipe2nclients(pipe); i++) { + mdp5_smp_state_t assigned; + u32 cid = pipe2client(pipe, i); + struct mdp5_client_smp_state *ps = &smp->client_state[cid]; + + spin_lock_irqsave(&smp->state_lock, flags); + + /* clear hw assignment */ + bitmap_or(assigned, ps->inuse, ps->configured, cnt); + update_smp_state(smp, CID_UNUSED, &assigned); + + /* free to global pool */ + bitmap_andnot(smp->state, smp->state, ps->pending, cnt); + bitmap_andnot(smp->state, smp->state, assigned, cnt); + + /* clear client's infor */ + bitmap_zero(ps->pending, cnt); + bitmap_zero(ps->configured, cnt); + bitmap_zero(ps->inuse, cnt); + + spin_unlock_irqrestore(&smp->state_lock, flags); + } - for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++) - smp_request_block(smp, pipe2client(pipe, i), 0); set_fifo_thresholds(smp, pipe, 0); } @@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) u32 cid = pipe2client(pipe, i); struct mdp5_client_smp_state *ps = &smp->client_state[cid]; - bitmap_or(assigned, ps->inuse, ps->pending, cnt); + /* + * if vblank has not happened since last smp_configure + * skip the configure for now + */ + if (!bitmap_equal(ps->inuse, ps->configured, cnt)) + continue; + + bitmap_copy(ps->configured, ps->pending, cnt); + bitmap_or(assigned, ps->inuse, ps->configured, cnt); update_smp_state(smp, cid, &assigned); } } -/* step #3: after vblank, copy pending -> inuse: */ +/* step #3: after vblank, copy configured -> inuse: */ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) { int cnt = smp->blk_cnt; @@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) * using, which can be released and made available to other * clients: */ - if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { + if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) { unsigned long flags; spin_lock_irqsave(&smp->state_lock, flags); @@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) update_smp_state(smp, CID_UNUSED, &released); } - bitmap_copy(ps->inuse, ps->pending, cnt); + bitmap_copy(ps->inuse, ps->configured, cnt); } } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h index e47179f63585..5b6c2363f592 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h @@ -23,6 +23,7 @@ struct mdp5_client_smp_state { mdp5_smp_state_t inuse; + mdp5_smp_state_t configured; mdp5_smp_state_t pending; }; diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 
1b22d8bfe142..1ceb4f22dd89 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev, timeout = ktime_add_ms(ktime_get(), 1000); - ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); - if (ret) { - WARN_ON(ret); // TODO unswap state back? or?? - commit_destroy(c); - return ret; - } + /* uninterruptible wait */ + msm_wait_fence(dev, c->fence, &timeout, false); complete_commit(c); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b7ef56ed8d1c..d3467b115e04 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor) * Fences: */ -int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, - ktime_t *timeout) +int msm_wait_fence(struct drm_device *dev, uint32_t fence, + ktime_t *timeout , bool interruptible) { struct msm_drm_private *priv = dev->dev_private; int ret; @@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, remaining_jiffies = timespec_to_jiffies(&ts); } - ret = wait_event_interruptible_timeout(priv->fence_event, + if (interruptible) + ret = wait_event_interruptible_timeout(priv->fence_event, + fence_completed(dev, fence), + remaining_jiffies); + else + ret = wait_event_timeout(priv->fence_event, fence_completed(dev, fence), remaining_jiffies); @@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, return -EINVAL; } - return msm_wait_fence_interruptable(dev, args->fence, &timeout); + return msm_wait_fence(dev, args->fence, &timeout, true); } static const struct drm_ioctl_desc msm_ioctls[] = { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index e7c5ea125d45..4ff0ec9c994b 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev, int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); -int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, - ktime_t *timeout); +int msm_wait_fence(struct drm_device *dev, uint32_t fence, + ktime_t *timeout, bool interruptible); int msm_queue_fence_cb(struct drm_device *dev, struct msm_fence_cb *cb, uint32_t fence); void msm_update_fence(struct drm_device *dev, uint32_t fence); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f211b80e3a1e..c76cc853b08a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout) if (op & MSM_PREP_NOSYNC) timeout = NULL; - ret = msm_wait_fence_interruptable(dev, fence, timeout); + ret = msm_wait_fence(dev, fence, timeout, true); } /* TODO cache maintenance */ diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index dd7a7ab603e2..831461bc98a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -23,8 +23,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - BUG_ON(!msm_obj->sgt); /* should have already pinned! */ - return msm_obj->sgt; + int npages = obj->size >> PAGE_SHIFT; + + if (WARN_ON(!msm_obj->pages)) /* should have already pinned! 
*/ + return NULL; + + return drm_prime_pages_to_sg(msm_obj->pages, npages); } void *msm_gem_prime_vmap(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 649024d4daf1..477cbb12809b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli) nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); nvif_client_fini(&cli->base); usif_client_fini(cli); + kfree(cli); } static void @@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv) pm_runtime_get_sync(dev->dev); + mutex_lock(&cli->mutex); if (cli->abi16) nouveau_abi16_fini(cli->abi16); + mutex_unlock(&cli->mutex); mutex_lock(&drm->client.mutex); list_del(&cli->head); diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 775277f1edb0..dcfbbfaf1739 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu) return 0; } +#if IS_ENABLED(CONFIG_IOMMU_API) + static void nouveau_platform_probe_iommu(struct device *dev, struct nouveau_platform_gpu *gpu) { @@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev, } } +#else + +static void nouveau_platform_probe_iommu(struct device *dev, + struct nouveau_platform_gpu *gpu) +{ +} + +static void nouveau_platform_remove_iommu(struct device *dev, + struct nouveau_platform_gpu *gpu) +{ +} + +#endif + static int nouveau_platform_probe(struct platform_device *pdev) { struct nouveau_platform_gpu *gpu; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 18f449715788..7464aef34674 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, node->page_shift = 12; switch (drm->device.info.family) { + case NV_DEVICE_INFO_V0_TNT: + case NV_DEVICE_INFO_V0_CELSIUS: + case NV_DEVICE_INFO_V0_KELVIN: + case NV_DEVICE_INFO_V0_RANKINE: + case NV_DEVICE_INFO_V0_CURIE: + break; case NV_DEVICE_INFO_V0_TESLA: if (drm->device.info.chipset != 0x50) node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; break; case NV_DEVICE_INFO_V0_FERMI: case NV_DEVICE_INFO_V0_KEPLER: + case NV_DEVICE_INFO_V0_MAXWELL: node->memtype = (nvbo->tile_flags & 0xff00) >> 8; break; default: + NV_WARN(drm, "%s: unhandled family type %x\n", __func__, + drm->device.info.family); break; } diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 4ef602c5469d..495c57644ced 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info) if (ret) return ret; - if (RING_SPACE(chan, 49)) { + if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 
4 : 0))) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);

-	if (show && nv_crtc->cursor.nvbo)
+	if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
		nv50_crtc_cursor_show(nv_crtc);
	else
		nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
	if (ret)
		return ret;

-	ret = RING_SPACE(chan, 59);
+	ret = RING_SPACE(chan, 58);
	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
	OUT_RING(chan, info->var.yres_virtual);
	OUT_RING(chan, upper_32_bits(fb->vma.offset));
	OUT_RING(chan, lower_32_bits(fb->vma.offset));
+	FIRE_RING(chan);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
		return -EINVAL;
	}

-	ret = RING_SPACE(chan, 60);
+	ret = RING_SPACE(chan, 58);
	if (ret) {
		WARN_ON(1);
		nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
		case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
		default:
			nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
-			return 0x0000;
+			return NULL;
		}
	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index e10f9644140f..52c22b026005 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -166,14 +166,30 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
}

static int
+gk104_fifo_chan_kick(struct gk104_fifo_chan *chan)
+{
+	struct nvkm_object *obj = (void *)chan;
+	struct gk104_fifo_priv *priv = (void *)obj->engine;
+
+	nv_wr32(priv, 0x002634, chan->base.chid);
+	if (!nv_wait(priv, 0x002634, 0x100000, 0x000000)) {
+		nv_error(priv, "channel %d [%s] kick timeout\n",
+			 chan->base.chid, nvkm_client_name(chan));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
 gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			   struct nvkm_object *object)
{
	struct nvkm_bar *bar = nvkm_bar(parent);
-	struct gk104_fifo_priv *priv = (void *)parent->engine;
	struct gk104_fifo_base *base = (void *)parent->parent;
	struct gk104_fifo_chan *chan = (void *)parent;
	u32 addr;
+	int ret;

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW : return 0;
@@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
		return -EINVAL;
	}

-	nv_wr32(priv, 0x002634, chan->base.chid);
-	if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
-		nv_error(priv, "channel %d [%s] kick timeout\n",
-			 chan->base.chid, nvkm_client_name(chan));
-		if (suspend)
-			return -EBUSY;
-	}
+	ret = gk104_fifo_chan_kick(chan);
+	if (ret && suspend)
+		return ret;

	if (addr) {
		nv_wo32(base, addr + 0x00, 0x00000000);
@@ -319,6 +331,7 @@ gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend)
		gk104_fifo_runlist_update(priv, chan->engine);
	}

+	gk104_fifo_chan_kick(chan);
	nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
		gf100_gr_zbc_clear_depth(priv, index);
}

+/**
+ * Wait until GR goes idle. GR is considered idle if it is disabled by the
+ * MC (0x200) register, or GR is not busy and a context switch is not in
+ * progress.
+ */
+int
+gf100_gr_wait_idle(struct gf100_gr_priv *priv)
+{
+	unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
+	bool gr_enabled, ctxsw_active, gr_busy;
+
+	do {
+		/*
+		 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
+		 * up-to-date
+		 */
+		nv_rd32(priv, 0x400700);
+
+		gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
+		ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
+		gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
+
+		if (!gr_enabled || (!gr_busy && !ctxsw_active))
+			return 0;
+	} while (time_before(jiffies, end_jiffies));
+
+	nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
+		 gr_enabled, ctxsw_active, gr_busy);
+	return -EAGAIN;
+}
+
void
gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
{
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
		while (addr < next) {
			nv_wr32(priv, 0x400200, addr);
-			nv_wait(priv, 0x400700, 0x00000002, 0x00000000);
+			/**
+			 * Wait for GR to go idle after submitting a
+			 * GO_IDLE bundle
+			 */
+			if ((addr & 0xffff) == 0xe100)
+				gf100_gr_wait_idle(priv);
+			nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
			addr += init->pitch;
		}
	}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
	int ppc_nr;
};

+int gf100_gr_wait_idle(struct gf100_gr_priv *);
void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
nvkm_perfctx_dtor(struct nvkm_object *object)
{
	struct nvkm_pm *ppm = (void *)object->engine;
+	struct nvkm_perfctx *ctx = (void *)object;
+
	mutex_lock(&nv_subdev(ppm)->mutex);
-	nvkm_engctx_destroy(&ppm->context->base);
-	ppm->context = NULL;
+	nvkm_engctx_destroy(&ctx->base);
+	if (ppm->context == ctx)
+		ppm->context = NULL;
	mutex_unlock(&nv_subdev(ppm)->mutex);
}

@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	mutex_lock(&nv_subdev(ppm)->mutex);
	if (ppm->context == NULL)
		ppm->context = ctx;
-	mutex_unlock(&nv_subdev(ppm)->mutex);
-
	if (ctx != ppm->context)
-		return -EBUSY;
+		ret = -EBUSY;
+	mutex_unlock(&nv_subdev(ppm)->mutex);

-	return 0;
+	return ret;
}

struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
}

/**
+ * INIT_PLL_INDIRECT - opcode 0x59
+ *
+ */
+static void
+init_pll_indirect(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 reg = nv_ro32(bios, init->offset + 1);
+	u16 addr = nv_ro16(bios, init->offset + 5);
+	u32 freq = (u32)nv_ro16(bios, addr) * 1000;
+
+	trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
+	      reg, addr, freq);
+	init->offset += 7;
+
+	init_prog_pll(init, reg, freq);
+}
+
+/**
+ * INIT_ZM_REG_INDIRECT - opcode 0x5a
+ *
+ */
+static void
+init_zm_reg_indirect(struct nvbios_init *init)
+{
+	struct nvkm_bios *bios = init->bios;
+	u32 reg = nv_ro32(bios, init->offset + 1);
+	u16 addr = nv_ro16(bios, init->offset + 5);
+	u32 data = nv_ro32(bios, addr);
+
+	trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
+	      reg, addr, data);
+	init->offset += 7;
+
+	init_wr32(init, addr, data);
+}
+
+/**
 * INIT_SUB_DIRECT - opcode 0x5b
 *
 */
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
	[0x56] = { init_condition_time },
	[0x57] = { init_ltime },
	[0x58] = { init_zm_reg_sequence },
+	[0x59] = { init_pll_indirect },
+	[0x5a] = { init_zm_reg_indirect },
	[0x5b] = { init_sub_direct },
	[0x5c] = { init_jump },
	[0x5e] = { init_i2c_if },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk_priv *priv = (void *)clock;
-	u32 oclk, sclk, sdiv, diff;
+	u32 oclk, sclk, sdiv;
+	s32 diff;

	info->clk = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
	nv_wr32(priv, 0x12004c, 0x4);
	nv_wr32(priv, 0x122204, 0x2);
	nv_rd32(priv, 0x122204);
+
+	/*
+	 * Bug: increase clock timeout to avoid operation failure at high
+	 * gpcclk rate.
+ */ + nv_wr32(priv, 0x122354, 0x800); + nv_wr32(priv, 0x128328, 0x800); + nv_wr32(priv, 0x124320, 0x800); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c index 80614f1b2074..282143f49d72 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c @@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object) { struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); struct nv04_instobj_priv *node = (void *)object; + struct nvkm_subdev *subdev = (void *)priv; + + mutex_lock(&subdev->mutex); nvkm_mm_free(&priv->heap, &node->mem); + mutex_unlock(&subdev->mutex); + nvkm_instobj_destroy(&node->base); } @@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); struct nv04_instobj_priv *node; struct nvkm_instobj_args *args = data; + struct nvkm_subdev *subdev = (void *)priv; int ret; if (!args->align) @@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, if (ret) return ret; + mutex_lock(&subdev->mutex); ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size, args->align, &node->mem); + mutex_unlock(&subdev->mutex); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index dd39f434b4a7..c3872598b85a 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, encoder_mode = atombios_get_encoder_mode(encoder); if (connector && (radeon_audio != 0) && ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || - (ENCODER_MODE_IS_DP(encoder_mode) && - drm_detect_monitor_audio(radeon_connector_edid(connector))))) + ENCODER_MODE_IS_DP(encoder_mode))) radeon_audio_mode_set(encoder, adjusted_mode); } diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 68fd9fc677e3..44480c1b9738 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c @@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder) struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 offset; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->offset; - - WREG32(AFMT_AUDIO_SRC_CONTROL + offset, - AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id)); + WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, + AFMT_AUDIO_SRC_SELECT(dig->pin->id)); } void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, - struct drm_connector *connector, struct drm_display_mode *mode) + struct drm_connector *connector, + struct drm_display_mode *mode) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 tmp = 0, offset; + u32 tmp = 0; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->pin->offset; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) { if (connector->latency_present[1]) tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | @@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, else 
tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); } - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); + WREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); } void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, - u8 *sadb, int sad_count) + u8 *sadb, int sad_count) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 offset, tmp; + u32 tmp; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->pin->offset; - /* program the speaker allocation */ - tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp = RREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); /* set HDMI mode */ tmp |= HDMI_CONNECTION; @@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, tmp |= SPEAKER_ALLOCATION(sadb[0]); else tmp |= SPEAKER_ALLOCATION(5); /* stereo */ - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + WREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); } void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, - u8 *sadb, int sad_count) + u8 *sadb, int sad_count) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 offset, tmp; + u32 tmp; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->pin->offset; - /* program the speaker allocation */ - tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); + tmp = RREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); /* set DP mode */ tmp |= DP_CONNECTION; @@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, tmp |= SPEAKER_ALLOCATION(sadb[0]); else tmp |= SPEAKER_ALLOCATION(5); /* stereo */ - WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); + WREG32_ENDPOINT(dig->pin->offset, + AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); } void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, - struct cea_sad *sads, int sad_count) + struct cea_sad *sads, int sad_count) { - u32 offset; int i; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; @@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, }; - if (!dig || !dig->afmt || !dig->afmt->pin) + if (!dig || !dig->afmt || !dig->pin) return; - offset = dig->afmt->pin->offset; - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { u32 value = 0; u8 stereo_freqs = 0; @@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); - WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); + WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value); } } @@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev, } void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, - struct radeon_crtc *crtc, unsigned int clock) + struct radeon_crtc 
*crtc, unsigned int clock) { /* Two dtos; generally use dto0 for HDMI */ u32 value = 0; @@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, } void dce6_dp_audio_set_dto(struct radeon_device *rdev, - struct radeon_crtc *crtc, unsigned int clock) + struct radeon_crtc *crtc, unsigned int clock) { /* Two dtos; generally use dto1 for DP */ u32 value = 0; diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index fa719c53449b..fbc8d88d6e5d 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = { static void radeon_audio_enable(struct radeon_device *rdev, struct r600_audio_pin *pin, u8 enable_mask) { + struct drm_encoder *encoder; + struct radeon_encoder *radeon_encoder; + struct radeon_encoder_atom_dig *dig; + int pin_count = 0; + + if (!pin) + return; + + if (rdev->mode_info.mode_config_initialized) { + list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) { + if (radeon_encoder_is_digital(encoder)) { + radeon_encoder = to_radeon_encoder(encoder); + dig = radeon_encoder->enc_priv; + if (dig->pin == pin) + pin_count++; + } + } + + if ((pin_count > 1) && (enable_mask == 0)) + return; + } + if (rdev->audio.funcs->enable) rdev->audio.funcs->enable(rdev, pin, enable_mask); } @@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset, static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) { - struct radeon_encoder *radeon_encoder; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct cea_sad *sads; int sad_count; - list_for_each_entry(connector, - &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); + if (!connector) return; - } sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); if (sad_count <= 0) { @@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) } BUG_ON(!sads); - radeon_encoder = to_radeon_encoder(encoder); - if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count); @@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; u8 *sadb = NULL; int sad_count; - list_for_each_entry(connector, - &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); + if (!connector) return; - } - sad_count = drm_edid_to_speaker_allocation( - radeon_connector_edid(connector), &sadb); + sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), + &sadb); if (sad_count < 0) { DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", 
sad_count); @@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) } static void radeon_audio_write_latency_fields(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { - struct radeon_encoder *radeon_encoder; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = 0; - - list_for_each_entry(connector, - &encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); + if (!connector) return; - } - - radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields) radeon_encoder->audio->write_latency_fields(encoder, connector, mode); @@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder) } void radeon_audio_detect(struct drm_connector *connector, + struct drm_encoder *encoder, enum drm_connector_status status) { - struct radeon_device *rdev; - struct radeon_encoder *radeon_encoder; + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; - if (!connector || !connector->encoder) + if (!radeon_audio_chipset_supported(rdev)) return; - rdev = connector->encoder->dev->dev_private; - - if (!radeon_audio_chipset_supported(rdev)) + if (!radeon_encoder_is_digital(encoder)) return; - radeon_encoder = to_radeon_encoder(connector->encoder); dig = radeon_encoder->enc_priv; if (status == connector_status_connected) { - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { - radeon_encoder->audio = NULL; - return; - } - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector, radeon_encoder->audio = rdev->audio.hdmi_funcs; } - dig->afmt->pin = radeon_audio_get_pin(connector->encoder); - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + if (!dig->pin) + dig->pin = radeon_audio_get_pin(encoder); + radeon_audio_enable(rdev, dig->pin, 0xf); + } else { + radeon_audio_enable(rdev, dig->pin, 0); + dig->pin = NULL; + } } else { - radeon_audio_enable(rdev, dig->afmt->pin, 0); - dig->afmt->pin = NULL; + radeon_audio_enable(rdev, dig->pin, 0); + dig->pin = NULL; } } @@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock } static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct radeon_device *rdev = encoder->dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - struct drm_connector *connector; - struct radeon_connector *radeon_connector = NULL; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; struct hdmi_avi_infoframe frame; int err; - list_for_each_entry(connector, - 
&encoder->dev->mode_config.connector_list, head) { - if (connector->encoder == encoder) { - radeon_connector = to_radeon_connector(connector); - break; - } - } - - if (!radeon_connector) { - DRM_ERROR("Couldn't find encoder's connector\n"); - return -ENOENT; - } + if (!connector) + return -EINVAL; err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); if (err < 0) { @@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, return err; } - if (dig && dig->afmt && - radeon_encoder->audio && radeon_encoder->audio->set_avi_packet) + if (dig && dig->afmt && radeon_encoder->audio && + radeon_encoder->audio->set_avi_packet) radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset, buffer, sizeof(buffer)); @@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder, { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (!dig || !dig->afmt) return; - radeon_audio_set_mute(encoder, true); + if (!connector) + return; - radeon_audio_write_speaker_allocation(encoder); - radeon_audio_write_sad_regs(encoder); - radeon_audio_write_latency_fields(encoder, mode); - radeon_audio_set_dto(encoder, mode->clock); - radeon_audio_set_vbi_packet(encoder); - radeon_hdmi_set_color_depth(encoder); - radeon_audio_update_acr(encoder, mode->clock); - radeon_audio_set_audio_packet(encoder); - radeon_audio_select_pin(encoder); + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_audio_set_mute(encoder, true); - if (radeon_audio_set_avi_packet(encoder, mode) < 0) - return; + radeon_audio_write_speaker_allocation(encoder); + radeon_audio_write_sad_regs(encoder); + radeon_audio_write_latency_fields(encoder, mode); + radeon_audio_set_dto(encoder, mode->clock); + radeon_audio_set_vbi_packet(encoder); + radeon_hdmi_set_color_depth(encoder); + radeon_audio_update_acr(encoder, mode->clock); + radeon_audio_set_audio_packet(encoder); + radeon_audio_select_pin(encoder); + + if (radeon_audio_set_avi_packet(encoder, mode) < 0) + return; - radeon_audio_set_mute(encoder, false); + radeon_audio_set_mute(encoder, false); + } else { + radeon_hdmi_set_color_depth(encoder); + + if (radeon_audio_set_avi_packet(encoder, mode) < 0) + return; + } } static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; @@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, if (!dig || !dig->afmt) return; - radeon_audio_write_speaker_allocation(encoder); - radeon_audio_write_sad_regs(encoder); - radeon_audio_write_latency_fields(encoder, mode); - if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) - radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); - else - radeon_audio_set_dto(encoder, dig_connector->dp_clock); - radeon_audio_set_audio_packet(encoder); - radeon_audio_select_pin(encoder); - - if (radeon_audio_set_avi_packet(encoder, mode) < 0) + if (!connector) return; + + if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { + radeon_audio_write_speaker_allocation(encoder); + radeon_audio_write_sad_regs(encoder); + radeon_audio_write_latency_fields(encoder, mode); + if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev)) + radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10); + else + 
radeon_audio_set_dto(encoder, dig_connector->dp_clock); + radeon_audio_set_audio_packet(encoder); + radeon_audio_select_pin(encoder); + + if (radeon_audio_set_avi_packet(encoder, mode) < 0) + return; + } } void radeon_audio_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h index 8438304f7139..059cc3012062 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.h +++ b/drivers/gpu/drm/radeon/radeon_audio.h @@ -68,7 +68,8 @@ struct radeon_audio_funcs int radeon_audio_init(struct radeon_device *rdev); void radeon_audio_detect(struct drm_connector *connector, - enum drm_connector_status status); + struct drm_encoder *encoder, + enum drm_connector_status status); u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, u32 offset, u32 reg); void radeon_audio_endpoint_wreg(struct radeon_device *rdev, diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 3e5f6b71f3ad..c097d3a82bda 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { + u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; + + if (hss > lvds->native_mode.hdisplay) + hss = (10 - 1) * 8; + lvds->native_mode.htotal = lvds->native_mode.hdisplay + (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + - (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; + hss; lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + (RBIOS8(tmp + 23) * 8); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index cebb65e07e1d..94b21ae70ef7 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1379,8 +1379,16 @@ out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) - radeon_audio_detect(connector, ret); + if ((radeon_audio != 0) && radeon_connector->use_digital) { + const struct drm_connector_helper_funcs *connector_funcs = + connector->helper_private; + + encoder = connector_funcs->best_encoder(connector); + if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) { + radeon_connector_get_edid(connector); + radeon_audio_detect(connector, encoder, ret); + } + } exit: pm_runtime_mark_last_busy(connector->dev->dev); @@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); - if (radeon_audio != 0) - radeon_audio_detect(connector, ret); + if ((radeon_audio != 0) && encoder) { + radeon_connector_get_edid(connector); + radeon_audio_detect(connector, encoder, ret); + } out: pm_runtime_mark_last_busy(connector->dev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 07909d817381..aecc3e3dec0c 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -237,7 +237,6 @@ struct radeon_afmt { int offset; bool last_buffer_filled_status; int id; - struct r600_audio_pin *pin; }; struct radeon_mode_info { @@ -439,6 +438,7 @@ 
struct radeon_encoder_atom_dig { uint8_t backlight_level; int panel_mode; struct radeon_afmt *afmt; + struct r600_audio_pin *pin; int active_mst_links; }; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 882cccdad272..ac6fe40b99f7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) else if (boot_cpu_data.x86 > 3) tmp = pgprot_noncached(tmp); #endif -#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) +#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ + defined(__powerpc__) if (caching_flags & TTM_PL_FLAG_WC) tmp = pgprot_writecombine(tmp); else diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index f822fd2a1ada..884d82f9190e 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), + .driver_data = APPLE_HAS_FN }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 157c62775053..e6fce23b121a 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, @@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { } diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 
3318de690e00..a2dbbbe0d8d7 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) struct cp2112_force_read_report report; int ret; + if (size > sizeof(dev->read_data)) + size = sizeof(dev->read_data); report.report = CP2112_DATA_READ_FORCE_SEND; report.length = cpu_to_be16(size); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b04b0820d816..b3b225b75d0a 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -142,6 +142,9 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274 #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 6a9b05b328a9..7c811252c1ce 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, /* * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" * for the stylus. + * The check for mt_report_id ensures we don't process + * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical + * collection, but within the report ID. */ if (field->physical == HID_DG_STYLUS) return 0; + else if ((field->physical == 0) && + (field->report->id != td->mt_report_id) && + (td->mt_report_id != -1)) + return 0; if (field->application == HID_DG_TOUCHSCREEN || field->application == HID_DG_TOUCHPAD) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 53e7de7cb9e2..20f9a653444c 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -87,6 +87,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 4c0ffca97bef..44958d79d598 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1271,11 +1271,13 @@ fail_leds: pad_input_dev = NULL; wacom_wac->pad_registered = false; fail_register_pad_input: - input_unregister_device(touch_input_dev); + if (touch_input_dev) + input_unregister_device(touch_input_dev); wacom_wac->touch_input = NULL; wacom_wac->touch_registered = false; fail_register_touch_input: - input_unregister_device(pen_input_dev); + if (pen_input_dev) + input_unregister_device(pen_input_dev); wacom_wac->pen_input = NULL; wacom_wac->pen_registered = false; fail_register_pen_input: diff --git a/drivers/hid/wacom_wac.c 
b/drivers/hid/wacom_wac.c index 232da89f4e88..0d244239e55d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom) features->x_max = 4096; features->y_max = 4096; } + else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { + features->device_type |= WACOM_DEVICETYPE_PAD; + } } /* diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index 28fcb2e246d5..fbfc02bb2cfa 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -195,7 +195,7 @@ abort: } static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, - unsigned int voltage) + unsigned long voltage) { int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; int err; diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c index b77b82f24480..6153df735e82 100644 --- a/drivers/hwmon/nct7904.c +++ b/drivers/hwmon/nct7904.c @@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev, return sprintf(buf, "%d\n", val); } -static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, - const char *buf, size_t count) +static ssize_t store_enable(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) { int index = to_sensor_dev_attr(devattr)->index; struct nct7904_data *data = dev_get_drvdata(dev); @@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; - if (val > 1 || (val && !data->fan_mode[index])) + if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index])) return -EINVAL; ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index, - val ? data->fan_mode[index] : 0); + val == 2 ? data->fan_mode[index] : 0); return ret ? ret : count; } -/* Return 0 for manual mode or 1 for SmartFan mode */ -static ssize_t show_mode(struct device *dev, - struct device_attribute *devattr, char *buf) +/* Return 1 for manual mode or 2 for SmartFan mode */ +static ssize_t show_enable(struct device *dev, + struct device_attribute *devattr, char *buf) { int index = to_sensor_dev_attr(devattr)->index; struct nct7904_data *data = dev_get_drvdata(dev); @@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev, if (val < 0) return val; - return sprintf(buf, "%d\n", val ? 1 : 0); + return sprintf(buf, "%d\n", val ? 
2 : 1); } /* 2 attributes per channel: pwm and mode */ -static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 0); -static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 0); -static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 0); +static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 1); -static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 1); -static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 1); +static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 2); -static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 2); -static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, +static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 2); +static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, show_pwm, store_pwm, 3); -static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, - show_mode, store_mode, 3); +static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR, + show_enable, store_enable, 3); static struct attribute *nct7904_fanctl_attrs[] = { - &sensor_dev_attr_fan1_pwm.dev_attr.attr, - &sensor_dev_attr_fan1_mode.dev_attr.attr, - &sensor_dev_attr_fan2_pwm.dev_attr.attr, - &sensor_dev_attr_fan2_mode.dev_attr.attr, - &sensor_dev_attr_fan3_pwm.dev_attr.attr, - &sensor_dev_attr_fan3_mode.dev_attr.attr, - &sensor_dev_attr_fan4_pwm.dev_attr.attr, - &sensor_dev_attr_fan4_mode.dev_attr.attr, + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_pwm1_enable.dev_attr.attr, + &sensor_dev_attr_pwm2.dev_attr.attr, + &sensor_dev_attr_pwm2_enable.dev_attr.attr, + &sensor_dev_attr_pwm3.dev_attr.attr, + &sensor_dev_attr_pwm3_enable.dev_attr.attr, + &sensor_dev_attr_pwm4.dev_attr.attr, + &sensor_dev_attr_pwm4_enable.dev_attr.attr, NULL }; diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index e8e2077c7244..13ea1ea23328 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev) if (src & MMA8452_TRANSIENT_SRC_XTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); if (src & MMA8452_TRANSIENT_SRC_YTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); } @@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev, static const struct iio_event_spec mma8452_transient_event[] = { { - .type = IIO_EV_TYPE_THRESH, + .type = IIO_EV_TYPE_MAG, .dir = IIO_EV_DIR_RISING, .mask_separate = BIT(IIO_EV_INFO_ENABLE), .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 8d9c9b9215dd..d819823f7257 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi) indio_dev->channels = chip_info->channels; indio_dev->num_channels = chip_info->num_channels; + adc->chip_info = chip_info; + 
adc->transfer[0].tx_buf = &adc->tx_buf; adc->transfer[0].len = sizeof(adc->tx_buf); adc->transfer[1].rx_buf = adc->rx_buf; diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 480f335a0f9f..819632bf1fda 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev, struct vf610_adc *info = iio_priv(indio_dev); if ((readval == NULL) || - (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) + ((reg % 4) || (reg > VF610_REG_ADC_PCTL))) return -EINVAL; *readval = readl(info->regs + reg); diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index c1a218236be5..11a027adc204 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -200,7 +200,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, int *val, int *val2) { u8 reg; - u16 buf; + __be16 buf; int ret; struct stk3310_data *data = iio_priv(indio_dev); @@ -222,7 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, dev_err(&data->client->dev, "register read failed\n"); return ret; } - *val = swab16(buf); + *val = be16_to_cpu(buf); return IIO_VAL_INT; } @@ -235,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev, int val, int val2) { u8 reg; - u16 buf; + __be16 buf; int ret; unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); @@ -252,7 +252,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev, else return -EINVAL; - buf = swab16(val); + buf = cpu_to_be16(val); ret = regmap_bulk_write(data->regmap, reg, &buf, 2); if (ret < 0) dev_err(&client->dev, "failed to set PS threshold!\n"); @@ -301,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { u8 reg; - u16 buf; + __be16 buf; int ret; unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); @@ -322,7 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); return ret; } - *val = swab16(buf); + *val = be16_to_cpu(buf); mutex_unlock(&data->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_INT_TIME: @@ -608,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client, if (ret < 0) return ret; - ret = iio_device_register(indio_dev); - if (ret < 0) { - dev_err(&client->dev, "device_register failed\n"); - stk3310_set_state(data, STK3310_STATE_STANDBY); - } - - if (client->irq <= 0) + if (client->irq < 0) client->irq = stk3310_gpio_probe(client); if (client->irq >= 0) { @@ -629,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client, client->irq); } + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "device_register failed\n"); + stk3310_set_state(data, STK3310_STATE_STANDBY); + } + return ret; } diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index dcadfc4f0661..efb9350b0d76 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS config BMC150_MAGN tristate "Bosch BMC150 Magnetometer Driver" depends on I2C + select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER help diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index d4c178869991..1347a1f2e46f 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data) goto err_poweroff; } if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { - 
dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); + dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id); ret = -ENODEV; goto err_poweroff; } - dev_dbg(&data->client->dev, "Chip id %x\n", ret); + dev_dbg(&data->client->dev, "Chip id %x\n", chip_id); preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; ret = bmc150_magn_set_odr(data, preset.odr); diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c index d927397a6ef7..706ebfd6297f 100644 --- a/drivers/iio/magnetometer/mmc35240.c +++ b/drivers/iio/magnetometer/mmc35240.c @@ -202,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set) coil_bit = MMC35240_CTRL0_RESET_BIT; return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, - MMC35240_CTRL0_REFILL_BIT, - coil_bit); + coil_bit, coil_bit); + } static int mmc35240_init(struct mmc35240_data *data) @@ -222,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data) /* * make sure we restore sensor characteristics, by doing - * a RESET/SET sequence + * a SET/RESET sequence, the axis polarity being naturally + * aligned after RESET */ - ret = mmc35240_hw_set(data, false); + ret = mmc35240_hw_set(data, true); if (ret < 0) return ret; usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); - ret = mmc35240_hw_set(data, true); + ret = mmc35240_hw_set(data, false); if (ret < 0) return ret; @@ -503,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client, } data = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); data->client = client; data->regmap = regmap; data->res = MMC35240_16_BITS_SLOW; diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c index cb2e8ad8bfdc..7a2b639eaa96 100644 --- a/drivers/iio/temperature/mlx90614.c +++ b/drivers/iio/temperature/mlx90614.c @@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev, *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: - *val = 13657; + *val = -13657; *val2 = 500000; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index b1b73232f217..bbbe0184e592 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc) /* * T3 only supports 32 bits of size. */ + if (sizeof(phys_addr_t) > 4) { + pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n"); + return ERR_PTR(-ENOTSUPP); + } bl.size = 0xffffffff; bl.addr = 0; kva = 0; diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 2d7e503d13cb..871dbe56216a 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -31,6 +31,8 @@ * SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/idr.h> @@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 bar0 = 0, bar1 = 0; #ifdef CONFIG_X86_64 - if (WARN(pat_enabled(), - "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { + if (pat_enabled()) { + pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n"); ret = -ENODEV; goto bail; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index b396344fae16..6a36338593cd 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_H__ #define __OCRDMA_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h index 1554cca5712a..430b1350fe96 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_ABI_H__ #define __OCRDMA_ABI_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 29b27675dd70..44766fee1f4e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <net/neighbour.h> #include <net/netevent.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index cf366fe03cb8..04a30ae67473 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_AH_H__ #define __OCRDMA_AH_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 47615ff33bc6..aab391a15db4 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) CNA Adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <linux/sched.h> #include <linux/interrupt.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index e905972fceb7..7ed885c1851e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) CNA Adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_HW_H__ #define __OCRDMA_HW_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index d98a707a5eb9..b119a3413a15 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <linux/module.h> #include <linux/idr.h> @@ -46,7 +61,7 @@ MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); MODULE_AUTHOR("Emulex Corporation"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("Dual BSD/GPL"); static LIST_HEAD(ocrdma_dev_list); static DEFINE_SPINLOCK(ocrdma_devlist_lock); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 02ad0aee99af..80006b24aa11 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
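The relicensing of these headers is paired with the MODULE_LICENSE() change in ocrdma_main.c above: that tag string is what the module loader uses to decide whether the module may bind to EXPORT_SYMBOL_GPL symbols and whether loading it taints the kernel, and "Dual BSD/GPL" is one of the recognized GPL-compatible tags. A minimal sketch of such a module metadata block, with a hypothetical driver description:

#include <linux/module.h>

/* Hypothetical example module; only the license tag matters here. */
MODULE_DESCRIPTION("Example dual-licensed driver");
MODULE_AUTHOR("Example Author");
MODULE_VERSION("0.1");
MODULE_LICENSE("Dual BSD/GPL");    /* kept in step with the file's license header */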
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_SLI_H__ #define __OCRDMA_SLI_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 48d7ef51aa0c..69334e214571 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2014 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <rdma/ib_addr.h> #include <rdma/ib_pma.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h index 091edd68a8a3..c9e58d04c7b8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2014 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_STATS_H__ #define __OCRDMA_STATS_H__ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 5bb61eb58f2c..bc84cd462ecf 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #include <linux/dma-mapping.h> #include <rdma/ib_verbs.h> diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b15c608efa7b..eaccb2d3cb9f 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -1,21 +1,36 @@ -/******************************************************************* - * This file is part of the Emulex RoCE Device Driver for * - * RoCE (RDMA over Converged Ethernet) adapters. * - * Copyright (C) 2008-2012 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * +/* This file is part of the Emulex RoCE Device Driver for + * RoCE (RDMA over Converged Ethernet) adapters. + * Copyright (C) 2012-2015 Emulex. All rights reserved. + * EMULEX and SLI are trademarks of Emulex. + * www.emulex.com + * + * This software is available to you under a choice of one of two licenses. + * You may choose to be licensed under the terms of the GNU General Public + * License (GPL) Version 2, available from the file COPYING in the main + * directory of this source tree, or the BSD license below: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * - Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * - Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Contact Information: * linux-drivers@emulex.com @@ -23,7 +38,7 @@ * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 - *******************************************************************/ + */ #ifndef __OCRDMA_VERBS_H__ #define __OCRDMA_VERBS_H__ diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 9e6ee82a8fd7..851c8219d501 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c @@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) else size += ipoib_recvq_size * ipoib_max_conn_qp; } else - goto out_free_wq; + if (ret != -ENOSYS) + goto out_free_wq; cq_attr.cqe = size; priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 771700963127..d851e1828d6f 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) ret = isert_rdma_post_recvl(isert_conn); if (ret) goto out_conn_dev; + /* + * Obtain the second reference now before isert_rdma_accept() to + * ensure that any initiator generated REJECT CM event that occurs + * asynchronously won't drop the last reference until the error path + * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() -> + * isert_free_conn() -> isert_put_conn() -> kref_put(). 
+ */ + if (!kref_get_unless_zero(&isert_conn->kref)) { + isert_warn("conn %p connect_release is running\n", isert_conn); + goto out_conn_dev; + } ret = isert_rdma_accept(isert_conn); if (ret) @@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id) isert_info("conn %p\n", isert_conn); - if (!kref_get_unless_zero(&isert_conn->kref)) { - isert_warn("conn %p connect_release is running\n", isert_conn); - return; - } - mutex_lock(&isert_conn->mutex); if (isert_conn->state != ISER_CONN_FULL_FEATURE) isert_conn->state = ISER_CONN_UP; diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c index 074a65ed17bb..766bf2660116 100644 --- a/drivers/input/input-leds.c +++ b/drivers/input/input-leds.c @@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type, { } +static int input_leds_get_count(struct input_dev *dev) +{ + unsigned int led_code; + int count = 0; + + for_each_set_bit(led_code, dev->ledbit, LED_CNT) + if (input_led_info[led_code].name) + count++; + + return count; +} + static int input_leds_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) @@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler, int led_no; int error; - num_leds = bitmap_weight(dev->ledbit, LED_CNT); + num_leds = input_leds_get_count(dev); if (!num_leds) return -ENXIO; @@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler, led->handle = &leds->handle; led->code = led_code; - if (WARN_ON(!input_led_info[led_code].name)) + if (!input_led_info[led_code].name) continue; led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index b10709f04615..30e3442518f8 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -2,6 +2,7 @@ * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver * * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) + * Copyright (C) 2015 John Horan (knasher@gmail.com) * * The USB initialization and package decoding was made by * Scott Shawcroft as part of the touchd user-space driver project: @@ -91,6 +92,10 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 +/* MacbookPro12,1 (2015) */ +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273 +#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ @@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), + /* MacbookPro12,1 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), /* Terminating entry */ {} }; @@ -180,21 +189,47 @@ struct bt_data { enum tp_type { TYPE1, /* plain trackpad */ TYPE2, /* button integrated in trackpad */ - TYPE3 /* additional header fields since June 2013 */ + TYPE3, /* additional header fields since June 2013 */ + TYPE4 /* additional header field for pressure data */ }; /* trackpad finger data offsets, le16-aligned */ -#define FINGER_TYPE1 (13 * sizeof(__le16)) -#define FINGER_TYPE2 (15 * sizeof(__le16)) -#define FINGER_TYPE3 (19 * sizeof(__le16)) 
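The ib_isert change above takes the second connection reference with kref_get_unless_zero() before calling isert_rdma_accept(), so an initiator-generated REJECT that races with connection setup cannot drop the last reference out from under the accept path. A minimal, self-contained sketch of that guard pattern, using hypothetical object and function names rather than the driver's:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct ex_conn {
        struct kref kref;
        /* ... connection state ... */
};

static void ex_conn_release(struct kref *kref)
{
        struct ex_conn *c = container_of(kref, struct ex_conn, kref);

        kfree(c);                       /* last reference dropped: tear down */
}

static void ex_conn_put(struct ex_conn *c)
{
        kref_put(&c->kref, ex_conn_release);
}

/*
 * Take an extra reference only if the object is not already on its way to
 * ex_conn_release(); the caller backs off instead of touching a dying
 * connection.
 */
static int ex_conn_begin_accept(struct ex_conn *c)
{
        if (!kref_get_unless_zero(&c->kref))
                return -EAGAIN;

        /* safe to let asynchronous CM events see the connection now */
        return 0;
}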
+#define HEADER_TYPE1 (13 * sizeof(__le16)) +#define HEADER_TYPE2 (15 * sizeof(__le16)) +#define HEADER_TYPE3 (19 * sizeof(__le16)) +#define HEADER_TYPE4 (23 * sizeof(__le16)) /* trackpad button data offsets */ +#define BUTTON_TYPE1 0 #define BUTTON_TYPE2 15 #define BUTTON_TYPE3 23 +#define BUTTON_TYPE4 31 /* list of device capability bits */ #define HAS_INTEGRATED_BUTTON 1 +/* trackpad finger data block size */ +#define FSIZE_TYPE1 (14 * sizeof(__le16)) +#define FSIZE_TYPE2 (14 * sizeof(__le16)) +#define FSIZE_TYPE3 (14 * sizeof(__le16)) +#define FSIZE_TYPE4 (15 * sizeof(__le16)) + +/* offset from header to finger struct */ +#define DELTA_TYPE1 (0 * sizeof(__le16)) +#define DELTA_TYPE2 (0 * sizeof(__le16)) +#define DELTA_TYPE3 (0 * sizeof(__le16)) +#define DELTA_TYPE4 (1 * sizeof(__le16)) + +/* usb control message mode switch data */ +#define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8 +#define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8 +#define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8 +#define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0 + +/* Wellspring initialization constants */ +#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 +#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 + /* trackpad finger structure, le16-aligned */ struct tp_finger { __le16 origin; /* zero when switching track finger */ @@ -207,14 +242,13 @@ struct tp_finger { __le16 orientation; /* 16384 when point, else 15 bit angle */ __le16 touch_major; /* touch area, major axis */ __le16 touch_minor; /* touch area, minor axis */ - __le16 unused[3]; /* zeros */ + __le16 unused[2]; /* zeros */ + __le16 pressure; /* pressure on forcetouch touchpad */ __le16 multi; /* one finger: varies, more fingers: constant */ } __attribute__((packed,aligned(2))); /* trackpad finger data size, empirically at least ten fingers */ #define MAX_FINGERS 16 -#define SIZEOF_FINGER sizeof(struct tp_finger) -#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER) #define MAX_FINGER_ORIENTATION 16384 /* device-specific parameters */ @@ -232,8 +266,17 @@ struct bcm5974_config { int bt_datalen; /* data length of the button interface */ int tp_ep; /* the endpoint of the trackpad interface */ enum tp_type tp_type; /* type of trackpad interface */ - int tp_offset; /* offset to trackpad finger data */ + int tp_header; /* bytes in header block */ int tp_datalen; /* data length of the trackpad interface */ + int tp_button; /* offset to button data */ + int tp_fsize; /* bytes in single finger block */ + int tp_delta; /* offset from header to finger struct */ + int um_size; /* usb control message length */ + int um_req_val; /* usb control message value */ + int um_req_idx; /* usb control message index */ + int um_switch_idx; /* usb control message mode switch index */ + int um_switch_on; /* usb control message mode switch on */ + int um_switch_off; /* usb control message mode switch off */ struct bcm5974_param p; /* finger pressure limits */ struct bcm5974_param w; /* finger width limits */ struct bcm5974_param x; /* horizontal limits */ @@ -259,6 +302,24 @@ struct bcm5974 { int slots[MAX_FINGERS]; /* slot assignments */ }; +/* trackpad finger block data, le16-aligned */ +static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i) +{ + const struct bcm5974_config *c = &dev->cfg; + u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta; + + return (const struct tp_finger *)(f_base + i * c->tp_fsize); +} + +#define DATAFORMAT(type) \ + type, \ + HEADER_##type, \ + HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \ + BUTTON_##type, \ + FSIZE_##type, \ + 
DELTA_##type, \ + USBMSG_##type + /* logical signal quality */ #define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ #define SN_WIDTH 25 /* width signal-to-noise ratio */ @@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 0, 0x84, sizeof(struct bt_data), - 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE1), { SN_PRESSURE, 0, 256 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4824, 5342 }, @@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 0, 0x84, sizeof(struct bt_data), - 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE1), { SN_PRESSURE, 0, 256 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4824, 4824 }, @@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4460, 5166 }, @@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, @@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4616, 5112 }, @@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4415, 5050 }, @@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, @@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, @@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, @@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, @@ -403,7 +464,7 @@ static const 
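With the DATAFORMAT() entries above, each bcm5974 report format boils down to three numbers: bytes of header, bytes per finger block, and a delta from the header to the finger struct, so get_tp_finger() resolves finger i at header + delta + i * fsize. A plain, userspace-style C sketch of that arithmetic with made-up TYPE4-like sizes (illustrative only, not the driver's tables):

#include <stddef.h>
#include <stdint.h>

struct ex_finger {                      /* stand-in for struct tp_finger */
        uint16_t abs_x;
        uint16_t abs_y;
        /* ... remaining le16 fields ... */
};

struct ex_layout {
        size_t header;                  /* bytes before any finger data */
        size_t delta;                   /* header-to-finger-struct offset */
        size_t fsize;                   /* stride of one finger block */
};

/* TYPE4-like layout: 23-word header, 1-word delta, 15-word finger blocks */
static const struct ex_layout ex_type4 = {
        .header = 23 * sizeof(uint16_t),
        .delta  = 1 * sizeof(uint16_t),
        .fsize  = 15 * sizeof(uint16_t),
};

static const struct ex_finger *ex_get_finger(const uint8_t *report,
                                             const struct ex_layout *l, int i)
{
        return (const struct ex_finger *)(report + l->header + l->delta +
                                          (size_t)i * l->fsize);
}

/* number of complete finger blocks carried by a report of 'size' bytes */
static int ex_finger_count(size_t size, const struct ex_layout *l)
{
        if (size < l->header || (size - l->header) % l->fsize)
                return -1;              /* malformed report */
        return (int)((size - l->header) / l->fsize);
}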
struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), - 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + 0x81, DATAFORMAT(TYPE2), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4750, 5280 }, @@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = { USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, HAS_INTEGRATED_BUTTON, 0, sizeof(struct bt_data), - 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS, + 0x83, DATAFORMAT(TYPE3), { SN_PRESSURE, 0, 300 }, { SN_WIDTH, 0, 2048 }, { SN_COORD, -4620, 5140 }, { SN_COORD, -150, 6600 }, { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING9_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING9_JIS, + HAS_INTEGRATED_BUTTON, + 0, sizeof(struct bt_data), + 0x83, DATAFORMAT(TYPE4), + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4828, 5345 }, + { SN_COORD, -203, 6803 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } + }, {} }; @@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size) struct input_dev *input = dev->input; int raw_n, i, n = 0; - if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) + if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0) return -EIO; - /* finger data, le16-aligned */ - f = (const struct tp_finger *)(dev->tp_data + c->tp_offset); - raw_n = (size - c->tp_offset) / SIZEOF_FINGER; + raw_n = (size - c->tp_header) / c->tp_fsize; for (i = 0; i < raw_n; i++) { - if (raw2int(f[i].touch_major) == 0) + f = get_tp_finger(dev, i); + if (raw2int(f->touch_major) == 0) continue; - dev->pos[n].x = raw2int(f[i].abs_x); - dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y); - dev->index[n++] = &f[i]; + dev->pos[n].x = raw2int(f->abs_x); + dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y); + dev->index[n++] = f; } input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); @@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size) input_mt_sync_frame(input); - report_synaptics_data(input, c, f, raw_n); + report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n); - /* type 2 reports button events via ibt only */ - if (c->tp_type == TYPE2) { - int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); + /* later types report button events via integrated button only */ + if (c->caps & HAS_INTEGRATED_BUTTON) { + int ibt = raw2int(dev->tp_data[c->tp_button]); input_report_key(input, BTN_LEFT, ibt); } - if (c->tp_type == TYPE3) - input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]); - input_sync(input); return 0; } -/* Wellspring initialization constants */ -#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1 -#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9 -#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300 -#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0 -#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01 -#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08 - static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) { + const struct bcm5974_config *c = &dev->cfg; int retval = 0, size; char *data; @@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) if (dev->cfg.tp_type == TYPE3) return 0; - data = kmalloc(8, GFP_KERNEL); + data = kmalloc(c->um_size, GFP_KERNEL); if (!data) { dev_err(&dev->intf->dev, "out of memory\n"); retval = -ENOMEM; @@ -616,28 +679,24 @@ static int 
bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - BCM5974_WELLSPRING_MODE_REQUEST_VALUE, - BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); + c->um_req_val, c->um_req_idx, data, c->um_size, 5000); - if (size != 8) { + if (size != c->um_size) { dev_err(&dev->intf->dev, "could not read from device\n"); retval = -EIO; goto out; } /* apply the mode switch */ - data[0] = on ? - BCM5974_WELLSPRING_MODE_VENDOR_VALUE : - BCM5974_WELLSPRING_MODE_NORMAL_VALUE; + data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off; /* write configuration */ size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - BCM5974_WELLSPRING_MODE_REQUEST_VALUE, - BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000); + c->um_req_val, c->um_req_idx, data, c->um_size, 5000); - if (size != 8) { + if (size != c->um_size) { dev_err(&dev->intf->dev, "could not write to device\n"); retval = -EIO; goto out; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index ce3d40004458..22b9ca901f4e 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) struct input_dev *dev = psmouse->dev; struct elantech_data *etd = psmouse->private; unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; - unsigned int x_res = 0, y_res = 0; + unsigned int x_res = 31, y_res = 31; if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) return -1; @@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) /* For X to recognize me as touchpad. */ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); - input_abs_set_res(dev, ABS_X, x_res); - input_abs_set_res(dev, ABS_Y, y_res); /* * range of pressure and width is the same as v2, * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. @@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); - input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); - input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, ETP_PMAX_V2, 0, 0); /* @@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse) break; } + input_abs_set_res(dev, ABS_X, x_res); + input_abs_set_res(dev, ABS_Y, y_res); + if (etd->hw_version > 1) { + input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); + input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); + } + etd->y_max = y_max; etd->width = width; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 3a32caf06bf1..6025eb430c0a 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? 
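bcm5974_wellspring_mode() above now drives the mode switch entirely from the per-type um_* fields: read um_size bytes with a class control transfer, patch the single byte at um_switch_idx, and write the buffer back. A hedged sketch of that read-modify-write shape; the request IDs, value, index and size below are example numbers, the real ones come from the USBMSG_TYPE* macros:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define EX_READ_REQ     1               /* example request IDs and layout */
#define EX_WRITE_REQ    9
#define EX_REQ_VAL      0x300
#define EX_REQ_IDX      0
#define EX_MSG_SIZE     8
#define EX_SWITCH_IDX   0
#define EX_MODE_ON      0x01
#define EX_MODE_OFF     0x08

static int ex_set_mode(struct usb_device *udev, bool on)
{
        char *data;
        int ret = 0, size;

        data = kmalloc(EX_MSG_SIZE, GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        /* read the current mode bytes */
        size = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), EX_READ_REQ,
                               USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                               EX_REQ_VAL, EX_REQ_IDX, data, EX_MSG_SIZE, 5000);
        if (size != EX_MSG_SIZE) {
                ret = -EIO;
                goto out;
        }

        /* patch only the switch byte, preserving everything else */
        data[EX_SWITCH_IDX] = on ? EX_MODE_ON : EX_MODE_OFF;

        /* write the whole buffer back */
        size = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), EX_WRITE_REQ,
                               USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                               EX_REQ_VAL, EX_REQ_IDX, data, EX_MSG_SIZE, 5000);
        if (size != EX_MSG_SIZE)
                ret = -EIO;
out:
        kfree(data);
        return ret;
}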
SYN_NEWABS : SYN_OLDABS; psmouse_info(psmouse, - "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n", + "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n", SYN_ID_MODEL(priv->identity), SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), priv->model_id, priv->capabilities, priv->ext_cap, priv->ext_cap_0c, - priv->board_id, priv->firmware_id); + priv->ext_cap_10, priv->board_id, priv->firmware_id); set_input_params(psmouse, priv); diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index b4d12e29abff..e36162b28c2a 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -15,6 +15,7 @@ */ #include <linux/kernel.h> +#include <linux/dmi.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/input/mt.h> @@ -34,6 +35,7 @@ struct goodix_ts_data { int abs_y_max; unsigned int max_touch_num; unsigned int int_trigger_type; + bool rotated_screen; }; #define GOODIX_MAX_HEIGHT 4096 @@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = { IRQ_TYPE_LEVEL_HIGH, }; +/* + * Those tablets have their coordinates origin at the bottom right + * of the tablet, as if rotated 180 degrees + */ +static const struct dmi_system_id rotated_screen[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "WinBook TW100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW100") + } + }, + { + .ident = "WinBook TW700", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW700") + }, + }, +#endif + {} +}; + /** * goodix_i2c_read - read data from a register of the i2c slave device. * @@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) int input_y = get_unaligned_le16(&coor_data[3]); int input_w = get_unaligned_le16(&coor_data[5]); + if (ts->rotated_screen) { + input_x = ts->abs_x_max - input_x; + input_y = ts->abs_y_max - input_y; + } + input_mt_slot(ts->input_dev, id); input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); @@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts) ts->abs_y_max = GOODIX_MAX_HEIGHT; ts->max_touch_num = GOODIX_MAX_CONTACTS; } + + ts->rotated_screen = dmi_check_system(rotated_screen); + if (ts->rotated_screen) + dev_dbg(&ts->client->dev, + "Applying '180 degrees rotated screen' quirk\n"); } /** diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index f2c6c352c55a..2c41107240de 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) goto err_out; } + /* TSC-25 data sheet specifies a delay after the RESET command */ + msleep(150); + /* set coordinate output rate */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index a57e9b749895..658ee39e6569 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -76,8 +76,6 @@ LIST_HEAD(hpet_map); * Domain for untranslated devices - only allocated * if iommu=pt passed on kernel cmd line. 
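The goodix change above keys its 180-degree rotation quirk off a dmi_system_id table checked once at probe time, then mirrors both axes against the advertised maxima on every reported touch. A minimal sketch of that detect-and-flip pattern with a hypothetical board entry:

#include <linux/dmi.h>
#include <linux/kernel.h>

static const struct dmi_system_id ex_rotated_boards[] = {
        {
                .ident = "Example Tablet",              /* hypothetical entry */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ExampleBoard"),
                },
        },
        { }
};

/* probe time: cache the answer, dmi_check_system() is not free */
static bool ex_screen_is_rotated(void)
{
        return dmi_check_system(ex_rotated_boards) != 0;
}

/* report time: mirror the point when the cached flag says so */
static void ex_fixup_point(int *x, int *y, int x_max, int y_max, bool rotated)
{
        if (rotated) {
                *x = x_max - *x;
                *y = y_max - *y;
        }
}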
*/ -static struct protection_domain *pt_domain; - static const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); @@ -96,7 +94,7 @@ struct iommu_dev_data { struct protection_domain *domain; /* Domain the device is bound to */ u16 devid; /* PCI Device ID */ bool iommu_v2; /* Device can make use of IOMMUv2 */ - bool passthrough; /* Default for device is pt_domain */ + bool passthrough; /* Device is identity mapped */ struct { bool enabled; int qdep; @@ -116,7 +114,6 @@ struct iommu_cmd { struct kmem_cache *amd_iommu_irq_cache; static void update_domain(struct protection_domain *domain); -static int alloc_passthrough_domain(void); static int protection_domain_init(struct protection_domain *domain); /**************************************************************************** @@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev, dev_data = get_dev_data(dev); if (domain->flags & PD_IOMMUV2_MASK) { - if (!dev_data->iommu_v2 || !dev_data->passthrough) + if (!dev_data->passthrough) return -EINVAL; - if (pdev_iommuv2_enable(pdev) != 0) - return -EINVAL; + if (dev_data->iommu_v2) { + if (pdev_iommuv2_enable(pdev) != 0) + return -EINVAL; - dev_data->ats.enabled = true; - dev_data->ats.qdep = pci_ats_queue_depth(pdev); - dev_data->pri_tlp = pci_pri_tlp_required(pdev); + dev_data->ats.enabled = true; + dev_data->ats.qdep = pci_ats_queue_depth(pdev); + dev_data->pri_tlp = pci_pri_tlp_required(pdev); + } } else if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) { dev_data->ats.enabled = true; @@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data) do_detach(head); spin_unlock_irqrestore(&domain->lock, flags); - - /* - * If we run in passthrough mode the device must be assigned to the - * passthrough domain if it is detached from any other domain. - * Make sure we can deassign from the pt_domain itself. - */ - if (dev_data->passthrough && - (dev_data->domain == NULL && domain != pt_domain)) - __attach_device(dev_data, pt_domain); } /* @@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev) __detach_device(dev_data); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - if (domain->flags & PD_IOMMUV2_MASK) + if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) pdev_iommuv2_disable(to_pci_dev(dev)); else if (dev_data->ats.enabled) pci_disable_ats(to_pci_dev(dev)); @@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev) BUG_ON(!dev_data); - if (dev_data->iommu_v2) + if (iommu_pass_through || dev_data->iommu_v2) iommu_request_dm_for_dev(dev); /* Domains are initialized for this device - have a look what we ended up with */ domain = iommu_get_domain_for_dev(dev); - if (domain->type == IOMMU_DOMAIN_IDENTITY) { + if (domain->type == IOMMU_DOMAIN_IDENTITY) dev_data->passthrough = true; - dev->archdata.dma_ops = &nommu_dma_ops; - } else { + else dev->archdata.dma_ops = &amd_iommu_dma_ops; - } out: iommu_completion_wait(iommu); @@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void) int __init amd_iommu_init_dma_ops(void) { + swiotlb = iommu_pass_through ? 1 : 0; iommu_detected = 1; - swiotlb = 0; + + /* + * In case we don't initialize SWIOTLB (actually the common case + * when AMD IOMMU is enabled), make sure there are global + * dma_ops set as a fall-back for devices not handled by this + * driver (for example non-PCI devices). 
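With pt_domain gone, the AMD IOMMU driver above asks the core for a direct mapping when global passthrough is requested or the device is IOMMUv2 capable, then records whether the default domain it got back is an identity domain; only translated devices are switched to the IOMMU-backed DMA ops. A rough sketch of that policy check, with illustrative helper names:

#include <linux/device.h>
#include <linux/iommu.h>

/* Should this device be direct-mapped rather than translated? */
static bool ex_want_direct_mapping(bool global_passthrough, bool device_is_v2)
{
        return global_passthrough || device_is_v2;
}

/* Did the core actually give the device an identity default domain? */
static bool ex_is_identity_mapped(struct device *dev)
{
        struct iommu_domain *dom = iommu_get_domain_for_dev(dev);

        return dom && dom->type == IOMMU_DOMAIN_IDENTITY;
}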
+ */ + if (!swiotlb) + dma_ops = &nommu_dma_ops; amd_iommu_stats_init(); @@ -2947,21 +2944,6 @@ out_err: return NULL; } -static int alloc_passthrough_domain(void) -{ - if (pt_domain != NULL) - return 0; - - /* allocate passthrough domain */ - pt_domain = protection_domain_alloc(); - if (!pt_domain) - return -ENOMEM; - - pt_domain->mode = PAGE_MODE_NONE; - - return 0; -} - static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) { struct protection_domain *pdomain; @@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = { * *****************************************************************************/ -int __init amd_iommu_init_passthrough(void) -{ - struct iommu_dev_data *dev_data; - struct pci_dev *dev = NULL; - int ret; - - ret = alloc_passthrough_domain(); - if (ret) - return ret; - - for_each_pci_dev(dev) { - if (!check_device(&dev->dev)) - continue; - - dev_data = get_dev_data(&dev->dev); - dev_data->passthrough = true; - - attach_device(&dev->dev, pt_domain); - } - - amd_iommu_stats_init(); - - pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); - - return 0; -} - /* IOMMUv2 specific functions */ int amd_iommu_register_ppr_notifier(struct notifier_block *nb) { @@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid, struct amd_iommu *iommu; int qdep; - BUG_ON(!dev_data->ats.enabled); + /* + There might be non-IOMMUv2 capable devices in an IOMMUv2 + * domain. + */ + if (!dev_data->ats.enabled) + continue; qdep = dev_data->ats.qdep; iommu = amd_iommu_rlookup_table[dev_data->devid]; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index dbda9ae68c5d..a24495eb4e26 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -2026,14 +2026,6 @@ static bool detect_ivrs(void) return true; } -static int amd_iommu_init_dma(void) -{ - if (iommu_pass_through) - return amd_iommu_init_passthrough(); - else - return amd_iommu_init_dma_ops(); -} - /**************************************************************************** * * AMD IOMMU Initialization State Machine @@ -2073,7 +2065,7 @@ static int __init state_next(void) init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; break; case IOMMU_INTERRUPTS_EN: - ret = amd_iommu_init_dma(); + ret = amd_iommu_init_dma_ops(); init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; break; case IOMMU_DMA_OPS: diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 3465faf1809e..f7b875bb70d4 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid) static void free_device_state(struct device_state *dev_state) { + struct iommu_group *group; + /* * First detach device from domain - No more PRI requests will arrive * from that device after it is unbound from the IOMMUv2 domain. 
*/ - iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); + group = iommu_group_get(&dev_state->pdev->dev); + if (WARN_ON(!group)) + return; + + iommu_detach_group(dev_state->domain, group); + + iommu_group_put(group); /* Everything is down now, free the IOMMUv2 domain */ iommu_domain_free(dev_state->domain); @@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid); int amd_iommu_init_device(struct pci_dev *pdev, int pasids) { struct device_state *dev_state; + struct iommu_group *group; unsigned long flags; int ret, tmp; u16 devid; @@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) if (ret) goto out_free_domain; - ret = iommu_attach_device(dev_state->domain, &pdev->dev); - if (ret != 0) + group = iommu_group_get(&pdev->dev); + if (!group) goto out_free_domain; + ret = iommu_attach_group(dev_state->domain, group); + if (ret != 0) + goto out_drop_group; + + iommu_group_put(group); + spin_lock_irqsave(&state_lock, flags); if (__get_device_state(devid) != NULL) { @@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids) return 0; +out_drop_group: + iommu_group_put(group); + out_free_domain: iommu_domain_free(dev_state->domain); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8e9ec81ce4bb..da902baaa794 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -199,9 +199,10 @@ * Stream table. * * Linear: Enough to cover 1 << IDR1.SIDSIZE entries - * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) + * 2lvl: 128k L1 entries, + * 256 lazy entries per table (each table covers a PCI bus) */ -#define STRTAB_L1_SZ_SHIFT 16 +#define STRTAB_L1_SZ_SHIFT 20 #define STRTAB_SPLIT 8 #define STRTAB_L1_DESC_DWORDS 1 @@ -269,10 +270,10 @@ #define ARM64_TCR_TG0_SHIFT 14 #define ARM64_TCR_TG0_MASK 0x3UL #define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 -#define ARM64_TCR_IRGN0_SHIFT 24 +#define ARM64_TCR_IRGN0_SHIFT 8 #define ARM64_TCR_IRGN0_MASK 0x3UL #define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 -#define ARM64_TCR_ORGN0_SHIFT 26 +#define ARM64_TCR_ORGN0_SHIFT 10 #define ARM64_TCR_ORGN0_MASK 0x3UL #define CTXDESC_CD_0_TCR_SH0_SHIFT 12 #define ARM64_TCR_SH0_SHIFT 12 @@ -542,6 +543,9 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_HYP (1 << 12) u32 features; +#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) + u32 options; + struct arm_smmu_cmdq cmdq; struct arm_smmu_evtq evtq; struct arm_smmu_priq priq; @@ -602,11 +606,35 @@ struct arm_smmu_domain { static DEFINE_SPINLOCK(arm_smmu_devices_lock); static LIST_HEAD(arm_smmu_devices); +struct arm_smmu_option_prop { + u32 opt; + const char *prop; +}; + +static struct arm_smmu_option_prop arm_smmu_options[] = { + { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, + { 0, NULL}, +}; + static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) { return container_of(dom, struct arm_smmu_domain, domain); } +static void parse_driver_options(struct arm_smmu_device *smmu) +{ + int i = 0; + + do { + if (of_property_read_bool(smmu->dev->of_node, + arm_smmu_options[i].prop)) { + smmu->options |= arm_smmu_options[i].opt; + dev_notice(smmu->dev, "option %s\n", + arm_smmu_options[i].prop); + } + } while (arm_smmu_options[++i].opt); +} + /* Low-level queue manipulation functions */ static bool queue_full(struct arm_smmu_queue *q) { @@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, arm_smmu_sync_ste_for_sid(smmu, sid); /* It's likely that we'll want to use the new STE soon */ - 
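amd_iommu_v2 now binds and releases its IOMMUv2 domain through the device's IOMMU group instead of the device itself, so every path must pair iommu_group_get() with iommu_group_put(). A condensed sketch of that attach/detach pairing; the surrounding driver state and error labels are left out:

#include <linux/bug.h>
#include <linux/iommu.h>
#include <linux/pci.h>

static int ex_attach_v2_domain(struct iommu_domain *domain,
                               struct pci_dev *pdev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(&pdev->dev);
        if (!group)
                return -ENODEV;

        ret = iommu_attach_group(domain, group);
        iommu_group_put(group);         /* drop the lookup reference either way */

        return ret;
}

static void ex_detach_v2_domain(struct iommu_domain *domain,
                                struct pci_dev *pdev)
{
        struct iommu_group *group = iommu_group_get(&pdev->dev);

        if (WARN_ON(!group))
                return;

        iommu_detach_group(domain, group);
        iommu_group_put(group);
}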
arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); } static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) @@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) return 0; size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); - strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; + strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; desc->span = STRTAB_SPLIT + 1; desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, @@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) { void *strtab; u64 reg; - u32 size; + u32 size, l1size; int ret; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; /* Calculate the L1 size, capped to the SIDSIZE */ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); size = min(size, smmu->sid_bits - STRTAB_SPLIT); - if (size + STRTAB_SPLIT < smmu->sid_bits) + cfg->num_l1_ents = 1 << size; + + size += STRTAB_SPLIT; + if (size < smmu->sid_bits) dev_warn(smmu->dev, "2-level strtab only covers %u/%u bits of SID\n", - size + STRTAB_SPLIT, smmu->sid_bits); + size, smmu->sid_bits); - cfg->num_l1_ents = 1 << size; - size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); - strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma, + l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); + strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, GFP_KERNEL); if (!strtab) { dev_err(smmu->dev, @@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) ret = arm_smmu_init_l1_strtab(smmu); if (ret) dma_free_coherent(smmu->dev, - cfg->num_l1_ents * - (STRTAB_L1_DESC_DWORDS << 3), + l1size, strtab, cfg->strtab_dma); return ret; @@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) if (irq > 0) smmu->gerr_irq = irq; + parse_driver_options(smmu); + /* Probe the h/w */ ret = arm_smmu_device_probe(smmu); if (ret) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a98a7b27aca1..0649b94f5958 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width) static void domain_exit(struct dmar_domain *domain) { + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; struct page *freelist = NULL; - int i; /* Domain 0 is reserved, so dont process it */ if (!domain) @@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain) /* clear attached or cached domains */ rcu_read_lock(); - for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) - iommu_detach_domain(domain, g_iommus[i]); + for_each_active_iommu(iommu, drhd) + if (domain_type_is_vm(domain) || + test_bit(iommu->seq_id, domain->iommu_bmp)) + iommu_detach_domain(domain, iommu); rcu_read_unlock(); dma_free_pagelist(freelist); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index b59727309072..bfec3bdfe598 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -259,7 +259,7 @@ config DM_CRYPT the ciphers you're going to use in the cryptoapi configuration. For further information on dm-crypt and userspace tools see: - <http://code.google.com/p/cryptsetup/wiki/DMCrypt> + <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt> To compile this code as a module, choose M here: the module will be called dm-crypt. 
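parse_driver_options() above is a plain table walk over boolean devicetree properties: each quirk property that is present sets one bit in smmu->options, and the STE prefetch command is then gated on ARM_SMMU_OPT_SKIP_PREFETCH. A small standalone sketch of the same pattern with hypothetical property names:

#include <linux/device.h>
#include <linux/of.h>

#define EX_OPT_SKIP_PREFETCH    (1 << 0)
#define EX_OPT_NO_MSI           (1 << 1)        /* hypothetical second quirk */

struct ex_option_prop {
        u32 opt;
        const char *prop;
};

static const struct ex_option_prop ex_options[] = {
        { EX_OPT_SKIP_PREFETCH, "vendor,broken-prefetch-cmd" }, /* hypothetical */
        { EX_OPT_NO_MSI,        "vendor,broken-msi" },          /* hypothetical */
        { 0, NULL },
};

static u32 ex_parse_options(struct device *dev)
{
        u32 options = 0;
        int i;

        for (i = 0; ex_options[i].opt; i++) {
                if (of_property_read_bool(dev->of_node, ex_options[i].prop)) {
                        options |= ex_options[i].opt;
                        dev_notice(dev, "option %s\n", ex_options[i].prop);
                }
        }

        return options;
}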
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ed2346ddf4c9..e51de52eeb94 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; - bitmap->storage.sb_page = alloc_page(GFP_KERNEL); + bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (bitmap->storage.sb_page == NULL) return -ENOMEM; bitmap->storage.sb_page->index = 0; @@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) sb->state = cpu_to_le32(bitmap->flags); bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->mddev->events); + bitmap->mddev->bitmap_info.nodes = 0; kunmap_atomic(sb); @@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) unsigned long sectors_reserved = 0; int err = -EINVAL; struct page *sb_page; + loff_t offset = bitmap->mddev->bitmap_info.offset; if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { chunksize = 128 * 1024 * 1024; @@ -584,9 +586,9 @@ re_read: bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); /* to 4k blocks */ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); - bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); + offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, - bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); + bitmap->cluster_slot, offset); } if (bitmap->storage.file) { @@ -597,7 +599,7 @@ re_read: bitmap, bytes, sb_page); } else { err = read_sb_page(bitmap->mddev, - bitmap->mddev->bitmap_info.offset, + offset, sb_page, 0, sizeof(bitmap_super_t)); } @@ -611,8 +613,16 @@ re_read: daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); - nodes = le32_to_cpu(sb->nodes); - strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); + /* XXX: This is a hack to ensure that we don't use clustering + * in case: + * - dm-raid is in use and + * - the nodes written in bitmap_sb is erroneous. 
+ */ + if (!bitmap->mddev->sync_super) { + nodes = le32_to_cpu(sb->nodes); + strlcpy(bitmap->mddev->bitmap_info.cluster_name, + sb->cluster_name, 64); + } /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) @@ -671,7 +681,7 @@ out: kunmap_atomic(sb); /* Assiging chunksize is required for "re_read" */ bitmap->mddev->bitmap_info.chunksize = chunksize; - if (nodes && (bitmap->cluster_slot < 0)) { + if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { err = md_setup_cluster(bitmap->mddev, nodes); if (err) { pr_err("%s: Could not setup cluster service (%d)\n", @@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot, if (IS_ERR(bitmap)) return PTR_ERR(bitmap); - rv = bitmap_read_sb(bitmap); - if (rv) - goto err; - rv = bitmap_init_from_disk(bitmap, 0); if (rv) goto err; diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index b6f22651dd35..48a4a826ae07 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c @@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, if (from_cblock(cache_size)) { mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); - if (!mq->cache_hit_bits && mq->cache_hit_bits) { + if (!mq->cache_hit_bits) { DMERR("couldn't allocate cache hit bitset"); goto bad_cache_hit_bits; } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index b680da5d7b93..1fe93cfea7d3 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg) wake_up(&cache->migration_wait); mempool_free(mg, cache->migration_pool); + wake_worker(cache); } static int prealloc_data_structs(struct cache *cache, struct prealloc *p) @@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache) * this bio might require one, we pause until there are some * prepared mappings to process. */ + prealloc_used = true; if (prealloc_data_structs(cache, &structs)) { spin_lock_irqsave(&cache->lock, flags); bio_list_merge(&cache->deferred_bios, &bios); @@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache) process_discard_bio(cache, &structs, bio); else process_bio(cache, &structs, bio); - prealloc_used = true; } if (prealloc_used) @@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache) * this bio might require one, we pause until there are some * prepared mappings to process. 
*/ + prealloc_used = true; if (prealloc_data_structs(cache, &structs)) { spin_lock_irqsave(&cache->lock, flags); list_splice(&cells, &cache->deferred_cells); @@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache) } process_cell(cache, &structs, cell); - prealloc_used = true; } if (prealloc_used) @@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache) if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) break; /* no work to do */ + prealloc_used = true; if (prealloc_data_structs(cache, &structs) || get_cell(cache, oblock, &structs, &old_ocell)) { policy_set_dirty(cache->policy, oblock); @@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache) } writeback(cache, &structs, oblock, cblock, old_ocell); - prealloc_used = true; } if (prealloc_used) diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1c50c580215c..d2bbe8cc1e97 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -666,16 +666,21 @@ static void requeue_io(struct thin_c *tc) requeue_deferred_cells(tc); } -static void error_retry_list(struct pool *pool) +static void error_retry_list_with_code(struct pool *pool, int error) { struct thin_c *tc; rcu_read_lock(); list_for_each_entry_rcu(tc, &pool->active_thins, list) - error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO); + error_thin_bio_list(tc, &tc->retry_on_resume_list, error); rcu_read_unlock(); } +static void error_retry_list(struct pool *pool) +{ + return error_retry_list_with_code(pool, -EIO); +} + /* * This section of code contains the logic for processing a thin device's IO. * Much of the code depends on pool object resources (lists, workqueues, etc) @@ -2297,7 +2302,7 @@ static void do_no_space_timeout(struct work_struct *ws) if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { pool->pf.error_if_no_space = true; notify_of_pool_mode_change_to_oods(pool); - error_retry_list(pool); + error_retry_list_with_code(pool, -ENOSPC); } } diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index fcfc4b9b2672..0072190515e0 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -44,6 +44,7 @@ struct resync_info { /* md_cluster_info flags */ #define MD_CLUSTER_WAITING_FOR_NEWDISK 1 +#define MD_CLUSTER_SUSPEND_READ_BALANCING 2 struct md_cluster_info { @@ -275,6 +276,9 @@ clear_bit: static void recover_prep(void *arg) { + struct mddev *mddev = arg; + struct md_cluster_info *cinfo = mddev->cluster_info; + set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); } static void recover_slot(void *arg, struct dlm_slot *slot) @@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots, cinfo->slot_number = our_slot; complete(&cinfo->completion); + clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); } static const struct dlm_lockspace_ops md_ls_ops = { @@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev) resync_send(mddev, RESYNCING, 0, 0); } -static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) +static int area_resyncing(struct mddev *mddev, int direction, + sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; int ret = 0; struct suspend_info *s; + if ((direction == READ) && + test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state)) + return 1; + spin_lock_irq(&cinfo->suspend_lock); if (list_empty(&cinfo->suspend_list)) goto out; diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index 6817ee00e053..00defe2badbc 100644 --- 
a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h @@ -18,7 +18,7 @@ struct md_cluster_operations { int (*metadata_update_start)(struct mddev *mddev); int (*metadata_update_finish)(struct mddev *mddev); int (*metadata_update_cancel)(struct mddev *mddev); - int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); + int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); int (*add_new_disk_finish)(struct mddev *mddev); int (*new_disk_ack)(struct mddev *mddev, bool ack); diff --git a/drivers/md/md.c b/drivers/md/md.c index d429c30cd514..0c2a4e8b873c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev) { struct md_personality *pers = mddev->pers; mddev_detach(mddev); + /* Ensure ->event_work is done */ + flush_workqueue(md_misc_wq); spin_lock(&mddev->lock); mddev->ready = 0; mddev->pers = NULL; @@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes) err = request_module("md-cluster"); if (err) { pr_err("md-cluster module not found.\n"); - return err; + return -ENOENT; } spin_lock(&pers_lock); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f80f1af61ce7..94f5b55069e0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error) spin_lock_irqsave(&conf->device_lock, flags); if (r1_bio->mddev->degraded == conf->raid_disks || (r1_bio->mddev->degraded == conf->raid_disks-1 && - !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) + test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) uptodate = 1; spin_unlock_irqrestore(&conf->device_lock, flags); } @@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if ((conf->mddev->recovery_cp < this_sector + sectors) || (mddev_is_clustered(conf->mddev) && - md_cluster_ops->area_resyncing(conf->mddev, this_sector, + md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, this_sector + sectors))) choose_first = 1; else @@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) ((bio_end_sector(bio) > mddev->suspend_lo && bio->bi_iter.bi_sector < mddev->suspend_hi) || (mddev_is_clustered(mddev) && - md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { + md_cluster_ops->area_resyncing(mddev, WRITE, + bio->bi_iter.bi_sector, bio_end_sector(bio))))) { /* As the suspend_* range is controlled by * userspace, we want an interruptible * wait. 
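The md-cluster and raid1 hunks above give area_resyncing() a direction argument and a MD_CLUSTER_SUSPEND_READ_BALANCING bit, so reads avoid read balancing while a peer node is being recovered. A simplified, single-threaded userspace model of that flag (not the md-cluster code; locking and the per-node suspend list are left out):

/*
 * Simplified userspace model, not the md-cluster code: the new flag that
 * suspends read balancing while a peer node is being recovered.
 * READ/WRITE just mirror the direction argument added above.
 */
#include <stdbool.h>
#include <stdio.h>

#define READ  0
#define WRITE 1

static bool suspend_read_balancing;	/* MD_CLUSTER_SUSPEND_READ_BALANCING */

static void recover_prep(void) { suspend_read_balancing = true; }
static void recover_done(void) { suspend_read_balancing = false; }

/* Nonzero means the caller must treat the range as resyncing. */
static int area_resyncing(int direction, long lo, long hi)
{
	(void)lo; (void)hi;	/* a real check would walk the suspend list */
	if (direction == READ && suspend_read_balancing)
		return 1;
	return 0;
}

int main(void)
{
	recover_prep();
	printf("READ  during recovery: %d\n", area_resyncing(READ, 0, 8));  /* 1 */
	printf("WRITE during recovery: %d\n", area_resyncing(WRITE, 0, 8)); /* 0 */
	recover_done();
	printf("READ  after recovery : %d\n", area_resyncing(READ, 0, 8));  /* 0 */
	return 0;
}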
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) if (bio_end_sector(bio) <= mddev->suspend_lo || bio->bi_iter.bi_sector >= mddev->suspend_hi || (mddev_is_clustered(mddev) && - !md_cluster_ops->area_resyncing(mddev, + !md_cluster_ops->area_resyncing(mddev, WRITE, bio->bi_iter.bi_sector, bio_end_sector(bio)))) break; schedule(); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 940f2f365461..38c58e19cfce 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) /* far_copies must be 1 */ conf->prev.stride = conf->dev_sectors; } + conf->reshape_safe = conf->reshape_progress; spin_lock_init(&conf->device_lock); INIT_LIST_HEAD(&conf->retry_list); @@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev) } conf->offset_diff = min_offset_diff; - conf->reshape_safe = conf->reshape_progress; clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); @@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev) conf->reshape_progress = size; } else conf->reshape_progress = 0; + conf->reshape_safe = conf->reshape_progress; spin_unlock_irq(&conf->device_lock); if (mddev->delta_disks && mddev->bitmap) { @@ -4170,6 +4171,7 @@ abort: rdev->new_data_offset = rdev->data_offset; smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; mddev->reshape_position = MaxSector; spin_unlock_irq(&conf->device_lock); return ret; @@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf) md_finish_reshape(conf->mddev); smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; spin_unlock_irq(&conf->device_lock); /* read-ahead size must cover two whole stripes, which is diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 59e44e99eef3..643d217bfa13 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize) if (!sc) return -ENOMEM; + /* Need to ensure auto-resizing doesn't interfere */ + mutex_lock(&conf->cache_size_mutex); + for (i = conf->max_nr_stripes; i; i--) { nsh = alloc_stripe(sc, GFP_KERNEL); if (!nsh) @@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) kmem_cache_free(sc, nsh); } kmem_cache_destroy(sc); + mutex_unlock(&conf->cache_size_mutex); return -ENOMEM; } /* Step 2 - Must use GFP_NOIO now. 
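The raid5 hunks in this commit introduce conf->cache_size_mutex: resize_stripes() and raid5_set_cache_size() take it outright, while raid5d and the cache shrinker only mutex_trylock() and back off, returning SHRINK_STOP instead of sleeping behind a resize. A minimal pthread sketch of that lock/trylock split (stand-in names and counters, not the raid5 code):

/*
 * Minimal pthread sketch, not the raid5 code: resize paths lock the mutex,
 * opportunistic paths only trylock and give up so they never sleep behind
 * a resize.  SHRINK_STOP and the stripe counter are stand-ins.
 */
#include <pthread.h>
#include <stdio.h>

#define SHRINK_STOP	(~0UL)

static pthread_mutex_t cache_size_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_stripes = 256;

static void set_cache_size(unsigned long size)	/* blocking path */
{
	pthread_mutex_lock(&cache_size_mutex);
	nr_stripes = size;			/* grow/drop loops elided */
	pthread_mutex_unlock(&cache_size_mutex);
}

static unsigned long cache_scan(unsigned long nr_to_scan)	/* shrinker-like path */
{
	unsigned long freed = SHRINK_STOP;

	if (pthread_mutex_trylock(&cache_size_mutex) == 0) {
		for (freed = 0; freed < nr_to_scan && nr_stripes; freed++)
			nr_stripes--;		/* drop_one_stripe() stand-in */
		pthread_mutex_unlock(&cache_size_mutex);
	}
	return freed;				/* SHRINK_STOP if the lock was busy */
}

int main(void)
{
	unsigned long freed;

	set_cache_size(128);
	freed = cache_scan(16);
	printf("freed=%lu left=%lu\n", freed, nr_stripes);	/* freed=16 left=112 */
	return 0;
}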
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) } else err = -ENOMEM; + mutex_unlock(&conf->cache_size_mutex); /* Step 4, return new stripes to service */ while(!list_empty(&newstripes)) { nsh = list_entry(newstripes.next, struct stripe_head, lru); @@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) &first_bad, &bad_sectors)) set_bit(R5_ReadRepl, &dev->flags); else { - if (rdev) + if (rdev && !test_bit(Faulty, &rdev->flags)) set_bit(R5_NeedReplace, &dev->flags); + else + clear_bit(R5_NeedReplace, &dev->flags); rdev = rcu_dereference(conf->disks[i].rdev); clear_bit(R5_ReadRepl, &dev->flags); } @@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); - if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { + if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && + mutex_trylock(&conf->cache_size_mutex)) { grow_one_stripe(conf, __GFP_NOWARN); /* Set flag even if allocation failed. This helps * slow down allocation requests when mem is short */ set_bit(R5_DID_ALLOC, &conf->cache_state); + mutex_unlock(&conf->cache_size_mutex); } async_tx_issue_pending_all(); @@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size) return -EINVAL; conf->min_nr_stripes = size; + mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) ; + mutex_unlock(&conf->cache_size_mutex); err = md_allow_write(mddev); if (err) return err; + mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) if (!grow_one_stripe(conf, GFP_KERNEL)) break; + mutex_unlock(&conf->cache_size_mutex); return 0; } @@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); - int ret = 0; - while (ret < sc->nr_to_scan) { - if (drop_one_stripe(conf) == 0) - return SHRINK_STOP; - ret++; + unsigned long ret = SHRINK_STOP; + + if (mutex_trylock(&conf->cache_size_mutex)) { + ret= 0; + while (ret < sc->nr_to_scan) { + if (drop_one_stripe(conf) == 0) { + ret = SHRINK_STOP; + break; + } + ret++; + } + mutex_unlock(&conf->cache_size_mutex); } return ret; } @@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) goto abort; spin_lock_init(&conf->device_lock); seqcount_init(&conf->gen_lock); + mutex_init(&conf->cache_size_mutex); init_waitqueue_head(&conf->wait_for_quiescent); for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { init_waitqueue_head(&conf->wait_for_stripe[i]); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 02c3bf8fbfe7..d05144278690 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -482,7 +482,8 @@ struct r5conf { */ int active_name; char cache_name[2][32]; - struct kmem_cache *slab_cache; /* for allocating stripes */ + struct kmem_cache *slab_cache; /* for allocating stripes */ + struct mutex cache_size_mutex; /* Protect changes to cache size */ int seq_flush, seq_write; int quiesce; diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 4cb365d4ffdc..8b95eefb610b 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -38,6 +38,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/fb.h> @@ -1171,6 +1173,13 @@ static int 
ivtvfb_init_card(struct ivtv *itv) { int rc; +#ifdef CONFIG_X86_64 + if (pat_enabled()) { + pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n"); + return -ENODEV; + } +#endif + if (itv->osd_info) { IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); return -EBUSY; @@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void) int registered = 0; int err; -#ifdef CONFIG_X86_64 - if (WARN(pat_enabled(), - "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) { - return -ENODEV; - } -#endif if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 8eb0a9500a90..e9513d651cd3 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent) /* Fill in the data structures */ devno = MKDEV(MAJOR(mei_devt), dev->minor); cdev_init(&dev->cdev, &mei_fops); - dev->cdev.owner = mei_fops.owner; + dev->cdev.owner = parent->driver->owner; /* Add the device */ ret = cdev_add(&dev->cdev, devno, 1); diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c index 41e3bdb10061..6dfdae3452d6 100644 --- a/drivers/misc/mic/scif/scif_nodeqp.c +++ b/drivers/misc/mic/scif/scif_nodeqp.c @@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg) } static struct scatterlist * -scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) +scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt) { struct scatterlist *sg; struct page *page; @@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) return NULL; sg_init_table(sg, page_cnt); for (i = 0; i < page_cnt; i++) { - page = vmalloc_to_page((void __force *)va); - if (!page) - goto p2p_sg_err; + page = pfn_to_page(pa >> PAGE_SHIFT); sg_set_page(&sg[i], page, page_size, 0); - va += page_size; + pa += page_size; } return sg; -p2p_sg_err: - kfree(sg); - return NULL; } /* Init p2p mappings required to access peerdev from scifdev */ @@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev) p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); if (!p2p) return NULL; - p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, + p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa, PAGE_SIZE, num_mmio_pages); if (!p2p->ppi_sg[SCIF_PPI_MMIO]) goto free_p2p; p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); - p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, + p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa, 1 << sg_page_shift, num_aper_chunks); p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c9c3d20b784b..a1b820fcb2a6 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev, ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); + mmc_blk_put(md); + return ret; } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index fd9a58e216a5..6a0f9c79be26 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI config MMC_MTK tristate "MediaTek SD/MMC Card Interface support" + depends on HAS_DMA help This selects the MediaTek(R) 
Secure digital and Multimedia card Interface. If you have a machine with a integrated SD/MMC card reader, say Y or M here. diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index b2b411da297b..4d1203236890 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) if (status & (CTO_EN | CCRC_EN)) end_cmd = 1; + if (host->data || host->response_busy) { + end_trans = !end_cmd; + host->response_busy = 0; + } if (status & (CTO_EN | DTO_EN)) hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); - else if (status & (CCRC_EN | DCRC_EN)) + else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | + BADA_EN)) hsmmc_command_incomplete(host, -EILSEQ, end_cmd); if (status & ACE_EN) { @@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) } dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); } - if (host->data || host->response_busy) { - end_trans = !end_cmd; - host->response_busy = 0; - } } OMAP_HSMMC_WRITE(host->base, STAT, status); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index faf0cb910c96..c6b9f6492e1a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct pltfm_imx_data *imx_data = pltfm_host->priv; - struct esdhc_platform_data *boarddata = &imx_data->boarddata; - if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) - return boarddata->f_max; - else - return pltfm_host->clock; + return pltfm_host->clock; } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) @@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { static int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, - struct esdhc_platform_data *boarddata) + struct pltfm_imx_data *imx_data) { struct device_node *np = pdev->dev.of_node; - - if (!np) - return -ENODEV; - - if (of_get_property(np, "non-removable", NULL)) - boarddata->cd_type = ESDHC_CD_PERMANENT; - - if (of_get_property(np, "fsl,cd-controller", NULL)) - boarddata->cd_type = ESDHC_CD_CONTROLLER; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + int ret; if (of_get_property(np, "fsl,wp-controller", NULL)) boarddata->wp_type = ESDHC_WP_CONTROLLER; - boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); - if (gpio_is_valid(boarddata->cd_gpio)) - boarddata->cd_type = ESDHC_CD_GPIO; - boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); if (gpio_is_valid(boarddata->wp_gpio)) boarddata->wp_type = ESDHC_WP_GPIO; - of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); - - of_property_read_u32(np, "max-frequency", &boarddata->f_max); - if (of_find_property(np, "no-1-8-v", NULL)) boarddata->support_vsel = false; else @@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); + /* sdr50 and sdr104 needs work on 1.8v signal voltage */ + if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && + !IS_ERR(imx_data->pins_default)) { + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_100MHZ); + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_200MHZ); + 
if (IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) { + dev_warn(mmc_dev(host->mmc), + "could not get ultra high speed state, work on normal mode\n"); + /* + * fall back to not support uhs by specify no 1.8v quirk + */ + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + } else { + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + /* call to generic mmc_of_parse to support additional capabilities */ - return mmc_of_parse(host->mmc); + ret = mmc_of_parse(host->mmc); + if (ret) + return ret; + + if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + return 0; } #else static inline int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, - struct esdhc_platform_data *boarddata) + struct pltfm_imx_data *imx_data) { return -ENODEV; } #endif +static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, + struct sdhci_host *host, + struct pltfm_imx_data *imx_data) +{ + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + int err; + + if (!host->mmc->parent->platform_data) { + dev_err(mmc_dev(host->mmc), "no board data!\n"); + return -EINVAL; + } + + imx_data->boarddata = *((struct esdhc_platform_data *) + host->mmc->parent->platform_data); + /* write_protect */ + if (boarddata->wp_type == ESDHC_WP_GPIO) { + err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request write-protect gpio!\n"); + return err; + } + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + } + + /* card_detect */ + switch (boarddata->cd_type) { + case ESDHC_CD_GPIO: + err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request card-detect gpio!\n"); + return err; + } + /* fall through */ + + case ESDHC_CD_CONTROLLER: + /* we have a working card_detect back */ + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + break; + + case ESDHC_CD_PERMANENT: + host->mmc->caps |= MMC_CAP_NONREMOVABLE; + break; + + case ESDHC_CD_NONE: + break; + } + + switch (boarddata->max_bus_width) { + case 8: + host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; + break; + case 4: + host->mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + case 1: + default: + host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + break; + } + + return 0; +} + static int sdhci_esdhc_imx_probe(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(imx_esdhc_dt_ids, &pdev->dev); struct sdhci_pltfm_host *pltfm_host; struct sdhci_host *host; - struct esdhc_platform_data *boarddata; int err; struct pltfm_imx_data *imx_data; - bool dt = true; host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); if (IS_ERR(host)) @@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - boarddata = &imx_data->boarddata; - if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { - if (!host->mmc->parent->platform_data) { - dev_err(mmc_dev(host->mmc), "no board data!\n"); - err = -EINVAL; - goto disable_clk; - } - imx_data->boarddata = *((struct esdhc_platform_data *) - host->mmc->parent->platform_data); - dt = false; - } - /* write_protect */ - if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) { - err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request write-protect gpio!\n"); - goto disable_clk; - } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - } - - 
/* card_detect */ - switch (boarddata->cd_type) { - case ESDHC_CD_GPIO: - if (dt) - break; - err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request card-detect gpio!\n"); - goto disable_clk; - } - /* fall through */ - - case ESDHC_CD_CONTROLLER: - /* we have a working card_detect back */ - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; - break; - - case ESDHC_CD_PERMANENT: - host->mmc->caps |= MMC_CAP_NONREMOVABLE; - break; - - case ESDHC_CD_NONE: - break; - } - - switch (boarddata->max_bus_width) { - case 8: - host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; - break; - case 4: - host->mmc->caps |= MMC_CAP_4_BIT_DATA; - break; - case 1: - default: - host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; - break; - } - - /* sdr50 and sdr104 needs work on 1.8v signal voltage */ - if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && - !IS_ERR(imx_data->pins_default)) { - imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, - ESDHC_PINCTRL_STATE_100MHZ); - imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, - ESDHC_PINCTRL_STATE_200MHZ); - if (IS_ERR(imx_data->pins_100mhz) || - IS_ERR(imx_data->pins_200mhz)) { - dev_warn(mmc_dev(host->mmc), - "could not get ultra high speed state, work on normal mode\n"); - /* fall back to not support uhs by specify no 1.8v quirk */ - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } - } else { - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } + if (of_id) + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + else + err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data); + if (err) + goto disable_clk; err = sdhci_add_host(host); if (err) diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index 3497cfaf683c..a870c42731d7 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -45,6 +45,6 @@ #define ESDHC_DMA_SYSCTL 0x40c #define ESDHC_DMA_SNOOP 0x00000040 -#define ESDHC_HOST_CONTROL_RES 0x05 +#define ESDHC_HOST_CONTROL_RES 0x01 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 9cd5fc62f130..946d37f94a31 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) goto err_of_parse; sdhci_get_of_property(pdev); pdata = pxav3_get_mmc_pdata(dev); + pdev->dev.platform_data = pdata; } else if (pdata) { /* on-chip device */ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index bc1445238fb3..1dbe93232030 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host) u32 max_current_caps; unsigned int ocr_avail; unsigned int override_timeout_clk; + u32 max_clk; int ret; WARN_ON(host == NULL); @@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host) GFP_KERNEL); host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); if (!host->adma_table || !host->align_buffer) { - dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, - host->adma_table, host->adma_addr); + if (host->adma_table) + dma_free_coherent(mmc_dev(mmc), + host->adma_table_sz, + host->adma_table, + host->adma_addr); kfree(host->align_buffer); pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", mmc_hostname(mmc)); @@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host) * Set host parameters. 
*/ mmc->ops = &sdhci_ops; - mmc->f_max = host->max_clk; + max_clk = host->max_clk; + if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else if (host->version >= SDHCI_SPEC_300) { if (host->clk_mul) { mmc->f_min = (host->max_clk * host->clk_mul) / 1024; - mmc->f_max = host->max_clk * host->clk_mul; + max_clk = host->max_clk * host->clk_mul; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; + if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk))) + mmc->f_max = max_clk; + if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index a4e3f8655cb8..88c1e1a834f8 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index) return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); } +/* I/O accessors */ +static u32 hw_readl_native(struct macb *bp, int offset) +{ + return __raw_readl(bp->regs + offset); +} + +static void hw_writel_native(struct macb *bp, int offset, u32 value) +{ + __raw_writel(value, bp->regs + offset); +} + +static u32 hw_readl(struct macb *bp, int offset) +{ + return readl_relaxed(bp->regs + offset); +} + +static void hw_writel(struct macb *bp, int offset, u32 value) +{ + writel_relaxed(value, bp->regs + offset); +} + +/* + * Find the CPU endianness by using the loopback bit of NCR register. When the + * CPU is in big endian we need to program swaped mode for management + * descriptor access. + */ +static bool hw_is_native_io(void __iomem *addr) +{ + u32 value = MACB_BIT(LLB); + + __raw_writel(value, addr + MACB_NCR); + value = __raw_readl(addr + MACB_NCR); + + /* Write 0 back to disable everything */ + __raw_writel(0, addr + MACB_NCR); + + return value == MACB_BIT(LLB); +} + +static bool hw_is_gem(void __iomem *addr, bool native_io) +{ + u32 id; + + if (native_io) + id = __raw_readl(addr + MACB_MID); + else + id = readl_relaxed(addr + MACB_MID); + + return MACB_BFEXT(IDNUM, id) >= 0x2; +} + static void macb_set_hwaddr(struct macb *bp) { u32 bottom; @@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp) } } - netdev_info(bp->dev, "invalid hw address, using random\n"); + dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); eth_hw_addr_random(bp->dev); } @@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev) struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; unsigned long flags; - int status_change = 0; spin_lock_irqsave(&bp->lock, flags); @@ -449,14 +499,14 @@ err_out: static void macb_update_stats(struct macb *bp) { - u32 __iomem *reg = bp->regs + MACB_PFR; u32 *p = &bp->hw_stats.macb.rx_pause_frames; u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; + int offset = MACB_PFR; WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); - for(; p < end; p++, reg++) - *p += readl_relaxed(reg); + for(; p < end; p++, offset += 4) + *p += bp->macb_reg_readl(bp, offset); } static int macb_halt_tx(struct macb *bp) @@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev) } #endif -static inline unsigned int macb_count_tx_descriptors(struct macb *bp, - unsigned int len) -{ - return (len + bp->max_tx_length - 1) / bp->max_tx_length; -} - static unsigned int macb_tx_map(struct macb *bp, struct 
macb_queue *queue, struct sk_buff *skb) @@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) * socket buffer: skb fragments of jumbo frames may need to be * splitted into many buffer descriptors. */ - count = macb_count_tx_descriptors(bp, skb_headlen(skb)); + count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) { frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); - count += macb_count_tx_descriptors(bp, frag_size); + count += DIV_ROUND_UP(frag_size, bp->max_tx_length); } spin_lock_irqsave(&bp->lock, flags); @@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp) static void macb_configure_dma(struct macb *bp) { u32 dmacfg; - u32 tmp, ncr; if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); @@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp) dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); dmacfg &= ~GEM_BIT(ENDIA_PKT); - /* Find the CPU endianness by using the loopback bit of net_ctrl - * register. save it first. When the CPU is in big endian we - * need to program swaped mode for management descriptor access. - */ - ncr = macb_readl(bp, NCR); - __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR); - tmp = __raw_readl(bp->regs + MACB_NCR); - - if (tmp == MACB_BIT(LLB)) + if (bp->native_io) dmacfg &= ~GEM_BIT(ENDIA_DESC); else dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ - /* Restore net_ctrl */ - macb_writel(bp, NCR, ncr); - if (bp->dev->features & NETIF_F_HW_CSUM) dmacfg |= GEM_BIT(TXCOEN); else @@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu) static void gem_update_stats(struct macb *bp) { - int i; + unsigned int i; u32 *p = &bp->hw_stats.gem.tx_octets_31_0; for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { u32 offset = gem_statistics[i].offset; - u64 val = readl_relaxed(bp->regs + offset); + u64 val = bp->macb_reg_readl(bp, offset); bp->ethtool_stats[i] += val; *p += val; if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { /* Add GEM_OCTTXH, GEM_OCTRXH */ - val = readl_relaxed(bp->regs + offset + 4); + val = bp->macb_reg_readl(bp, offset + 4); bp->ethtool_stats[i] += ((u64)val) << 32; *(++p) += val; } @@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset) static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) { - int i; + unsigned int i; switch (sset) { case ETH_SS_STATS: @@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co if (dt_conf) bp->caps = dt_conf->caps; - if (macb_is_gem_hw(bp->regs)) { + if (hw_is_gem(bp->regs, bp->native_io)) { bp->caps |= MACB_CAPS_MACB_IS_GEM; dcfg = gem_readl(bp, DCFG1); @@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co bp->caps |= MACB_CAPS_FIFO_MODE; } - netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); + dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); } static void macb_probe_queues(void __iomem *mem, + bool native_io, unsigned int *queue_mask, unsigned int *num_queues) { @@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem, * we are early in the probe process and don't have the * MACB_CAPS_MACB_IS_GEM flag positioned */ - if (!macb_is_gem_hw(mem)) + if (!hw_is_gem(mem, native_io)) return; /* bit 0 is never set but queue 0 always exists */ @@ -2784,6 +2817,7 @@ static int macb_probe(struct platform_device *pdev) struct clk *pclk, *hclk, *tx_clk; unsigned int 
queue_mask, num_queues; struct macb_platform_data *pdata; + bool native_io; struct phy_device *phydev; struct net_device *dev; struct resource *regs; @@ -2792,6 +2826,11 @@ static int macb_probe(struct platform_device *pdev) struct macb *bp; int err; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mem = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(mem)) + return PTR_ERR(mem); + if (np) { const struct of_device_id *match; @@ -2807,14 +2846,9 @@ static int macb_probe(struct platform_device *pdev) if (err) return err; - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mem = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(mem)) { - err = PTR_ERR(mem); - goto err_disable_clocks; - } + native_io = hw_is_native_io(mem); - macb_probe_queues(mem, &queue_mask, &num_queues); + macb_probe_queues(mem, native_io, &queue_mask, &num_queues); dev = alloc_etherdev_mq(sizeof(*bp), num_queues); if (!dev) { err = -ENOMEM; @@ -2829,6 +2863,14 @@ static int macb_probe(struct platform_device *pdev) bp->pdev = pdev; bp->dev = dev; bp->regs = mem; + bp->native_io = native_io; + if (native_io) { + bp->macb_reg_readl = hw_readl_native; + bp->macb_reg_writel = hw_writel_native; + } else { + bp->macb_reg_readl = hw_readl; + bp->macb_reg_writel = hw_writel; + } bp->num_queues = num_queues; bp->queue_mask = queue_mask; if (macb_config) @@ -2836,9 +2878,8 @@ static int macb_probe(struct platform_device *pdev) bp->pclk = pclk; bp->hclk = hclk; bp->tx_clk = tx_clk; - if (macb_config->jumbo_max_len) { + if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; - } spin_lock_init(&bp->lock); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 8fb80b2dcf82..6e1faea00ca8 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -429,18 +429,12 @@ | GEM_BF(name, value)) /* Register access macros */ -#define macb_readl(port,reg) \ - readl_relaxed((port)->regs + MACB_##reg) -#define macb_writel(port,reg,value) \ - writel_relaxed((value), (port)->regs + MACB_##reg) -#define gem_readl(port, reg) \ - readl_relaxed((port)->regs + GEM_##reg) -#define gem_writel(port, reg, value) \ - writel_relaxed((value), (port)->regs + GEM_##reg) -#define queue_readl(queue, reg) \ - readl_relaxed((queue)->bp->regs + (queue)->reg) -#define queue_writel(queue, reg, value) \ - writel_relaxed((value), (queue)->bp->regs + (queue)->reg) +#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg) +#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value)) +#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg) +#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value)) +#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg) +#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value)) /* Conditional GEM/MACB macros. These perform the operation to the correct * register dependent on whether the device is a GEM or a MACB. 
For registers @@ -785,6 +779,11 @@ struct macb_queue { struct macb { void __iomem *regs; + bool native_io; + + /* hardware IO accessors */ + u32 (*macb_reg_readl)(struct macb *bp, int offset); + void (*macb_reg_writel)(struct macb *bp, int offset, u32 value); unsigned int rx_tail; unsigned int rx_prepared_head; @@ -817,9 +816,9 @@ struct macb { struct mii_bus *mii_bus; struct phy_device *phy_dev; - unsigned int link; - unsigned int speed; - unsigned int duplex; + int link; + int speed; + int duplex; u32 caps; unsigned int dma_burst_length; @@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp) return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); } -static inline bool macb_is_gem_hw(void __iomem *addr) -{ - return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2); -} - #endif /* _MACB_H */ diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index dda8a02b7322..8aee250904ec 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -125,6 +125,15 @@ */ #define NICPF_CLK_PER_INT_TICK 2 +/* Time to wait before we decide that a SQ is stuck. + * + * Since both pkt rx and tx notifications are done with same CQ, + * when packets are being received at very high rate (eg: L2 forwarding) + * then freeing transmitted skbs will be delayed and watchdog + * will kick in, resetting interface. Hence keeping this value high. + */ +#define NICVF_TX_TIMEOUT (50 * HZ) + struct nicvf_cq_poll { u8 cq_idx; /* Completion queue index */ struct napi_struct napi; @@ -216,8 +225,9 @@ struct nicvf_drv_stats { /* Tx */ u64 tx_frames_ok; u64 tx_drops; - u64 tx_busy; u64 tx_tso; + u64 txq_stop; + u64 txq_wake; }; struct nicvf { diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 16bd2d772db9..a4228e664567 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = { NICVF_DRV_STAT(rx_frames_jumbo), NICVF_DRV_STAT(rx_drops), NICVF_DRV_STAT(tx_frames_ok), - NICVF_DRV_STAT(tx_busy), NICVF_DRV_STAT(tx_tso), NICVF_DRV_STAT(tx_drops), + NICVF_DRV_STAT(txq_stop), + NICVF_DRV_STAT(txq_wake), }; static const struct nicvf_stat nicvf_queue_stats[] = { @@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl) static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) { + struct nicvf *nic = netdev_priv(netdev); int stats, qidx; if (sset != ETH_SS_STATS) @@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) data += ETH_GSTRING_LEN; } - for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { for (stats = 0; stats < nicvf_n_queue_stats; stats++) { sprintf(data, "rxq%d: %s", qidx, nicvf_queue_stats[stats].name); @@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) } } - for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { for (stats = 0; stats < nicvf_n_queue_stats; stats++) { sprintf(data, "txq%d: %s", qidx, nicvf_queue_stats[stats].name); @@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) static int nicvf_get_sset_count(struct net_device *netdev, int sset) { + struct nicvf *nic = netdev_priv(netdev); + if (sset != ETH_SS_STATS) return -EINVAL; 
return nicvf_n_hw_stats + nicvf_n_drv_stats + (nicvf_n_queue_stats * - (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + + (nic->qs->rq_cnt + nic->qs->sq_cnt)) + BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; } @@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, *(data++) = ((u64 *)&nic->drv_stats) [nicvf_drv_stats[stat].index]; - for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) { for (stat = 0; stat < nicvf_n_queue_stats; stat++) *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) [nicvf_queue_stats[stat].index]; } - for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { + for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) { for (stat = 0; stat < nicvf_n_queue_stats; stat++) *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) [nicvf_queue_stats[stat].index]; @@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev, { struct nicvf *nic = netdev_priv(dev); int err = 0; + bool if_up = netif_running(dev); if (!channel->rx_count || !channel->tx_count) return -EINVAL; @@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev, if (channel->tx_count > MAX_SND_QUEUES_PER_QS) return -EINVAL; + if (if_up) + nicvf_stop(dev); + nic->qs->rq_cnt = channel->rx_count; nic->qs->sq_cnt = channel->tx_count; nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); @@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev, if (err) return err; - if (!netif_running(dev)) - return err; + if (if_up) + nicvf_open(dev); - nicvf_stop(dev); - nicvf_open(dev); netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", nic->qs->sq_cnt, nic->qs->rq_cnt); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 8b119a035b7e..3b90afb8c293 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) nic->duplex == DUPLEX_FULL ? 
"Full duplex" : "Half duplex"); netif_carrier_on(nic->netdev); - netif_tx_wake_all_queues(nic->netdev); + netif_tx_start_all_queues(nic->netdev); } else { netdev_info(nic->netdev, "%s: Link is Down\n", nic->netdev->name); @@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, if (skb) { prefetch(skb); dev_consume_skb_any(skb); + sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; } } @@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, struct napi_struct *napi, int budget) { - int processed_cqe, work_done = 0; + int processed_cqe, work_done = 0, tx_done = 0; int cqe_count, cqe_head; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct cmp_queue *cq = &qs->cq[cq_idx]; struct cqe_rx_t *cq_desc; + struct netdev_queue *txq; spin_lock_bh(&cq->lock); loop: @@ -496,8 +498,8 @@ loop: cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; cqe_head &= 0xFFFF; - netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", - __func__, cqe_count, cqe_head); + netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n", + __func__, cq_idx, cqe_count, cqe_head); while (processed_cqe < cqe_count) { /* Get the CQ descriptor */ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); @@ -511,8 +513,8 @@ loop: break; } - netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", - cq_desc->cqe_type); + netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n", + cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: nicvf_rcv_pkt_handler(netdev, napi, cq, @@ -522,6 +524,7 @@ loop: case CQE_TYPE_SEND: nicvf_snd_pkt_handler(netdev, cq, (void *)cq_desc, CQE_TYPE_SEND); + tx_done++; break; case CQE_TYPE_INVALID: case CQE_TYPE_RX_SPLIT: @@ -532,8 +535,9 @@ loop: } processed_cqe++; } - netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", - __func__, processed_cqe, work_done, budget); + netdev_dbg(nic->netdev, + "%s CQ%d processed_cqe %d work_done %d budget %d\n", + __func__, cq_idx, processed_cqe, work_done, budget); /* Ring doorbell to inform H/W to reuse processed CQEs */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, @@ -543,6 +547,19 @@ loop: goto loop; done: + /* Wakeup TXQ if its stopped earlier due to SQ full */ + if (tx_done) { + txq = netdev_get_tx_queue(netdev, cq_idx); + if (netif_tx_queue_stopped(txq)) { + netif_tx_start_queue(txq); + nic->drv_stats.txq_wake++; + if (netif_msg_tx_err(nic)) + netdev_warn(netdev, + "%s: Transmit queue wakeup SQ%d\n", + netdev->name, cq_idx); + } + } + spin_unlock_bh(&cq->lock); return work_done; } @@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget) struct net_device *netdev = napi->dev; struct nicvf *nic = netdev_priv(netdev); struct nicvf_cq_poll *cq; - struct netdev_queue *txq; cq = container_of(napi, struct nicvf_cq_poll, napi); work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); - txq = netdev_get_tx_queue(netdev, cq->cq_idx); - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); - if (work_done < budget) { /* Slow packet rate, exit polling */ napi_complete(napi); @@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } - if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { + if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { netif_tx_stop_queue(txq); - nic->drv_stats.tx_busy++; + nic->drv_stats.txq_stop++; if 
(netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit ring full, stopping SQ%d\n", @@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev) nicvf_send_msg_to_pf(nic, &mbx); netif_carrier_off(netdev); - netif_tx_disable(netdev); /* Disable RBDR & QS error interrupts */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { @@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev) kfree(cq_poll); } + netif_tx_disable(netdev); + /* Free resources */ nicvf_config_data_transfer(nic, false); @@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev) for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); + nic->drv_stats.txq_stop = 0; + nic->drv_stats.txq_wake = 0; + netif_carrier_on(netdev); netif_tx_start_all_queues(netdev); @@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features = netdev->features; netdev->netdev_ops = &nicvf_netdev_ops; + netdev->watchdog_timeo = NICVF_TX_TIMEOUT; INIT_WORK(&nic->reset_task, nicvf_reset_task); @@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static void nicvf_shutdown(struct pci_dev *pdev) +{ + nicvf_remove(pdev); +} + static struct pci_driver nicvf_driver = { .name = DRV_NAME, .id_table = nicvf_id_table, .probe = nicvf_probe, .remove = nicvf_remove, + .shutdown = nicvf_shutdown, }; static int __init nicvf_init_module(void) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d69d228d11a0..ca4240aa6d15 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, /* Allocate a new page */ if (!nic->rb_page) { - nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); + nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, + order); if (!nic->rb_page) { - netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); + netdev_err(nic->netdev, + "Failed to allocate new rcv buffer\n"); return -ENOMEM; } nic->rb_page_offset = 0; @@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) return; if (sq->tso_hdrs) - dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, + dma_free_coherent(&nic->pdev->dev, + sq->dmem.q_len * TSO_HEADER_SIZE, sq->tso_hdrs, sq->tso_hdrs_phys); kfree(sq->skbuff); @@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, continue; } skb = (struct sk_buff *)sq->skbuff[sq->head]; + if (skb) + dev_kfree_skb_any(skb); atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); atomic64_add(hdr->tot_len, (atomic64_t *)&netdev->stats.tx_bytes); - dev_kfree_skb_any(skb); nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } } @@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, memset(gather, 0, SND_QUEUE_DESC_SIZE); gather->subdesc_type = SQ_DESC_TYPE_GATHER; - gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; + gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; gather->size = size; gather->addr = data; } @@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, } nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, seg_subdescs - 1, skb, seg_len); - sq->skbuff[hdr_qentry] = 0; + sq->skbuff[hdr_qentry] = (u64)NULL; qentry = nicvf_get_nxt_sqentry(sq, qentry); desc_cnt += seg_subdescs; @@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct 
nicvf *nic, struct snd_queue *sq, /* Inform HW to xmit all TSO segments */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, skb_get_queue_mapping(skb), desc_cnt); + nic->drv_stats.tx_tso++; return 1; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 8341bdf755d1..f0937b7bfe9f 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -62,7 +62,7 @@ #define SND_QUEUE_CNT 8 #define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ -#define SND_QSIZE SND_QUEUE_SIZE4 +#define SND_QSIZE SND_QUEUE_SIZE2 #define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) #define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) #define SND_QUEUE_THRESH 2ULL @@ -70,7 +70,10 @@ /* Since timestamp not enabled, otherwise 2 */ #define MAX_CQE_PER_PKT_XMIT 1 -#define CMP_QSIZE CMP_QUEUE_SIZE4 +/* Keep CQ and SQ sizes same, if timestamping + * is enabled this equation will change. + */ +#define CMP_QSIZE CMP_QUEUE_SIZE2 #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) #define CMP_QUEUE_CQE_THRESH 0 #define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ @@ -87,7 +90,12 @@ #define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ MAX_CQE_PER_PKT_XMIT) -#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) +/* Calculate number of CQEs to reserve for all SQEs. + * Its 1/256th level of CQ size. + * '+ 1' to account for pipelining + */ +#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \ + (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1) /* Descriptor size in bytes */ #define SND_QUEUE_DESC_SIZE 16 diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 633ec05dfe05..b961a89dc626 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); bgx_flush_dmac_addrs(bgx, lmacid); - if (lmac->phydev) + if ((bgx->lmac_type != BGX_MODE_XFI) && + (bgx->lmac_type != BGX_MODE_XLAUI) && + (bgx->lmac_type != BGX_MODE_40G_KR) && + (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) phy_disconnect(lmac->phydev); lmac->phydev = NULL; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 1eee73cccdf5..99d33e2d35e6 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -562,6 +562,7 @@ struct fec_enet_private { }; void fec_ptp_init(struct platform_device *pdev); +void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 1f89c59b4353..32e3807c650e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> +#include <linux/pm_runtime.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -77,6 +78,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_RAEM_V 0x8 #define FEC_ENET_RAFL_V 0x8 #define FEC_ENET_OPD_V 0xFFF0 +#define FEC_MDIO_PM_TIMEOUT 100 /* ms */ static struct platform_device_id fec_devtype[] = { { @@ 
-1767,7 +1769,13 @@ static void fec_enet_adjust_link(struct net_device *ndev) static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (IS_ERR_VALUE(ret)) + return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1783,18 +1791,30 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto out; } - /* return value */ - return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); + +out: + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct fec_enet_private *fep = bus->priv; + struct device *dev = &fep->pdev->dev; unsigned long time_left; + int ret = 0; + + ret = pm_runtime_get_sync(dev); + if (IS_ERR_VALUE(ret)) + return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1811,10 +1831,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; } - return 0; + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; } static int fec_enet_clk_enable(struct net_device *ndev, bool enable) @@ -1826,9 +1849,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; - ret = clk_prepare_enable(fep->clk_ipg); - if (ret) - goto failed_clk_ipg; if (fep->clk_enet_out) { ret = clk_prepare_enable(fep->clk_enet_out); if (ret) @@ -1852,7 +1872,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } } else { clk_disable_unprepare(fep->clk_ahb); - clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { @@ -1874,8 +1893,6 @@ failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); failed_clk_enet_out: - clk_disable_unprepare(fep->clk_ipg); -failed_clk_ipg: clk_disable_unprepare(fep->clk_ahb); return ret; @@ -2847,10 +2864,14 @@ fec_enet_open(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; + ret = pm_runtime_get_sync(&fep->pdev->dev); + if (IS_ERR_VALUE(ret)) + return ret; + pinctrl_pm_select_default_state(&fep->pdev->dev); ret = fec_enet_clk_enable(ndev, true); if (ret) - return ret; + goto clk_enable; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. 
@@ -2881,6 +2902,9 @@ err_enet_mii_probe: fec_enet_free_buffers(ndev); err_enet_alloc: fec_enet_clk_enable(ndev, false); +clk_enable: + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; } @@ -2903,6 +2927,9 @@ fec_enet_close(struct net_device *ndev) fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); + pm_runtime_mark_last_busy(&fep->pdev->dev); + pm_runtime_put_autosuspend(&fep->pdev->dev); + fec_enet_free_buffers(ndev); return 0; @@ -3115,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev) fep->bufdesc_size; /* Allocate memory for buffer descriptors. */ - cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, - GFP_KERNEL); + cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, + GFP_KERNEL); if (!cbd_base) { return -ENOMEM; } @@ -3388,6 +3415,10 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; + fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); @@ -3400,6 +3431,11 @@ fec_probe(struct platform_device *pdev) fep->reg_phy = NULL; } + pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + fec_reset_phy(pdev); if (fep->bufdesc_ex) @@ -3447,6 +3483,10 @@ fec_probe(struct platform_device *pdev) fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); + + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + return 0; failed_register: @@ -3454,9 +3494,12 @@ failed_register: failed_mii_init: failed_irq: failed_init: + fec_ptp_stop(pdev); if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: failed_phy: @@ -3473,14 +3516,12 @@ fec_drv_remove(struct platform_device *pdev) struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - cancel_delayed_work_sync(&fep->time_keep); cancel_work_sync(&fep->tx_timeout_work); + fec_ptp_stop(pdev); unregister_netdev(ndev); fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); - if (fep->ptp_clock) - ptp_clock_unregister(fep->ptp_clock); of_node_put(fep->phy_node); free_netdev(ndev); @@ -3568,7 +3609,28 @@ failed_clk: return ret; } -static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); +static int __maybe_unused fec_runtime_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + clk_disable_unprepare(fep->clk_ipg); + + return 0; +} + +static int __maybe_unused fec_runtime_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct fec_enet_private *fep = netdev_priv(ndev); + + return clk_prepare_enable(fep->clk_ipg); +} + +static const struct dev_pm_ops fec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) + SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) +}; static struct platform_driver fec_driver = { .driver = { diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 7a8386a8244d..1543cf0e8ef6 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ 
b/drivers/net/ethernet/freescale/fec_ptp.c @@ -598,6 +598,16 @@ void fec_ptp_init(struct platform_device *pdev) schedule_delayed_work(&fep->time_keep, HZ); } +void fec_ptp_stop(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + cancel_delayed_work_sync(&fep->time_keep); + if (fep->ptp_clock) + ptp_clock_unregister(fep->ptp_clock); +} + /** * fec_ptp_check_pps_event * @fep: the fec_enet_private structure handle diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index afa3ea7ccf76..087ffcdc48a3 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -532,22 +532,6 @@ static void gfar_ints_enable(struct gfar_private *priv) } } -static void lock_tx_qs(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < priv->num_tx_queues; i++) - spin_lock(&priv->tx_queue[i]->txlock); -} - -static void unlock_tx_qs(struct gfar_private *priv) -{ - int i; - - for (i = 0; i < priv->num_tx_queues; i++) - spin_unlock(&priv->tx_queue[i]->txlock); -} - static int gfar_alloc_tx_queues(struct gfar_private *priv) { int i; @@ -1340,7 +1324,6 @@ static int gfar_probe(struct platform_device *ofdev) priv->dev = &ofdev->dev; SET_NETDEV_DEV(dev, &ofdev->dev); - spin_lock_init(&priv->bflock); INIT_WORK(&priv->reset_task, gfar_reset_task); platform_set_drvdata(ofdev, priv); @@ -1432,9 +1415,8 @@ static int gfar_probe(struct platform_device *ofdev) goto register_fail; } - device_init_wakeup(&dev->dev, - priv->device_flags & - FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&dev->dev, priv->device_flags & + FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); /* fill out IRQ number and name fields */ for (i = 0; i < priv->num_grps; i++) { @@ -1502,48 +1484,37 @@ static int gfar_suspend(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; - int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); + if (!netif_running(ndev)) + return 0; + + disable_napi(priv); + netif_tx_lock(ndev); netif_device_detach(ndev); + netif_tx_unlock(ndev); - if (netif_running(ndev)) { + gfar_halt(priv); - local_irq_save(flags); - lock_tx_qs(priv); + if (magic_packet) { + /* Enable interrupt on Magic Packet */ + gfar_write(&regs->imask, IMASK_MAG); - gfar_halt_nodisable(priv); + /* Enable Magic Packet mode */ + tempval = gfar_read(&regs->maccfg2); + tempval |= MACCFG2_MPEN; + gfar_write(&regs->maccfg2, tempval); - /* Disable Tx, and Rx if wake-on-LAN is disabled. 
*/ + /* re-enable the Rx block */ tempval = gfar_read(&regs->maccfg1); - - tempval &= ~MACCFG1_TX_EN; - - if (!magic_packet) - tempval &= ~MACCFG1_RX_EN; - + tempval |= MACCFG1_RX_EN; gfar_write(&regs->maccfg1, tempval); - unlock_tx_qs(priv); - local_irq_restore(flags); - - disable_napi(priv); - - if (magic_packet) { - /* Enable interrupt on Magic Packet */ - gfar_write(&regs->imask, IMASK_MAG); - - /* Enable Magic Packet mode */ - tempval = gfar_read(&regs->maccfg2); - tempval |= MACCFG2_MPEN; - gfar_write(&regs->maccfg2, tempval); - } else { - phy_stop(priv->phydev); - } + } else { + phy_stop(priv->phydev); } return 0; @@ -1554,37 +1525,26 @@ static int gfar_resume(struct device *dev) struct gfar_private *priv = dev_get_drvdata(dev); struct net_device *ndev = priv->ndev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - unsigned long flags; u32 tempval; int magic_packet = priv->wol_en && (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); - if (!netif_running(ndev)) { - netif_device_attach(ndev); + if (!netif_running(ndev)) return 0; - } - if (!magic_packet && priv->phydev) + if (magic_packet) { + /* Disable Magic Packet mode */ + tempval = gfar_read(&regs->maccfg2); + tempval &= ~MACCFG2_MPEN; + gfar_write(&regs->maccfg2, tempval); + } else { phy_start(priv->phydev); - - /* Disable Magic Packet mode, in case something - * else woke us up. - */ - local_irq_save(flags); - lock_tx_qs(priv); - - tempval = gfar_read(&regs->maccfg2); - tempval &= ~MACCFG2_MPEN; - gfar_write(&regs->maccfg2, tempval); + } gfar_start(priv); - unlock_tx_qs(priv); - local_irq_restore(flags); - netif_device_attach(ndev); - enable_napi(priv); return 0; @@ -2010,7 +1970,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) /* Install our interrupt handlers for Error, * Transmit, and Receive */ - err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, + err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, + IRQF_NO_SUSPEND, gfar_irq(grp, ER)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -2033,7 +1994,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp) goto rx_irq_fail; } } else { - err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, + err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, + IRQF_NO_SUSPEND, gfar_irq(grp, TX)->name, grp); if (err < 0) { netif_err(priv, intr, dev, "Can't get IRQ %d\n", @@ -2134,8 +2096,6 @@ static int gfar_enet_open(struct net_device *dev) if (err) return err; - device_set_wakeup_enable(&dev->dev, priv->wol_en); - return err; } diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 8a5f4de9df1c..8c1994856e93 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1152,9 +1152,6 @@ struct gfar_private { int oldduplex; int oldlink; - /* Bitfield update lock */ - spinlock_t bflock; - uint32_t msg_enable; struct work_struct reset_task; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 3020aaabf0e4..555e461b0cfe 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -655,7 +655,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gfar_private *priv = netdev_priv(dev); - unsigned long flags; if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && wol->wolopts != 0) @@ -666,9 +665,7 @@ static int 
gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); - spin_lock_irqsave(&priv->bflock, flags); - priv->wol_en = !!device_may_wakeup(&dev->dev); - spin_unlock_irqrestore(&priv->bflock, flags); + priv->wol_en = !!device_may_wakeup(&dev->dev); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 82040137d7d9..0a3202047569 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, { struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; struct mlx4_cmd_context *context; + long ret_wait; int err = 0; down(&cmd->event_sem); @@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param, if (err) goto out_reset; - if (!wait_for_completion_timeout(&context->done, - msecs_to_jiffies(timeout))) { + if (op == MLX4_CMD_SENSE_PORT) { + ret_wait = + wait_for_completion_interruptible_timeout(&context->done, + msecs_to_jiffies(timeout)); + if (ret_wait < 0) { + context->fw_status = 0; + context->out_param = 0; + context->result = 0; + } + } else { + ret_wait = (long)wait_for_completion_timeout(&context->done, + msecs_to_jiffies(timeout)); + } + if (!ret_wait) { mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op); if (op == MLX4_CMD_NOP) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index a67fbb90d69e..4402a1e48c9b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) { - BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); return ring->prod == ring->cons; } diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index aae13adfb492..8e81e53c370e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) continue; mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", __func__, i, port); - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { eqe->event.port_change.port = cpu_to_be32( @@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) continue; if (i == mlx4_master_func_num(dev)) continue; - s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; + s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { eqe->event.port_change.port = cpu_to_be32( diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 6f35b6c06193..121c579888bb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2288,6 +2288,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev) } else if (err == -ENOENT) { err = 0; continue; + } else if (mlx4_is_slave(dev) && err == -EINVAL) { + priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); + mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", + MLX4_SINK_COUNTER_INDEX(dev)); 
+ err = 0; } else { mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", __func__, port + 1, err); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 33669c29b341..753ea8bad953 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter) if (fw->size & 0xF) { addr = dest + size; for (i = 0; i < (fw->size & 0xF); i++) - data[i] = temp[size + i]; + data[i] = ((u8 *)temp)[size + i]; for (; i < 16; i++) data[i] = 0; ret = qlcnic_ms_mem_write128(adapter, addr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 1cb660405f35..d02691ba3d7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -349,3 +349,7 @@ static int stmmac_pltfr_resume(struct device *dev) SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, stmmac_pltfr_resume); EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support"); +MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 0c5842aeb807..ab6051a43134 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, struct sk_buff *skb_new; skb_new = skb_realloc_headroom(skb, len); - if (!skb_new) { - rp->tx_errors++; + if (!skb_new) goto out_drop; - } kfree_skb(skb); skb = skb_new; } else diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h index bbacf5cccec2..a8a730641bbb 100644 --- a/drivers/net/ethernet/ti/netcp.h +++ b/drivers/net/ethernet/ti/netcp.h @@ -223,6 +223,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device, /* SGMII functions */ int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); +bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set); int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port); int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 6f2e151fbc73..29ae672917b7 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -2123,6 +2123,7 @@ probe_quit: static int netcp_remove(struct platform_device *pdev) { struct netcp_device *netcp_device = platform_get_drvdata(pdev); + struct netcp_intf *netcp_intf, *netcp_tmp; struct netcp_inst_modpriv *inst_modpriv, *tmp; struct netcp_module *module; @@ -2134,10 +2135,17 @@ static int netcp_remove(struct platform_device *pdev) list_del(&inst_modpriv->inst_list); kfree(inst_modpriv); } - WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n", - pdev->name); - devm_kfree(&pdev->dev, netcp_device); + /* now that all modules are removed, clean up the interfaces */ + list_for_each_entry_safe(netcp_intf, netcp_tmp, + &netcp_device->interface_head, + interface_list) { + netcp_delete_interface(netcp_device, netcp_intf->ndev); + } + + WARN(!list_empty(&netcp_device->interface_head), + "%s interface list not empty!\n", pdev->name); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); 
platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 01a955cf5603..6f16d6aaf7b7 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2097,11 +2097,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); } +static void gbe_sgmii_rtreset(struct gbe_priv *priv, + struct gbe_slave *slave, bool set) +{ + void __iomem *sgmii_port_regs; + + if (SLAVE_LINK_IS_XGMII(slave)) + return; + + if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) + sgmii_port_regs = priv->sgmii_port34_regs; + else + sgmii_port_regs = priv->sgmii_port_regs; + + netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set); +} + static void gbe_slave_stop(struct gbe_intf *intf) { struct gbe_priv *gbe_dev = intf->gbe_dev; struct gbe_slave *slave = intf->slave; + gbe_sgmii_rtreset(gbe_dev, slave, true); gbe_port_reset(slave); /* Disable forwarding */ cpsw_ale_control_set(gbe_dev->ale, slave->port_num, @@ -2143,6 +2160,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf) gbe_sgmii_config(priv, slave); gbe_port_reset(slave); + gbe_sgmii_rtreset(priv, slave, false); gbe_port_config(priv, slave, priv->rx_packet_max); gbe_set_slave_mac(slave, gbe_intf); /* enable forwarding */ @@ -2687,10 +2705,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev) { struct gbe_slave *slave; - for (;;) { + while (!list_empty(&gbe_dev->secondary_slaves)) { slave = first_sec_slave(gbe_dev); - if (!slave) - break; + if (slave->phy) phy_disconnect(slave->phy); list_del(&slave->slave_list); @@ -3073,14 +3090,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, &gbe_dev->dma_chan_name); if (ret < 0) { dev_err(dev, "missing \"tx-channel\" parameter\n"); - ret = -ENODEV; - goto quit; + return -EINVAL; } if (!strcmp(node->name, "gbe")) { ret = get_gbe_resource_version(gbe_dev, node); if (ret) - goto quit; + return ret; dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); @@ -3091,22 +3107,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, else ret = -ENODEV; - if (ret) - goto quit; } else if (!strcmp(node->name, "xgbe")) { ret = set_xgbe_ethss10_priv(gbe_dev, node); if (ret) - goto quit; + return ret; ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, gbe_dev->ss_regs); - if (ret) - goto quit; } else { dev_err(dev, "unknown GBE node(%s)\n", node->name); ret = -ENODEV; - goto quit; } + if (ret) + return ret; + interfaces = of_get_child_by_name(node, "interfaces"); if (!interfaces) dev_err(dev, "could not find interfaces\n"); @@ -3114,11 +3128,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); if (ret) - goto quit; + return ret; ret = netcp_txpipe_open(&gbe_dev->tx_pipe); if (ret) - goto quit; + return ret; /* Create network interfaces */ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); @@ -3133,6 +3147,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) break; } + of_node_put(interfaces); if (!gbe_dev->num_slaves) dev_warn(dev, "No network interface configured\n"); @@ -3145,9 +3160,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, of_node_put(secondary_ports); if (!gbe_dev->num_slaves) { - 
dev_err(dev, "No network interface or secondary ports configured\n"); + dev_err(dev, + "No network interface or secondary ports configured\n"); ret = -ENODEV; - goto quit; + goto free_sec_ports; } memset(&ale_params, 0, sizeof(ale_params)); @@ -3161,7 +3177,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, if (!gbe_dev->ale) { dev_err(gbe_dev->dev, "error initializing ale engine\n"); ret = -ENODEV; - goto quit; + goto free_sec_ports; } else { dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); } @@ -3186,16 +3202,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, *inst_priv = gbe_dev; return 0; -quit: - if (gbe_dev->hw_stats) - devm_kfree(dev, gbe_dev->hw_stats); - if (gbe_dev->hw_stats_prev) - devm_kfree(dev, gbe_dev->hw_stats_prev); - cpsw_ale_destroy(gbe_dev->ale); - if (gbe_dev->ss_regs) - devm_iounmap(dev, gbe_dev->ss_regs); - of_node_put(interfaces); - devm_kfree(dev, gbe_dev); +free_sec_ports: + free_secondary_ports(gbe_dev); return ret; } @@ -3268,12 +3276,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv) free_secondary_ports(gbe_dev); if (!list_empty(&gbe_dev->gbe_intf_head)) - dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); + dev_alert(gbe_dev->dev, + "unreleased ethss interfaces present\n"); - devm_kfree(gbe_dev->dev, gbe_dev->hw_stats); - devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs); - memset(gbe_dev, 0x00, sizeof(*gbe_dev)); - devm_kfree(gbe_dev->dev, gbe_dev); return 0; } diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c index dbeb14266e2f..5d8419f658d0 100644 --- a/drivers/net/ethernet/ti/netcp_sgmii.c +++ b/drivers/net/ethernet/ti/netcp_sgmii.c @@ -18,6 +18,9 @@ #include "netcp.h" +#define SGMII_SRESET_RESET BIT(0) +#define SGMII_SRESET_RTRESET BIT(1) + #define SGMII_REG_STATUS_LOCK BIT(4) #define SGMII_REG_STATUS_LINK BIT(0) #define SGMII_REG_STATUS_AUTONEG BIT(2) @@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val) int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) { /* Soft reset */ - sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); - while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) + sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), + SGMII_SRESET_RESET); + + while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) & + SGMII_SRESET_RESET) != 0x0) ; + return 0; } +/* port is 0 based */ +bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set) +{ + u32 reg; + bool oldval; + + /* Initiate a soft reset */ + reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)); + oldval = (reg & SGMII_SRESET_RTRESET) != 0x0; + if (set) + reg |= SGMII_SRESET_RTRESET; + else + reg &= ~SGMII_SRESET_RTRESET; + sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg); + wmb(); + + return oldval; +} + int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) { u32 status = 0, link = 0; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 3b933bb5a8d5..edd77342773a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, struct virtio_net_hdr vnet_hdr = { 0 }; int vnet_hdr_len = 0; int copylen = 0; + int depth; bool zerocopy = false; size_t linear; ssize_t n; @@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, skb_probe_transport_header(skb, ETH_HLEN); + /* Move network header to the right position 
for VLAN tagged packets */ + if ((skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) && + __vlan_get_protocol(skb, skb->protocol, &depth) != 0) + skb_set_network_header(skb, depth); + rcu_read_lock(); vlan = rcu_dereference(q->vlan); /* copy skb_ubuf_info for callback when skb has no error */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 348652a190af..fe4ec324aebc 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -27,7 +27,7 @@ #include <linux/usb/cdc.h> /* Version Information */ -#define DRIVER_VERSION "v1.08.0 (2015/01/13)" +#define DRIVER_VERSION "v1.08.1 (2015/07/28)" #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" #define MODULENAME "r8152" @@ -1904,11 +1904,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp) static void rtl8152_tx_timeout(struct net_device *netdev) { struct r8152 *tp = netdev_priv(netdev); - int i; netif_warn(tp, tx_err, netdev, "Tx timeout\n"); - for (i = 0; i < RTL8152_MAX_TX; i++) - usb_unlink_urb(tp->tx_info[i].urb); + + usb_queue_reset_device(tp->intf); } static void rtl8152_set_rx_mode(struct net_device *netdev) @@ -2077,7 +2076,6 @@ static int rtl_start_rx(struct r8152 *tp) { int i, ret = 0; - napi_disable(&tp->napi); INIT_LIST_HEAD(&tp->rx_done); for (i = 0; i < RTL8152_MAX_RX; i++) { INIT_LIST_HEAD(&tp->rx_info[i].list); @@ -2085,7 +2083,6 @@ static int rtl_start_rx(struct r8152 *tp) if (ret) break; } - napi_enable(&tp->napi); if (ret && ++i < RTL8152_MAX_RX) { struct list_head rx_queue; @@ -2168,6 +2165,7 @@ static int rtl8153_enable(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return -ENODEV; + usb_disable_lpm(tp->udev); set_tx_qlen(tp); rtl_set_eee_plus(tp); r8153_set_rx_early_timeout(tp); @@ -2339,11 +2337,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts) device_set_wakeup_enable(&tp->udev->dev, false); } +static void r8153_u1u2en(struct r8152 *tp, bool enable) +{ + u8 u1u2[8]; + + if (enable) + memset(u1u2, 0xff, sizeof(u1u2)); + else + memset(u1u2, 0x00, sizeof(u1u2)); + + usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); +} + +static void r8153_u2p3en(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); + if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04) + ocp_data |= U2P3_ENABLE; + else + ocp_data &= ~U2P3_ENABLE; + ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); +} + +static void r8153_power_cut_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); + if (enable) + ocp_data |= PWR_EN | PHASE2_EN; + else + ocp_data &= ~(PWR_EN | PHASE2_EN); + ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); + ocp_data &= ~PCUT_STATUS; + ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); +} + +static bool rtl_can_wakeup(struct r8152 *tp) +{ + struct usb_device *udev = tp->udev; + + return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP); +} + static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) { if (enable) { u32 ocp_data; + r8153_u1u2en(tp, false); + r8153_u2p3en(tp, false); + __rtl_set_wol(tp, WAKE_ANY); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); @@ -2355,6 +2403,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) ocp_write_byte(tp, MCU_TYPE_PLA, 
PLA_CRWECR, CRWECR_NORAML); } else { __rtl_set_wol(tp, tp->saved_wolopts); + r8153_u2p3en(tp, true); + r8153_u1u2en(tp, true); } } @@ -2604,46 +2654,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) set_bit(PHY_RESET, &tp->flags); } -static void r8153_u1u2en(struct r8152 *tp, bool enable) -{ - u8 u1u2[8]; - - if (enable) - memset(u1u2, 0xff, sizeof(u1u2)); - else - memset(u1u2, 0x00, sizeof(u1u2)); - - usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2); -} - -static void r8153_u2p3en(struct r8152 *tp, bool enable) -{ - u32 ocp_data; - - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL); - if (enable) - ocp_data |= U2P3_ENABLE; - else - ocp_data &= ~U2P3_ENABLE; - ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data); -} - -static void r8153_power_cut_en(struct r8152 *tp, bool enable) -{ - u32 ocp_data; - - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT); - if (enable) - ocp_data |= PWR_EN | PHASE2_EN; - else - ocp_data &= ~(PWR_EN | PHASE2_EN); - ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data); - - ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0); - ocp_data &= ~PCUT_STATUS; - ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data); -} - static void r8153_first_init(struct r8152 *tp) { u32 ocp_data; @@ -2786,6 +2796,7 @@ static void rtl8153_disable(struct r8152 *tp) r8153_disable_aldps(tp); rtl_disable(tp); r8153_enable_aldps(tp); + usb_enable_lpm(tp->udev); } static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) @@ -2906,9 +2917,13 @@ static void rtl8153_up(struct r8152 *tp) if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + r8153_u1u2en(tp, false); r8153_disable_aldps(tp); r8153_first_init(tp); r8153_enable_aldps(tp); + r8153_u2p3en(tp, true); + r8153_u1u2en(tp, true); + usb_enable_lpm(tp->udev); } static void rtl8153_down(struct r8152 *tp) @@ -2919,6 +2934,7 @@ static void rtl8153_down(struct r8152 *tp) } r8153_u1u2en(tp, false); + r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); r8153_disable_aldps(tp); r8153_enter_oob(tp); @@ -2937,8 +2953,10 @@ static void set_carrier(struct r8152 *tp) if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); set_bit(RTL8152_SET_RX_MODE, &tp->flags); + napi_disable(&tp->napi); netif_carrier_on(netdev); rtl_start_rx(tp); + napi_enable(&tp->napi); } } else { if (netif_carrier_ok(netdev)) { @@ -3257,6 +3275,7 @@ static void r8153_init(struct r8152 *tp) msleep(20); } + usb_disable_lpm(tp->udev); r8153_u2p3en(tp, false); if (tp->version == RTL_VER_04) { @@ -3331,6 +3350,59 @@ static void r8153_init(struct r8152 *tp) r8153_enable_aldps(tp); r8152b_enable_fc(tp); rtl_tally_reset(tp); + r8153_u2p3en(tp, true); +} + +static int rtl8152_pre_reset(struct usb_interface *intf) +{ + struct r8152 *tp = usb_get_intfdata(intf); + struct net_device *netdev; + + if (!tp) + return 0; + + netdev = tp->netdev; + if (!netif_running(netdev)) + return 0; + + napi_disable(&tp->napi); + clear_bit(WORK_ENABLE, &tp->flags); + usb_kill_urb(tp->intr_urb); + cancel_delayed_work_sync(&tp->schedule); + if (netif_carrier_ok(netdev)) { + netif_stop_queue(netdev); + mutex_lock(&tp->control); + tp->rtl_ops.disable(tp); + mutex_unlock(&tp->control); + } + + return 0; +} + +static int rtl8152_post_reset(struct usb_interface *intf) +{ + struct r8152 *tp = usb_get_intfdata(intf); + struct net_device *netdev; + + if (!tp) + return 0; + + netdev = tp->netdev; + if (!netif_running(netdev)) + return 0; + + set_bit(WORK_ENABLE, &tp->flags); + if (netif_carrier_ok(netdev)) { + mutex_lock(&tp->control); + 
tp->rtl_ops.enable(tp); + rtl8152_set_rx_mode(netdev); + mutex_unlock(&tp->control); + netif_wake_queue(netdev); + } + + napi_enable(&tp->napi); + + return 0; } static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) @@ -3386,9 +3458,11 @@ static int rtl8152_resume(struct usb_interface *intf) if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); + napi_disable(&tp->napi); set_bit(WORK_ENABLE, &tp->flags); if (netif_carrier_ok(tp->netdev)) rtl_start_rx(tp); + napi_enable(&tp->napi); } else { tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, @@ -3415,12 +3489,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (usb_autopm_get_interface(tp->intf) < 0) return; - mutex_lock(&tp->control); - - wol->supported = WAKE_ANY; - wol->wolopts = __rtl_get_wol(tp); - - mutex_unlock(&tp->control); + if (!rtl_can_wakeup(tp)) { + wol->supported = 0; + wol->wolopts = 0; + } else { + mutex_lock(&tp->control); + wol->supported = WAKE_ANY; + wol->wolopts = __rtl_get_wol(tp); + mutex_unlock(&tp->control); + } usb_autopm_put_interface(tp->intf); } @@ -3430,6 +3507,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct r8152 *tp = netdev_priv(dev); int ret; + if (!rtl_can_wakeup(tp)) + return -EOPNOTSUPP; + ret = usb_autopm_get_interface(tp->intf); if (ret < 0) goto out_set_wol; @@ -4076,6 +4156,9 @@ static int rtl8152_probe(struct usb_interface *intf, goto out1; } + if (!rtl_can_wakeup(tp)) + __rtl_set_wol(tp, 0); + tp->saved_wolopts = __rtl_get_wol(tp); if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); @@ -4149,6 +4232,8 @@ static struct usb_driver rtl8152_driver = { .suspend = rtl8152_suspend, .resume = rtl8152_resume, .reset_resume = rtl8152_resume, + .pre_reset = rtl8152_pre_reset, + .post_reset = rtl8152_post_reset, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index a5233422f9dc..7384455792bf 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, nvdimm_bus_unlock(dev); } if (is_nd_btt(dev) && probe) { + struct nd_btt *nd_btt = to_nd_btt(dev); + nd_region = to_nd_region(dev->parent); nvdimm_bus_lock(dev); if (nd_region->btt_seed == dev) nd_region_create_btt_seed(nd_region); + if (nd_region->ns_seed == &nd_btt->ndns->dev && + is_nd_blk(dev->parent)) + nd_region_create_blk_seed(nd_region); nvdimm_bus_unlock(dev); } } diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 8df1b1777745..59bb8556e43a 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -47,7 +47,7 @@ config OF_DYNAMIC config OF_ADDRESS def_bool y - depends on !SPARC + depends on !SPARC && HAS_IOMEM select OF_ADDRESS_PCI if PCI config OF_ADDRESS_PCI diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 18016341d5a9..9f71770b6226 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = { .remove = unittest_remove, .driver = { .name = "unittest", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(unittest_match), }, }; @@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = { static struct i2c_driver unittest_i2c_dev_driver = { .driver = { .name = "unittest-i2c-dev", - .owner = THIS_MODULE, }, .probe = unittest_i2c_dev_probe, .remove 
= unittest_i2c_dev_remove, @@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = { static struct i2c_driver unittest_i2c_mux_driver = { .driver = { .name = "unittest-i2c-mux", - .owner = THIS_MODULE, }, .probe = unittest_i2c_mux_probe, .remove = unittest_i2c_mux_remove, diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 8067f54ce050..5ce5ef211bdb 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name, par_dev->dev.release = free_pardevice; par_dev->devmodel = true; ret = device_register(&par_dev->dev); - if (ret) - goto err_put_dev; + if (ret) { + put_device(&par_dev->dev); + goto err_put_port; + } /* Chain this onto the list */ par_dev->prev = NULL; @@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name, spin_unlock(&port->physport->pardevice_lock); pr_debug("%s: cannot grant exclusive access for device %s\n", port->name, name); - goto err_put_dev; + device_unregister(&par_dev->dev); + goto err_put_port; } port->flags |= PARPORT_FLAG_EXCL; } @@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name, return par_dev; -err_put_dev: - put_device(&par_dev->dev); err_free_devname: kfree(devname); err_free_par_dev: diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index c0e6ede3e27d..6b8dd162f644 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO config PHY_PXA_28NM_HSIC tristate "Marvell USB HSIC 28nm PHY Driver" + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support Marvell USB HSIC PHY driver for Marvell @@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC config PHY_PXA_28NM_USB2 tristate "Marvell USB 2.0 28nm PHY Driver" + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support Marvell USB 2.0 PHY driver for Marvell diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c index c6fc95b53083..335e06d66ed9 100644 --- a/drivers/phy/phy-berlin-usb.c +++ b/drivers/phy/phy-berlin-usb.c @@ -105,9 +105,9 @@ static const u32 phy_berlin_pll_dividers[] = { /* Berlin 2 */ - CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), - /* Berlin 2CD */ CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), + /* Berlin 2CD/Q */ + CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), }; struct phy_berlin_usb_priv { diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 53f295c1bab1..3510b81db3fa 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -28,7 +28,6 @@ #include <linux/delay.h> #include <linux/phy/omap_control_phy.h> #include <linux/of_platform.h> -#include <linux/spinlock.h> #define PLL_STATUS 0x00000004 #define PLL_GO 0x00000008 @@ -83,10 +82,6 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; - bool enabled; - spinlock_t lock; /* serialize clock enable/disable */ - /* the below flag is needed specifically for SATA */ - bool refclk_enabled; }; static struct pipe3_dpll_map dpll_map_usb[] = { @@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy) return NULL; } +static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy); +static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy); + static int ti_pipe3_power_off(struct phy *x) { struct ti_pipe3 *phy = phy_get_drvdata(x); @@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x) u32 val; int ret = 0; + ti_pipe3_enable_clocks(phy); /* * Set pcie_pcs register to 0x96 for 
proper functioning of phy * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table @@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x) u32 val; unsigned long timeout; - /* SATA DPLL can't be powered down due to Errata i783 and PCIe - * does not have internal DPLL - */ - if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || - of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) + /* SATA DPLL can't be powered down due to Errata i783 */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) return 0; - /* Put DPLL in IDLE mode */ - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); - val |= PLL_IDLE; - ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); - - /* wait for LDO and Oscillator to power down */ - timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); - do { - cpu_relax(); - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); - if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) - break; - } while (!time_after(jiffies, timeout)); + /* PCIe doesn't have internal DPLL */ + if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { + /* Put DPLL in IDLE mode */ + val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); + val |= PLL_IDLE; + ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); - if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { - dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", - val); - return -EBUSY; + /* wait for LDO and Oscillator to power down */ + timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); + do { + cpu_relax(); + val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); + if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) + break; + } while (!time_after(jiffies, timeout)); + + if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { + dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", + val); + return -EBUSY; + } } + ti_pipe3_disable_clocks(phy); + return 0; } static struct phy_ops ops = { @@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev) return -ENOMEM; phy->dev = &pdev->dev; - spin_lock_init(&phy->lock); if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { match = of_match_device(ti_pipe3_id_table, &pdev->dev); @@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, phy); pm_runtime_enable(phy->dev); + /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) + if (!IS_ERR(phy->refclk)) + clk_prepare_enable(phy->refclk); generic_phy = devm_phy_create(phy->dev, NULL, &ops); if (IS_ERR(generic_phy)) @@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev) if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); - pm_runtime_get(&pdev->dev); - return 0; } static int ti_pipe3_remove(struct platform_device *pdev) { - if (!pm_runtime_suspended(&pdev->dev)) - pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } -#ifdef CONFIG_PM -static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy) +static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) { - if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { - int ret; + int ret = 0; + if (!IS_ERR(phy->refclk)) { ret = clk_prepare_enable(phy->refclk); if (ret) { dev_err(phy->dev, "Failed to enable refclk %d\n", ret); return ret; } - phy->refclk_enabled = true; } - return 0; -} - -static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy) -{ - if (!IS_ERR(phy->refclk)) - clk_disable_unprepare(phy->refclk); - - 
phy->refclk_enabled = false; -} - -static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) -{ - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&phy->lock, flags); - if (phy->enabled) - goto err1; - - ret = ti_pipe3_enable_refclk(phy); - if (ret) - goto err1; - if (!IS_ERR(phy->wkupclk)) { ret = clk_prepare_enable(phy->wkupclk); if (ret) { dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); - goto err2; + goto disable_refclk; } } @@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) ret = clk_prepare_enable(phy->div_clk); if (ret) { dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); - goto err3; + goto disable_wkupclk; } } - phy->enabled = true; - spin_unlock_irqrestore(&phy->lock, flags); return 0; -err3: +disable_wkupclk: if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); -err2: +disable_refclk: if (!IS_ERR(phy->refclk)) clk_disable_unprepare(phy->refclk); - ti_pipe3_disable_refclk(phy); -err1: - spin_unlock_irqrestore(&phy->lock, flags); return ret; } static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) { - unsigned long flags; - - spin_lock_irqsave(&phy->lock, flags); - if (!phy->enabled) { - spin_unlock_irqrestore(&phy->lock, flags); - return; - } - if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); - /* Don't disable refclk for SATA PHY due to Errata i783 */ - if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) - ti_pipe3_disable_refclk(phy); + if (!IS_ERR(phy->refclk)) + clk_disable_unprepare(phy->refclk); if (!IS_ERR(phy->div_clk)) clk_disable_unprepare(phy->div_clk); - phy->enabled = false; - spin_unlock_irqrestore(&phy->lock, flags); -} - -static int ti_pipe3_runtime_suspend(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - - ti_pipe3_disable_clocks(phy); - return 0; } -static int ti_pipe3_runtime_resume(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - int ret = 0; - - ret = ti_pipe3_enable_clocks(phy); - return ret; -} - -static int ti_pipe3_suspend(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - - ti_pipe3_disable_clocks(phy); - return 0; -} - -static int ti_pipe3_resume(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - int ret; - - ret = ti_pipe3_enable_clocks(phy); - if (ret) - return ret; - - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - return 0; -} -#endif - -static const struct dev_pm_ops ti_pipe3_pm_ops = { - SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend, - ti_pipe3_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume) -}; - static const struct of_device_id ti_pipe3_id_table[] = { { .compatible = "ti,phy-usb3", @@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = { .remove = ti_pipe3_remove, .driver = { .name = "ti-pipe3", - .pm = &ti_pipe3_pm_ops, .of_match_table = ti_pipe3_id_table, }, }; diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c index 832932bdc977..7fd4f511d78f 100644 --- a/drivers/regulator/88pm800.c +++ b/drivers/regulator/88pm800.c @@ -130,7 +130,7 @@ struct pm800_regulators { .owner = THIS_MODULE, \ .n_voltages = ARRAY_SIZE(ldo_volt_table), \ .vsel_reg = PM800_##vreg##_VOUT, \ - .vsel_mask = 0x1f, \ + .vsel_mask = 0xf, \ .enable_reg = PM800_##ereg, \ .enable_mask = 1 << (ebit), \ .volt_table = ldo_volt_table, \ diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index c9f72019bd68..78387a6cbae5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c 
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name); +static void _regulator_put(struct regulator *regulator); static const char *rdev_get_name(struct regulator_dev *rdev) { @@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev, rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); + if (!try_module_get(supply_rdev->owner)) + return -ENODEV; + rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); if (rdev->supply == NULL) { err = -ENOMEM; @@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) } if (!r) { - dev_err(dev, "Failed to resolve %s-supply for %s\n", - rdev->supply_name, rdev->desc->name); - return -EPROBE_DEFER; + if (have_full_constraints()) { + r = dummy_regulator_rdev; + } else { + dev_err(dev, "Failed to resolve %s-supply for %s\n", + rdev->supply_name, rdev->desc->name); + return -EPROBE_DEFER; + } } /* Recursively resolve the supply of the supply */ @@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) /* Cascade always-on state to supply */ if (_regulator_is_enabled(rdev)) { ret = regulator_enable(rdev->supply); - if (ret < 0) + if (ret < 0) { + if (rdev->supply) + _regulator_put(rdev->supply); return ret; + } } return 0; diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index 6f2bdad8b4d8..e94ddcf97722 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c @@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt( pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; if (of_property_read_bool(np, "maxim,enable-bias-control")) - pdata->control_flags |= MAX8973_BIAS_ENABLE; + pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE; return pdata; } diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 326ffb553371..72fc3c32db49 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -34,6 +34,8 @@ #include <linux/mfd/samsung/s2mps14.h> #include <linux/mfd/samsung/s2mpu02.h> +/* The highest number of possible regulators for supported devices. */ +#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX struct s2mps11_info { unsigned int rdev_num; int ramp_delay2; @@ -49,7 +51,7 @@ struct s2mps11_info { * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether * the suspend mode was enabled. 
*/ - unsigned long long s2mps14_suspend_state:50; + DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX); /* Array of size rdev_num with GPIO-s for external sleep control */ int *ext_control_gpio; @@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) switch (s2mps11->dev_type) { case S2MPS13X: case S2MPS14X: - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) val = S2MPS14_ENABLE_SUSPEND; else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) val = S2MPS14_ENABLE_EXT_CONTROL; @@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) val = rdev->desc->enable_mask; break; case S2MPU02: - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) val = S2MPU02_ENABLE_SUSPEND; else val = rdev->desc->enable_mask; @@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev) if (ret < 0) return ret; - s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); + set_bit(rdev_get_id(rdev), s2mps11->suspend_state); /* * Don't enable suspend mode if regulator is already disabled because * this would effectively for a short time turn on the regulator after @@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) case S2MPS11X: s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); regulators = s2mps11_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPS13X: s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); regulators = s2mps13_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPS14X: s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); regulators = s2mps14_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPU02: s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); regulators = s2mpu02_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; default: dev_err(&pdev->dev, "Invalid device type: %u\n", diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 95bccfd3f169..e5225ad9c5b1 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -2,7 +2,7 @@ # Makefile for the S/390 specific device drivers # -obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ +obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/ drivers-y += drivers/s390/built-in.o diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile index 241891a57caf..241891a57caf 100644 --- a/drivers/s390/kvm/Makefile +++ b/drivers/s390/virtio/Makefile diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 53fb975c404b..53fb975c404b 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index f8d8fdb26b72..f8d8fdb26b72 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 82b92c414a9c..437254e1c4de 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); - if (IS_QLA83XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); diff --git a/drivers/scsi/qla2xxx/qla_dbg.c 
b/drivers/scsi/qla2xxx/qla_dbg.c index 0e6ee3ca30e6..8b011aef12bd 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -67,10 +67,10 @@ * | | | 0xd031-0xd0ff | * | | | 0xd101-0xd1fe | * | | | 0xd214-0xd2fe | - * | Target Mode | 0xe079 | | - * | Target Mode Management | 0xf072 | 0xf002 | + * | Target Mode | 0xe080 | | + * | Target Mode Management | 0xf096 | 0xf002 | * | | | 0xf046-0xf049 | - * | Target Mode Task Management | 0x1000b | | + * | Target Mode Task Management | 0x1000d | | * ---------------------------------------------------------------------- */ diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e86201d3b8c6..9ad819edcd67 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -274,6 +274,7 @@ #define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ struct req_que; +struct qla_tgt_sess; /* * (sd.h is not exported, hence local inclusion) @@ -2026,6 +2027,7 @@ typedef struct fc_port { uint16_t port_id; unsigned long retry_delay_timestamp; + struct qla_tgt_sess *tgt_session; } fc_port_t; #include "qla_mr.h" @@ -3154,13 +3156,13 @@ struct qla_hw_data { /* Bit 21 of fw_attributes decides the MCTP capabilities */ #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ ((ha)->fw_attributes_ext[0] & BIT_0)) -#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) -#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) +#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) -#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) +#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) -#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) +#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) #define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) #define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) @@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host { uint16_t fcoe_fcf_idx; uint8_t fcoe_vn_port_mac[6]; + /* list of commands waiting on workqueue */ + struct list_head qla_cmd_list; + struct list_head qla_sess_op_cmd_list; + spinlock_t cmd_list_lock; + + /* Counter to detect races between ELS and RSCN events */ + atomic_t generation_tick; + /* Time when global fcport update has been scheduled */ + int total_fcport_update_gen; + uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 664013115c9d..11f2f3279eab 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data) QLA_LOGIO_LOGIN_RETRIED : 0; qla2x00_post_async_login_done_work(fcport->vha, fcport, lio->u.logio.data); + } else if (sp->type == SRB_LOGOUT_CMD) { + qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT); } } @@ -497,7 +499,10 @@ void qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, uint16_t *data) { - qla2x00_mark_device_lost(vha, fcport, 1, 0); + /* Don't re-login in target mode */ + if (!fcport->tgt_session) + qla2x00_mark_device_lost(vha, fcport, 1, 0); + qlt_logo_completion_handler(fcport, data[0]); return; } @@ -1538,7 +1543,7 @@ 
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { - if (IS_QLA83XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); @@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues. @@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data) { fc_port_t *fcport = data; struct fc_rport *rport; - scsi_qla_host_t *vha = fcport->vha; unsigned long flags; spin_lock_irqsave(fcport->vha->host->host_lock, flags); rport = fcport->drport ? fcport->drport: fcport->rport; fcport->drport = NULL; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) { + if (rport) fc_remote_port_delete(rport); - /* - * Release the target mode FC NEXUS in qla_target.c code - * if target mod is enabled. - */ - qlt_fc_port_deleted(vha, fcport); - } } /** @@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) * Create target mode FC NEXUS in qla_target.c if target mode is * enabled.. */ + qlt_fc_port_added(vha, fcport); spin_lock_irqsave(fcport->vha->host->host_lock, flags); @@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_ONLINE); - qla2x00_reg_remote_port(vha, fcport); - return; + goto reg_port; } fcport->login_retry = 0; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); @@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_set_fcport_state(fcport, FCS_ONLINE); qla2x00_iidma_fcport(vha, fcport); qla24xx_update_fcport_fcp_prio(vha, fcport); - qla2x00_reg_remote_port(vha, fcport); + +reg_port: + if (qla_ini_mode_enabled(vha)) + qla2x00_reg_remote_port(vha, fcport); + else { + /* + * Create target mode FC NEXUS in qla_target.c + */ + qlt_fc_port_added(vha, fcport); + } } /* @@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) LIST_HEAD(new_fcports); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + int discovery_gen; /* If FL port exists, then SNS is present */ if (IS_FWI2_CAPABLE(ha)) @@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) fcport->scan_state = QLA_FCPORT_SCAN; } + /* Mark the time right before querying FW for connected ports. + * This process is long, asynchronous and by the time it's done, + * collected information might not be accurate anymore. E.g. + * disconnected port might have re-connected and a brand new + * session has been created. In this case session's generation + * will be newer than discovery_gen. 
*/ + qlt_do_generation_tick(vha, &discovery_gen); + rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); if (rval != QLA_SUCCESS) break; @@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) continue; - if (fcport->scan_state == QLA_FCPORT_SCAN && - atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); - if (fcport->loop_id != FC_NO_LOOP_ID && - (fcport->flags & FCF_FCP2_DEVICE) == 0 && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_BROADCAST) { - ha->isp_ops->fabric_logout(vha, - fcport->loop_id, - fcport->d_id.b.domain, - fcport->d_id.b.area, - fcport->d_id.b.al_pa); - qla2x00_clear_loop_id(fcport); + if (fcport->scan_state == QLA_FCPORT_SCAN) { + if (qla_ini_mode_enabled(base_vha) && + atomic_read(&fcport->state) == FCS_ONLINE) { + qla2x00_mark_device_lost(vha, fcport, + ql2xplogiabsentdevice, 0); + if (fcport->loop_id != FC_NO_LOOP_ID && + (fcport->flags & FCF_FCP2_DEVICE) == 0 && + fcport->port_type != FCT_INITIATOR && + fcport->port_type != FCT_BROADCAST) { + ha->isp_ops->fabric_logout(vha, + fcport->loop_id, + fcport->d_id.b.domain, + fcport->d_id.b.area, + fcport->d_id.b.al_pa); + qla2x00_clear_loop_id(fcport); + } + } else if (!qla_ini_mode_enabled(base_vha)) { + /* + * In target mode, explicitly kill + * sessions and log out of devices + * that are gone, so that we don't + * end up with an initiator using the + * wrong ACL (if the fabric recycles + * an FC address and we have a stale + * session around) and so that we don't + * report initiators that are no longer + * on the fabric. + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077, + "port gone, logging out/killing session: " + "%8phC state 0x%x flags 0x%x fc4_type 0x%x " + "scan_state %d\n", + fcport->port_name, + atomic_read(&fcport->state), + fcport->flags, fcport->fc4_type, + fcport->scan_state); + qlt_fc_port_deleted(vha, fcport, + discovery_gen); } } } @@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) (fcport->flags & FCF_LOGIN_NEEDED) == 0) continue; + /* + * If we're not an initiator, skip looking for devices + * and logging in. There's no reason for us to do it, + * and it seems to actively cause problems in target + * mode if we race with the initiator logging into us + * (we might get the "port ID used" status back from + * our login command and log out the initiator, which + * seems to cause havoc). + */ + if (!qla_ini_mode_enabled(base_vha)) { + if (fcport->scan_state == QLA_FCPORT_FOUND) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078, + "port %8phC state 0x%x flags 0x%x fc4_type 0x%x " + "scan_state %d (initiator mode disabled; skipping " + "login)\n", fcport->port_name, + atomic_read(&fcport->state), + fcport->flags, fcport->fc4_type, + fcport->scan_state); + } + continue; + } + if (fcport->loop_id == FC_NO_LOOP_ID) { fcport->loop_id = next_loopid; rval = qla2x00_find_new_loop_id( @@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; - /* Find a new loop ID to use. */ - fcport->loop_id = next_loopid; - rval = qla2x00_find_new_loop_id(base_vha, fcport); - if (rval != QLA_SUCCESS) { - /* Ran out of IDs to use */ - break; - } + /* + * If we're not an initiator, skip looking for devices + * and logging in. 
There's no reason for us to do it, + * and it seems to actively cause problems in target + * mode if we race with the initiator logging into us + * (we might get the "port ID used" status back from + * our login command and log out the initiator, which + * seems to cause havoc). + */ + if (qla_ini_mode_enabled(base_vha)) { + /* Find a new loop ID to use. */ + fcport->loop_id = next_loopid; + rval = qla2x00_find_new_loop_id(base_vha, + fcport); + if (rval != QLA_SUCCESS) { + /* Ran out of IDs to use */ + break; + } - /* Login and update database */ - qla2x00_fabric_dev_login(vha, fcport, &next_loopid); + /* Login and update database */ + qla2x00_fabric_dev_login(vha, fcport, + &next_loopid); + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079, + "new port %8phC state 0x%x flags 0x%x fc4_type " + "0x%x scan_state %d (initiator mode disabled; " + "skipping login)\n", + fcport->port_name, + atomic_read(&fcport->state), + fcport->flags, fcport->fc4_type, + fcport->scan_state); + } list_move_tail(&fcport->list, &vha->vp_fcports); } @@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, fcport->fp_speed = new_fcport->fp_speed; /* - * If address the same and state FCS_ONLINE, nothing - * changed. + * If address the same and state FCS_ONLINE + * (or in target mode), nothing changed. */ if (fcport->d_id.b24 == new_fcport->d_id.b24 && - atomic_read(&fcport->state) == FCS_ONLINE) { + (atomic_read(&fcport->state) == FCS_ONLINE || + !qla_ini_mode_enabled(base_vha))) { break; } @@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, * Log it out if still logged in and mark it for * relogin later. */ + if (!qla_ini_mode_enabled(base_vha)) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, + "port changed FC ID, %8phC" + " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", + fcport->port_name, + fcport->d_id.b.domain, + fcport->d_id.b.area, + fcport->d_id.b.al_pa, + fcport->loop_id, + new_fcport->d_id.b.domain, + new_fcport->d_id.b.area, + new_fcport->d_id.b.al_pa); + fcport->d_id.b24 = new_fcport->d_id.b24; + break; + } + fcport->d_id.b24 = new_fcport->d_id.b24; fcport->flags |= FCF_LOGIN_NEEDED; if (fcport->loop_id != FC_NO_LOOP_ID && @@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, if (found) continue; /* If device was not in our fcports list, then add it. */ + new_fcport->scan_state = QLA_FCPORT_FOUND; list_add_tail(&new_fcport->list, new_fcports); /* Allocate a new replacement fcport. */ @@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) atomic_read(&fcport->state) != FCS_UNCONFIGURED) { spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_rport_del(fcport); + + /* + * Release the target mode FC NEXUS in + * qla_target.c, if target mod is enabled. 
+ */ + qlt_fc_port_deleted(vha, fcport, + base_vha->total_fcport_update_gen); + spin_lock_irqsave(&ha->vport_slock, flags); } } diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 36fbd4c7af8f..6f02b26a35cf 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); + if (!sp->fcport->tgt_session || + !sp->fcport->tgt_session->keep_nport_handle) + logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 02b1c1c5355b..b2f713ad9034 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt, *orig_iocb_cnt = mcp->mb[10]; if (vha->hw->flags.npiv_supported && max_npiv_vports) *max_npiv_vports = mcp->mb[11]; - if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) + if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || + IS_QLA27XX(vha->hw)) && max_fcfs) *max_fcfs = mcp->mb[12]; } @@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { WRT_REG_DWORD(rsp->rsp_q_out, 0); - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) WRT_REG_DWORD(rsp->rsp_q_in, 0); } @@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index a28815b8276f..8a5cac8448c7 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->mbx_count = MAILBOX_REGISTER_COUNT; req_length = REQUEST_ENTRY_CNT_24XX; rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; @@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, spin_lock_irqsave(vha->host->host_lock, flags); fcport->drport = rport; spin_unlock_irqrestore(vha->host->host_lock, flags); + qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen); set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); } else { - fc_remote_port_delete(rport); - qlt_fc_port_deleted(vha, fcport); + int now; + if (rport) + fc_remote_port_delete(rport); + qlt_do_generation_tick(vha, &now); + qlt_fc_port_deleted(vha, fcport, now); } } @@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->vp_fcports); INIT_LIST_HEAD(&vha->work_list); INIT_LIST_HEAD(&vha->list); + INIT_LIST_HEAD(&vha->qla_cmd_list); + INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list); spin_lock_init(&vha->work_lock); + spin_lock_init(&vha->cmd_list_lock); sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, 
vha, 0x0041, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 028e8c8a7de9..2feb5f38edcd 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha) { uint32_t led_select_value = 0; - if (!IS_QLA83XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) goto out; if (ha->port_no == 0) diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index b749026aa592..58651ecbd88c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct atio_from_isp *atio, uint16_t status, int qfull); static void qlt_disable_vha(struct scsi_qla_host *vha); +static void qlt_clear_tgt_db(struct qla_tgt *tgt); +static void qlt_send_notify_ack(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy, + uint32_t add_flags, uint16_t resp_code, int resp_code_valid, + uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan); /* * Global Variables */ @@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq; static DEFINE_MUTEX(qla_tgt_mutex); static LIST_HEAD(qla_tgt_glist); +/* This API intentionally takes dest as a parameter, rather than returning + * int value to avoid caller forgetting to issue wmb() after the store */ +void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) +{ + scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev); + *dest = atomic_inc_return(&base_vha->generation_tick); + /* memory barrier */ + wmb(); +} + /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ static struct qla_tgt_sess *qlt_find_sess_by_port_name( struct qla_tgt *tgt, @@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work) struct qla_tgt *tgt = sess->tgt; struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; + unsigned long flags; + bool logout_started = false; + fc_port_t fcport; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, + "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" + " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n", + __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + sess->logout_on_delete, sess->keep_nport_handle, + sess->plogi_ack_needed); BUG_ON(!tgt); + + if (sess->logout_on_delete) { + int rc; + + memset(&fcport, 0, sizeof(fcport)); + fcport.loop_id = sess->loop_id; + fcport.d_id = sess->s_id; + memcpy(fcport.port_name, sess->port_name, WWN_SIZE); + fcport.vha = vha; + fcport.tgt_session = sess; + + rc = qla2x00_post_async_logout_work(vha, &fcport, NULL); + if (rc != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xf085, + "Schedule logo failed sess %p rc %d\n", + sess, rc); + else + logout_started = true; + } + /* * Release the target session for FC Nexus from fabric module code. 
*/ if (sess->se_sess != NULL) ha->tgt.tgt_ops->free_session(sess); + if (logout_started) { + bool traced = false; + + while (!ACCESS_ONCE(sess->logout_completed)) { + if (!traced) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, + "%s: waiting for sess %p logout\n", + __func__, sess); + traced = true; + } + msleep(100); + } + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087, + "%s: sess %p logout completed\n", + __func__, sess); + } + + spin_lock_irqsave(&ha->hardware_lock, flags); + + if (sess->plogi_ack_needed) + qlt_send_notify_ack(vha, &sess->tm_iocb, + 0, 0, 0, 0, 0, 0); + + list_del(&sess->sess_list_entry); + + spin_unlock_irqrestore(&ha->hardware_lock, flags); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, "Unregistration of sess %p finished\n", sess); @@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); - list_del(&sess->sess_list_entry); - if (sess->deleted) - list_del(&sess->del_list_entry); + if (!list_empty(&sess->del_list_entry)) + list_del_init(&sess->del_list_entry); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; INIT_WORK(&sess->free_work, qlt_free_session_done); schedule_work(&sess->free_work); @@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) loop_id = le16_to_cpu(n->u.isp24.nport_handle); if (loop_id == 0xFFFF) { -#if 0 /* FIXME: Re-enable Global event handling.. */ /* Global event */ - atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); - qlt_clear_tgt_db(ha->tgt.qla_tgt); + atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); + qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); +#if 0 /* FIXME: do we need to choose a session here? */ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { sess = list_entry(ha->tgt.qla_tgt->sess_list.next, typeof(*sess), sess_list_entry); @@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess, struct qla_tgt *tgt = sess->tgt; uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; - if (sess->deleted) - return; + if (sess->deleted) { + /* Upgrade to unconditional deletion in case it was temporary */ + if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING) + list_del(&sess->del_list_entry); + else + return; + } ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, "Scheduling sess %p for deletion\n", sess); - list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); - sess->deleted = 1; - if (immediate) + if (immediate) { dev_loss_tmo = 0; + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; + list_add(&sess->del_list_entry, &tgt->del_sess_list); + } else { + sess->deleted = QLA_SESS_DELETION_PENDING; + list_add_tail(&sess->del_list_entry, &tgt->del_sess_list); + } sess->expires = jiffies + dev_loss_tmo * HZ; ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, - "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " - "deletion in %u secs (expires: %lu) immed: %d\n", - sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, - sess->expires, immediate); + "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)" + " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n", + sess->vha->vp_idx, sess->port_name, sess->loop_id, + sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa, + dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete, + sess->generation); if (immediate) - schedule_delayed_work(&tgt->sess_del_work, 0); + mod_delayed_work(system_wq, &tgt->sess_del_work, 0); else schedule_delayed_work(&tgt->sess_del_work, sess->expires - jiffies); @@ -578,9 +663,9 @@ 
out_free_id_list: /* ha->hardware_lock supposed to be held on entry */ static void qlt_undelete_sess(struct qla_tgt_sess *sess) { - BUG_ON(!sess->deleted); + BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING); - list_del(&sess->del_list_entry); + list_del_init(&sess->del_list_entry); sess->deleted = 0; } @@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work) del_list_entry); elapsed = jiffies; if (time_after_eq(elapsed, sess->expires)) { - qlt_undelete_sess(sess); + /* No turning back */ + list_del_init(&sess->del_list_entry); + sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, "Timeout: sess %p about to be deleted\n", @@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess( fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id); + /* Cannot undelete at this point */ + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + spin_unlock_irqrestore(&ha->hardware_lock, + flags); + return NULL; + } + if (sess->deleted) qlt_undelete_sess(sess); @@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess( if (sess->local && !local) sess->local = 0; + + qlt_do_generation_tick(vha, &sess->generation); + spin_unlock_irqrestore(&ha->hardware_lock, flags); return sess; @@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess( sess->s_id = fcport->d_id; sess->loop_id = fcport->loop_id; sess->local = local; + INIT_LIST_HEAD(&sess->del_list_entry); + + /* Under normal circumstances we want to logout from firmware when + * session eventually ends and release corresponding nport handle. + * In the exception cases (e.g. when new PLOGI is waiting) corresponding + * code will adjust these flags as necessary. */ + sess->logout_on_delete = 1; + sess->keep_nport_handle = 0; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", @@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess( spin_lock_irqsave(&ha->hardware_lock, flags); list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); vha->vha_tgt.qla_tgt->sess_count++; + qlt_do_generation_tick(vha, &sess->generation); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, @@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess( } /* - * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() + * Called from qla2x00_reg_remote_port() */ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) { @@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) mutex_unlock(&vha->vha_tgt.tgt_mutex); spin_lock_irqsave(&ha->hardware_lock, flags); + } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + /* Point of no return */ + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; } else { kref_get(&sess->se_sess->sess_kref); @@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) spin_unlock_irqrestore(&ha->hardware_lock, flags); } -void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) +/* + * max_gen - specifies maximum session generation + * at which this deletion requestion is still valid + */ +void +qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) { - struct qla_hw_data *ha = vha->hw; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_tgt_sess *sess; - unsigned long flags; if (!vha->hw->tgt.tgt_ops) return; - if (!tgt || (fcport->port_type != FCT_INITIATOR)) + if (!tgt) return; - spin_lock_irqsave(&ha->hardware_lock, 
flags); if (tgt->tgt_stop) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); return; } sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); if (!sess) { - spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + + if (max_gen - sess->generation < 0) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, + "Ignoring stale deletion request for se_sess %p / sess %p" + " for port %8phC, req_gen %d, sess_gen %d\n", + sess->se_sess, sess, sess->port_name, max_gen, + sess->generation); return; } @@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) sess->local = 1; qlt_schedule_sess_for_deletion(sess, false); - spin_unlock_irqrestore(&ha->hardware_lock, flags); } static inline int test_tgt_sess_count(struct qla_tgt *tgt) @@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, FCP_TMF_CMPL, true); } +static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + + spin_lock(&vha->cmd_list_lock); + + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + if (tag == op->atio.u.isp24.exchange_addr) { + op->aborted = true; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + if (tag == cmd->atio.u.isp24.exchange_addr) { + cmd->state = QLA_TGT_STATE_ABORTED; + spin_unlock(&vha->cmd_list_lock); + return 1; + } + } + + spin_unlock(&vha->cmd_list_lock); + return 0; +} + +/* drop cmds for the given lun + * XXX only looks for cmds on the port through which lun reset was recieved + * XXX does not go through the list of other port (which may have cmds + * for the same lun) + */ +static void abort_cmds_for_lun(struct scsi_qla_host *vha, + uint32_t lun, uint8_t *s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + + key = sid_to_key(s_id); + spin_lock(&vha->cmd_list_lock); + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + uint32_t op_key; + uint32_t op_lun; + + op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + op_lun = scsilun_to_int( + (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun); + if (op_key == key && op_lun == lun) + op->aborted = true; + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key; + uint32_t cmd_lun; + + cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + cmd_lun = scsilun_to_int( + (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun); + if (cmd_key == key && cmd_lun == lun) + cmd->state = QLA_TGT_STATE_ABORTED; + } + spin_unlock(&vha->cmd_list_lock); +} + /* ha->hardware_lock supposed to be held on entry */ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) @@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, } spin_unlock(&se_sess->sess_cmd_lock); - if (!found_lun) - return -ENOENT; + /* cmd not in LIO lists, look in qla list */ + if (!found_lun) { + if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { + /* send TASK_ABORT response immediately */ + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false); + return 0; + } else { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081, + "unable to find cmd in driver or LIO for tag 0x%x\n", + abts->exchange_addr_to_abort); + return -ENOENT; + } + } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, "qla_target(%d): task abort (tag=%d)\n", @@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, return; } + if (sess->deleted == 
QLA_SESS_DELETION_IN_PROGRESS) { + qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false); + return; + } + rc = __qlt_24xx_handle_abts(vha, abts, sess); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, @@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd, struct qla_hw_data *ha = vha->hw; struct se_cmd *se_cmd = &cmd->se_cmd; - if (unlikely(cmd->aborted)) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, - "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)", - vha->vp_idx, cmd, se_cmd, se_cmd->tag); - - cmd->state = QLA_TGT_STATE_ABORTED; - cmd->cmd_flags |= BIT_6; - - qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); - - /* !! At this point cmd could be already freed !! */ - return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED; - } - prm->cmd = cmd; prm->tgt = tgt; prm->rq_result = scsi_status; @@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, unsigned long flags = 0; int res; + spin_lock_irqsave(&ha->hardware_lock, flags); + if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + cmd->state = QLA_TGT_STATE_PROCESSED; + if (cmd->sess->logout_completed) + /* no need to terminate. FW already freed exchange. */ + qlt_abort_cmd_on_host_reset(cmd->vha, cmd); + else + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + memset(&prm, 0, sizeof(prm)); qlt_check_srr_debug(cmd, &xmit_type); @@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); if (unlikely(res != 0)) { - if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED) - return 0; - return res; } @@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_build_ctio_crc2_pkt(&prm, vha); else res = qlt_24xx_build_ctio_pkt(&prm, vha); - if (unlikely(res != 0)) + if (unlikely(res != 0)) { + vha->req->cnt += full_req_cnt; goto out_unmap_unlock; - + } pkt = (struct ctio7_to_24xx *)prm.pkt; @@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) spin_lock_irqsave(&ha->hardware_lock, flags); - if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { + if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) || + (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) { /* * Either a chip reset is active or this request was from * previous life, just abort the processing. 
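[Editorial aside, not part of the patch.] The hunks above stamp every target session with a generation number taken from qlt_do_generation_tick() (an atomic_inc_return() on the base vha followed by wmb()), and qlt_fc_port_deleted() drops deletion requests whose max_gen is older than the session's generation, so a port that disconnected and re-logged in during a long fabric scan is not torn down by stale scan results. A minimal userspace C sketch of that staleness test follows; it is illustration only, and the names do_generation_tick, deletion_is_stale and struct sess are invented stand-ins for the driver's qlt_do_generation_tick(), the max_gen check and struct qla_tgt_sess.

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t generation_tick;	/* stands in for base_vha->generation_tick */

	/* Invented stand-in for qlt_do_generation_tick(): bump and return the counter. */
	static uint32_t do_generation_tick(void)
	{
		return ++generation_tick;
	}

	struct sess {
		uint32_t generation;	/* stamped when the session is (re)created */
	};

	/*
	 * A deletion request carries the generation that was current when it was
	 * issued (max_gen). If the session has been re-created since then, its
	 * generation is newer and the request must be ignored. Viewing the
	 * unsigned difference as signed keeps the comparison meaningful across
	 * counter wraparound, mirroring the driver's
	 * "max_gen - sess->generation < 0" test.
	 */
	static int deletion_is_stale(const struct sess *s, uint32_t max_gen)
	{
		return (int32_t)(max_gen - s->generation) < 0;
	}

	int main(void)
	{
		uint32_t discovery_gen = do_generation_tick();	/* snapshot before a long fabric scan */
		struct sess s = { .generation = do_generation_tick() };	/* port re-logged in meanwhile */

		printf("stale: %d\n", deletion_is_stale(&s, discovery_gen));	/* prints "stale: 1" */
		return 0;
	}

In the driver the increment and the wmb() are kept together inside qlt_do_generation_tick() so that other CPUs observe the stored generation before acting on it; the sketch above deliberately skips that SMP aspect.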
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) else res = qlt_24xx_build_ctio_pkt(&prm, vha); - if (unlikely(res != 0)) + if (unlikely(res != 0)) { + vha->req->cnt += prm.req_cnt; goto out_unlock_free_unmap; + } + pkt = (struct ctio7_to_24xx *)prm.pkt; pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | CTIO7_FLAGS_STATUS_MODE_0); @@ -2651,6 +2850,89 @@ out: /* If hardware_lock held on entry, might drop it, then reaquire */ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ +static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *ntfy) +{ + struct nack_to_isp *nack; + struct qla_hw_data *ha = vha->hw; + request_t *pkt; + int ret = 0; + + ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, + "Sending TERM ELS CTIO (ha=%p)\n", ha); + + pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); + if (pkt == NULL) { + ql_dbg(ql_dbg_tgt, vha, 0xe080, + "qla_target(%d): %s failed: unable to allocate " + "request packet\n", vha->vp_idx, __func__); + return -ENOMEM; + } + + pkt->entry_type = NOTIFY_ACK_TYPE; + pkt->entry_count = 1; + pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + + nack = (struct nack_to_isp *)pkt; + nack->ox_id = ntfy->ox_id; + + nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; + if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { + nack->u.isp24.flags = ntfy->u.isp24.flags & + __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); + } + + /* terminate */ + nack->u.isp24.flags |= + __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE); + + nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; + nack->u.isp24.status = ntfy->u.isp24.status; + nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode; + nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle; + nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address; + nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs; + nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui; + nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + qla2x00_start_iocbs(vha, vha->req); + return ret; +} + +static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, + struct imm_ntfy_from_isp *imm, int ha_locked) +{ + unsigned long flags = 0; + int rc; + + if (qlt_issue_marker(vha, ha_locked) < 0) + return; + + if (ha_locked) { + rc = __qlt_send_term_imm_notif(vha, imm); + +#if 0 /* Todo */ + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#endif + goto done; + } + + spin_lock_irqsave(&vha->hw->hardware_lock, flags); + rc = __qlt_send_term_imm_notif(vha, imm); + +#if 0 /* Todo */ + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, imm, 0, 0); +#endif + +done: + if (!ha_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} + +/* If hardware_lock held on entry, might drop it, then reaquire */ +/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio) @@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, static void qlt_send_term_exchange(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) { - unsigned long flags; + unsigned long flags = 0; int rc; if (qlt_issue_marker(vha, ha_locked) < 0) @@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, rc = __qlt_send_term_exchange(vha, cmd, atio); if (rc == -ENOMEM) qlt_alloc_qfull_cmd(vha, atio, 0, 0); - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); done: if 
(cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || !cmd->cmd_sent_to_fw)) { - if (!ha_locked && !in_interrupt()) - msleep(250); /* just in case */ - - qlt_unmap_sg(vha, cmd); + if (cmd->sg_mapped) + qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); } + + if (!ha_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + return; } @@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) } +void qlt_abort_cmd(struct qla_tgt_cmd *cmd) +{ + struct qla_tgt *tgt = cmd->tgt; + struct scsi_qla_host *vha = tgt->vha; + struct se_cmd *se_cmd = &cmd->se_cmd; + + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, + "qla_target(%d): terminating exchange for aborted cmd=%p " + "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, + se_cmd->tag); + + cmd->state = QLA_TGT_STATE_ABORTED; + cmd->cmd_flags |= BIT_6; + + qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); +} +EXPORT_SYMBOL(qlt_abort_cmd); + void qlt_free_cmd(struct qla_tgt_cmd *cmd) { struct qla_tgt_sess *sess = cmd->sess; @@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd) dump_stack(); } - cmd->cmd_flags |= BIT_12; + cmd->cmd_flags |= BIT_17; ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, skip_term: if (cmd->state == QLA_TGT_STATE_PROCESSED) { - ; + cmd->cmd_flags |= BIT_12; } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { int rx_status = 0; @@ -3191,9 +3492,11 @@ skip_term: ha->tgt.tgt_ops->handle_data(cmd); return; } else if (cmd->state == QLA_TGT_STATE_ABORTED) { + cmd->cmd_flags |= BIT_18; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); } else { + cmd->cmd_flags |= BIT_19; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, "qla_target(%d): A command in state (%d) should " "not return a CTIO complete\n", vha->vp_idx, cmd->state); @@ -3205,7 +3508,6 @@ skip_term: dump_stack(); } - ha->tgt.tgt_ops->free_cmd(cmd); } @@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) if (tgt->tgt_stop) goto out_term; + if (cmd->state == QLA_TGT_STATE_ABORTED) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082, + "cmd with tag %u is aborted\n", + cmd->atio.u.isp24.exchange_addr); + goto out_term; + } + cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->se_cmd.tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( @@ -3316,6 +3625,12 @@ out_term: static void qlt_do_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + scsi_qla_host_t *vha = cmd->vha; + unsigned long flags; + + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&cmd->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); __qlt_do_work(cmd); } @@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; + cmd->cmd_flags = 0; + cmd->jiffies_at_alloc = get_jiffies_64(); + + cmd->reset_count = vha->hw->chip_reset; + return cmd; } @@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work) unsigned long flags; uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; + spin_lock_irqsave(&vha->cmd_list_lock, flags); + list_del(&op->cmd_list); + spin_unlock_irqrestore(&vha->cmd_list_lock, flags); + + if (op->aborted) { + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083, + "sess_op with tag %u is aborted\n", + op->atio.u.isp24.exchange_addr); + goto out_term; + } + ql_dbg(ql_dbg_tgt_mgt, vha, 
0xf022, - "qla_target(%d): Unable to find wwn login" - " (s_id %x:%x:%x), trying to create it manually\n", - vha->vp_idx, s_id[0], s_id[1], s_id[2]); + "qla_target(%d): Unable to find wwn login" + " (s_id %x:%x:%x), trying to create it manually\n", + vha->vp_idx, s_id[0], s_id[1], s_id[2]); if (op->atio.u.raw.entry_count > 1) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, - "Dropping multy entry atio %p\n", &op->atio); + "Dropping multy entry atio %p\n", &op->atio); goto out_term; } @@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, memcpy(&op->atio, atio, sizeof(*atio)); op->vha = vha; + + spin_lock(&vha->cmd_list_lock); + list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list); + spin_unlock(&vha->cmd_list_lock); + INIT_WORK(&op->work, qlt_create_sess_from_atio); queue_work(qla_tgt_wq, &op->work); return 0; } + + /* Another WWN used to have our s_id. Our PLOGI scheduled its + * session deletion, but it's still in sess_del_work wq */ + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + ql_dbg(ql_dbg_io, vha, 0x3061, + "New command while old session %p is being deleted\n", + sess); + return -EFAULT; + } + /* * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. */ @@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -ENOMEM; } - cmd->cmd_flags = 0; - cmd->jiffies_at_alloc = get_jiffies_64(); - - cmd->reset_count = vha->hw->chip_reset; - cmd->cmd_in_wq = 1; cmd->cmd_flags |= BIT_0; + + spin_lock(&vha->cmd_list_lock); + list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); + spin_unlock(&vha->cmd_list_lock); + INIT_WORK(&cmd->work, qlt_do_work); queue_work(qla_tgt_wq, &cmd->work); return 0; @@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, struct scsi_qla_host *vha = sess->vha; struct qla_hw_data *ha = vha->hw; struct qla_tgt_mgmt_cmd *mcmd; + struct atio_from_isp *a = (struct atio_from_isp *)iocb; int res; uint8_t tmr_func; @@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); tmr_func = TMR_LUN_RESET; + abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id); break; case QLA_TGT_CLEAR_TS: @@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb) sizeof(struct atio_from_isp)); } + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) + return -EFAULT; + return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); } @@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha, return __qlt_abort_task(vha, iocb, sess); } +void qlt_logo_completion_handler(fc_port_t *fcport, int rc) +{ + if (fcport->tgt_session) { + if (rc != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, + "%s: se_sess %p / sess %p from" + " port %8phC loop_id %#04x s_id %02x:%02x:%02x" + " LOGO failed: %#x\n", + __func__, + fcport->tgt_session->se_sess, + fcport->tgt_session, + fcport->port_name, fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, + fcport->d_id.b.al_pa, rc); + } + + fcport->tgt_session->logout_completed = 1; + } +} + +static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a, + struct imm_ntfy_from_isp *b) +{ + struct imm_ntfy_from_isp tmp; + memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp)); + memcpy(a, b, sizeof(struct imm_ntfy_from_isp)); + memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp)); +} + +/* +* ha->hardware_lock supposed to be held on entry (to 
protect tgt->sess_list) +* +* Schedules sessions with matching port_id/loop_id but different wwn for +* deletion. Returns existing session with matching wwn if present. +* Null otherwise. +*/ +static struct qla_tgt_sess * +qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn, + port_id_t port_id, uint16_t loop_id) +{ + struct qla_tgt_sess *sess = NULL, *other_sess; + uint64_t other_wwn; + + list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) { + + other_wwn = wwn_to_u64(other_sess->port_name); + + if (wwn == other_wwn) { + WARN_ON(sess); + sess = other_sess; + continue; + } + + /* find other sess with nport_id collision */ + if (port_id.b24 == other_sess->s_id.b24) { + if (loop_id != other_sess->loop_id) { + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* + * logout_on_delete is set by default, but another + * session that has the same s_id/loop_id combo + * might have cleared it when requested this session + * deletion, so don't touch it + */ + qlt_schedule_sess_for_deletion(other_sess, true); + } else { + /* + * Another wwn used to have our s_id/loop_id + * combo - kill the session, but don't log out + */ + sess->logout_on_delete = 0; + qlt_schedule_sess_for_deletion(other_sess, + true); + } + continue; + } + + /* find other sess with nport handle collision */ + if (loop_id == other_sess->loop_id) { + ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d, + "Invalidating sess %p loop_id %d wwn %llx.\n", + other_sess, other_sess->loop_id, other_wwn); + + /* Same loop_id but different s_id + * Ok to kill and logout */ + qlt_schedule_sess_for_deletion(other_sess, true); + } + } + + return sess; +} + +/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */ +static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id) +{ + struct qla_tgt_sess_op *op; + struct qla_tgt_cmd *cmd; + uint32_t key; + int count = 0; + + key = (((u32)s_id->b.domain << 16) | + ((u32)s_id->b.area << 8) | + ((u32)s_id->b.al_pa)); + + spin_lock(&vha->cmd_list_lock); + list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) { + uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id); + if (op_key == key) { + op->aborted = true; + count++; + } + } + list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) { + uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id); + if (cmd_key == key) { + cmd->state = QLA_TGT_STATE_ABORTED; + count++; + } + } + spin_unlock(&vha->cmd_list_lock); + + return count; +} + /* * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire */ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *iocb) { + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_hw_data *ha = vha->hw; + struct qla_tgt_sess *sess = NULL; + uint64_t wwn; + port_id_t port_id; + uint16_t loop_id; + uint16_t wd3_lo; int res = 0; + wwn = wwn_to_u64(iocb->u.isp24.port_name); + + port_id.b.domain = iocb->u.isp24.port_id[2]; + port_id.b.area = iocb->u.isp24.port_id[1]; + port_id.b.al_pa = iocb->u.isp24.port_id[0]; + port_id.b.rsvd_1 = 0; + + loop_id = le16_to_cpu(iocb->u.isp24.nport_handle); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); + /* res = 1 means ack at the end of thread + * res = 0 means ack async/later. 
+ */ switch (iocb->u.isp24.status_subcode) { case ELS_PLOGI: - case ELS_FLOGI: + + /* Mark all stale commands in qla_tgt_wq for deletion */ + abort_cmds_for_s_id(vha, &port_id); + + if (wwn) + sess = qlt_find_sess_invalidate_other(tgt, wwn, + port_id, loop_id); + + if (!sess || IS_SW_RESV_ADDR(sess->s_id)) { + res = 1; + break; + } + + if (sess->plogi_ack_needed) { + /* + * Initiator sent another PLOGI before last PLOGI could + * finish. Swap plogi iocbs and terminate old one + * without acking, new one will get acked when session + * deletion completes. + */ + ql_log(ql_log_warn, sess->vha, 0xf094, + "sess %p received double plogi.\n", sess); + + qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb); + + qlt_send_term_imm_notif(vha, iocb, 1); + + res = 0; + break; + } + + res = 0; + + /* + * Save immediate Notif IOCB for Ack when sess is done + * and being deleted. + */ + memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb)); + sess->plogi_ack_needed = 1; + + /* + * Under normal circumstances we want to release nport handle + * during LOGO process to avoid nport handle leaks inside FW. + * The exception is when LOGO is done while another PLOGI with + * the same nport handle is waiting as might be the case here. + * Note: there is always a possibily of a race where session + * deletion has already started for other reasons (e.g. ACL + * removal) and now PLOGI arrives: + * 1. if PLOGI arrived in FW after nport handle has been freed, + * FW must have assigned this PLOGI a new/same handle and we + * can proceed ACK'ing it as usual when session deletion + * completes. + * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT + * bit reached it, the handle has now been released. We'll + * get an error when we ACK this PLOGI. Nothing will be sent + * back to initiator. Initiator should eventually retry + * PLOGI and situation will correct itself. + */ + sess->keep_nport_handle = ((sess->loop_id == loop_id) && + (sess->s_id.b24 == port_id.b24)); + qlt_schedule_sess_for_deletion(sess, true); + break; + case ELS_PRLI: + wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo); + + if (wwn) + sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id, + loop_id); + + if (sess != NULL) { + if (sess->deleted) { + /* + * Impatient initiator sent PRLI before last + * PLOGI could finish. Will force him to re-try, + * while last one finishes. + */ + ql_log(ql_log_warn, sess->vha, 0xf095, + "sess %p PRLI received, before plogi ack.\n", + sess); + qlt_send_term_imm_notif(vha, iocb, 1); + res = 0; + break; + } + + /* + * This shouldn't happen under normal circumstances, + * since we have deleted the old session during PLOGI + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, + "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", + sess->loop_id, sess, iocb->u.isp24.nport_handle); + + sess->local = 0; + sess->loop_id = loop_id; + sess->s_id = port_id; + + if (wd3_lo & BIT_7) + sess->conf_compl_supported = 1; + + } + res = 1; /* send notify ack */ + + /* Make session global (not used in fabric mode) */ + if (ha->current_topology != ISP_CFG_F) { + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else { + /* todo: else - create sess here. 
*/ + res = 1; /* send notify ack */ + } + + break; + case ELS_LOGO: case ELS_PRLO: res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); @@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, break; } + case ELS_FLOGI: /* should never happen */ default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, "qla_target(%d): Unsupported ELS command %x " @@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + sess = NULL; + goto out_term; + } + kref_get(&sess->se_sess->sess_kref); } @@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt, if (!sess) goto out_term; } else { + if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) { + sess = NULL; + goto out_term; + } + kref_get(&sess->se_sess->sess_kref); } @@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha) /* Adjust ring index */ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); + RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha)); } void @@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return; - if (ha->mqenable || IS_QLA83XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 985d76dd706b..bca584ae45b7 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -167,7 +167,24 @@ struct imm_ntfy_from_isp { uint32_t srr_rel_offs; uint16_t srr_ui; uint16_t srr_ox_id; - uint8_t reserved_4[19]; + union { + struct { + uint8_t node_name[8]; + } plogi; /* PLOGI/ADISC/PDISC */ + struct { + /* PRLI word 3 bit 0-15 */ + uint16_t wd3_lo; + uint8_t resv0[6]; + } prli; + struct { + uint8_t port_id[3]; + uint8_t resv1; + uint16_t nport_handle; + uint16_t resv2; + } req_els; + } u; + uint8_t port_name[8]; + uint8_t resv3[3]; uint8_t vp_index; uint32_t reserved_5; uint8_t port_id[3]; @@ -234,6 +251,7 @@ struct nack_to_isp { uint8_t reserved[2]; uint16_t ox_id; } __packed; +#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 #define NOTIFY_ACK_SRR_FLAGS_REJECT 1 @@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); #define FC_TM_REJECT 4 #define FC_TM_FAILED 5 -/* - * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was - * terminated, so no more actions is needed and success should be returned - * to target. 
- */ -#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717 - #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) #define pci_dma_lo32(a) (a & 0xffffffff) #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) @@ -874,6 +885,15 @@ struct qla_tgt_sess_op { struct scsi_qla_host *vha; struct atio_from_isp atio; struct work_struct work; + struct list_head cmd_list; + bool aborted; +}; + +enum qla_sess_deletion { + QLA_SESS_DELETION_NONE = 0, + QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of + * this one */ + QLA_SESS_DELETION_IN_PROGRESS = 2, }; /* @@ -884,8 +904,15 @@ struct qla_tgt_sess { port_id_t s_id; unsigned int conf_compl_supported:1; - unsigned int deleted:1; + unsigned int deleted:2; unsigned int local:1; + unsigned int logout_on_delete:1; + unsigned int plogi_ack_needed:1; + unsigned int keep_nport_handle:1; + + unsigned char logout_completed; + + int generation; struct se_session *se_sess; struct scsi_qla_host *vha; @@ -897,6 +924,10 @@ struct qla_tgt_sess { uint8_t port_name[WWN_SIZE]; struct work_struct free_work; + + union { + struct imm_ntfy_from_isp tm_iocb; + }; }; struct qla_tgt_cmd { @@ -912,7 +943,6 @@ struct qla_tgt_cmd { unsigned int conf_compl_supported:1; unsigned int sg_mapped:1; unsigned int free_sg:1; - unsigned int aborted:1; /* Needed in case of SRR */ unsigned int write_data_transferred:1; unsigned int ctx_dsd_alloced:1; unsigned int q_full:1; @@ -961,6 +991,9 @@ struct qla_tgt_cmd { * BIT_14 - Back end data received/sent. * BIT_15 - SRR prepare ctio * BIT_16 - complete free + * BIT_17 - flush - qlt_abort_cmd_on_host_reset + * BIT_18 - completion w/abort status + * BIT_19 - completion w/unknown status */ uint32_t cmd_flags; }; @@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio { struct qla_tgt_cmd *cmd; }; +/* Check for Switch reserved address */ +#define IS_SW_RESV_ADDR(_s_id) \ + ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc)) + #define QLA_TGT_XMIT_DATA 1 #define QLA_TGT_XMIT_STATUS 2 #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) @@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64, extern void qlt_lport_deregister(struct scsi_qla_host *); extern void qlt_unreg_sess(struct qla_tgt_sess *); extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); -extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); +extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); extern void qlt_exit(void); extern void qlt_update_vp_map(struct scsi_qla_host *, int); @@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha) ha->host->active_mode |= MODE_INITIATOR; } +static inline uint32_t sid_to_key(const uint8_t *s_id) +{ + uint32_t key; + + key = (((unsigned long)s_id[0] << 16) | + ((unsigned long)s_id[1] << 8) | + (unsigned long)s_id[2]); + return key; +} + /* * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 
*/ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); +extern void qlt_abort_cmd(struct qla_tgt_cmd *); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); @@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *); extern irqreturn_t qla83xx_msix_atio_q(int, void *); extern void qlt_83xx_iospace_config(struct qla_hw_data *); extern int qlt_free_qfull_cmds(struct scsi_qla_host *); +extern void qlt_logo_completion_handler(fc_port_t *, int); +extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); #endif /* __QLA_TARGET_H */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index d9a8c6084346..9224a06646e6 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - + cmd->cmd_flags |= BIT_3; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, - 3000); + 3 * HZ); return 0; } spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); @@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->cmd_flags |= BIT_4; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); cmd->sg_cnt = se_cmd->t_data_nents; cmd->sg = se_cmd->t_data_sg; cmd->offset = 0; - cmd->cmd_flags |= BIT_3; cmd->prot_sg_cnt = se_cmd->t_prot_nents; cmd->prot_sg = se_cmd->t_prot_sg; @@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd->sg_cnt = 0; cmd->offset = 0; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); if (cmd->cmd_flags & BIT_5) { pr_crit("Bit_5 already set for cmd = %p.\n", cmd); dump_stack(); @@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - struct scsi_qla_host *vha = cmd->vha; - struct qla_hw_data *ha = vha->hw; - - if (!cmd->sg_mapped) - return; - - pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); - cmd->sg_mapped = 0; + qlt_abort_cmd(cmd); } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, @@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id( return NULL; } - key = (((unsigned long)s_id[0] << 16) | - ((unsigned long)s_id[1] << 8) | - (unsigned long)s_id[2]); + key = sid_to_key(s_id); pr_debug("find_sess_by_s_id: 0x%06x\n", key); se_nacl = btree_lookup32(&lport->lport_fcport_map, key); @@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( void *slot; int rc; - key = (((unsigned long)s_id[0] << 16) | - ((unsigned long)s_id[1] << 8) | - (unsigned long)s_id[2]); + key = sid_to_key(s_id); pr_debug("set_sess_by_s_id: %06x\n", key); slot = btree_lookup32(&lport->lport_fcport_map, key); @@ -1544,6 +1530,10 @@ static void 
tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id, } sess->conf_compl_supported = conf_compl_supported; + + /* Reset logout parameters to default */ + sess->logout_on_delete = 1; + sess->keep_nport_handle = 0; } /* diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 285f77544c36..7dbbb29d24c6 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; - int err, host_prot; + int err; u32 sg_elems, num_targets; u32 cmd_per_lun; u32 num_queues; @@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev) #ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + int host_prot; + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 0cae1694014d..b0f30fb68914 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA config SPI_ZYNQMP_GQSPI tristate "Xilinx ZynqMP GQSPI controller" - depends on SPI_MASTER + depends on SPI_MASTER && HAS_DMA help Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 788e2b176a4f..acce90ac7371 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -40,6 +40,7 @@ #define SPFI_CONTROL_SOFT_RESET BIT(11) #define SPFI_CONTROL_SEND_DMA BIT(10) #define SPFI_CONTROL_GET_DMA BIT(9) +#define SPFI_CONTROL_SE BIT(8) #define SPFI_CONTROL_TMODE_SHIFT 5 #define SPFI_CONTROL_TMODE_MASK 0x7 #define SPFI_CONTROL_TMODE_SINGLE 0 @@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, else if (xfer->tx_nbits == SPI_NBITS_QUAD && xfer->rx_nbits == SPI_NBITS_QUAD) val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; + val |= SPFI_CONTROL_SE; spfi_writel(spfi, val, SPFI_CONTROL); } diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index eb7d3a6fb14c..f9deb84e4e55 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, { struct spi_imx_data *spi_imx = spi_master_get_devdata(master); - if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) - && (transfer->len > spi_imx->tx_wml)) + if (spi_imx->dma_is_inited + && transfer->len > spi_imx->rx_wml * sizeof(u32) + && transfer->len > spi_imx->tx_wml * sizeof(u32)) return true; return false; } diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 87b20a511a6b..f23f36ebaf3d 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, case GQSPI_SELECT_FLASH_CS_BOTH: instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | GQSPI_GENFIFO_CS_UPPER; + break; case GQSPI_SELECT_FLASH_CS_UPPER: instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; break; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index dd616ff0ffc5..c7de64171c45 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -693,6 +693,7 @@ static struct class *spidev_class; #ifdef CONFIG_OF static const struct of_device_id spidev_dt_ids[] = { { .compatible = "rohm,dh2228fv" }, + { .compatible = 
"lineartechnology,ltc2488" }, {}, }; MODULE_DEVICE_TABLE(of, spidev_dt_ids); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 4e68b62193ed..cd77a064c772 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3998,7 +3998,13 @@ get_immediate: } transport_err: - iscsit_take_action_for_connection_exit(conn); + /* + * Avoid the normal connection failure code-path if this connection + * is still within LOGIN mode, and iscsi_np process context is + * responsible for cleaning up the early connection failure. + */ + if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) + iscsit_take_action_for_connection_exit(conn); out: return 0; } @@ -4082,7 +4088,7 @@ reject: int iscsi_target_rx_thread(void *arg) { - int ret; + int ret, rc; u8 buffer[ISCSI_HDR_LEN], opcode; u32 checksum = 0, digest = 0; struct iscsi_conn *conn = arg; @@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg) * connection recovery / failure event can be triggered externally. */ allow_signal(SIGINT); + /* + * Wait for iscsi_post_login_handler() to complete before allowing + * incoming iscsi/tcp socket I/O, and/or failing the connection. + */ + rc = wait_for_completion_interruptible(&conn->rx_login_comp); + if (rc < 0) + return 0; if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { struct completion comp; - int rc; init_completion(&comp); rc = wait_for_completion_interruptible(&comp); @@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession( struct iscsi_conn *conn) { struct iscsi_session *sess = conn->sess; - int sleep = cmpxchg(&conn->tx_thread_active, true, false); + int sleep = 1; + /* + * Traditional iscsi/tcp will invoke this logic from TX thread + * context during session logout, so clear tx_thread_active and + * sleep if iscsit_close_connection() has not already occured. + * + * Since iser-target invokes this logic from it's own workqueue, + * always sleep waiting for RX/TX thread shutdown to complete + * within iscsit_close_connection(). 
+ */ + if (conn->conn_transport->transport_type == ISCSI_TCP) + sleep = cmpxchg(&conn->tx_thread_active, true, false); atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession( static void iscsit_logout_post_handler_samecid( struct iscsi_conn *conn) { - int sleep = cmpxchg(&conn->tx_thread_active, true, false); + int sleep = 1; + + if (conn->conn_transport->transport_type == ISCSI_TCP) + sleep = cmpxchg(&conn->tx_thread_active, true, false); atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) struct iscsi_session *sess; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; struct se_session *se_sess, *se_sess_tmp; + LIST_HEAD(free_list); int session_count = 0; spin_lock_bh(&se_tpg->session_lock); @@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force) } atomic_set(&sess->session_reinstatement, 1); spin_unlock(&sess->conn_lock); - spin_unlock_bh(&se_tpg->session_lock); - iscsit_free_session(sess); - spin_lock_bh(&se_tpg->session_lock); + list_move_tail(&se_sess->sess_list, &free_list); + } + spin_unlock_bh(&se_tpg->session_lock); + + list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { + sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; + iscsit_free_session(sess); session_count++; } - spin_unlock_bh(&se_tpg->session_lock); pr_debug("Released %d iSCSI Session(s) from Target Portal" " Group: %hu\n", session_count, tpg->tpgt); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 3d0fe4ff5590..7e8f65e5448f 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn) init_completion(&conn->conn_logout_comp); init_completion(&conn->rx_half_close_comp); init_completion(&conn->tx_half_close_comp); + init_completion(&conn->rx_login_comp); spin_lock_init(&conn->cmd_lock); spin_lock_init(&conn->conn_usage_lock); spin_lock_init(&conn->immed_queue_lock); @@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn) iscsit_start_nopin_timer(conn); } -static int iscsit_start_kthreads(struct iscsi_conn *conn) +int iscsit_start_kthreads(struct iscsi_conn *conn) { int ret = 0; @@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn) return 0; out_tx: + send_sig(SIGINT, conn->tx_thread, 1); kthread_stop(conn->tx_thread); conn->tx_thread_active = false; out_bitmap: @@ -689,7 +691,7 @@ out_bitmap: return ret; } -int iscsi_post_login_handler( +void iscsi_post_login_handler( struct iscsi_np *np, struct iscsi_conn *conn, u8 zero_tsih) @@ -699,7 +701,6 @@ int iscsi_post_login_handler( struct se_session *se_sess = sess->se_sess; struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; - int rc; iscsit_inc_conn_usage_count(conn); @@ -739,10 +740,6 @@ int iscsi_post_login_handler( sess->sess_ops->InitiatorName); spin_unlock_bh(&sess->conn_lock); - rc = iscsit_start_kthreads(conn); - if (rc) - return rc; - iscsi_post_login_start_timers(conn); /* * Determine CPU mask to ensure connection's RX and TX kthreads @@ -751,15 +748,20 @@ int iscsi_post_login_handler( iscsit_thread_get_cpumask(conn); conn->conn_rx_reset_cpumask = 1; conn->conn_tx_reset_cpumask = 1; - + /* + * 
Wakeup the sleeping iscsi_target_rx_thread() now that + * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. + */ + complete(&conn->rx_login_comp); iscsit_dec_conn_usage_count(conn); + if (stop_timer) { spin_lock_bh(&se_tpg->session_lock); iscsit_stop_time2retain_timer(sess); spin_unlock_bh(&se_tpg->session_lock); } iscsit_dec_session_usage_count(sess); - return 0; + return; } iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); @@ -800,10 +802,6 @@ int iscsi_post_login_handler( " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); spin_unlock_bh(&se_tpg->session_lock); - rc = iscsit_start_kthreads(conn); - if (rc) - return rc; - iscsi_post_login_start_timers(conn); /* * Determine CPU mask to ensure connection's RX and TX kthreads @@ -812,10 +810,12 @@ int iscsi_post_login_handler( iscsit_thread_get_cpumask(conn); conn->conn_rx_reset_cpumask = 1; conn->conn_tx_reset_cpumask = 1; - + /* + * Wakeup the sleeping iscsi_target_rx_thread() now that + * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. + */ + complete(&conn->rx_login_comp); iscsit_dec_conn_usage_count(conn); - - return 0; } static void iscsi_handle_login_thread_timeout(unsigned long data) @@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) if (ret < 0) goto new_sess_out; - if (!conn->sess) { - pr_err("struct iscsi_conn session pointer is NULL!\n"); - goto new_sess_out; - } - iscsi_stop_login_thread_timer(np); - if (signal_pending(current)) - goto new_sess_out; - if (ret == 1) { tpg_np = conn->tpg_np; - ret = iscsi_post_login_handler(np, conn, zero_tsih); - if (ret < 0) - goto new_sess_out; - + iscsi_post_login_handler(np, conn, zero_tsih); iscsit_deaccess_np(np, tpg, tpg_np); } diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index 1c7358081533..57aa0d0fd820 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *); extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); -extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); +extern int iscsit_start_kthreads(struct iscsi_conn *); +extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, bool, bool); extern int iscsi_target_login_thread(void *); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 8c02fa34716f..f9cde9141836 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -17,6 +17,7 @@ ******************************************************************************/ #include <linux/ctype.h> +#include <linux/kthread.h> #include <scsi/iscsi_proto.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> @@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log ntohl(login_rsp->statsn), login->rsp_length); padding = ((-login->rsp_length) & 3); + /* + * Before sending the last login response containing the transition + * bit for full-feature-phase, go ahead and start up TX/RX threads + * now to avoid potential resource allocation failures after the + * final login response has been sent. 
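
The comment above states the ordering rule the follow-up nego.c hunk implements: do the work that can fail (starting the RX/TX kthreads) before sending the response that commits the connection to full-feature phase, and unwind it if the send itself fails. A rough sketch of that shape, using made-up helper names rather than the exact iscsi entry points:

static int commit_final_response(struct iscsi_conn *conn,
                                 struct iscsi_login *login)
{
        int rc;

        rc = iscsit_start_kthreads(conn);       /* the step that can fail */
        if (rc)
                return rc;                      /* still easy to report   */

        if (send_login_rsp(conn, login) < 0) {  /* made-up helper         */
                stop_kthreads(conn);            /* unwind, as the err:    */
                return -1;                      /* label in the hunk does */
        }
        return 0;                               /* threads now own the conn */
}
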
+ */ + if (login->login_complete) { + int rc = iscsit_start_kthreads(conn); + if (rc) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + } if (conn->conn_transport->iscsit_put_login_tx(conn, login, login->rsp_length + padding) < 0) - return -1; + goto err; login->rsp_length = 0; mutex_lock(&sess->cmdsn_mutex); @@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log mutex_unlock(&sess->cmdsn_mutex); return 0; + +err: + if (login->login_complete) { + if (conn->rx_thread && conn->rx_thread_active) { + send_sig(SIGINT, conn->rx_thread, 1); + kthread_stop(conn->rx_thread); + } + if (conn->tx_thread && conn->tx_thread_active) { + send_sig(SIGINT, conn->tx_thread, 1); + kthread_stop(conn->tx_thread); + } + spin_lock(&iscsit_global->ts_bitmap_lock); + bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, + get_order(1)); + spin_unlock(&iscsit_global->ts_bitmap_lock); + } + return -1; } static void iscsi_target_sk_data_ready(struct sock *sk) diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 0b0de3647478..c2e9fea90b4a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -747,7 +747,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da, if (!dev->transport->init_prot || !dev->transport->free_prot) { /* 0 is only allowed value for non-supporting backends */ if (flag == 0) - return 0; + return count; pr_err("DIF protection not supported by backend: %s\n", dev->transport->name); @@ -1590,9 +1590,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( u8 type = 0; if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) - return 0; + return count; if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) - return 0; + return count; if (dev->export_count) { pr_debug("Unable to process APTPL metadata while" @@ -1658,22 +1658,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( * PR APTPL Metadata for Reservation */ case Opt_res_holder: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; res_holder = arg; break; case Opt_res_type: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; type = (u8)arg; break; case Opt_res_scope: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; break; case Opt_res_all_tg_pt: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; all_tg_pt = (int)arg; break; case Opt_mapped_lun: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; mapped_lun = (u64)arg; break; /* @@ -1701,14 +1711,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata( } break; case Opt_tpgt: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; tpgt = (u16)arg; break; case Opt_port_rtpi: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; break; case Opt_target_lun: - match_int(args, &arg); + ret = match_int(args, &arg); + if (ret) + goto out; target_lun = (u64)arg; break; default: @@ -1985,7 +2001,7 @@ static ssize_t target_core_store_alua_lu_gp( lu_gp_mem = dev->dev_alua_lu_gp_mem; if (!lu_gp_mem) - return 0; + return count; if (count > LU_GROUP_NAME_BUF) { pr_err("ALUA LU Group Alias too large!\n"); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 0fdbe43b7dad..5ab7100de17e 100644 --- a/drivers/target/target_core_pr.c +++ 
b/drivers/target/target_core_pr.c @@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port( LIST_HEAD(tid_dest_list); struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; unsigned char *buf, *ptr, proto_ident; - const unsigned char *i_str; + const unsigned char *i_str = NULL; char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; sense_reason_t ret; u32 tpdl, tid_len = 0; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 4703f403f31c..384cf8894411 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev) dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; dev->dev_attrib.hw_max_sectors = UINT_MAX; dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; + dev->dev_attrib.is_nonrot = 1; rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index b0744433315a..b5ba1ec3c354 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf) cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) buf[4] = 0x5; else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || - cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) + cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) buf[4] = 0x4; } + /* logical unit supports type 1 and type 3 protection */ + if ((dev->transport->get_device_type(dev) == TYPE_DISK) && + (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) && + (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) { + buf[4] |= (0x3 << 3); + } + /* Set HEADSUP, ORDSUP, SIMPSUP */ buf[5] = 0x07; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index c9c27f69e101..ee8bfacf2071 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty) * Locking: ctrl_lock */ -static void isig(int sig, struct tty_struct *tty) +static void __isig(int sig, struct tty_struct *tty) { - struct n_tty_data *ldata = tty->disc_data; struct pid *tty_pgrp = tty_get_pgrp(tty); if (tty_pgrp) { kill_pgrp(tty_pgrp, sig, 1); put_pid(tty_pgrp); } +} - if (!L_NOFLSH(tty)) { +static void isig(int sig, struct tty_struct *tty) +{ + struct n_tty_data *ldata = tty->disc_data; + + if (L_NOFLSH(tty)) { + /* signal only */ + __isig(sig, tty); + + } else { /* signal and flush */ up_read(&tty->termios_rwsem); down_write(&tty->termios_rwsem); + __isig(sig, tty); + /* clear echo buffer */ mutex_lock(&ldata->output_lock); ldata->echo_head = ldata->echo_tail = 0; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 76e65b714471..15b4079a335e 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE config SERIAL_SC16IS7XX tristate "SC16IS7xx serial support" select SERIAL_CORE - depends on I2C || SPI_MASTER + depends on (SPI_MASTER && !I2C) || I2C help This selects support for SC16IS7xx serial ports. 
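
The n_tty isig() split a little further up exists because a rw_semaphore offers no in-place upgrade: the flush branch has to drop termios_rwsem for read and re-take it for write, and the signal is now delivered inside that write-locked region so the signal and the buffer flush stay ordered against concurrent termios changes. Sketch of the non-NOFLSH branch:

        up_read(&tty->termios_rwsem);   /* no read -> write upgrade exists */
        down_write(&tty->termios_rwsem);

        __isig(sig, tty);               /* signal the foreground pgrp ...  */
        /* ... then flush echo and input buffers with writers excluded */
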
Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 50cf5b10ceed..fd27e986b1dd 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, void __iomem *base; base = devm_ioremap_resource(dev, mmiobase); - if (!base) - return -ENOMEM; + if (IS_ERR(base)) + return PTR_ERR(base); index = pl011_probe_dt_alias(index, dev); diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c index a57301a6fe42..679709f51fd4 100644 --- a/drivers/tty/serial/etraxfs-uart.c +++ b/drivers/tty/serial/etraxfs-uart.c @@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev) port = platform_get_drvdata(pdev); uart_remove_one_port(&etraxfs_uart_driver, port); - etraxfs_uart_ports[pdev->id] = NULL; + etraxfs_uart_ports[port->line] = NULL; return 0; } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 2c90dc31bfaa..54fdc7866ea1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port) writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); - /* Can we enable the DMA support? */ - if (is_imx6q_uart(sport) && !uart_console(port) && - !sport->dma_is_inited) - imx_uart_dma_init(sport); - spin_lock_irqsave(&sport->port.lock, flags); /* Reset fifo's and state machines */ i = 100; @@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port) writel(USR1_RTSD, sport->port.membase + USR1); writel(USR2_ORE, sport->port.membase + USR2); - if (sport->dma_is_inited && !sport->dma_is_enabled) - imx_enable_dma(sport); - temp = readl(sport->port.membase + UCR1); temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; @@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, } else { ucr2 |= UCR2_CTSC; } + + /* Can we enable the DMA support? 
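
The amba-pl011 hunk above fixes a recurring probe-time mistake: devm_ioremap_resource() never returns NULL on failure, it returns an ERR_PTR() value, so a NULL check lets the error pointer through to be dereferenced later. The correct pattern is:

        base = devm_ioremap_resource(dev, mmiobase);
        if (IS_ERR(base))
                return PTR_ERR(base);   /* propagates -EBUSY, -ENOMEM, ... */
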
*/ + if (is_imx6q_uart(sport) && !uart_console(port) + && !sport->dma_is_inited) + imx_uart_dma_init(sport); } else { termios->c_cflag &= ~CRTSCTS; } @@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) imx_enable_ms(&sport->port); + if (sport->dma_is_inited && !sport->dma_is_enabled) + imx_enable_dma(sport); spin_unlock_irqrestore(&sport->port.lock, flags); } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 9e6576004a42..5ccc698cbbfa 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val) (reg << SC16IS7XX_REG_SHIFT) | port->line, val); } +static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line; + + regcache_cache_bypass(s->regmap, true); + regmap_raw_read(s->regmap, addr, s->buf, rxlen); + regcache_cache_bypass(s->regmap, false); +} + +static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line; + + regcache_cache_bypass(s->regmap, true); + regmap_raw_write(s->regmap, addr, s->buf, to_send); + regcache_cache_bypass(s->regmap, false); +} + static void sc16is7xx_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val) { @@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); bytes_read = 1; } else { - regcache_cache_bypass(s->regmap, true); - regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG, - s->buf, rxlen); - regcache_cache_bypass(s->regmap, false); + sc16is7xx_fifo_read(port, rxlen); bytes_read = rxlen; } @@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port) s->buf[i] = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); } - regcache_cache_bypass(s->regmap, true); - regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); - regcache_cache_bypass(s->regmap, false); + + sc16is7xx_fifo_write(port, to_send); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 7ae1592f7ec9..f36852067f20 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) mutex_lock(&port->mutex); uart_shutdown(tty, state); tty_port_tty_set(port, NULL); - tty->closing = 0; + spin_lock_irqsave(&port->lock, flags); if (port->blocked_open) { @@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) mutex_unlock(&port->mutex); tty_ldisc_flush(tty); + tty->closing = 0; } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index ea27804d87af..381a2b13682c 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty) schedule(); continue; } + __set_current_state(TASK_RUNNING); count = sel_buffer_lth - pasted; count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, count); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8fe52989b380..4462d167900c 100644 --- 
a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init) __module_get(vc->vc_sw->owner); vc->vc_num = num; vc->vc_display_fg = &master_display_fg; + if (vc->vc_uni_pagedir_loc) + con_free_unimap(vc); vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; vc->vc_uni_pagedir = NULL; vc->vc_hi_font_mask = 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 519a77ba214c..b30e7423549b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1944,6 +1944,7 @@ static void __exit acm_exit(void) usb_deregister(&acm_driver); tty_unregister_driver(acm_tty_driver); put_tty_driver(acm_tty_driver); + idr_destroy(&acm_minors); } module_init(acm_init); diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 0e6f968e93fe..01c0c0477a9e 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -242,7 +242,7 @@ static int __init ulpi_init(void) { return bus_register(&ulpi_bus); } -module_init(ulpi_init); +subsys_initcall(ulpi_init); static void __exit ulpi_exit(void) { diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index be5b2074f906..cbcd0920fb51 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd) dev_name(&usb_dev->dev), retval); return (retval < 0) ? retval : -EMSGSIZE; } - if (usb_dev->speed == USB_SPEED_SUPER) { + + if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(usb_dev); - if (retval < 0) { + if (!retval) { + usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); + } else if (usb_dev->speed == USB_SPEED_SUPER) { mutex_unlock(&usb_bus_list_lock); dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", dev_name(&usb_dev->dev), retval); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 43cb2f2e3b43..73dfa194160b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) return usb_get_intfdata(hdev->actconfig->interface[0]); } -static int usb_device_supports_lpm(struct usb_device *udev) +int usb_device_supports_lpm(struct usb_device *udev) { /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. 
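
The register_root_hub() change above keys BOS descriptor retrieval off bcdUSB rather than SuperSpeed alone: devices reporting bcdUSB 0x0201 or later are expected to carry a BOS descriptor, and that is where USB 2.0 LPM capability is declared. Note the le16_to_cpu(), since descriptor fields are little-endian on the wire regardless of host endianness:

        if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
                /* BOS should be present; LPM support is reported via the
                 * USB 2.0 Extended Capability descriptor reached from it */
        }
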
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 7eb1e26798e5..457255a3306a 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -65,6 +65,7 @@ extern int usb_hub_init(void); extern void usb_hub_cleanup(void); extern int usb_major_init(void); extern void usb_major_cleanup(void); +extern int usb_device_supports_lpm(struct usb_device *udev); #ifdef CONFIG_PM diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 2ef3c8d6a9db..69e769c35cf5 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); break; + case USB_REQ_SET_INTERFACE: + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE"); + dwc->start_config_issued = false; + /* Fall through */ default: dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); ret = dwc3_ep0_delegate_req(dwc, ctrl); diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index d32160d6463f..5da37c957b53 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev) return -ENODEV; } - udc->phy_regs = ioremap(r->start, resource_size(r)); + udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (udc->phy_regs == NULL) { dev_err(&pdev->dev, "failed to map phy I/O memory\n"); return -EBUSY; diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index d69c35558f68..362ee8af5fce 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock); int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { + struct device *dev = gadget->dev.parent; + if (req->length == 0) return 0; if (req->num_sgs) { int mapped; - mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, + mapped = dma_map_sg(dev, req->sg, req->num_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mapped == 0) { dev_err(&gadget->dev, "failed to map SGs\n"); @@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget, req->num_mapped_sgs = mapped; } else { - req->dma = dma_map_single(&gadget->dev, req->buf, req->length, + req->dma = dma_map_single(dev, req->buf, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (dma_mapping_error(&gadget->dev, req->dma)) { - dev_err(&gadget->dev, "failed to map buffer\n"); + if (dma_mapping_error(dev, req->dma)) { + dev_err(dev, "failed to map buffer\n"); return -EFAULT; } } @@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget, return; if (req->num_mapped_sgs) { - dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, + dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->num_mapped_sgs = 0; } else { - dma_unmap_single(&gadget->dev, req->dma, req->length, + dma_unmap_single(gadget->dev.parent, req->dma, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); } } diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index f7d561ed3c23..d029bbe9eb36 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -981,10 +981,6 @@ rescan_all: int completed, modified; __hc32 *prev; - /* Is this ED already invisible to the hardware? 
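
The udc-core hunks above map DMA against gadget->dev.parent, i.e. the UDC controller device, because the logical gadget device usually carries no DMA configuration of its own (no dma_mask, no DMA ops); the device that actually masters the bus is the controller. Sketch of the corrected single-buffer path:

        struct device *dma_dev = gadget->dev.parent;    /* the real controller */

        req->dma = dma_map_single(dma_dev, req->buf, req->length,
                                  is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
        if (dma_mapping_error(dma_dev, req->dma))
                return -EFAULT;
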
*/ - if (ed->state == ED_IDLE) - goto ed_idle; - /* only take off EDs that the HC isn't using, accounting for * frame counter wraps and EDs with partially retired TDs */ @@ -1012,12 +1008,10 @@ skip_ed: } /* ED's now officially unlinked, hc doesn't see */ - ed->state = ED_IDLE; ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); ed->hwNextED = 0; wmb(); ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); -ed_idle: /* reentrancy: if we drop the schedule lock, someone might * have modified this list. normally it's just prepending @@ -1088,6 +1082,7 @@ rescan_this: if (list_empty(&ed->td_list)) { *last = ed->ed_next; ed->ed_next = NULL; + ed->state = ED_IDLE; list_del(&ed->in_use_list); } else if (ohci->rh_state == OHCI_RH_RUNNING) { *last = ed->ed_next; diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index e9a6eec39142..cfcfadfc94fc 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -58,7 +58,7 @@ #define CCR_PM_CKRNEN 0x0002 #define CCR_PM_USBPW1 0x0004 #define CCR_PM_USBPW2 0x0008 -#define CCR_PM_USBPW3 0x0008 +#define CCR_PM_USBPW3 0x0010 #define CCR_PM_PMEE 0x0100 #define CCR_PM_PMES 0x8000 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e75c565feb53..78241b5550df 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, u32 pls = status_reg & PORT_PLS_MASK; /* resume state is a xHCI internal state. - * Do not report it to usb core. + * Do not report it to usb core, instead, pretend to be U3, + * thus usb core knows it's not ready for transfer */ - if (pls == XDEV_RESUME) + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; return; + } /* When the CAS bit is set then warm reset * should be performed on port @@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_C_RESET << 16; /* USB3.0 only */ if (hcd->speed == HCD_USB3) { - if ((raw_port_status & PORT_PLC)) + /* Port link change with port in resume state should not be + * reported to usbcore, as this is an internal state to be + * handled by xhci driver. Reporting PLC to usbcore may + * cause usbcore clearing PLC first and port change event + * irq won't be generated. 
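
The ohci-tmio change above is a copy-and-paste mask fix: CCR_PM_USBPW3 had the same value as CCR_PM_USBPW2 (0x0008) and is really the next bit, 0x0010. Spelling such masks with BIT() makes the progression, and any accidental duplication, easier to spot; an illustrative rewrite (not part of the patch):

#define CCR_PM_USBPW1   BIT(2)  /* 0x0004 */
#define CCR_PM_USBPW2   BIT(3)  /* 0x0008 */
#define CCR_PM_USBPW3   BIT(4)  /* 0x0010, was mistakenly defined as 0x0008 */
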
+ */ + if ((raw_port_status & PORT_PLC) && + (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) status |= USB_PORT_STAT_C_LINK_STATE << 16; if ((raw_port_status & PORT_WRC)) status |= USB_PORT_STAT_C_BH_RESET << 16; @@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd) spin_lock_irqsave(&xhci->lock, flags); if (hcd->self.root_hub->do_remote_wakeup) { - if (bus_state->resuming_ports) { + if (bus_state->resuming_ports || /* USB2 */ + bus_state->port_remote_wakeup) { /* USB3 */ spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "suspend failed because " - "a port is resuming\n"); + xhci_dbg(xhci, "suspend failed because a port is resuming\n"); return -EBUSY; } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f8336408ef07..3e442f77a2b9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Attempt to use the ring cache */ if (virt_dev->num_rings_cached == 0) return -ENOMEM; + virt_dev->num_rings_cached--; virt_dev->eps[ep_index].new_ring = virt_dev->ring_cache[virt_dev->num_rings_cached]; virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; - virt_dev->num_rings_cached--; xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1, type); } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4a4cb1d91ac8..5590eac2b22d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -23,10 +23,15 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/acpi.h> #include "xhci.h" #include "xhci-trace.h" +#define PORT2_SSIC_CONFIG_REG2 0x883c +#define PROG_DONE (1 << 30) +#define SSIC_PORT_UNUSED (1 << 31) + /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 @@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } /* + * In some Intel xHCI controllers, in order to get D3 working, + * through a vendor specific SSIC CONFIG register at offset 0x883c, + * SSIC PORT need to be marked as "unused" before putting xHCI + * into D3. After D3 exit, the SSIC port need to be marked as "used". + * Without this change, xHCI might not enter D3 state. 
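
The xhci-mem hunk above fixes an off-by-one in a stack-style cache: with num_rings_cached entries the last valid slot is num_rings_cached - 1, so the counter has to be decremented before it is used as the index, not after. The generic shape of such a pop (hypothetical names):

        /* pop the most recently cached entry */
        cache->count--;
        ring = cache->slots[cache->count];
        cache->slots[cache->count] = NULL;
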
* Make sure PME works on some Intel xHCI controllers by writing 1 to clear * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 */ -static void xhci_pme_quirk(struct xhci_hcd *xhci) +static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); u32 val; void __iomem *reg; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { + + reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; + + /* Notify SSIC that SSIC profile programming is not done */ + val = readl(reg) & ~PROG_DONE; + writel(val, reg); + + /* Mark SSIC port as unused(suspend) or used(resume) */ + val = readl(reg); + if (suspend) + val |= SSIC_PORT_UNUSED; + else + val &= ~SSIC_PORT_UNUSED; + writel(val, reg); + + /* Notify SSIC that SSIC profile programming is done */ + val = readl(reg) | PROG_DONE; + writel(val, reg); + readl(reg); + } + reg = (void __iomem *) xhci->cap_regs + 0x80a4; val = readl(reg); writel(val | BIT(28), reg); readl(reg); } +#ifdef CONFIG_ACPI +static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) +{ + static const u8 intel_dsm_uuid[] = { + 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, + 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, + }; + acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); +} +#else + static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } +#endif /* CONFIG_ACPI */ + /* called during probe() after chip reset completes */ static int xhci_pci_setup(struct usb_hcd *hcd) { @@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_acpi_rtd3_enable(dev); + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ pm_runtime_put_noidle(&dev->dev); @@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) pdev->no_d3cold = true; if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(xhci); + xhci_pme_quirk(hcd, true); return xhci_suspend(xhci, do_wakeup); } @@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) usb_enable_intel_xhci_ports(pdev); if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(xhci); + xhci_pme_quirk(hcd, false); retval = xhci_resume(xhci, hibernated); return retval; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 94416ff70810..6a8fc52aed58 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci, usb_hcd_resume_root_hub(hcd); } + if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) + bus_state->port_remote_wakeup &= ~(1 << faked_port_index); + if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { xhci_dbg(xhci, "port resume event for port %d\n", port_id); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7da0d6043d33..526ebc0c7e72 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) return -EINVAL; } + if (virt_dev->tt_info) + old_active_eps = virt_dev->tt_info->active_eps; + if (virt_dev->udev != udev) { /* If the virt_dev and the udev does not match, this virt_dev * may belong to another udev. 
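
Each programming sequence in the reworked xhci_pme_quirk() above ends with a readl() of the register that was just written. MMIO writes across PCI are posted, so the read-back forces them out to the device before the driver proceeds to put the controller into D3. The general idiom:

        writel(val | PROG_DONE, reg);   /* tell the hardware programming is done */
        readl(reg);                     /* flush the posted write before suspend  */
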
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 31e46cc55807..ed2ebf647c38 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -285,6 +285,7 @@ struct xhci_op_regs { #define XDEV_U0 (0x0 << 5) #define XDEV_U2 (0x2 << 5) #define XDEV_U3 (0x3 << 5) +#define XDEV_INACTIVE (0x6 << 5) #define XDEV_RESUME (0xf << 5) /* true: port has power (see HCC_PPC) */ #define PORT_POWER (1 << 9) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index caf188800c67..6b2479123de7 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_READ_DISC_INFO ), +/* Reported by Oliver Neukum <oneukum@suse.com> + * This device morphes spontaneously into another device if the access + * pattern of Windows isn't followed. Thus writable media would be dirty + * if the initial instance is used. So the device is limited to its + * virtual CD. + * And yes, the concept that BCD goes up to 9 is not heeded */ +UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff, + "ZTE,Incorporated", + "ZTE WCDMA Technologies MSM", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SINGLE_LUN ), + /* Reported by Sven Geggus <sven-usbst@geggus.net> * This encrypted pen drive returns bogus data for the initial READ(10). */ @@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_INITIAL_READ10 ), +/* Reported by Hans de Goede <hdegoede@redhat.com> + * These are mini projectors using USB for both power and video data transport + * The usb-storage interface is a virtual windows driver CD, which the gm12u320 + * driver automatically converts into framebuffer & kms dri device nodes. + */ +UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff, + "Grain-media Technology Corp.", + "USB3.0 Device GM12U320", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + /* Patch by Richard Schütz <r.schtz@t-online.de> * This external hard drive enclosure uses a JMicron chip which * needs the US_FL_IGNORE_RESIDUE flag to work properly. 
*/ diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 2fb29dfeffbd..563c510f285c 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev) } EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); +static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, + char *buf) +{ + struct vfio_device *device; + + mutex_lock(&group->device_lock); + list_for_each_entry(device, &group->device_list, group_next) { + if (!strcmp(dev_name(device->dev), buf)) { + vfio_device_get(device); + break; + } + } + mutex_unlock(&group->device_lock); + + return device; +} + /* * Caller must hold a reference to the vfio_device */ @@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) { struct vfio_device *device; struct file *filep; - int ret = -ENODEV; + int ret; if (0 == atomic_read(&group->container_users) || !group->container->iommu_driver || !vfio_group_viable(group)) return -EINVAL; - mutex_lock(&group->device_lock); - list_for_each_entry(device, &group->device_list, group_next) { - if (strcmp(dev_name(device->dev), buf)) - continue; + device = vfio_device_get_from_name(group, buf); + if (!device) + return -ENODEV; - ret = device->ops->open(device->device_data); - if (ret) - break; - /* - * We can't use anon_inode_getfd() because we need to modify - * the f_mode flags directly to allow more than just ioctls - */ - ret = get_unused_fd_flags(O_CLOEXEC); - if (ret < 0) { - device->ops->release(device->device_data); - break; - } + ret = device->ops->open(device->device_data); + if (ret) { + vfio_device_put(device); + return ret; + } - filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, - device, O_RDWR); - if (IS_ERR(filep)) { - put_unused_fd(ret); - ret = PTR_ERR(filep); - device->ops->release(device->device_data); - break; - } + /* + * We can't use anon_inode_getfd() because we need to modify + * the f_mode flags directly to allow more than just ioctls + */ + ret = get_unused_fd_flags(O_CLOEXEC); + if (ret < 0) { + device->ops->release(device->device_data); + vfio_device_put(device); + return ret; + } - /* - * TODO: add an anon_inode interface to do this. - * Appears to be missing by lack of need rather than - * explicitly prevented. Now there's need. - */ - filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, + device, O_RDWR); + if (IS_ERR(filep)) { + put_unused_fd(ret); + ret = PTR_ERR(filep); + device->ops->release(device->device_data); + vfio_device_put(device); + return ret; + } + + /* + * TODO: add an anon_inode interface to do this. + * Appears to be missing by lack of need rather than + * explicitly prevented. Now there's need. 
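
The restructured vfio_group_get_device_fd() in this hunk also illustrates the usual anon-inode file descriptor sequence (driver open/release and reference counting trimmed): reserve the descriptor number with get_unused_fd_flags(), create the file, and call fd_install() only once nothing further can fail, because an installed fd cannot be unwound; error paths before that point use put_unused_fd().

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;                      /* no number was reserved  */

        filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
                                   device, O_RDWR);
        if (IS_ERR(filep)) {
                put_unused_fd(fd);              /* fd never became visible */
                return PTR_ERR(filep);
        }

        fd_install(fd, filep);                  /* publish it; final step  */
        return fd;
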
+ */ + filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - vfio_device_get(device); - atomic_inc(&group->container_users); + atomic_inc(&group->container_users); - fd_install(ret, filep); - break; - } - mutex_unlock(&group->device_lock); + fd_install(ret, filep); return ret; } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9e8e004bb1c3..eec2f11809ff 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -22,14 +22,20 @@ #include <linux/file.h> #include <linux/highmem.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <linux/kthread.h> #include <linux/cgroup.h> #include <linux/module.h> +#include <linux/sort.h> #include "vhost.h" +static ushort max_mem_regions = 64; +module_param(max_mem_regions, ushort, 0444); +MODULE_PARM_DESC(max_mem_regions, + "Maximum number of memory regions in memory map. (default: 64)"); + enum { - VHOST_MEMORY_MAX_NREGIONS = 64, VHOST_MEMORY_F_LOG = 0x1, }; @@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) fput(dev->log_file); dev->log_file = NULL; /* No one will access memory at this point */ - kfree(dev->memory); + kvfree(dev->memory); dev->memory = NULL; WARN_ON(!list_empty(&dev->work_list)); if (dev->worker) { @@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq) } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); +static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2) +{ + const struct vhost_memory_region *r1 = p1, *r2 = p2; + if (r1->guest_phys_addr < r2->guest_phys_addr) + return 1; + if (r1->guest_phys_addr > r2->guest_phys_addr) + return -1; + return 0; +} + +static void *vhost_kvzalloc(unsigned long size) +{ + void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + + if (!n) + n = vzalloc(size); + return n; +} + static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem, *oldmem; @@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -EFAULT; if (mem.padding) return -EOPNOTSUPP; - if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) + if (mem.nregions > max_mem_regions) return -E2BIG; - newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); + newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions)); if (!newmem) return -ENOMEM; memcpy(newmem, &mem, size); if (copy_from_user(newmem->regions, m->regions, mem.nregions * sizeof *m->regions)) { - kfree(newmem); + kvfree(newmem); return -EFAULT; } + sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions), + vhost_memory_reg_sort_cmp, NULL); if (!memory_access_ok(d, newmem, 0)) { - kfree(newmem); + kvfree(newmem); return -EFAULT; } oldmem = d->memory; @@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) d->vqs[i]->memory = newmem; mutex_unlock(&d->vqs[i]->mutex); } - kfree(oldmem); + kvfree(oldmem); return 0; } @@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) } if (eventfp != d->log_file) { filep = d->log_file; + d->log_file = eventfp; ctx = d->log_ctx; d->log_ctx = eventfp ? 
eventfd_ctx_fileget(eventfp) : NULL; @@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl); static const struct vhost_memory_region *find_region(struct vhost_memory *mem, __u64 addr, __u32 len) { - struct vhost_memory_region *reg; - int i; + const struct vhost_memory_region *reg; + int start = 0, end = mem->nregions; - /* linear search is not brilliant, but we really have on the order of 6 - * regions in practice */ - for (i = 0; i < mem->nregions; ++i) { - reg = mem->regions + i; - if (reg->guest_phys_addr <= addr && - reg->guest_phys_addr + reg->memory_size - 1 >= addr) - return reg; + while (start < end) { + int slot = start + (end - start) / 2; + reg = mem->regions + slot; + if (addr >= reg->guest_phys_addr) + end = slot; + else + start = slot + 1; } + + reg = mem->regions + start; + if (addr >= reg->guest_phys_addr && + reg->guest_phys_addr + reg->memory_size > addr) + return reg; return NULL; } diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 862fbc206755..564a7de17d99 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device); if (ret) - btrfs_error(root->fs_info, ret, "kobj add dev failed"); + btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret); printk_in_rcu(KERN_INFO "BTRFS: dev_replace from %s (devid %llu) to %s started\n", diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a9aadb2ad525..f556c3732c2c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb, !extent_buffer_uptodate(chunk_root->node)) { printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); + chunk_root->node = NULL; goto fail_tree_roots; } btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); @@ -2879,7 +2880,7 @@ retry_root_backup: !extent_buffer_uptodate(tree_root->node)) { printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", sb->s_id); - + tree_root->node = NULL; goto recovery_tree_root; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 171312d51799..07204bf601ed 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4227,6 +4227,24 @@ out: space_info->chunk_alloc = 0; spin_unlock(&space_info->lock); mutex_unlock(&fs_info->chunk_mutex); + /* + * When we allocate a new chunk we reserve space in the chunk block + * reserve to make sure we can COW nodes/leafs in the chunk tree or + * add new nodes/leafs to it if we end up needing to do it when + * inserting the chunk item and updating device items as part of the + * second phase of chunk allocation, performed by + * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a + * large number of new block groups to create in our transaction + * handle's new_bgs list to avoid exhausting the chunk block reserve + * in extreme cases - like having a single transaction create many new + * block groups when starting to write out the free space caches of all + * the block groups that were made dirty during the lifetime of the + * transaction. 
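
A little further up, the vhost hunk swaps a plain kmalloc() for vhost_kvzalloc(): try a physically contiguous allocation first with __GFP_NOWARN so a large request fails quietly, fall back to vzalloc(), and free either result with kvfree(), as the kfree() to kvfree() conversions in the same hunk show. The pattern in isolation:

static void *kvzalloc_sketch(unsigned long size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);

        if (!p)
                p = vzalloc(size);      /* virtually contiguous fallback */
        return p;                       /* counterpart is kvfree(p)      */
}
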
+ */ + if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { + btrfs_create_pending_block_groups(trans, trans->root); + btrfs_trans_release_chunk_metadata(trans); + } return ret; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 51e0f0d0053e..f5021fcb154e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, kmem_cache_free(btrfs_trans_handle_cachep, trans); - if (current != root->fs_info->transaction_kthread) + if (current != root->fs_info->transaction_kthread && + current != root->fs_info->cleaner_kthread) btrfs_run_delayed_iputs(root); return ret; @@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh, * @vma: The virtual memory area where the fault occurred * @vmf: The description of the fault * @get_block: The filesystem method used to translate file offsets to blocks + * @complete_unwritten: The filesystem method used to convert unwritten blocks + * to written so the data written to them is exposed. This is required for + * required by write faults for filesystems that will return unwritten + * extent mappings from @get_block, but it is optional for reads as + * dax_insert_mapping() will always zero unwritten blocks. If the fs does + * not support unwritten extents, the it should pass NULL. * * When a page fault occurs, filesystems may call this helper in their * fault handler for DAX files. __dax_fault() assumes the caller has done all @@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, * as for normal BH based IO completions. */ error = dax_insert_mapping(inode, &bh, vma, vmf); - if (buffer_unwritten(&bh)) - complete_unwritten(&bh, !error); + if (buffer_unwritten(&bh)) { + if (complete_unwritten) + complete_unwritten(&bh, !error); + else + WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE)); + } out: if (error == -ENOMEM) diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 9bedfa8dd3a5..f71e19a9dd3c 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page) return 1; } - mark_inode_dirty(inode); - if (!PageDirty(page)) { __set_page_dirty_nobuffers(page); update_dirty_page(inode, page); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index ada2a3dd701a..b0f38c3b37f4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) if (ret) return ret; - if (f2fs_is_atomic_file(inode)) + if (f2fs_is_atomic_file(inode)) { + clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); commit_inmem_pages(inode, false); + } ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); mnt_drop_write_file(filp); - clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); return ret; } @@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp) f2fs_balance_fs(F2FS_I_SB(inode)); if (f2fs_is_atomic_file(inode)) { - commit_inmem_pages(inode, false); clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); + commit_inmem_pages(inode, false); } if (f2fs_is_volatile_file(inode)) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index e1e73617d13b..22fb5ef37966 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx) if (!fio.encrypted_page) goto put_out; - f2fs_submit_page_bio(&fio); + err = f2fs_submit_page_bio(&fio); + if (err) + goto put_page_out; + + /* write page */ + lock_page(fio.encrypted_page); + + if 
(unlikely(!PageUptodate(fio.encrypted_page))) + goto put_page_out; + if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) + goto put_page_out; + + set_page_dirty(fio.encrypted_page); + f2fs_wait_on_page_writeback(fio.encrypted_page, META); + if (clear_page_dirty_for_io(fio.encrypted_page)) + dec_page_count(fio.sbi, F2FS_DIRTY_META); + + set_page_writeback(fio.encrypted_page); /* allocate block address */ f2fs_wait_on_page_writeback(dn.node_page, NODE); - allocate_data_block(fio.sbi, NULL, fio.blk_addr, &fio.blk_addr, &sum, CURSEG_COLD_DATA); - dn.data_blkaddr = fio.blk_addr; - - /* write page */ - lock_page(fio.encrypted_page); - set_page_writeback(fio.encrypted_page); fio.rw = WRITE_SYNC; f2fs_submit_page_mbio(&fio); + dn.data_blkaddr = fio.blk_addr; set_data_blkaddr(&dn); f2fs_update_extent_cache(&dn); set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); if (page->index == 0) set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); - +put_page_out: f2fs_put_page(fio.encrypted_page, 1); put_out: f2fs_put_dnode(&dn); @@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type) .page = page, .encrypted_page = NULL, }; + set_page_dirty(page); f2fs_wait_on_page_writeback(page, DATA); - if (clear_page_dirty_for_io(page)) inode_dec_dirty_pages(inode); set_cold_data(page); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 38e75fb1e488..a13ffcc32992 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) kunmap_atomic(dst_addr); SetPageUptodate(page); no_update: + set_page_dirty(page); + /* clear dirty state */ dirty = clear_page_dirty_for_io(page); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 1eb343768781..61b97f9cb9f6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort) if (!abort) { lock_page(cur->page); if (cur->page->mapping == inode->i_mapping) { + set_page_dirty(cur->page); f2fs_wait_on_page_writeback(cur->page, DATA); if (clear_page_dirty_for_io(cur->page)) inode_dec_dirty_pages(inode); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f0520bcf2094..518c6294bf6c 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page, else wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); } +EXPORT_SYMBOL_GPL(wbc_account_io); /** * inode_congested - test whether an inode is congested diff --git a/fs/namespace.c b/fs/namespace.c index c7cb8a526c05..2b8aa15fd6df 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1361,6 +1361,36 @@ enum umount_tree_flags { UMOUNT_PROPAGATE = 2, UMOUNT_CONNECTED = 4, }; + +static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) +{ + /* Leaving mounts connected is only valid for lazy umounts */ + if (how & UMOUNT_SYNC) + return true; + + /* A mount without a parent has nothing to be connected to */ + if (!mnt_has_parent(mnt)) + return true; + + /* Because the reference counting rules change when mounts are + * unmounted and connected, umounted mounts may not be + * connected to mounted mounts. + */ + if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) + return true; + + /* Has it been requested that the mount remain connected? */ + if (how & UMOUNT_CONNECTED) + return false; + + /* Is the mount locked such that it needs to remain connected? 
*/ + if (IS_MNT_LOCKED(mnt)) + return false; + + /* By default disconnect the mount */ + return true; +} + /* * mount_lock must be held * namespace_sem must be held for write @@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; - disconnect = !(((how & UMOUNT_CONNECTED) && - mnt_has_parent(p) && - (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || - IS_MNT_LOCKED_AND_LAZY(p)); + disconnect = disconnect_mount(p, how); pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, disconnect ? &unmounted : NULL); @@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry) while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { - struct mount *p, *tmp; - list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { - hlist_add_head(&p->mnt_umount.s_list, &unmounted); - umount_mnt(p); - } + hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); + umount_mnt(mnt); } else umount_tree(mnt, UMOUNT_CONNECTED); } diff --git a/fs/nfs/client.c b/fs/nfs/client.c index ecebb406cc1a..4a90c9bb3135 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server, server->options = data->options; server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| - NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR; + NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME; if (data->rsize) server->rsize = nfs_block_size(data->rsize, NULL); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index c12951b9551e..b3289d701eea 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args, struct nfs42_layoutstat_devinfo *devinfo; int i; - for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) { + for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) { if (*dev_count >= dev_limit) break; mirror = FF_LAYOUT_COMP(pls, i); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index b77b328a06d7..0adc7d245b3d 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); if (fattr->valid & NFS_ATTR_FATTR_CHANGE) inode->i_version = fattr->change_attr; - else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) - nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); + else + nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR + | NFS_INO_REVAL_PAGECACHE); if (fattr->valid & NFS_ATTR_FATTR_SIZE) inode->i_size = nfs_size_to_loff_t(fattr->size); else @@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat if (fattr->valid & NFS_ATTR_FATTR_SIZE) { cur_size = i_size_read(inode); new_isize = nfs_size_to_loff_t(fattr->size); - if (cur_size != new_isize && nfsi->nrequests == 0) + if (cur_size != new_isize) invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; } + if (nfsi->nrequests != 0) + invalid &= ~NFS_INO_REVAL_PAGECACHE; /* Have any file permissions changed? 
*/ if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) @@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL - | NFS_INO_REVAL_PAGECACHE; + | NFS_INO_INVALID_ACL; if (S_ISDIR(inode->i_mode)) nfs_force_lookup_revalidate(inode); inode->i_version = fattr->change_attr; } - } else if (server->caps & NFS_CAP_CHANGE_ATTR) + } else nfsi->cache_validity |= save_cache_validity; if (fattr->valid & NFS_ATTR_FATTR_MTIME) { @@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if ((nfsi->nrequests == 0) || new_isize > cur_isize) { i_size_write(inode, new_isize); invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; - invalid &= ~NFS_INO_REVAL_PAGECACHE; } dprintk("NFS: isize change on server for file %s/%ld " "(%Ld to %Ld)\n", diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 7e3c4604bea8..9b372b845f6a 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[]; #ifdef CONFIG_NFS_V4_SECURITY_LABEL extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); +static inline struct nfs4_label * +nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src) +{ + if (!dst || !src) + return NULL; + + if (src->len > NFS4_MAXLABELLEN) + return NULL; + + dst->lfs = src->lfs; + dst->pi = src->pi; + dst->len = src->len; + memcpy(dst->label, src->label, src->len); + + return dst; +} static inline void nfs4_label_free(struct nfs4_label *label) { if (label) { @@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {} static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) { } +static inline struct nfs4_label * +nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src) +{ + return NULL; +} #endif /* CONFIG_NFS_V4_SECURITY_LABEL */ /* proc.c */ diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index f486b80f927a..d731bbf974aa 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len) return err; } -loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) +static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) { struct inode *inode = file_inode(filep); struct nfs42_seek_args args = { @@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); } +loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) +{ + struct nfs_server *server = NFS_SERVER(file_inode(filep)); + struct nfs4_exception exception = { }; + int err; + + do { + err = _nfs42_proc_llseek(filep, offset, whence); + if (err == -ENOTSUPP) + return -EOPNOTSUPP; + err = nfs4_handle_exception(server, err, &exception); + } while (exception.retry); + + return err; +} + + static void nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) { diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 8bee93469617..3acb1eb72930 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp) static void renew_lease(const struct nfs_server *server, unsigned long timestamp) { - do_renew_lease(server->nfs_client, timestamp); + struct nfs_client *clp = server->nfs_client; + + if 
(!nfs4_has_session(clp)) + do_renew_lease(clp, timestamp); } struct nfs4_call_sync_data { @@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res) clp = session->clp; do_renew_lease(clp, res->sr_timestamp); /* Check sequence flags */ - if (res->sr_status_flags != 0) - nfs4_schedule_lease_recovery(clp); + nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); nfs41_update_target_slotid(slot->table, slot, res); break; case 1: @@ -910,6 +912,7 @@ struct nfs4_opendata { struct nfs_open_confirmres c_res; struct nfs4_string owner_name; struct nfs4_string group_name; + struct nfs4_label *a_label; struct nfs_fattr f_attr; struct nfs4_label *f_label; struct dentry *dir; @@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, if (IS_ERR(p->f_label)) goto err_free_p; + p->a_label = nfs4_label_alloc(server, gfp_mask); + if (IS_ERR(p->a_label)) + goto err_free_f; + alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); if (IS_ERR(p->o_arg.seqid)) @@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, p->o_arg.server = server; p->o_arg.bitmask = nfs4_bitmask(server, label); p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; - p->o_arg.label = label; + p->o_arg.label = nfs4_label_copy(p->a_label, label); p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); switch (p->o_arg.claim) { case NFS4_OPEN_CLAIM_NULL: @@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, return p; err_free_label: + nfs4_label_free(p->a_label); +err_free_f: nfs4_label_free(p->f_label); err_free_p: kfree(p); @@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref) nfs4_put_open_state(p->state); nfs4_put_state_owner(p->owner); + nfs4_label_free(p->a_label); nfs4_label_free(p->f_label); dput(p->dir); @@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state, static void nfs_resync_open_stateid_locked(struct nfs4_state *state) { + if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) + return; if (state->n_wronly) set_bit(NFS_O_WRONLY_STATE, &state->flags); if (state->n_rdonly) set_bit(NFS_O_RDONLY_STATE, &state->flags); if (state->n_rdwr) set_bit(NFS_O_RDWR_STATE, &state->flags); + set_bit(NFS_OPEN_STATE, &state->flags); } static void nfs_clear_open_stateid_locked(struct nfs4_state *state, @@ -7571,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) goto out; } ret = rpc_wait_for_completion_task(task); - if (!ret) { - struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; - - if (task->tk_status == 0) - nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); + if (!ret) ret = task->tk_status; - } rpc_put_task(task); out: dprintk("<-- %s status=%d\n", __func__, ret); @@ -7965,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata) { struct nfs4_layoutreturn *lrp = calldata; struct pnfs_layout_hdr *lo = lrp->args.layout; + LIST_HEAD(freeme); dprintk("--> %s\n", __func__); spin_lock(&lo->plh_inode->i_lock); if (lrp->res.lrs_present) pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); + pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range); pnfs_clear_layoutreturn_waitbit(lo); - clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); - rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); + pnfs_free_lseg_list(&freeme); 
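The nfs4_layoutreturn_release() hunk just above collects segments onto a local `freeme` list while the inode spinlock is held and only frees them after the lock is dropped. A hedged userspace sketch of that "gather under the lock, free outside it" pattern, with an invented list and pthread mutex standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct seg {
        int id;
        int invalid;                 /* predicate: should this entry be freed? */
        struct seg *next;
};

static struct seg *segs;                                  /* protected by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void release_invalid_segs(void)
{
        struct seg *freeme = NULL;   /* local list, like LIST_HEAD(freeme) */
        struct seg **pp, *s;

        pthread_mutex_lock(&lock);
        pp = &segs;
        while ((s = *pp) != NULL) {
                if (s->invalid) {
                        *pp = s->next;       /* unlink under the lock */
                        s->next = freeme;
                        freeme = s;
                } else {
                        pp = &s->next;
                }
        }
        pthread_mutex_unlock(&lock);

        /* Freeing (or any other slow work) happens with the lock dropped. */
        while ((s = freeme) != NULL) {
                freeme = s->next;
                printf("freeing segment %d\n", s->id);
                free(s);
        }
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                struct seg *s = malloc(sizeof(*s));
                s->id = i;
                s->invalid = (i % 2);
                s->next = segs;
                segs = s;
        }
        release_invalid_segs();
        return 0;
}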
pnfs_put_layout_hdr(lrp->args.layout); nfs_iput_and_deactive(lrp->inode); kfree(calldata); @@ -8588,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = { .minor_version = 0, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN - | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK, .init_client = nfs40_init_client, .shutdown_client = nfs40_shutdown_client, @@ -8614,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN - | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1, @@ -8637,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { .minor_version = 2, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN - | NFS_CAP_CHANGE_ATTR | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1 diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 605840dc89cf..f2e2ad894461 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp) } } -static void nfs41_handle_state_revoked(struct nfs_client *clp) +static void nfs41_handle_all_state_revoked(struct nfs_client *clp) { nfs4_reset_all_state(clp); dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); } +static void nfs41_handle_some_state_revoked(struct nfs_client *clp) +{ + nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce); + nfs4_schedule_state_manager(clp); + + dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); +} + static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) { - /* This will need to handle layouts too */ - nfs_expire_all_delegations(clp); + /* FIXME: For now, we destroy all layouts. */ + pnfs_destroy_all_layouts(clp); + /* FIXME: For now, we test all delegations+open state+locks. 
*/ + nfs41_handle_some_state_revoked(clp); dprintk("%s: Recallable state revoked on server %s!\n", __func__, clp->cl_hostname); } static void nfs41_handle_backchannel_fault(struct nfs_client *clp) { - nfs_expire_all_delegations(clp); - if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) - nfs4_schedule_state_manager(clp); + set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); + nfs4_schedule_state_manager(clp); + dprintk("%s: server %s declared a backchannel fault\n", __func__, clp->cl_hostname); } @@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) nfs41_handle_server_reboot(clp); - if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | - SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | + if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED)) + nfs41_handle_all_state_revoked(clp); + if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | SEQ4_STATUS_ADMIN_STATE_REVOKED)) - nfs41_handle_state_revoked(clp); + nfs41_handle_some_state_revoked(clp); if (flags & SEQ4_STATUS_LEASE_MOVED) nfs4_schedule_lease_moved_recovery(clp); if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 1da68d3b1eda..4984bbe55ff1 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) mirror->pg_base = 0; mirror->pg_recoalesce = 0; - desc->pg_moreio = 0; - while (!list_empty(&head)) { struct nfs_page *req; @@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) nfs_list_remove_request(req); if (__nfs_pageio_add_request(desc, req)) continue; - if (desc->pg_error < 0) + if (desc->pg_error < 0) { + list_splice_tail(&head, &mirror->pg_list); + mirror->pg_recoalesce = 1; return 0; + } break; } } while (mirror->pg_recoalesce); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 0ba9a02c9566..70bf706b1090 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo, { struct pnfs_layout_segment *s; - if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) + if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) return false; list_for_each_entry(s, &lo->plh_segs, pls_list) @@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo, return true; } +static bool +pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo) +{ + if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) + return false; + lo->plh_return_iomode = 0; + lo->plh_block_lgets++; + pnfs_get_layout_hdr(lo); + clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); + return true; +} + static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, struct pnfs_layout_hdr *lo, struct inode *inode) { @@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, if (pnfs_layout_need_return(lo, lseg)) { nfs4_stateid stateid; enum pnfs_iomode iomode; + bool send; stateid = lo->plh_stateid; iomode = lo->plh_return_iomode; - /* decreased in pnfs_send_layoutreturn() */ - lo->plh_block_lgets++; - lo->plh_return_iomode = 0; + send = pnfs_prepare_layoutreturn(lo); spin_unlock(&inode->i_lock); - pnfs_get_layout_hdr(lo); - - /* Send an async layoutreturn so we dont deadlock */ - pnfs_send_layoutreturn(lo, stateid, iomode, false); + if (send) { + /* Send an async layoutreturn so we dont deadlock */ + pnfs_send_layoutreturn(lo, stateid, iomode, false); + } } else 
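The new pnfs_prepare_layoutreturn() above uses test_and_set_bit() so that only the caller which flips NFS_LAYOUT_RETURN from clear to set actually issues the LAYOUTRETURN; everyone else backs off. A minimal sketch of that "single winner" guard with C11 atomics (standalone, not the kernel bitops API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag layoutreturn_pending = ATOMIC_FLAG_INIT;

/* Returns true only for the caller that actually claimed the work. */
static bool prepare_return(void)
{
        /*
         * atomic_flag_test_and_set() returns the previous value, like
         * test_and_set_bit(): only the first caller to see 'false' wins.
         */
        if (atomic_flag_test_and_set(&layoutreturn_pending))
                return false;        /* someone else is already sending it */
        return true;
}

static void complete_return(void)
{
        atomic_flag_clear(&layoutreturn_pending);   /* like clear_bit() */
}

int main(void)
{
        printf("first caller wins:   %d\n", prepare_return());  /* 1 */
        printf("second caller loses: %d\n", prepare_return());  /* 0 */
        complete_return();
        printf("after completion:    %d\n", prepare_return());  /* 1 */
        return 0;
}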
spin_unlock(&inode->i_lock); } @@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { + if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) { + spin_unlock(&inode->i_lock); + return; + } pnfs_get_layout_hdr(lo); pnfs_layout_remove_lseg(lo, lseg); spin_unlock(&inode->i_lock); @@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg) test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); if (atomic_dec_and_test(&lseg->pls_refcount)) { struct pnfs_layout_hdr *lo = lseg->pls_layout; + if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) + return; pnfs_get_layout_hdr(lo); pnfs_layout_remove_lseg(lo, lseg); pnfs_free_lseg_async(lseg); @@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo) clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); smp_mb__after_atomic(); wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); + rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); } static int @@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino) LIST_HEAD(tmp_list); nfs4_stateid stateid; int status = 0, empty; + bool send; dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); @@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino) /* Don't send a LAYOUTRETURN if list was initially empty */ if (empty) { spin_unlock(&ino->i_lock); - pnfs_put_layout_hdr(lo); dprintk("NFS: %s no layout segments to return\n", __func__); - goto out; + goto out_put_layout_hdr; } set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); - lo->plh_block_lgets++; + send = pnfs_prepare_layoutreturn(lo); spin_unlock(&ino->i_lock); pnfs_free_lseg_list(&tmp_list); - - status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); + if (send) + status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); +out_put_layout_hdr: + pnfs_put_layout_hdr(lo); out: dprintk("<-- %s status: %d\n", __func__, status); return status; @@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino) out_noroc: if (lo) { stateid = lo->plh_stateid; - layoutreturn = - test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, - &lo->plh_flags); - if (layoutreturn) { - lo->plh_block_lgets++; - pnfs_get_layout_hdr(lo); - } + if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &lo->plh_flags)) + layoutreturn = pnfs_prepare_layoutreturn(lo); } spin_unlock(&ino->i_lock); if (layoutreturn) { @@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) struct pnfs_layout_segment *lseg; nfs4_stateid stateid; u32 current_seqid; - bool found = false, layoutreturn = false; + bool layoutreturn = false; spin_lock(&ino->i_lock); - list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) - if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { - rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); - found = true; - goto out; - } + list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) { + if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) + continue; + if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) + continue; + rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); + spin_unlock(&ino->i_lock); + return true; + } lo = nfsi->layout; current_seqid = be32_to_cpu(lo->plh_stateid.seqid); @@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task) * a barrier, we choose the worst-case barrier. 
*/ *barrier = current_seqid + atomic_read(&lo->plh_outstanding); -out: - if (!found) { - stateid = lo->plh_stateid; - layoutreturn = - test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, - &lo->plh_flags); - if (layoutreturn) { - lo->plh_block_lgets++; - pnfs_get_layout_hdr(lo); - } - } + stateid = lo->plh_stateid; + if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + &lo->plh_flags)) + layoutreturn = pnfs_prepare_layoutreturn(lo); + if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) + rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); + spin_unlock(&ino->i_lock); if (layoutreturn) { - rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); + return true; } - return found; + return false; } /* @@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode, spin_lock(&inode->i_lock); /* set failure bit so that pnfs path will be retried later */ pnfs_layout_set_fail_bit(lo, iomode); - set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags); if (lo->plh_return_iomode == 0) lo->plh_return_iomode = range.iomode; else if (lo->plh_return_iomode != range.iomode) @@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) if (ld->prepare_layoutcommit) { status = ld->prepare_layoutcommit(&data->args); if (status) { + put_rpccred(data->cred); spin_lock(&inode->i_lock); set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); if (end_pos > nfsi->layout->plh_lwb) nfsi->layout->plh_lwb = end_pos; - spin_unlock(&inode->i_lock); - put_rpccred(data->cred); - goto clear_layoutcommitting; + goto out_unlock; } } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 65869ca9c851..75a35a1afa79 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, { struct nfs_pgio_args *argp = &hdr->args; struct nfs_pgio_res *resp = &hdr->res; + u64 size = argp->offset + resp->count; if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) + fattr->size = size; + if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { + fattr->valid &= ~NFS_ATTR_FATTR_SIZE; return; - if (argp->offset + resp->count != fattr->size) - return; - if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) + } + if (size != fattr->size) return; /* Set attribute barrier */ nfs_fattr_set_barrier(fattr); + /* ...and update size */ + fattr->valid |= NFS_ATTR_FATTR_SIZE; } void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) { - struct nfs_fattr *fattr = hdr->res.fattr; + struct nfs_fattr *fattr = &hdr->fattr; struct inode *inode = hdr->inode; - if (fattr == NULL) - return; spin_lock(&inode->i_lock); nfs_writeback_check_extend(hdr, fattr); nfs_post_op_update_inode_force_wcc_locked(inode, fattr); diff --git a/fs/pnode.h b/fs/pnode.h index 7114ce6e6b9e..0fcdbe7ca648 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -20,8 +20,6 @@ #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) -#define IS_MNT_LOCKED_AND_LAZY(m) \ - (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED) #define CL_EXPIRE 0x01 #define CL_SLAVE 0x02 diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c index 20de88d1bf86..dd714037c322 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.c +++ b/fs/xfs/libxfs/xfs_attr_remote.c @@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify( struct xfs_buf *bp) { struct xfs_mount *mp = bp->b_target->bt_mount; - struct xfs_buf_log_item *bip = 
bp->b_fspriv; + int blksize = mp->m_attr_geo->blksize; char *ptr; int len; xfs_daddr_t bno; - int blksize = mp->m_attr_geo->blksize; /* no verification of non-crc buffers */ if (!xfs_sb_version_hascrc(&mp->m_sb)) @@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify( ASSERT(len >= blksize); while (len > 0) { + struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr; + if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { xfs_buf_ioerror(bp, -EFSCORRUPTED); xfs_verifier_error(bp); return; } - if (bip) { - struct xfs_attr3_rmt_hdr *rmt; - rmt = (struct xfs_attr3_rmt_hdr *)ptr; - rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); + /* + * Ensure we aren't writing bogus LSNs to disk. See + * xfs_attr3_rmt_hdr_set() for the explanation. + */ + if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) { + xfs_buf_ioerror(bp, -EFSCORRUPTED); + xfs_verifier_error(bp); + return; } xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF); @@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set( rmt->rm_owner = cpu_to_be64(ino); rmt->rm_blkno = cpu_to_be64(bno); + /* + * Remote attribute blocks are written synchronously, so we don't + * have an LSN that we can stamp in them that makes any sense to log + * recovery. To ensure that log recovery handles overwrites of these + * blocks sanely (i.e. once they've been freed and reallocated as some + * other type of metadata) we need to ensure that the LSN has a value + * that tells log recovery to ignore the LSN and overwrite the buffer + * with whatever is in it's log. To do this, we use the magic + * NULLCOMMITLSN to indicate that the LSN is invalid. + */ + rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN); + return sizeof(struct xfs_attr3_rmt_hdr); } @@ -434,14 +451,21 @@ xfs_attr_rmtval_set( /* * Allocate a single extent, up to the size of the value. + * + * Note that we have to consider this a data allocation as we + * write the remote attribute without logging the contents. + * Hence we must ensure that we aren't using blocks that are on + * the busy list so that we don't overwrite blocks which have + * recently been freed but their transactions are not yet + * committed to disk. If we overwrite the contents of a busy + * extent and then crash then the block may not contain the + * correct metadata after log recovery occurs. */ xfs_bmap_init(args->flist, args->firstblock); nmap = 1; error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, - blkcnt, - XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, - args->firstblock, args->total, &map, &nmap, - args->flist); + blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock, + args->total, &map, &nmap, args->flist); if (!error) { error = xfs_bmap_finish(&args->trans, args->flist, &committed); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index f0e8249722d4..db4acc1c3e73 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1514,18 +1514,27 @@ xfs_filemap_fault( struct vm_area_struct *vma, struct vm_fault *vmf) { - struct xfs_inode *ip = XFS_I(file_inode(vma->vm_file)); + struct inode *inode = file_inode(vma->vm_file); int ret; - trace_xfs_filemap_fault(ip); + trace_xfs_filemap_fault(XFS_I(inode)); /* DAX can shortcut the normal fault path on write faults! 
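Per the comments added in the xfs_attr_remote.c hunks above, remote attribute blocks are now stamped with the NULLCOMMITLSN sentinel so log recovery never trusts an LSN read from them and always replays over them. The general technique is a reserved "no ordering information" value; a small illustrative sketch (the structure and constant below are invented, not the XFS on-disk format):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define NULL_LSN UINT64_MAX     /* sentinel: block carries no valid LSN */

struct blk_hdr {
        uint64_t lsn;           /* sequence number of the last logged write */
};

/*
 * Recovery rule: replay the logged copy unless the on-disk block's LSN
 * proves it is already newer.  A sentinel LSN never wins, so blocks
 * written outside the log are always overwritten.
 */
static bool should_replay(const struct blk_hdr *disk, uint64_t log_lsn)
{
        if (disk->lsn == NULL_LSN)
                return true;                /* no ordering info: always replay */
        return disk->lsn < log_lsn;         /* replay only if the log is newer */
}

int main(void)
{
        struct blk_hdr synced = { .lsn = NULL_LSN };   /* written synchronously */
        struct blk_hdr logged = { .lsn = 200 };

        printf("synced block, log lsn 100: replay=%d\n", should_replay(&synced, 100));
        printf("logged block, log lsn 100: replay=%d\n", should_replay(&logged, 100));
        return 0;
}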
*/ - if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip))) + if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode)) return xfs_filemap_page_mkwrite(vma, vmf); - xfs_ilock(ip, XFS_MMAPLOCK_SHARED); - ret = filemap_fault(vma, vmf); - xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); + xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); + if (IS_DAX(inode)) { + /* + * we do not want to trigger unwritten extent conversion on read + * faults - that is unnecessary overhead and would also require + * changes to xfs_get_blocks_direct() to map unwritten extent + * ioend for conversion on read-only mappings. + */ + ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL); + } else + ret = filemap_fault(vma, vmf); + xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); return ret; } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 01dd228ca05e..480ebba8464f 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn( uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; break; case XFS_ATTR3_RMT_MAGIC: - lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); - uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; - break; + /* + * Remote attr blocks are written synchronously, rather than + * being logged. That means they do not contain a valid LSN + * (i.e. transactionally ordered) in them, and hence any time we + * see a buffer to replay over the top of a remote attribute + * block we should simply do so. + */ + goto recover_immediately; case XFS_SB_MAGIC: lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); uuid = &((struct xfs_dsb *)blk)->sb_uuid; diff --git a/include/linux/ata.h b/include/linux/ata.h index fed36418dd1c..6c78956aa470 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -45,6 +45,7 @@ enum { ATA_SECT_SIZE = 512, ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, + ATA_MAX_SECTORS_1024 = 1024, ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ ATA_MAX_SECTORS_TAPE = 65535, diff --git a/include/linux/cper.h b/include/linux/cper.h index 76abba4b238e..dcacb1a72e26 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -340,7 +340,27 @@ struct cper_ia_proc_ctx { __u64 mm_reg_addr; }; -/* Memory Error Section */ +/* Old Memory Error Section UEFI 2.1, 2.2 */ +struct cper_sec_mem_err_old { + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; +}; + +/* Memory Error Section UEFI >= 2.3 */ struct cper_sec_mem_err { __u64 validation_bits; __u64 error_status; diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 29ad97c34fd5..bde1e567b3a9 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -62,6 +62,7 @@ struct cpufreq_policy { /* CPUs sharing clock, require sw coordination */ cpumask_var_t cpus; /* Online CPUs only */ cpumask_var_t related_cpus; /* Online + Offline CPUs */ + cpumask_var_t real_cpus; /* Related and present */ unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs should set cpufreq */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 1da602982cf9..6cd8c0ee4b6f 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * SAVE_REGS. 
If another ops with this flag set is already registered * for any of the functions that this ops will be registered for, then * this ops will fail to register or set_filter_ip. + * PID - Is affected by set_ftrace_pid (allows filtering on those pids) */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -132,6 +133,7 @@ enum { FTRACE_OPS_FL_MODIFYING = 1 << 11, FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, FTRACE_OPS_FL_IPMODIFY = 1 << 13, + FTRACE_OPS_FL_PID = 1 << 14, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -159,6 +161,7 @@ struct ftrace_ops { struct ftrace_ops *next; unsigned long flags; void *private; + ftrace_func_t saved_func; int __percpu *disabled; #ifdef CONFIG_DYNAMIC_FTRACE int nr_trampolines; diff --git a/include/linux/libata.h b/include/linux/libata.h index 36ce37bcc963..c9cfbcdb8d14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -431,6 +431,8 @@ enum { ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index f25e2bdd188c..272f42952f34 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -178,17 +178,17 @@ typedef enum { /* Chip may not exist, so silence any errors in scan */ #define NAND_SCAN_SILENT_NODEV 0x00040000 /* - * This option could be defined by controller drivers to protect against - * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers - */ -#define NAND_USE_BOUNCE_BUFFER 0x00080000 -/* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode * when calling nand_scan_ident, and update its configuration * before calling nand_scan_tail. 
*/ #define NAND_BUSWIDTH_AUTO 0x00080000 +/* + * This option could be defined by controller drivers to protect against + * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers + */ +#define NAND_USE_BOUNCE_BUFFER 0x00100000 /* Options set by nand scan */ /* Nand scan has allocated controller struct */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index f91b5ade30c9..874b77228fb9 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode) struct nfs_inode *nfsi = NFS_I(inode); spin_lock(&inode->i_lock); - nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; + nfsi->cache_validity |= NFS_INO_INVALID_ATTR | + NFS_INO_REVAL_PAGECACHE | + NFS_INO_INVALID_ACCESS | + NFS_INO_INVALID_ACL; if (S_ISDIR(inode->i_mode)) - nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; + nfsi->cache_validity |= NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); } diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index a2ea1491d3df..20bc8e51b161 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -220,7 +220,7 @@ struct nfs_server { #define NFS_CAP_SYMLINKS (1U << 2) #define NFS_CAP_ACLS (1U << 3) #define NFS_CAP_ATOMIC_OPEN (1U << 4) -#define NFS_CAP_CHANGE_ATTR (1U << 5) +/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */ #define NFS_CAP_FILEID (1U << 6) #define NFS_CAP_MODE (1U << 7) #define NFS_CAP_NLINK (1U << 8) diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 4c508549833a..cc7dd687a89d 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np); #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, - struct device_driver *drv) + const struct device_driver *drv) { return 0; } diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h index 044a124bfbbc..21b15f6fee25 100644 --- a/include/linux/platform_data/macb.h +++ b/include/linux/platform_data/macb.h @@ -8,11 +8,19 @@ #ifndef __MACB_PDATA_H__ #define __MACB_PDATA_H__ +/** + * struct macb_platform_data - platform data for MACB Ethernet + * @phy_mask: phy mask passed when register the MDIO bus + * within the driver + * @phy_irq_pin: PHY IRQ + * @is_rmii: using RMII interface? + * @rev_eth_addr: reverse Ethernet address byte order + */ struct macb_platform_data { u32 phy_mask; - int phy_irq_pin; /* PHY IRQ */ - u8 is_rmii; /* using RMII interface? 
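The include/linux/mtd/nand.h hunk above exists because NAND_USE_BOUNCE_BUFFER and NAND_BUSWIDTH_AUTO had both ended up as 0x00080000, so setting one silently implied the other; the patch moves the bounce-buffer flag to the free bit 0x00100000. A short sketch of keeping such option flags on distinct bits (generic names, not the MTD API):

#include <stdio.h>

/* Deriving each flag from its own bit position makes collisions obvious. */
enum chip_options {
        OPT_SILENT_NODEV  = 1u << 18,   /* 0x00040000 */
        OPT_BUSWIDTH_AUTO = 1u << 19,   /* 0x00080000 */
        OPT_BOUNCE_BUFFER = 1u << 20,   /* 0x00100000: no longer aliases the above */
};

int main(void)
{
        unsigned int options = OPT_BOUNCE_BUFFER;

        /* With unique bits, testing one flag no longer reports the other. */
        printf("bounce buffer: %s\n", (options & OPT_BOUNCE_BUFFER) ? "yes" : "no");
        printf("buswidth auto: %s\n", (options & OPT_BUSWIDTH_AUTO) ? "yes" : "no");
        return 0;
}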
*/ - u8 rev_eth_addr; /* reverse Ethernet address byte order */ + int phy_irq_pin; + u8 is_rmii; + u8 rev_eth_addr; }; #endif /* __MACB_PDATA_H__ */ diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 75f70f6ac137..e1571efa3f2b 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -43,7 +43,6 @@ struct esdhc_platform_data { enum wp_types wp_type; enum cd_types cd_type; int max_bus_width; - unsigned int f_max; bool support_vsel; unsigned int delay_line; }; diff --git a/include/net/act_api.h b/include/net/act_api.h index 8d2a707a9e87..4519c81304bd 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -112,7 +112,6 @@ struct tc_action_ops { int tcf_hash_search(struct tc_action *a, u32 index); void tcf_hash_destroy(struct tc_action *a); -int tcf_hash_release(struct tc_action *a, int bind); u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); int tcf_hash_check(u32 index, struct tc_action *a, int bind); int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, @@ -120,6 +119,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); void tcf_hash_insert(struct tc_action *a); +int __tcf_hash_release(struct tc_action *a, bool bind, bool strict); + +static inline int tcf_hash_release(struct tc_action *a, bool bind) +{ + return __tcf_hash_release(a, bind, false); +} + int tcf_register_action(struct tc_action_ops *a, unsigned int mask); int tcf_unregister_action(struct tc_action_ops *a); int tcf_action_destroy(struct list_head *actions, int bind); diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index e1300b3dd597..53eead2da743 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -21,13 +21,11 @@ struct netns_frags { * @INET_FRAG_FIRST_IN: first fragment has arrived * @INET_FRAG_LAST_IN: final fragment has arrived * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction - * @INET_FRAG_EVICTED: frag queue is being evicted */ enum { INET_FRAG_FIRST_IN = BIT(0), INET_FRAG_LAST_IN = BIT(1), INET_FRAG_COMPLETE = BIT(2), - INET_FRAG_EVICTED = BIT(3) }; /** @@ -45,6 +43,7 @@ enum { * @flags: fragment queue flags * @max_size: maximum received fragment size * @net: namespace that this frag belongs to + * @list_evictor: list of queues to forcefully evict (e.g. due to low memory) */ struct inet_frag_queue { spinlock_t lock; @@ -59,6 +58,7 @@ struct inet_frag_queue { __u8 flags; u16 max_size; struct netns_frags *net; + struct hlist_node list_evictor; }; #define INETFRAGS_HASHSZ 1024 @@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f inet_frag_destroy(q, f); } +static inline bool inet_frag_evicting(struct inet_frag_queue *q) +{ + return !hlist_unhashed(&q->list_evictor); +} + /* Memory Tracking Functions. 
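The act_api.h hunk above turns tcf_hash_release() into a static inline wrapper around __tcf_hash_release(), so existing callers keep the two-argument form while new code can pass the extra strict flag. Since C has no default arguments, this wrapper-plus-double-underscore split is the usual way to extend such an interface; a tiny generic sketch with made-up names:

#include <stdbool.h>
#include <stdio.h>

/* Full-featured implementation takes every knob explicitly. */
static int __release_entry(int id, bool bind, bool strict)
{
        printf("release id=%d bind=%d strict=%d\n", id, bind, strict);
        return 0;
}

/* Thin wrapper preserving the old two-argument interface. */
static inline int release_entry(int id, bool bind)
{
        return __release_entry(id, bind, false);
}

int main(void)
{
        release_entry(1, true);           /* legacy callers: strict defaults off */
        __release_entry(2, true, true);   /* new callers opt in explicitly */
        return 0;
}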
*/ /* The default percpu_counter batch size is not big enough to scale to @@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf) return percpu_counter_read(&nf->mem); } -static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) +static inline void sub_frag_mem_limit(struct netns_frags *nf, int i) { - __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); + __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch); } -static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) +static inline void add_frag_mem_limit(struct netns_frags *nf, int i) { - __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); + __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch); } static inline void init_frag_mem_limit(struct netns_frags *nf) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 5e0196084f1e..a37d0432bebd 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -186,7 +186,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); struct fib_table { struct hlist_node tb_hlist; u32 tb_id; - int tb_default; int tb_num_default; struct rcu_head rcu; unsigned long *tb_data; @@ -293,7 +292,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); -void fib_select_default(struct fib_result *res); +void fib_select_default(const struct flowi4 *flp, struct fib_result *res); #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 095433b8a8b0..37cd3911d5c5 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max; extern unsigned int nf_conntrack_hash_rnd; void init_nf_conntrack_hash_rnd(void); -void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); +struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags); #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 29d6a94db54d..723b61c82b3f 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -68,7 +68,6 @@ struct ct_pcpu { spinlock_t lock; struct hlist_nulls_head unconfirmed; struct hlist_nulls_head dying; - struct hlist_nulls_head tmpl; }; struct netns_ct { diff --git a/include/net/sock.h b/include/net/sock.h index 24aa75c5317a..43c6abcf06ab 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -904,7 +904,7 @@ void sk_stream_kill_queues(struct sock *sk); void sk_set_memalloc(struct sock *sk); void sk_clear_memalloc(struct sock *sk); -int sk_wait_data(struct sock *sk, long *timeo); +int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb); struct request_sock_ops; struct timewait_sock_ops; diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 34117b8b72e4..0aedbb2c10e0 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -595,6 +595,7 @@ struct iscsi_conn { int bitmap_id; int rx_thread_active; struct task_struct *rx_thread; + struct completion rx_login_comp; int tx_thread_active; struct task_struct *tx_thread; /* list_head for 
session connection list */ diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index b6fce900a833..fbdd11851725 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -32,7 +32,7 @@ #ifndef __AMDGPU_DRM_H__ #define __AMDGPU_DRM_H__ -#include <drm/drm.h> +#include "drm.h" #define DRM_AMDGPU_GEM_CREATE 0x00 #define DRM_AMDGPU_GEM_MMAP 0x01 @@ -614,6 +614,8 @@ struct drm_amdgpu_info_device { uint32_t vram_type; /** video memory bit width*/ uint32_t vram_bit_width; + /* vce harvesting instance */ + uint32_t vce_harvest_config; }; struct drm_amdgpu_info_hw_ip { diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 6e1a2ed116cb..db809b722985 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1070,6 +1070,14 @@ struct drm_i915_reg_read { __u64 offset; __u64 val; /* Return value */ }; +/* Known registers: + * + * Render engine timestamp - 0x2358 + 64bit - gen7+ + * - Note this register returns an invalid value if using the default + * single instruction 8byte read, in order to workaround that use + * offset (0x2538 | 1) instead. + * + */ struct drm_i915_reset_stats { __u32 ctx_id; diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 1ef76661e1a1..01aa2a8e3f8d 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -33,7 +33,7 @@ #ifndef __RADEON_DRM_H__ #define __RADEON_DRM_H__ -#include <drm/drm.h> +#include "drm.h" /* WARNING: If you change any of these defines, make sure to change the * defines in the X server file (radeon_sarea.h) diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 7bbee79ca293..ec32293a00db 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -34,6 +34,7 @@ /* The feature bitmap for virtio net */ #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */ #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ @@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq { #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 +/* + * Control network offloads + * + * Reconfigures the network offloads that Guest can handle. + * + * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit. + * + * Command data format matches the feature bit mask exactly. + * + * See VIRTIO_NET_F_GUEST_* for the list of offloads + * that can be enabled/disabled. + */ +#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5 +#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0 + #endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 75301468359f..90007a1abcab 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -157,6 +157,12 @@ struct virtio_pci_common_cfg { __le32 queue_used_hi; /* read-write */ }; +/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ +struct virtio_pci_cfg_cap { + struct virtio_pci_cap cap; + __u8 pci_cfg_data[4]; /* Data for BAR access. */ +}; + /* Macro versions of offsets for the Old Timers! 
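The new VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET command above is documented as taking a payload whose bit layout matches the feature mask, so the offload word is assembled from the VIRTIO_NET_F_GUEST_* bit numbers. A hedged sketch of building that mask (plain userspace code; issuing the actual control-queue command is not shown):

#include <stdint.h>
#include <stdio.h>

/* Feature bit numbers as defined in virtio_net.h (see the hunk above). */
#define VIRTIO_NET_F_GUEST_CSUM  1
#define VIRTIO_NET_F_GUEST_TSO4  7
#define VIRTIO_NET_F_GUEST_TSO6  8

int main(void)
{
        /*
         * The command data mirrors the feature bitmap, so an offload is
         * requested by setting the bit whose position matches its
         * VIRTIO_NET_F_GUEST_* number.
         */
        uint64_t offloads = (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
                            (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
                            (1ULL << VIRTIO_NET_F_GUEST_TSO6);

        printf("guest offload mask: 0x%llx\n", (unsigned long long)offloads);
        return 0;
}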
*/ #define VIRTIO_PCI_CAP_VNDR 0 #define VIRTIO_PCI_CAP_NEXT 1 diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 915980ac68df..c07295969b7e 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -31,6 +31,9 @@ * SUCH DAMAGE. * * Copyright Rusty Russell IBM Corporation 2007. */ +#ifndef __KERNEL__ +#include <stdint.h> +#endif #include <linux/types.h> #include <linux/virtio_types.h> @@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p, vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); - vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) + vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16) + align-1) & ~(align - 1)); } diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h index 12215205ab8d..785c5ca0994b 100644 --- a/include/uapi/sound/asoc.h +++ b/include/uapi/sound/asoc.h @@ -110,7 +110,7 @@ /* * Block Header. - * This header preceeds all object and object arrays below. + * This header precedes all object and object arrays below. */ struct snd_soc_tplg_hdr { __le32 magic; /* magic number */ @@ -222,7 +222,7 @@ struct snd_soc_tplg_stream_config { /* * Manifest. List totals for each payload type. Not used in parsing, but will * be passed to the component driver before any other objects in order for any - * global componnent resource allocations. + * global component resource allocations. * * File block representation for manifest :- * +-----------------------------------+----+ diff --git a/kernel/resource.c b/kernel/resource.c index 90552aab5f2d..fed052a1bc9f 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size) { struct resource *p; resource_size_t end = start + size - 1; - int flags = IORESOURCE_MEM | IORESOURCE_BUSY; + unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; const char *name = "System RAM"; int ret = -1; read_lock(&resource_lock); for (p = iomem_resource.child; p ; p = p->sibling) { - if (end < p->start) + if (p->end < start) continue; if (p->start <= start && end <= p->end) { @@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size) ret = 1; break; } - if (p->end < start) + if (end < p->start) break; /* not found */ } read_unlock(&resource_lock); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 02bece4a99ea..eb11011b5292 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -98,6 +98,13 @@ struct ftrace_pid { struct pid *pid; }; +static bool ftrace_pids_enabled(void) +{ + return !list_empty(&ftrace_pids); +} + +static void ftrace_update_trampoline(struct ftrace_ops *ops); + /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. 
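The kernel/resource.c hunk above swaps two interval tests that had been inverted: when walking the address-sorted resource list, entries that end before the queried range are skipped, and the walk stops at the first entry that starts beyond it. A standalone sketch of that containment check over a sorted range list (illustrative types, not struct resource; the name/flags check of the kernel version is omitted):

#include <stdio.h>

struct range {
        unsigned long start, end;   /* inclusive; sorted by start, non-overlapping */
};

/* Return 1 if [start, start + size - 1] lies fully inside one range, else -1. */
static int region_is_contained(const struct range *tbl, int n,
                               unsigned long start, unsigned long size)
{
        unsigned long end = start + size - 1;
        int ret = -1;

        for (int i = 0; i < n; i++) {
                const struct range *p = &tbl[i];

                /* Ranges that end before the query cannot contain it: skip. */
                if (p->end < start)
                        continue;
                if (p->start <= start && end <= p->end) {
                        ret = 1;                 /* fully contained */
                        break;
                }
                /* Sorted list: once a range starts past the query, stop. */
                if (end < p->start)
                        break;
        }
        return ret;
}

int main(void)
{
        struct range ram[] = { { 0x1000, 0x1fff }, { 0x4000, 0x7fff } };

        printf("%d\n", region_is_contained(ram, 2, 0x4100, 0x100)); /* 1  */
        printf("%d\n", region_is_contained(ram, 2, 0x1f00, 0x200)); /* -1 */
        printf("%d\n", region_is_contained(ram, 2, 0x9000, 0x100)); /* -1 */
        return 0;
}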
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock); static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; -ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; static struct ftrace_ops global_ops; static struct ftrace_ops control_ops; @@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, if (!test_tsk_trace_trace(current)) return; - ftrace_pid_function(ip, parent_ip, op, regs); -} - -static void set_ftrace_pid_function(ftrace_func_t func) -{ - /* do not set ftrace_pid_function to itself! */ - if (func != ftrace_pid_func) - ftrace_pid_function = func; + op->saved_func(ip, parent_ip, op, regs); } /** @@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func) void clear_ftrace_function(void) { ftrace_trace_function = ftrace_stub; - ftrace_pid_function = ftrace_stub; } static void control_ops_disable_all(struct ftrace_ops *ops) @@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) } else add_ftrace_ops(&ftrace_ops_list, ops); + /* Always save the function, and reset at unregistering */ + ops->saved_func = ops->func; + + if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled()) + ops->func = ftrace_pid_func; + ftrace_update_trampoline(ops); if (ftrace_enabled) @@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) if (ftrace_enabled) update_ftrace_function(); + ops->func = ops->saved_func; + return 0; } static void ftrace_update_pid_func(void) { + bool enabled = ftrace_pids_enabled(); + struct ftrace_ops *op; + /* Only do something if we are tracing something */ if (ftrace_trace_function == ftrace_stub) return; + do_for_each_ftrace_op(op, ftrace_ops_list) { + if (op->flags & FTRACE_OPS_FL_PID) { + op->func = enabled ? 
ftrace_pid_func : + op->saved_func; + ftrace_update_trampoline(op); + } + } while_for_each_ftrace_op(op); + update_ftrace_function(); } @@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = { .local_hash.filter_hash = EMPTY_HASH, INIT_OPS_HASH(global_ops) .flags = FTRACE_OPS_FL_RECURSION_SAFE | - FTRACE_OPS_FL_INITIALIZED, + FTRACE_OPS_FL_INITIALIZED | + FTRACE_OPS_FL_PID, }; /* @@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops) static struct ftrace_ops global_ops = { .func = ftrace_stub, - .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | + FTRACE_OPS_FL_INITIALIZED | + FTRACE_OPS_FL_PID, }; static int __init ftrace_nodyn_init(void) @@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) if (WARN_ON(tr->ops->func != ftrace_stub)) printk("ftrace ops had %pS for function\n", tr->ops->func); - /* Only the top level instance does pid tracing */ - if (!list_empty(&ftrace_pids)) { - set_ftrace_pid_function(func); - func = ftrace_pid_func; - } } tr->ops->func = func; tr->ops->private = tr; @@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos) { mutex_lock(&ftrace_lock); - if (list_empty(&ftrace_pids) && (!*pos)) + if (!ftrace_pids_enabled() && (!*pos)) return (void *) 1; return seq_list_start(&ftrace_pids, *pos); @@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = { .func = ftrace_stub, .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED | + FTRACE_OPS_FL_PID | FTRACE_OPS_FL_STUB, #ifdef FTRACE_GRAPH_TRAMP_ADDR .trampoline = FTRACE_GRAPH_TRAMP_ADDR, diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 9dd49ca67dbc..6e70ddb158b4 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev) mutex_unlock(&virtio_9p_lock); + vdev->config->reset(vdev); vdev->config->del_vqs(vdev); sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 3d0f7d2a0616..ad82324f710f 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) return 1; chan = conn->smp; + if (!chan) { + BT_ERR("SMP security requested but not available"); + return 1; + } if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) return 1; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 0ff6e1bbca91..fa7bfced888e 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -37,15 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p, int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) { - if (!is_skb_forwardable(skb->dev, skb)) { - kfree_skb(skb); - } else { - skb_push(skb, ETH_HLEN); - br_drop_fake_rtable(skb); - skb_sender_cpu_clear(skb); - dev_queue_xmit(skb); + if (!is_skb_forwardable(skb->dev, skb)) + goto drop; + + skb_push(skb, ETH_HLEN); + br_drop_fake_rtable(skb); + skb_sender_cpu_clear(skb); + + if (skb->ip_summed == CHECKSUM_PARTIAL && + (skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD))) { + int depth; + + if (!__vlan_get_protocol(skb, skb->protocol, &depth)) + goto drop; + + skb_set_network_header(skb, depth); } + dev_queue_xmit(skb); + + return 0; + +drop: + kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 5e9d1c5e1194..6a592856da1c 100644 --- 
a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -519,6 +519,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) if (p->port->state == BR_STATE_DISABLED) goto unlock; + entry->state = p->state; rcu_assign_pointer(*pp, p->next); hlist_del_init(&p->mglist); del_timer(&p->timer); diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index fd238587e032..0752796fe0ba 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1432,8 +1432,7 @@ br_multicast_leave_group(struct net_bridge *br, spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || - (port && port->state == BR_STATE_DISABLED) || - timer_pending(&other_query->timer)) + (port && port->state == BR_STATE_DISABLED)) goto out; mdb = mlock_dereference(br->mdb, br); @@ -1441,6 +1440,32 @@ br_multicast_leave_group(struct net_bridge *br, if (!mp) goto out; + if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { + struct net_bridge_port_group __rcu **pp; + + for (pp = &mp->ports; + (p = mlock_dereference(*pp, br)) != NULL; + pp = &p->next) { + if (p->port != port) + continue; + + rcu_assign_pointer(*pp, p->next); + hlist_del_init(&p->mglist); + del_timer(&p->timer); + call_rcu_bh(&p->rcu, br_multicast_free_pg); + br_mdb_notify(br->dev, port, group, RTM_DELMDB, + p->state); + + if (!mp->ports && !mp->mglist && + netif_running(br->dev)) + mod_timer(&mp->timer, jiffies); + } + goto out; + } + + if (timer_pending(&other_query->timer)) + goto out; + if (br->multicast_querier) { __br_multicast_send_query(br, port, &mp->addr); @@ -1466,29 +1491,6 @@ br_multicast_leave_group(struct net_bridge *br, } } - if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) { - struct net_bridge_port_group __rcu **pp; - - for (pp = &mp->ports; - (p = mlock_dereference(*pp, br)) != NULL; - pp = &p->next) { - if (p->port != port) - continue; - - rcu_assign_pointer(*pp, p->next); - hlist_del_init(&p->mglist); - del_timer(&p->timer); - br_mdb_notify(br->dev, port, group, RTM_DELMDB, - p->state); - call_rcu_bh(&p->rcu, br_multicast_free_pg); - - if (!mp->ports && !mp->mglist && - netif_running(br->dev)) - mod_timer(&mp->timer, jiffies); - } - goto out; - } - now = jiffies; time = now + br->multicast_last_member_count * br->multicast_last_member_interval; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 793d247ac2ca..91a2e08c2bb8 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -691,9 +691,17 @@ static int br_port_slave_changelink(struct net_device *brdev, struct nlattr *tb[], struct nlattr *data[]) { + struct net_bridge *br = netdev_priv(brdev); + int ret; + if (!data) return 0; - return br_setport(br_port_get_rtnl(dev), data); + + spin_lock_bh(&br->lock); + ret = br_setport(br_port_get_rtnl(dev), data); + spin_unlock_bh(&br->lock); + + return ret; } static int br_port_fill_slave_info(struct sk_buff *skb, diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index b4b6dab9c285..ed74ffaa851f 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p) br_send_config_bpdu(p, &bpdu); p->topology_change_ack = 0; p->config_pending = 0; - mod_timer(&p->hold_timer, - round_jiffies(jiffies + BR_HOLD_TIME)); + if (p->br->stp_enabled == BR_KERNEL_STP) + mod_timer(&p->hold_timer, + round_jiffies(jiffies + BR_HOLD_TIME)); } } diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index a2730e7196cd..4ca449a16132 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -48,7 +48,8 @@ void 
br_stp_enable_bridge(struct net_bridge *br) struct net_bridge_port *p; spin_lock_bh(&br->lock); - mod_timer(&br->hello_timer, jiffies + br->hello_time); + if (br->stp_enabled == BR_KERNEL_STP) + mod_timer(&br->hello_timer, jiffies + br->hello_time); mod_timer(&br->gc_timer, jiffies + HZ/10); br_config_bpdu_generation(br); @@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br) int r; char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL }; char *envp[] = { NULL }; + struct net_bridge_port *p; r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); @@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br) if (r == 0) { br->stp_enabled = BR_USER_STP; br_debug(br, "userspace STP started\n"); + /* Stop hello and hold timers */ + del_timer(&br->hello_timer); + list_for_each_entry(p, &br->port_list, list) + del_timer(&p->hold_timer); } else { br->stp_enabled = BR_KERNEL_STP; br_debug(br, "using kernel STP\n"); @@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br) int r; char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL }; char *envp[] = { NULL }; + struct net_bridge_port *p; if (br->stp_enabled == BR_USER_STP) { r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); br_info(br, "userspace STP stopped, return code %d\n", r); /* To start timers on any ports left in blocking */ + mod_timer(&br->hello_timer, jiffies + br->hello_time); + list_for_each_entry(p, &br->port_list, list) + mod_timer(&p->hold_timer, + round_jiffies(jiffies + BR_HOLD_TIME)); spin_lock_bh(&br->lock); br_port_state_selection(br); spin_unlock_bh(&br->lock); diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 7caf7fae2d5b..5f0f5af0ec35 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg) if (br->dev->flags & IFF_UP) { br_config_bpdu_generation(br); - mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); + if (br->stp_enabled != BR_USER_STP) + mod_timer(&br->hello_timer, + round_jiffies(jiffies + br->hello_time)); } spin_unlock(&br->lock); } diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 1f2a126f4ffa..6441f47b1a8f 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c @@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state struct cgroup_cls_state *task_cls_state(struct task_struct *p) { - return css_cls_state(task_css(p, net_cls_cgrp_id)); + return css_cls_state(task_css_check(p, net_cls_cgrp_id, + rcu_read_lock_bh_held())); } EXPORT_SYMBOL_GPL(task_cls_state); diff --git a/net/core/sock.c b/net/core/sock.c index 08f16db46070..193901d09757 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) sock_copy(newsk, sk); /* SANITY */ - get_net(sock_net(newsk)); + if (likely(newsk->sk_net_refcnt)) + get_net(sock_net(newsk)); sk_node_init(&newsk->sk_node); sock_lock_init(newsk); bh_lock_sock(newsk); @@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk) * sk_wait_data - wait for data to arrive at sk_receive_queue * @sk: sock to wait on * @timeo: for how long + * @skb: last skb seen on sk_receive_queue * * Now socket state including sk->sk_err is changed only under lock, * hence we may omit checks after joining wait queue. * We check receive queue before schedule() only as optimization; * it is very likely that release_sock() added new data. 
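sk_wait_data() (documented just above, body changed just below) now takes the last skb the caller has seen and waits for skb_peek_tail() to differ from it, instead of waiting for the queue merely to be non-empty; data already on the queue no longer satisfies the wait on its own. A hedged pthread sketch of that "block until a shared tail pointer moves past a snapshot" pattern:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct node { struct node *next; int data; };

static struct node *queue_tail;                   /* protected by lock */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;

/* Wait until the queue tail is no longer the snapshot we were given. */
static void wait_for_new_data(const struct node *last_seen)
{
        pthread_mutex_lock(&lock);
        while (queue_tail == last_seen)
                pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);
}

static void *producer(void *arg)
{
        static struct node n = { .data = 42 };

        (void)arg;
        sleep(1);
        pthread_mutex_lock(&lock);
        n.next = NULL;
        queue_tail = &n;                          /* append new data */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        const struct node *snapshot;

        pthread_mutex_lock(&lock);
        snapshot = queue_tail;                    /* may well be non-NULL in real code */
        pthread_mutex_unlock(&lock);

        pthread_create(&t, NULL, producer, NULL);
        wait_for_new_data(snapshot);              /* returns only once the tail moved */
        printf("new data arrived\n");
        pthread_join(t, NULL);
        return 0;
}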
*/ -int sk_wait_data(struct sock *sk, long *timeo) +int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) { int rc; DEFINE_WAIT(wait); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); - rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); + rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); finish_wait(sk_sleep(sk), &wait); return rc; diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 52a94016526d..b5cf13a28009 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -886,7 +886,7 @@ verify_sock_status: break; } - sk_wait_data(sk, &timeo); + sk_wait_data(sk, &timeo, NULL); continue; found_ok_skb: if (len > skb->len) diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index f46e4d1306f2..214d44aef35b 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -207,7 +207,7 @@ found: } else { fq->q.meat += skb->len; } - add_frag_mem_limit(&fq->q, skb->truesize); + add_frag_mem_limit(fq->q.net, skb->truesize); if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len) { @@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, clone->data_len = clone->len; head->data_len -= clone->len; head->len -= clone->len; - add_frag_mem_limit(&fq->q, clone->truesize); + add_frag_mem_limit(fq->q.net, clone->truesize); } WARN_ON(head == NULL); @@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev, } fp = next; } - sub_frag_mem_limit(&fq->q, sum_truesize); + sub_frag_mem_limit(fq->q.net, sum_truesize); head->next = NULL; head->dev = dev; diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 1d59e50ce8b7..34a308573f4b 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1026,14 +1026,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev) neigh = neigh_lookup(&arp_tbl, &ip, dev); if (neigh) { - read_lock_bh(&neigh->lock); - memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); - r->arp_flags = arp_state_to_flags(neigh); - read_unlock_bh(&neigh->lock); - r->arp_ha.sa_family = dev->type; - strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); + if (!(neigh->nud_state & NUD_NOARP)) { + read_lock_bh(&neigh->lock); + memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); + r->arp_flags = arp_state_to_flags(neigh); + read_unlock_bh(&neigh->lock); + r->arp_ha.sa_family = dev->type; + strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); + err = 0; + } neigh_release(neigh); - err = 0; } return err; } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index e813196c91c7..2d9cb1748f81 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh) queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0); rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); - blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); } return 0; } diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index c6211ed60b03..9c02920725db 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -13,6 +13,7 @@ struct fib_alias { u8 fa_state; u8 fa_slen; u32 tb_id; + s16 fa_default; struct rcu_head rcu; }; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 65e00399a9a6..558e196bae0f 100644 --- a/net/ipv4/fib_semantics.c +++ 
b/net/ipv4/fib_semantics.c @@ -1297,23 +1297,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event) } /* Must be invoked inside of an RCU protected region. */ -void fib_select_default(struct fib_result *res) +void fib_select_default(const struct flowi4 *flp, struct fib_result *res) { struct fib_info *fi = NULL, *last_resort = NULL; struct hlist_head *fa_head = res->fa_head; struct fib_table *tb = res->table; + u8 slen = 32 - res->prefixlen; int order = -1, last_idx = -1; - struct fib_alias *fa; + struct fib_alias *fa, *fa1 = NULL; + u32 last_prio = res->fi->fib_priority; + u8 last_tos = 0; hlist_for_each_entry_rcu(fa, fa_head, fa_list) { struct fib_info *next_fi = fa->fa_info; + if (fa->fa_slen != slen) + continue; + if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) + continue; + if (fa->tb_id != tb->tb_id) + continue; + if (next_fi->fib_priority > last_prio && + fa->fa_tos == last_tos) { + if (last_tos) + continue; + break; + } + if (next_fi->fib_flags & RTNH_F_DEAD) + continue; + last_tos = fa->fa_tos; + last_prio = next_fi->fib_priority; + if (next_fi->fib_scope != res->scope || fa->fa_type != RTN_UNICAST) continue; - - if (next_fi->fib_priority > res->fi->fib_priority) - break; if (!next_fi->fib_nh[0].nh_gw || next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK) continue; @@ -1323,10 +1340,11 @@ void fib_select_default(struct fib_result *res) if (!fi) { if (next_fi != res->fi) break; + fa1 = fa; } else if (!fib_detect_death(fi, order, &last_resort, - &last_idx, tb->tb_default)) { + &last_idx, fa1->fa_default)) { fib_result_assign(res, fi); - tb->tb_default = order; + fa1->fa_default = order; goto out; } fi = next_fi; @@ -1334,20 +1352,21 @@ void fib_select_default(struct fib_result *res) } if (order <= 0 || !fi) { - tb->tb_default = -1; + if (fa1) + fa1->fa_default = -1; goto out; } if (!fib_detect_death(fi, order, &last_resort, &last_idx, - tb->tb_default)) { + fa1->fa_default)) { fib_result_assign(res, fi); - tb->tb_default = order; + fa1->fa_default = order; goto out; } if (last_idx >= 0) fib_result_assign(res, last_resort); - tb->tb_default = last_idx; + fa1->fa_default = last_idx; out: return; } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 15d32612e3c6..37c4bb89a708 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) new_fa->fa_state = state & ~FA_S_ACCESSED; new_fa->fa_slen = fa->fa_slen; new_fa->tb_id = tb->tb_id; + new_fa->fa_default = -1; err = switchdev_fib_ipv4_add(key, plen, fi, new_fa->fa_tos, @@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) new_fa->fa_state = 0; new_fa->fa_slen = slen; new_fa->tb_id = tb->tb_id; + new_fa->fa_default = -1; /* (Optionally) offload fib entry to switch hardware. */ err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type, @@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb) if (hlist_empty(&n->leaf)) { put_child_root(pn, n->key, NULL); node_free(n); - } else { - leaf_pull_suffix(pn, n); } } } @@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb) if (hlist_empty(&n->leaf)) { put_child_root(pn, n->key, NULL); node_free(n); - } else { - leaf_pull_suffix(pn, n); } } @@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias) return NULL; tb->tb_id = id; - tb->tb_default = -1; tb->tb_num_default = 0; tb->tb_data = (alias ? 
alias->__data : tb->__data); diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 5e346a082e5f..d0a7c0319e3d 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb) unsigned int evicted = 0; HLIST_HEAD(expired); -evict_again: spin_lock(&hb->chain_lock); hlist_for_each_entry_safe(fq, n, &hb->chain, list) { if (!inet_fragq_should_evict(fq)) continue; - if (!del_timer(&fq->timer)) { - /* q expiring right now thus increment its refcount so - * it won't be freed under us and wait until the timer - * has finished executing then destroy it - */ - atomic_inc(&fq->refcnt); - spin_unlock(&hb->chain_lock); - del_timer_sync(&fq->timer); - inet_frag_put(fq, f); - goto evict_again; - } + if (!del_timer(&fq->timer)) + continue; - fq->flags |= INET_FRAG_EVICTED; - hlist_del(&fq->list); - hlist_add_head(&fq->list, &expired); + hlist_add_head(&fq->list_evictor, &expired); ++evicted; } spin_unlock(&hb->chain_lock); - hlist_for_each_entry_safe(fq, n, &expired, list) + hlist_for_each_entry_safe(fq, n, &expired, list_evictor) f->frag_expire((unsigned long) fq); return evicted; @@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f) int i; nf->low_thresh = 0; - local_bh_disable(); evict_again: + local_bh_disable(); seq = read_seqbegin(&f->rnd_seqlock); for (i = 0; i < INETFRAGS_HASHSZ ; i++) inet_evict_bucket(f, &f->hash[i]); - if (read_seqretry(&f->rnd_seqlock, seq)) - goto evict_again; - local_bh_enable(); + cond_resched(); + + if (read_seqretry(&f->rnd_seqlock, seq) || + percpu_counter_sum(&nf->mem)) + goto evict_again; percpu_counter_destroy(&nf->mem); } @@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) struct inet_frag_bucket *hb; hb = get_frag_bucket_locked(fq, f); - if (!(fq->flags & INET_FRAG_EVICTED)) - hlist_del(&fq->list); + hlist_del(&fq->list); + fq->flags |= INET_FRAG_COMPLETE; spin_unlock(&hb->chain_lock); } @@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f) if (!(fq->flags & INET_FRAG_COMPLETE)) { fq_unlink(fq, f); atomic_dec(&fq->refcnt); - fq->flags |= INET_FRAG_COMPLETE; } } EXPORT_SYMBOL(inet_frag_kill); @@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f) fp = xp; } sum = sum_truesize + f->qsize; - sub_frag_mem_limit(q, sum); if (f->destructor) f->destructor(q); kmem_cache_free(f->frags_cachep, q); + + sub_frag_mem_limit(nf, sum); } EXPORT_SYMBOL(inet_frag_destroy); @@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf, q->net = nf; f->constructor(q, arg); - add_frag_mem_limit(q, f->qsize); + add_frag_mem_limit(nf, f->qsize); setup_timer(&q->timer, f->frag_expire, (unsigned long)q); spin_lock_init(&q->lock); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index f44bccc42494..d96722ae8979 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg) ipq_kill(qp); IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); - if (!(qp->q.flags & INET_FRAG_EVICTED)) { + if (!inet_frag_evicting(&qp->q)) { struct sk_buff *head = qp->q.fragments; const struct iphdr *iph; int err; @@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp) kfree_skb(fp); fp = xp; } while (fp); - sub_frag_mem_limit(&qp->q, sum_truesize); + sub_frag_mem_limit(qp->q.net, sum_truesize); qp->q.flags = 0; qp->q.len = 0; @@ -455,7 +455,7 @@ 
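/* Illustrative sketch, not part of the merged patches: after the
 * inet_fragment rework above, fragment memory is charged against the
 * per-namespace accounting (struct netns_frags) rather than the individual
 * queue, so callers pass fq->q.net / qp->q.net instead of &fq->q. The
 * example_* helpers are hypothetical.
 */
static void example_charge_fragment(struct inet_frag_queue *q,
				    struct sk_buff *skb)
{
	/* Charge the queued fragment to the namespace-wide limit. */
	add_frag_mem_limit(q->net, skb->truesize);
}

static void example_uncharge_fragment(struct inet_frag_queue *q,
				      struct sk_buff *skb)
{
	/* Released with the same handle when the fragment is dropped or
	 * the queue is reassembled/destroyed.
	 */
	sub_frag_mem_limit(q->net, skb->truesize);
}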
found: qp->q.fragments = next; qp->q.meat -= free_it->len; - sub_frag_mem_limit(&qp->q, free_it->truesize); + sub_frag_mem_limit(qp->q.net, free_it->truesize); kfree_skb(free_it); } } @@ -479,7 +479,7 @@ found: qp->q.stamp = skb->tstamp; qp->q.meat += skb->len; qp->ecn |= ecn; - add_frag_mem_limit(&qp->q, skb->truesize); + add_frag_mem_limit(qp->q.net, skb->truesize); if (offset == 0) qp->q.flags |= INET_FRAG_FIRST_IN; @@ -586,7 +586,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; - add_frag_mem_limit(&qp->q, clone->truesize); + add_frag_mem_limit(qp->q.net, clone->truesize); } skb_shinfo(head)->frag_list = head->next; @@ -601,7 +601,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } - sub_frag_mem_limit(&qp->q, head->truesize); + sub_frag_mem_limit(qp->q.net, head->truesize); head->next = NULL; head->dev = dev; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 11096396ef4a..908f7ef2f12a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2194,7 +2194,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) if (!res.prefixlen && res.table->tb_num_default > 1 && res.type == RTN_UNICAST && !fl4->flowi4_oif) - fib_select_default(&res); + fib_select_default(fl4, &res); if (!fl4->saddr) fl4->saddr = FIB_RES_PREFSRC(net, res); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 7f4056785acc..45534a5ab430 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, ret = -EAGAIN; break; } - sk_wait_data(sk, &timeo); + sk_wait_data(sk, &timeo, NULL); if (signal_pending(current)) { ret = sock_intr_errno(timeo); break; @@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int target; /* Read at least this many bytes */ long timeo; struct task_struct *user_recv = NULL; - struct sk_buff *skb; + struct sk_buff *skb, *last; u32 urg_hole = 0; if (unlikely(flags & MSG_ERRQUEUE)) @@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, /* Next get a buffer. */ + last = skb_peek_tail(&sk->sk_receive_queue); skb_queue_walk(&sk->sk_receive_queue, skb) { + last = skb; /* Now that we have two receive queues this * shouldn't happen. */ @@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, /* Do not sleep, just process backlog. 
*/ release_sock(sk); lock_sock(sk); - } else - sk_wait_data(sk, &timeo); + } else { + sk_wait_data(sk, &timeo, last); + } if (user_recv) { int chunk; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 6e184e02fd3c..b3054611f88a 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1648,6 +1648,7 @@ int ndisc_rcv(struct sk_buff *skb) static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_change_info *change_info; struct net *net = dev_net(dev); struct inet6_dev *idev; @@ -1662,6 +1663,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, ndisc_send_unsol_na(dev); in6_dev_put(idev); break; + case NETDEV_CHANGE: + change_info = ptr; + if (change_info->flags_changed & IFF_NOARP) + neigh_changeaddr(&nd_tbl, dev); + break; case NETDEV_DOWN: neigh_ifdown(&nd_tbl, dev); fib6_run_gc(0, net, false); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 6f187c8d8a1b..6d02498172c1 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -348,7 +348,7 @@ found: fq->ecn |= ecn; if (payload_len > fq->q.max_size) fq->q.max_size = payload_len; - add_frag_mem_limit(&fq->q, skb->truesize); + add_frag_mem_limit(fq->q.net, skb->truesize); /* The first fragment. * nhoffset is obtained from the first fragment, of course. @@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev) clone->ip_summed = head->ip_summed; NFCT_FRAG6_CB(clone)->orig = NULL; - add_frag_mem_limit(&fq->q, clone->truesize); + add_frag_mem_limit(fq->q.net, clone->truesize); } /* We have to remove fragment header from datagram and to relocate @@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } - sub_frag_mem_limit(&fq->q, head->truesize); + sub_frag_mem_limit(fq->q.net, head->truesize); head->ignore_df = 1; head->next = NULL; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 8ffa2c8cce77..f1159bb76e0a 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); - if (fq->q.flags & INET_FRAG_EVICTED) + if (inet_frag_evicting(&fq->q)) goto out_rcu_unlock; IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); @@ -330,7 +330,7 @@ found: fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; fq->ecn |= ecn; - add_frag_mem_limit(&fq->q, skb->truesize); + add_frag_mem_limit(fq->q.net, skb->truesize); /* The first fragment. * nhoffset is obtained from the first fragment, of course. 
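/* Illustrative sketch, not part of the merged patches: the ndisc hunk above
 * extends the netdev notifier so that toggling IFF_NOARP flushes the IPv6
 * neighbour entries for that device. The general shape of such a handler,
 * with a hypothetical example_* name, is:
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_change_info *change_info;

	switch (event) {
	case NETDEV_CHANGE:
		/* For NETDEV_CHANGE, ptr carries the changed flag mask. */
		change_info = ptr;
		if (change_info->flags_changed & IFF_NOARP)
			neigh_changeaddr(&nd_tbl, dev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}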
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; - add_frag_mem_limit(&fq->q, clone->truesize); + add_frag_mem_limit(fq->q.net, clone->truesize); } /* We have to remove fragment header from datagram and to relocate @@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, } fp = next; } - sub_frag_mem_limit(&fq->q, sum_truesize); + sub_frag_mem_limit(fq->q.net, sum_truesize); head->next = NULL; head->dev = dev; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 8fd9febaa5ba..8dab4e569571 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo) if (signal_pending(current)) break; rc = 0; - if (sk_wait_data(sk, &timeo)) + if (sk_wait_data(sk, &timeo, NULL)) break; } return rc; @@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, release_sock(sk); lock_sock(sk); } else - sk_wait_data(sk, &timeo); + sk_wait_data(sk, &timeo, NULL); if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 5d2b806a862e..38fbc194b9cb 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc, * return *ignored=0 i.e. ICMP and NF_DROP */ sched = rcu_dereference(svc->scheduler); - dest = sched->schedule(svc, skb, iph); + if (sched) { + /* read svc->sched_data after svc->scheduler */ + smp_rmb(); + dest = sched->schedule(svc, skb, iph); + } else { + dest = NULL; + } if (!dest) { IP_VS_DBG(1, "p-schedule: no dest found.\n"); kfree(param.pe_data); @@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, } sched = rcu_dereference(svc->scheduler); - dest = sched->schedule(svc, skb, iph); + if (sched) { + /* read svc->sched_data after svc->scheduler */ + smp_rmb(); + dest = sched->schedule(svc, skb, iph); + } else { + dest = NULL; + } if (dest == NULL) { IP_VS_DBG(1, "Schedule: no dest found.\n"); return NULL; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 285eae3a1454..24c554201a76 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, __ip_vs_dst_cache_reset(dest); spin_unlock_bh(&dest->dst_lock); - sched = rcu_dereference_protected(svc->scheduler, 1); if (add) { ip_vs_start_estimator(svc->net, &dest->stats); list_add_rcu(&dest->n_list, &svc->destinations); svc->num_dests++; - if (sched->add_dest) + sched = rcu_dereference_protected(svc->scheduler, 1); + if (sched && sched->add_dest) sched->add_dest(svc, dest); } else { - if (sched->upd_dest) + sched = rcu_dereference_protected(svc->scheduler, 1); + if (sched && sched->upd_dest) sched->upd_dest(svc, dest); } } @@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc, struct ip_vs_scheduler *sched; sched = rcu_dereference_protected(svc->scheduler, 1); - if (sched->del_dest) + if (sched && sched->del_dest) sched->del_dest(svc, dest); } } @@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, ip_vs_use_count_inc(); /* Lookup the scheduler by 'u->sched_name' */ - sched = ip_vs_scheduler_get(u->sched_name); - if (sched == 
NULL) { - pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); - ret = -ENOENT; - goto out_err; + if (strcmp(u->sched_name, "none")) { + sched = ip_vs_scheduler_get(u->sched_name); + if (!sched) { + pr_info("Scheduler module ip_vs_%s not found\n", + u->sched_name); + ret = -ENOENT; + goto out_err; + } } if (u->pe_name && *u->pe_name) { @@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, spin_lock_init(&svc->stats.lock); /* Bind the scheduler */ - ret = ip_vs_bind_scheduler(svc, sched); - if (ret) - goto out_err; - sched = NULL; + if (sched) { + ret = ip_vs_bind_scheduler(svc, sched); + if (ret) + goto out_err; + sched = NULL; + } /* Bind the ct retriever */ RCU_INIT_POINTER(svc->pe, pe); @@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, static int ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) { - struct ip_vs_scheduler *sched, *old_sched; + struct ip_vs_scheduler *sched = NULL, *old_sched; struct ip_vs_pe *pe = NULL, *old_pe = NULL; int ret = 0; /* * Lookup the scheduler, by 'u->sched_name' */ - sched = ip_vs_scheduler_get(u->sched_name); - if (sched == NULL) { - pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); - return -ENOENT; + if (strcmp(u->sched_name, "none")) { + sched = ip_vs_scheduler_get(u->sched_name); + if (!sched) { + pr_info("Scheduler module ip_vs_%s not found\n", + u->sched_name); + return -ENOENT; + } } old_sched = sched; @@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) old_sched = rcu_dereference_protected(svc->scheduler, 1); if (sched != old_sched) { + if (old_sched) { + ip_vs_unbind_scheduler(svc, old_sched); + RCU_INIT_POINTER(svc->scheduler, NULL); + /* Wait all svc->sched_data users */ + synchronize_rcu(); + } /* Bind the new scheduler */ - ret = ip_vs_bind_scheduler(svc, sched); - if (ret) { - old_sched = sched; - goto out; + if (sched) { + ret = ip_vs_bind_scheduler(svc, sched); + if (ret) { + ip_vs_scheduler_put(sched); + goto out; + } } - /* Unbind the old scheduler on success */ - ip_vs_unbind_scheduler(svc, old_sched); } /* @@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) const struct ip_vs_iter *iter = seq->private; const struct ip_vs_dest *dest; struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); + char *sched_name = sched ? sched->name : "none"; if (iter->table == ip_vs_svc_table) { #ifdef CONFIG_IP_VS_IPV6 @@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v) ip_vs_proto_name(svc->protocol), &svc->addr.in6, ntohs(svc->port), - sched->name); + sched_name); else #endif seq_printf(seq, "%s %08X:%04X %s %s ", ip_vs_proto_name(svc->protocol), ntohl(svc->addr.ip), ntohs(svc->port), - sched->name, + sched_name, (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); } else { seq_printf(seq, "FWM %08X %s %s", - svc->fwmark, sched->name, + svc->fwmark, sched_name, (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); } @@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) { struct ip_vs_scheduler *sched; struct ip_vs_kstats kstats; + char *sched_name; sched = rcu_dereference_protected(src->scheduler, 1); + sched_name = sched ? 
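/* Illustrative sketch, not part of the merged patches: with the "none"
 * pseudo-scheduler accepted above, svc->scheduler may legitimately be NULL,
 * so every scheduling path re-checks it under RCU and pairs the read of
 * svc->sched_data with the smp_rmb() shown in the ip_vs_core hunks. The
 * example_* helper name is hypothetical.
 */
static struct ip_vs_dest *example_pick_dest(struct ip_vs_service *svc,
					    struct sk_buff *skb,
					    struct ip_vs_iphdr *iph)
{
	struct ip_vs_scheduler *sched;
	struct ip_vs_dest *dest = NULL;

	sched = rcu_dereference(svc->scheduler);
	if (sched) {
		/* read svc->sched_data after svc->scheduler */
		smp_rmb();
		dest = sched->schedule(svc, skb, iph);
	}
	return dest;
}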
sched->name : "none"; dst->protocol = src->protocol; dst->addr = src->addr.ip; dst->port = src->port; dst->fwmark = src->fwmark; - strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name)); + strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name)); dst->flags = src->flags; dst->timeout = src->timeout / HZ; dst->netmask = src->netmask; @@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb, struct ip_vs_flags flags = { .flags = svc->flags, .mask = ~0 }; struct ip_vs_kstats kstats; + char *sched_name; nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); if (!nl_service) @@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb, } sched = rcu_dereference_protected(svc->scheduler, 1); + sched_name = sched ? sched->name : "none"; pe = rcu_dereference_protected(svc->pe, 1); - if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) || + if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) || (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) || nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c index 199760c71f39..7e8141647943 100644 --- a/net/netfilter/ipvs/ip_vs_sched.c +++ b/net/netfilter/ipvs/ip_vs_sched.c @@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc, if (sched->done_service) sched->done_service(svc); - /* svc->scheduler can not be set to NULL */ + /* svc->scheduler can be set to NULL only by caller */ } @@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler) void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg) { - struct ip_vs_scheduler *sched; + struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); + char *sched_name = sched ? sched->name : "none"; - sched = rcu_dereference(svc->scheduler); if (svc->fwmark) { IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n", - sched->name, svc->fwmark, svc->fwmark, msg); + sched_name, svc->fwmark, svc->fwmark, msg); #ifdef CONFIG_IP_VS_IPV6 } else if (svc->af == AF_INET6) { IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n", - sched->name, ip_vs_proto_name(svc->protocol), + sched_name, ip_vs_proto_name(svc->protocol), &svc->addr.in6, ntohs(svc->port), msg); #endif } else { IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n", - sched->name, ip_vs_proto_name(svc->protocol), + sched_name, ip_vs_proto_name(svc->protocol), &svc->addr.ip, ntohs(svc->port), msg); } } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index b08ba9538d12..d99ad93eb855 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, pkts = atomic_add_return(1, &cp->in_pkts); else pkts = sysctl_sync_threshold(ipvs); - ip_vs_sync_conn(net, cp->control, pkts); + ip_vs_sync_conn(net, cp, pkts); } } diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index bf66a8657a5f..258a0b0e82a2 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, memset(&fl4, 0, sizeof(fl4)); fl4.daddr = daddr; - fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 
FLOWI_FLAG_KNOWN_NH : 0; @@ -505,6 +504,13 @@ err_put: return -1; err_unreach: + /* The ip6_link_failure function requires the dev field to be set + * in order to get the net (further for the sake of fwmark + * reflection). + */ + if (!skb->dev) + skb->dev = skb_dst(skb)->dev; + dst_link_failure(skb); return -1; } @@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb, if (ret == NF_ACCEPT) { nf_reset(skb); skb_forward_csum(skb); + if (!skb->sk) + skb_sender_cpu_clear(skb); } return ret; } +/* In the event of a remote destination, it's possible that we would have + * matches against an old socket (particularly a TIME-WAIT socket). This + * causes havoc down the line (ip_local_out et. al. expect regular sockets + * and invalid memory accesses will happen) so simply drop the association + * in this case. +*/ +static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb) +{ + /* If dev is set, the packet came from the LOCAL_IN callback and + * not from a local TCP socket. + */ + if (skb->dev) + skb_orphan(skb); +} + /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, struct ip_vs_conn *cp, int local) @@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, ip_vs_notrack(skb); else ip_vs_update_conntrack(skb, cp, 1); + + /* Remove the early_demux association unless it's bound for the + * exact same port and address on this host after translation. + */ + if (!local || cp->vport != cp->dport || + !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr)) + ip_vs_drop_early_demux_sk(skb); + if (!local) { skb_forward_csum(skb); + if (!skb->sk) + skb_sender_cpu_clear(skb); NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, NULL, skb_dst(skb)->dev, dst_output_sk); } else ret = NF_ACCEPT; + return ret; } @@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) ip_vs_notrack(skb); if (!local) { + ip_vs_drop_early_demux_sk(skb); skb_forward_csum(skb); + if (!skb->sk) + skb_sender_cpu_clear(skb); NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, NULL, skb_dst(skb)->dev, dst_output_sk); } else @@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, struct ipv6hdr *old_ipv6h = NULL; #endif + ip_vs_drop_early_demux_sk(skb); + if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 13fad8668f83..651039ad1681 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct) spin_unlock(&pcpu->lock); } +/* Released via destroy_conntrack() */ +struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags) +{ + struct nf_conn *tmpl; + + tmpl = kzalloc(sizeof(struct nf_conn), GFP_KERNEL); + if (tmpl == NULL) + return NULL; + + tmpl->status = IPS_TEMPLATE; + write_pnet(&tmpl->ct_net, net); + +#ifdef CONFIG_NF_CONNTRACK_ZONES + if (zone) { + struct nf_conntrack_zone *nf_ct_zone; + + nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, GFP_ATOMIC); + if (!nf_ct_zone) + goto out_free; + nf_ct_zone->id = zone; + } +#endif + atomic_set(&tmpl->ct_general.use, 0); + + return tmpl; +#ifdef CONFIG_NF_CONNTRACK_ZONES +out_free: + kfree(tmpl); + return NULL; +#endif +} +EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc); + +static void 
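/* Illustrative sketch, not part of the merged patches: the ip_vs_xmit hunks
 * above drop any stale early-demux socket association (and the sender CPU
 * hint) before a packet is forwarded, since a TIME-WAIT match would confuse
 * ip_local_out() and friends. The pattern applied on each transmit path,
 * under a hypothetical example_* name, is roughly:
 */
static void example_prepare_forwarded_skb(struct sk_buff *skb)
{
	/* skb->dev being set means the packet came from the LOCAL_IN
	 * callback and not from a local TCP socket, so the association
	 * is not ours to keep.
	 */
	if (skb->dev)
		skb_orphan(skb);

	if (!skb->sk)
		skb_sender_cpu_clear(skb);
}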
nf_ct_tmpl_free(struct nf_conn *tmpl) +{ + nf_ct_ext_destroy(tmpl); + nf_ct_ext_free(tmpl); + kfree(tmpl); +} + static void destroy_conntrack(struct nf_conntrack *nfct) { @@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct) NF_CT_ASSERT(atomic_read(&nfct->use) == 0); NF_CT_ASSERT(!timer_pending(&ct->timeout)); + if (unlikely(nf_ct_is_template(ct))) { + nf_ct_tmpl_free(ct); + return; + } rcu_read_lock(); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto && l4proto->destroy) @@ -540,28 +584,6 @@ out: } EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); -/* deletion from this larval template list happens via nf_ct_put() */ -void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl) -{ - struct ct_pcpu *pcpu; - - __set_bit(IPS_TEMPLATE_BIT, &tmpl->status); - __set_bit(IPS_CONFIRMED_BIT, &tmpl->status); - nf_conntrack_get(&tmpl->ct_general); - - /* add this conntrack to the (per cpu) tmpl list */ - local_bh_disable(); - tmpl->cpu = smp_processor_id(); - pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu); - - spin_lock(&pcpu->lock); - /* Overload tuple linked list to put us in template list. */ - hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode, - &pcpu->tmpl); - spin_unlock_bh(&pcpu->lock); -} -EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert); - /* Confirm a connection given skb; places it in hash table */ int __nf_conntrack_confirm(struct sk_buff *skb) @@ -1751,7 +1773,6 @@ int nf_conntrack_init_net(struct net *net) spin_lock_init(&pcpu->lock); INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); - INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL); } net->ct.stat = alloc_percpu(struct ip_conntrack_stat); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 7a17070c5dab..b45a4223cb05 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a, a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; } - return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); + return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) && + nf_ct_zone(a->master) == nf_ct_zone(b->master); } static inline int expect_matches(const struct nf_conntrack_expect *a, diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index d1c23940a86a..6b8b0abbfab4 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone, } err = nf_ct_expect_related_report(exp, portid, report); - if (err < 0) - goto err_exp; - - return 0; -err_exp: nf_ct_expect_put(exp); err_ct: nf_ct_put(ct); diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 789feeae6c44..71f1e9fdfa18 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -349,12 +349,10 @@ static void __net_exit synproxy_proc_exit(struct net *net) static int __net_init synproxy_net_init(struct net *net) { struct synproxy_net *snet = synproxy_pernet(net); - struct nf_conntrack_tuple t; struct nf_conn *ct; int err = -ENOMEM; - memset(&t, 0, sizeof(t)); - ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL); + ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL); if (IS_ERR(ct)) { err = PTR_ERR(ct); goto err1; @@ -365,7 +363,8 @@ static int __net_init synproxy_net_init(struct net 
*net) if (!nfct_synproxy_ext_add(ct)) goto err2; - nf_conntrack_tmpl_insert(net, ct); + __set_bit(IPS_CONFIRMED_BIT, &ct->status); + nf_conntrack_get(&ct->ct_general); snet->tmpl = ct; snet->stats = alloc_percpu(struct synproxy_stats); diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 75747aecdebe..c6630030c912 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -184,7 +184,6 @@ out: static int xt_ct_tg_check(const struct xt_tgchk_param *par, struct xt_ct_target_info_v1 *info) { - struct nf_conntrack_tuple t; struct nf_conn *ct; int ret = -EOPNOTSUPP; @@ -202,8 +201,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, if (ret < 0) goto err1; - memset(&t, 0, sizeof(t)); - ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); + ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL); ret = PTR_ERR(ct); if (IS_ERR(ct)) goto err2; @@ -227,8 +225,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, if (ret < 0) goto err3; } - - nf_conntrack_tmpl_insert(par->net, ct); + __set_bit(IPS_CONFIRMED_BIT, &ct->status); + nf_conntrack_get(&ct->ct_general); out: info->ct = ct; return 0; diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c index f407ebc13481..29d2c31f406c 100644 --- a/net/netfilter/xt_IDLETIMER.c +++ b/net/netfilter/xt_IDLETIMER.c @@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info) goto out; } + sysfs_attr_init(&info->timer->attr.attr); info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); if (!info->timer->attr.attr.name) { ret = -ENOMEM; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2af8590d9e0f..b5afe538bb88 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2401,7 +2401,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) } tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, addr, hlen); - if (tp_len > dev->mtu + dev->hard_header_len) { + if (likely(tp_len >= 0) && + tp_len > dev->mtu + dev->hard_header_len) { struct ethhdr *ehdr; /* Earlier code assumed this would be a VLAN pkt, * double-check this now that we have the actual @@ -2782,7 +2783,7 @@ static int packet_release(struct socket *sock) static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) { struct packet_sock *po = pkt_sk(sk); - const struct net_device *dev_curr; + struct net_device *dev_curr; __be16 proto_curr; bool need_rehook; @@ -2806,15 +2807,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) po->num = proto; po->prot_hook.type = proto; - - if (po->prot_hook.dev) - dev_put(po->prot_hook.dev); - po->prot_hook.dev = dev; po->ifindex = dev ? 
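/* Illustrative sketch, not part of the merged patches: template conntracks
 * are now obtained from nf_ct_tmpl_alloc() and freed through the normal
 * destroy path instead of living on a per-cpu template list; users mark the
 * template confirmed and take a reference themselves, as the synproxy and
 * xt_CT hunks above do. The example_* helper name is hypothetical.
 */
static struct nf_conn *example_make_template(struct net *net, u16 zone)
{
	struct nf_conn *tmpl;

	tmpl = nf_ct_tmpl_alloc(net, zone, GFP_KERNEL);
	if (!tmpl)
		return NULL;

	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	return tmpl;
}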
dev->ifindex : 0; packet_cached_dev_assign(po, dev); } + if (dev_curr) + dev_put(dev_curr); if (proto == 0 || !need_rehook) goto out_unlock; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 074a32f466f8..b087087ccfa9 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -54,7 +54,7 @@ void tcf_hash_destroy(struct tc_action *a) } EXPORT_SYMBOL(tcf_hash_destroy); -int tcf_hash_release(struct tc_action *a, int bind) +int __tcf_hash_release(struct tc_action *a, bool bind, bool strict) { struct tcf_common *p = a->priv; int ret = 0; @@ -62,7 +62,7 @@ int tcf_hash_release(struct tc_action *a, int bind) if (p) { if (bind) p->tcfc_bindcnt--; - else if (p->tcfc_bindcnt > 0) + else if (strict && p->tcfc_bindcnt > 0) return -EPERM; p->tcfc_refcnt--; @@ -73,9 +73,10 @@ int tcf_hash_release(struct tc_action *a, int bind) ret = 1; } } + return ret; } -EXPORT_SYMBOL(tcf_hash_release); +EXPORT_SYMBOL(__tcf_hash_release); static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, struct tc_action *a) @@ -145,7 +146,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a) head = &hinfo->htab[tcf_hash(i, hinfo->hmask)]; hlist_for_each_entry_safe(p, n, head, tcfc_head) { a->priv = p; - ret = tcf_hash_release(a, 0); + ret = __tcf_hash_release(a, false, true); if (ret == ACT_P_DELETED) { module_put(a->ops->owner); n_i++; @@ -432,7 +433,7 @@ int tcf_action_destroy(struct list_head *actions, int bind) int ret = 0; list_for_each_entry_safe(a, tmp, actions, list) { - ret = tcf_hash_release(a, bind); + ret = __tcf_hash_release(a, bind, true); if (ret == ACT_P_DELETED) module_put(a->ops->owner); else if (ret < 0) diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index e9e923a8c747..aaae8e83bf18 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -27,9 +27,10 @@ struct tcf_bpf_cfg { struct bpf_prog *filter; struct sock_filter *bpf_ops; - char *bpf_name; + const char *bpf_name; u32 bpf_fd; u16 bpf_num_ops; + bool is_ebpf; }; static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, @@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg) cfg->bpf_ops = bpf_ops; cfg->bpf_num_ops = bpf_num_ops; cfg->filter = fp; + cfg->is_ebpf = false; return 0; } @@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg) cfg->bpf_fd = bpf_fd; cfg->bpf_name = name; cfg->filter = fp; + cfg->is_ebpf = true; return 0; } +static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg) +{ + if (cfg->is_ebpf) + bpf_prog_put(cfg->filter); + else + bpf_prog_destroy(cfg->filter); + + kfree(cfg->bpf_ops); + kfree(cfg->bpf_name); +} + +static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog, + struct tcf_bpf_cfg *cfg) +{ + cfg->is_ebpf = tcf_bpf_is_ebpf(prog); + cfg->filter = prog->filter; + + cfg->bpf_ops = prog->bpf_ops; + cfg->bpf_name = prog->bpf_name; +} + static int tcf_bpf_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action *act, int replace, int bind) { struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; + struct tcf_bpf_cfg cfg, old; struct tc_act_bpf *parm; struct tcf_bpf *prog; - struct tcf_bpf_cfg cfg; bool is_bpf, is_ebpf; int ret; @@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla, prog = to_bpf(act); spin_lock_bh(&prog->tcf_lock); + if (ret != ACT_P_CREATED) + tcf_bpf_prog_fill_cfg(prog, &old); + prog->bpf_ops = cfg.bpf_ops; prog->bpf_name = cfg.bpf_name; @@ -316,32 +343,22 @@ static int tcf_bpf_init(struct net *net, struct 
nlattr *nla, if (ret == ACT_P_CREATED) tcf_hash_insert(act); + else + tcf_bpf_cfg_cleanup(&old); return ret; destroy_fp: - if (is_ebpf) - bpf_prog_put(cfg.filter); - else - bpf_prog_destroy(cfg.filter); - - kfree(cfg.bpf_ops); - kfree(cfg.bpf_name); - + tcf_bpf_cfg_cleanup(&cfg); return ret; } static void tcf_bpf_cleanup(struct tc_action *act, int bind) { - const struct tcf_bpf *prog = act->priv; - - if (tcf_bpf_is_ebpf(prog)) - bpf_prog_put(prog->filter); - else - bpf_prog_destroy(prog->filter); + struct tcf_bpf_cfg tmp; - kfree(prog->bpf_ops); - kfree(prog->bpf_name); + tcf_bpf_prog_fill_cfg(act->priv, &tmp); + tcf_bpf_cfg_cleanup(&tmp); } static struct tc_action_ops act_bpf_ops __read_mostly = { diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index ce8676ad892f..e38a7701f154 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -69,13 +69,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, } ret = ACT_P_CREATED; } else { - p = to_pedit(a); - tcf_hash_release(a, bind); if (bind) return 0; + tcf_hash_release(a, bind); if (!ovr) return -EEXIST; - + p = to_pedit(a); if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 93d5742dc7e0..6a783afe4960 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch) { struct choke_sched_data *q = qdisc_priv(sch); + while (q->head != q->tail) { + struct sk_buff *skb = q->tab[q->head]; + + q->head = (q->head + 1) & q->tab_mask; + if (!skb) + continue; + qdisc_qstats_backlog_dec(sch, skb); + --sch->q.qlen; + qdisc_drop(skb, sch); + } + + memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *)); + q->head = q->tail = 0; red_restart(&q->vars); } diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index 89f8fcf73f18..ade9445a55ab 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = { .peek = qdisc_peek_head, .init = plug_init, .change = plug_change, + .reset = qdisc_reset_queue, .owner = THIS_MODULE, }; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1425ec2bbd5a..17bef01b9aa3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval, if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) return -EFAULT; - if (sctp_sk(sk)->subscribe.sctp_data_io_event) - pr_warn_ratelimited(DEPRECATED "%s (pid %d) " - "Requested SCTP_SNDRCVINFO event.\n" - "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n", - current->comm, task_pid_nr(current)); - /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, * if there is no data to be sent or retransmit, the stack will * immediately send up this notification. 
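/* Illustrative sketch, not part of the merged patches: the act_bpf rework
 * above snapshots the old program configuration before installing the new
 * one, so the replaced filter, ops array and name are released outside the
 * lock via tcf_bpf_cfg_cleanup(). A simplified view of that replace path
 * (inside net/sched/act_bpf.c; the example_* name is hypothetical):
 */
static void example_replace_bpf_cfg(struct tcf_bpf *prog,
				    const struct tcf_bpf_cfg *new_cfg)
{
	struct tcf_bpf_cfg old;

	spin_lock_bh(&prog->tcf_lock);
	tcf_bpf_prog_fill_cfg(prog, &old);	/* snapshot current state */
	prog->bpf_ops = new_cfg->bpf_ops;
	prog->bpf_name = new_cfg->bpf_name;
	/* the filter itself is published as in tcf_bpf_init() */
	spin_unlock_bh(&prog->tcf_lock);

	tcf_bpf_cfg_cleanup(&old);		/* free the replaced program */
}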
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 9825ff0f91d6..6255d141133b 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid) req = xprt_alloc_bc_req(xprt, GFP_ATOMIC); if (!req) goto not_found; - /* Note: this 'free' request adds it to xprt->bc_pa_list */ - xprt_free_bc_request(req); + list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); + xprt->bc_alloc_count++; } req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, rq_bc_pa_list); @@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) spin_lock(&xprt->bc_pa_lock); list_del(&req->rq_bc_pa_list); - xprt->bc_alloc_count--; + xprt_dec_alloc_count(xprt, 1); spin_unlock(&xprt->bc_pa_lock); req->rq_private_buf.len = copied; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cbc6af923dd1..23608eb0ded2 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task) switch (task->tk_status) { case -EAGAIN: + case -ENOBUFS: break; default: dprint_status(task); @@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task) case -ECONNABORTED: case -EADDRINUSE: case -ENOTCONN: - case -ENOBUFS: case -EPIPE: rpc_task_force_reencode(task); } @@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task) case -ECONNABORTED: rpc_force_rebind(clnt); case -EADDRINUSE: - case -ENOBUFS: rpc_delay(task, 3*HZ); case -EPIPE: case -ENOTCONN: task->tk_action = call_bind; break; + case -ENOBUFS: + rpc_delay(task, HZ>>2); case -EAGAIN: task->tk_action = call_transmit; break; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index e193c2b5476b..0030376327b7 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task) true, &sent); dprintk("RPC: %s(%u) = %d\n", __func__, xdr->len - req->rq_bytes_sent, status); + + if (status == -EAGAIN && sock_writeable(transport->inet)) + status = -ENOBUFS; + if (likely(sent > 0) || status == 0) { req->rq_bytes_sent += sent; req->rq_xmit_bytes_sent += sent; @@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task) switch (status) { case -ENOBUFS: + break; case -EAGAIN: status = xs_nospace(task); break; @@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task) if (status == -EPERM) goto process_status; + if (status == -EAGAIN && sock_writeable(transport->inet)) + status = -ENOBUFS; + if (sent > 0 || status == 0) { req->rq_xmit_bytes_sent += sent; if (sent >= req->rq_slen) @@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task) dprintk("RPC: xs_tcp_send_request(%u) = %d\n", xdr->len - req->rq_bytes_sent, status); - if (unlikely(sent == 0 && status < 0)) - break; - /* If we've sent the entire packet, immediately * reset the count of bytes sent. */ req->rq_bytes_sent += sent; @@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task) return 0; } - if (sent != 0) - continue; - status = -EAGAIN; - break; + if (status < 0) + break; + if (sent == 0) { + status = -EAGAIN; + break; + } } + if (status == -EAGAIN && sk_stream_is_writeable(transport->inet)) + status = -ENOBUFS; switch (status) { case -ENOTSOCK: status = -ENOTCONN; /* Should we call xs_close() here? 
*/ break; - case -ENOBUFS: case -EAGAIN: status = xs_nospace(task); break; @@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task) case -ECONNREFUSED: case -ENOTCONN: case -EADDRINUSE: + case -ENOBUFS: case -EPIPE: clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); } diff --git a/security/keys/keyring.c b/security/keys/keyring.c index e72548b5897e..d33437007ad2 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring, if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); - if (edit && !edit->dead_leaf) { - key_payload_reserve(keyring, - keyring->datalen - KEYQUOTA_LINK_BYTES); + if (edit) { + if (!edit->dead_leaf) { + key_payload_reserve(keyring, + keyring->datalen - KEYQUOTA_LINK_BYTES); + } assoc_array_cancel_edit(edit); } up_write(&keyring->sem); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index d126c03361ae..75888dd38a7f 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem); void snd_pcm_stream_lock(struct snd_pcm_substream *substream) { if (substream->pcm->nonatomic) { - down_read(&snd_pcm_link_rwsem); + down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING); mutex_lock(&substream->self_group.mutex); } else { read_lock(&snd_pcm_link_rwlock); diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 2682e7e3e5c9..c670db4eee70 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -248,6 +248,8 @@ efw_probe(struct fw_unit *unit, err = get_hardware_info(efw); if (err < 0) goto error; + if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2) + efw->is_af2 = true; if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) efw->is_af9 = true; diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h index 4f0201a95222..c33252b7bc84 100644 --- a/sound/firewire/fireworks/fireworks.h +++ b/sound/firewire/fireworks/fireworks.h @@ -70,6 +70,7 @@ struct snd_efw { bool resp_addr_changable; /* for quirks */ + bool is_af2; bool is_af9; u32 firmware_version; diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c index c55db1bddc80..a0762dd6231e 100644 --- a/sound/firewire/fireworks/fireworks_stream.c +++ b/sound/firewire/fireworks/fireworks_stream.c @@ -172,6 +172,9 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw) efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; /* Fireworks reset dbc at bus reset. */ efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; + /* AudioFire2 starts packets with non-zero dbc. */ + if (efw->is_af2) + efw->tx_stream.flags |= CIP_SKIP_INIT_DBC_CHECK; /* AudioFire9 always reports wrong dbs. */ if (efw->is_af9) efw->tx_stream.flags |= CIP_WRONG_DBS; diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 442500e06b7c..5676b849379d 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable) enable ? 
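/* Illustrative sketch, not part of the merged patches: the xprtsock hunks
 * above distinguish a full socket send buffer from a transient -EAGAIN — if
 * the kernel socket is still writeable, the failure is reported as -ENOBUFS
 * so the RPC layer backs off briefly instead of waiting for write space.
 * In the context of net/sunrpc/xprtsock.c (example_* name hypothetical):
 */
static int example_classify_send_error(struct sock *sk, int status)
{
	/* The datagram and AF_LOCAL paths test sock_writeable(); the TCP
	 * path uses sk_stream_is_writeable() instead.
	 */
	if (status == -EAGAIN && sock_writeable(sk))
		status = -ENOBUFS;
	return status;
}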
"enable" : "disable"); if (enable) { - if (!bus->i915_power_refcount++) + if (!bus->i915_power_refcount++) { acomp->ops->get_power(acomp->dev); + snd_hdac_set_codec_wakeup(bus, true); + snd_hdac_set_codec_wakeup(bus, false); + } } else { WARN_ON(!bus->i915_power_refcount); if (!--bus->i915_power_refcount) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 745535d1840a..c38c68f57938 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev) chip = card->private_data; hda = container_of(chip, struct hda_intel, chip); - if (chip->disabled || hda->init_failed) + if (chip->disabled || hda->init_failed || !chip->running) return 0; bus = azx_bus(chip); @@ -902,7 +902,7 @@ static int azx_resume(struct device *dev) chip = card->private_data; hda = container_of(chip, struct hda_intel, chip); - if (chip->disabled || hda->init_failed) + if (chip->disabled || hda->init_failed || !chip->running) return 0; if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL @@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev) if (!azx_has_pm_runtime(chip)) return 0; - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL - && hda->need_i915_power) { - bus = azx_bus(chip); - snd_hdac_display_power(bus, true); - haswell_set_bclk(hda); - /* toggle codec wakeup bit for STATESTS read */ - snd_hdac_set_codec_wakeup(bus, true); - snd_hdac_set_codec_wakeup(bus, false); + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { + bus = azx_bus(chip); + if (hda->need_i915_power) { + snd_hdac_display_power(bus, true); + haswell_set_bclk(hda); + } else { + /* toggle codec wakeup bit for STATESTS read */ + snd_hdac_set_codec_wakeup(bus, true); + snd_hdac_set_codec_wakeup(bus, false); + } } /* Read STATESTS before controller reset */ @@ -1025,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev) return 0; if (!power_save_controller || !azx_has_pm_runtime(chip) || - azx_bus(chip)->codec_powered) + azx_bus(chip)->codec_powered || !chip->running) return -EBUSY; return 0; @@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = { /* ATI HDMI */ { PCI_DEVICE(0x1002, 0x1308), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0x157a), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, { PCI_DEVICE(0x1002, 0x7919), @@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0xaab0), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaac0), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0xaac8), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaad8), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaae8), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, /* VIA VT8251/VT8237A */ { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 25ccf781fbe7..584a0343ab0c 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec, spec->spdif_present = spdif_present; /* 
SPDIF TX on/off */ - if (spdif_present) - snd_hda_set_pin_ctl(codec, spdif_pin, - spdif_present ? PIN_OUT : 0); + snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0); cs_automute(codec); } diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 95158914cc6c..a97db5fc8a15 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, +{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, @@ -3576,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067"); MODULE_ALIAS("snd-hda-codec-id:10de0070"); MODULE_ALIAS("snd-hda-codec-id:10de0071"); MODULE_ALIAS("snd-hda-codec-id:10de0072"); +MODULE_ALIAS("snd-hda-codec-id:10de007d"); MODULE_ALIAS("snd-hda-codec-id:10de8001"); MODULE_ALIAS("snd-hda-codec-id:11069f80"); MODULE_ALIAS("snd-hda-codec-id:11069f81"); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d35cf506a7db..c456c04e0928 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), - SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), + SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), @@ -5061,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = { { 0x14, 0x90170110 }, { 0x17, 0x40000008 }, { 0x18, 0x411111f0 }, - { 0x19, 0x411111f0 }, + { 0x19, 0x01a1913c }, { 0x1a, 0x411111f0 }, { 0x1b, 0x411111f0 }, { 0x1d, 0x40f89b2d }, @@ -5185,6 +5185,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), + SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -5398,8 +5399,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {0x19, 0x411111f0}, \ {0x1a, 0x411111f0}, \ {0x1b, 0x411111f0}, \ - {0x1d, 0x40700001}, \ - {0x1e, 0x411111f0}, \ {0x21, 0x02211020} #define ALC282_STANDARD_PINS \ @@ -5430,8 +5429,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {0x15, 0x0221401f}, \ {0x1a, 0x411111f0}, \ {0x1b, 0x411111f0}, \ - {0x1d, 0x40700001}, \ - {0x1e, 0x411111f0} + {0x1d, 0x40700001} #define ALC298_STANDARD_PINS \ {0x18, 0x411111f0}, \ @@ -5463,6 +5461,39 @@ static const struct snd_hda_pin_quirk 
alc269_pin_fixup_tbl[] = { {0x1d, 0x40700001}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x40000000}, + {0x14, 0x90170130}, + {0x17, 0x411111f0}, + {0x18, 0x411111f0}, + {0x19, 0x411111f0}, + {0x1a, 0x411111f0}, + {0x1b, 0x01014020}, + {0x1d, 0x4054c029}, + {0x1e, 0x411111f0}, + {0x21, 0x0221103f}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x40000000}, + {0x14, 0x90170150}, + {0x17, 0x411111f0}, + {0x18, 0x411111f0}, + {0x19, 0x411111f0}, + {0x1a, 0x411111f0}, + {0x1b, 0x02011020}, + {0x1d, 0x4054c029}, + {0x1e, 0x411111f0}, + {0x21, 0x0221105f}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x40000000}, + {0x14, 0x90170110}, + {0x17, 0x411111f0}, + {0x18, 0x411111f0}, + {0x19, 0x411111f0}, + {0x1a, 0x411111f0}, + {0x1b, 0x01014020}, + {0x1d, 0x4054c029}, + {0x1e, 0x411111f0}, + {0x21, 0x0221101f}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x17, 0x90170140}, @@ -5524,10 +5555,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC256_STANDARD_PINS, - {0x13, 0x40000000}), + {0x13, 0x40000000}, + {0x1d, 0x40700001}, + {0x1e, 0x411111f0}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC256_STANDARD_PINS, + {0x13, 0x411111f0}, + {0x1d, 0x40700001}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, ALC256_STANDARD_PINS, - {0x13, 0x411111f0}), + {0x13, 0x411111f0}, + {0x1d, 0x4077992d}, + {0x1e, 0x411111ff}), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, {0x12, 0x90a60130}, {0x13, 0x40000000}, @@ -5690,35 +5730,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x13, 0x411111f0}, {0x16, 0x01014020}, {0x18, 0x411111f0}, - {0x19, 0x01a19030}), + {0x19, 0x01a19030}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x90a60140}, {0x13, 0x411111f0}, {0x16, 0x01014020}, {0x18, 0x02a19031}, - {0x19, 0x01a1903e}), + {0x19, 0x01a1903e}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x90a60140}, {0x13, 0x411111f0}, {0x16, 0x411111f0}, {0x18, 0x411111f0}, - {0x19, 0x411111f0}), + {0x19, 0x411111f0}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x40000000}, {0x13, 0x90a60140}, {0x16, 0x21014020}, {0x18, 0x411111f0}, - {0x19, 0x21a19030}), + {0x19, 0x21a19030}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x40000000}, {0x13, 0x90a60140}, {0x16, 0x411111f0}, {0x18, 0x411111f0}, - {0x19, 0x411111f0}), + {0x19, 0x411111f0}, + {0x1e, 0x411111f0}), + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC292_STANDARD_PINS, + {0x12, 0x40000000}, + {0x13, 0x90a60140}, + {0x16, 0x21014020}, + {0x18, 0x411111f0}, + {0x19, 0x21a19030}, + {0x1e, 0x411111ff}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x12, 0x90a60130}, diff --git a/sound/pci/hda/patch_sigmatel.c 
b/sound/pci/hda/patch_sigmatel.c index dcc7fe91244c..9d947aef2c8b 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = { SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a, "HP Mini", STAC_92HD83XXX_HP_LED), SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP), - SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91, + /* match both for 0xfa91 and 0xfa93 */ + SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91, "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD), {} /* terminator */ }; diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c index 477e13d30971..e7ba557979cb 100644 --- a/sound/soc/codecs/pcm1681.c +++ b/sound/soc/codecs/pcm1681.c @@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec) if (val != -1) { regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, - PCM1681_DEEMPH_RATE_MASK, val); + PCM1681_DEEMPH_RATE_MASK, val << 3); enable = 1; } else enable = 0; diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 9ce311e088fc..e9cc3aae5366 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -2943,6 +2943,9 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645) { int val, btn_type, gpio_state = 0, report = 0; + if (!rt5645->codec) + return -EINVAL; + switch (rt5645->pdata.jd_mode) { case 0: /* Not using rt5645 JD */ if (rt5645->gpiod_hp_det) { diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index bd7a344bf8c5..1c317de26176 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -275,7 +275,7 @@ #define SGTL5000_BIAS_CTRL_MASK 0x000e #define SGTL5000_BIAS_CTRL_SHIFT 1 #define SGTL5000_BIAS_CTRL_WIDTH 3 -#define SGTL5000_SMALL_POP 0 +#define SGTL5000_SMALL_POP 1 /* * SGTL5000_CHIP_MIC_CTRL diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c index 938d2cb6d78b..84a4f5ad8064 100644 --- a/sound/soc/codecs/ssm4567.c +++ b/sound/soc/codecs/ssm4567.c @@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) if (invert_fclk) ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC; - return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1); + return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, + SSM4567_SAI_CTRL_1_BCLK | + SSM4567_SAI_CTRL_1_FSYNC | + SSM4567_SAI_CTRL_1_LJ | + SSM4567_SAI_CTRL_1_TDM | + SSM4567_SAI_CTRL_1_PDM, + ctrl1); } static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable) diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index c7647e066cfd..c0b940e2019f 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, sub *= 100000; do_div(sub, freq); - if (sub < savesub) { + if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) { baudrate = tmprate; savesub = sub; pm = i; diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile index 3853ec2ddbc7..6de5d5cd3280 100644 --- a/sound/soc/intel/Makefile +++ b/sound/soc/intel/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/ obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/ # Machine support -obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ +obj-$(CONFIG_SND_SOC) += boards/ diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c index 620da1d1b9e3..0e0e4d9c021f 100644 --- a/sound/soc/intel/atom/sst/sst_drv_interface.c +++ 
b/sound/soc/intel/atom/sst/sst_drv_interface.c @@ -42,6 +42,11 @@ #define MIN_FRAGMENT_SIZE (50 * 1024) #define MAX_FRAGMENT_SIZE (1024 * 1024) #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1) +#ifdef CONFIG_PM +#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count)) +#else +#define GET_USAGE_COUNT(dev) 1 +#endif int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id) { @@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state) int ret = 0; int usage_count = 0; -#ifdef CONFIG_PM - usage_count = atomic_read(&dev->power.usage_count); -#else - usage_count = 1; -#endif - if (state == true) { ret = pm_runtime_get_sync(dev); - + usage_count = GET_USAGE_COUNT(dev); dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count); if (ret < 0) { dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret); @@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state) } } } else { + usage_count = GET_USAGE_COUNT(dev); dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count); return sst_pm_runtime_put(ctx); } diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c index d604ee80eda4..70f832114a5a 100644 --- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c +++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c @@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = { {"Headphone", NULL, "HPR"}, {"Ext Spk", NULL, "SPKL"}, {"Ext Spk", NULL, "SPKR"}, - {"AIF1 Playback", NULL, "ssp2 Tx"}, + {"HiFi Playback", NULL, "ssp2 Tx"}, {"ssp2 Tx", NULL, "codec_out0"}, {"ssp2 Tx", NULL, "codec_out1"}, {"codec_in0", NULL, "ssp2 Rx" }, {"codec_in1", NULL, "ssp2 Rx" }, - {"ssp2 Rx", NULL, "AIF1 Capture"}, + {"ssp2 Rx", NULL, "HiFi Capture"}, }; static const struct snd_kcontrol_new cht_mc_controls[] = { diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c index 4d44b5803e55..2d2536af141f 100644 --- a/sound/soc/mediatek/mt8173-max98090.c +++ b/sound/soc/mediatek/mt8173-max98090.c @@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = { .name = "MAX98090 Playback", .stream_name = "MAX98090 Playback", .cpu_dai_name = "DL1", - .platform_name = "11220000.mt8173-afe-pcm", .codec_name = "snd-soc-dummy", .codec_dai_name = "snd-soc-dummy-dai", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, @@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = { .name = "MAX98090 Capture", .stream_name = "MAX98090 Capture", .cpu_dai_name = "VUL", - .platform_name = "11220000.mt8173-afe-pcm", .codec_name = "snd-soc-dummy", .codec_dai_name = "snd-soc-dummy-dai", .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, @@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = { { .name = "Codec", .cpu_dai_name = "I2S", - .platform_name = "11220000.mt8173-afe-pcm", .no_pcm = 1, .codec_dai_name = "HiFi", .init = mt8173_max98090_init, @@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = { static int mt8173_max98090_dev_probe(struct platform_device *pdev) { struct snd_soc_card *card = &mt8173_max98090_card; - struct device_node *codec_node; + struct device_node *codec_node, *platform_node; int ret, i; + platform_node = of_parse_phandle(pdev->dev.of_node, + "mediatek,platform", 0); + if (!platform_node) { + dev_err(&pdev->dev, "Property 'platform' missing or invalid\n"); + return -EINVAL; + } + for (i = 0; i < card->num_links; i++) { + if 
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 4d44b5803e55..2d2536af141f 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
		.name = "MAX98090 Playback",
		.stream_name = "MAX98090 Playback",
		.cpu_dai_name = "DL1",
-		.platform_name = "11220000.mt8173-afe-pcm",
		.codec_name = "snd-soc-dummy",
		.codec_dai_name = "snd-soc-dummy-dai",
		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
		.name = "MAX98090 Capture",
		.stream_name = "MAX98090 Capture",
		.cpu_dai_name = "VUL",
-		.platform_name = "11220000.mt8173-afe-pcm",
		.codec_name = "snd-soc-dummy",
		.codec_dai_name = "snd-soc-dummy-dai",
		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
	{
		.name = "Codec",
		.cpu_dai_name = "I2S",
-		.platform_name = "11220000.mt8173-afe-pcm",
		.no_pcm = 1,
		.codec_dai_name = "HiFi",
		.init = mt8173_max98090_init,
@@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = {
static int mt8173_max98090_dev_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &mt8173_max98090_card;
-	struct device_node *codec_node;
+	struct device_node *codec_node, *platform_node;
	int ret, i;

+	platform_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,platform", 0);
+	if (!platform_node) {
+		dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < card->num_links; i++) {
+		if (mt8173_max98090_dais[i].platform_name)
+			continue;
+		mt8173_max98090_dais[i].platform_of_node = platform_node;
+	}
+
	codec_node = of_parse_phandle(pdev->dev.of_node,
				      "mediatek,audio-codec", 0);
	if (!codec_node) {
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 094055323059..6f52eca05e26 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
		.name = "rt5650_rt5676 Playback",
		.stream_name = "rt5650_rt5676 Playback",
		.cpu_dai_name = "DL1",
-		.platform_name = "11220000.mt8173-afe-pcm",
		.codec_name = "snd-soc-dummy",
		.codec_dai_name = "snd-soc-dummy-dai",
		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
		.name = "rt5650_rt5676 Capture",
		.stream_name = "rt5650_rt5676 Capture",
		.cpu_dai_name = "VUL",
-		.platform_name = "11220000.mt8173-afe-pcm",
		.codec_name = "snd-soc-dummy",
		.codec_dai_name = "snd-soc-dummy-dai",
		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
	{
		.name = "Codec",
		.cpu_dai_name = "I2S",
-		.platform_name = "11220000.mt8173-afe-pcm",
		.no_pcm = 1,
		.codecs = mt8173_rt5650_rt5676_codecs,
		.num_codecs = 2,
@@ -209,7 +206,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
-	int ret;
+	struct device_node *platform_node;
+	int i, ret;
+
+	platform_node = of_parse_phandle(pdev->dev.of_node,
+					 "mediatek,platform", 0);
+	if (!platform_node) {
+		dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < card->num_links; i++) {
+		if (mt8173_rt5650_rt5676_dais[i].platform_name)
+			continue;
+		mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
+	}

	mt8173_rt5650_rt5676_codecs[0].of_node =
		of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index cc228db5fb76..9863da73dfe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -1199,6 +1199,8 @@ err_pm_disable:
static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
+	if (!pm_runtime_status_suspended(&pdev->dev))
+		mtk_afe_runtime_suspend(&pdev->dev);
	snd_soc_unregister_component(&pdev->dev);
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3a4a5c0e3f97..0e1e69c7abd5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1716,6 +1716,7 @@ card_probe_error:
	if (card->remove)
		card->remove(card);

+	snd_soc_dapm_free(&card->dapm);
	soc_cleanup_card_debugfs(card);
	snd_card_free(card->snd_card);
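The mtk-afe-pcm.c hunk above deals with a common remove-path pitfall: after pm_runtime_disable() the runtime-suspend callback no longer runs on its own, so the driver suspends the hardware by hand if the device was still active. A sketch of that pattern, with a hypothetical foo_runtime_suspend() standing in for the driver's real callback:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* ... gate clocks, save state, etc. ... */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	/* If the device was never runtime-suspended, shut it down manually. */
	if (!pm_runtime_status_suspended(&pdev->dev))
		foo_runtime_suspend(&pdev->dev);
	return 0;
}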
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index aa327c92480c..e0de8072c514 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -358,9 +358,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
		data->widget = snd_soc_dapm_new_control_unlocked(widget->dapm,
			&template);
+		kfree(name);
		if (!data->widget) {
			ret = -ENOMEM;
-			goto err_name;
+			goto err_data;
		}
	}
	break;
@@ -389,11 +390,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
		data->value = template.on_val;

-		data->widget = snd_soc_dapm_new_control(widget->dapm,
-			&template);
+		data->widget = snd_soc_dapm_new_control_unlocked(
+			widget->dapm, &template);
+		kfree(name);
		if (!data->widget) {
			ret = -ENOMEM;
-			goto err_name;
+			goto err_data;
		}

		snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +410,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,

	return 0;

-err_name:
-	kfree(name);
err_data:
	kfree(data);
	return ret;
@@ -418,8 +418,6 @@ err_data:
static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
{
	struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
-	if (data->widget)
-		kfree(data->widget->name);
	kfree(data->wlist);
	kfree(data);
}
@@ -1952,6 +1950,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
				   size_t count, loff_t *ppos)
{
	struct snd_soc_dapm_widget *w = file->private_data;
+	struct snd_soc_card *card = w->dapm->card;
	char *buf;
	int in, out;
	ssize_t ret;
@@ -1961,6 +1960,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
	if (!buf)
		return -ENOMEM;

+	mutex_lock(&card->dapm_mutex);
+
	/* Supply widgets are not handled by is_connected_{input,output}_ep() */
	if (w->is_supply) {
		in = 0;
@@ -2007,6 +2008,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
					p->sink->name);
	}

+	mutex_unlock(&card->dapm_mutex);
+
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);

	kfree(buf);
@@ -2281,11 +2284,15 @@ static ssize_t dapm_widget_show(struct device *dev,
	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
	int i, count = 0;

+	mutex_lock(&rtd->card->dapm_mutex);
+
	for (i = 0; i < rtd->num_codecs; i++) {
		struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
		count += dapm_widget_show_codec(codec, buf + count);
	}

+	mutex_unlock(&rtd->card->dapm_mutex);
+
	return count;
}
@@ -3334,16 +3341,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
	}

	prefix = soc_dapm_prefix(dapm);
-	if (prefix) {
+	if (prefix)
		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-		if (widget->sname)
-			w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
-					     widget->sname);
-	} else {
+	else
		w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-		if (widget->sname)
-			w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
-	}

	if (w->name == NULL) {
		kfree(w);
		return NULL;
@@ -3792,7 +3793,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
			break;
		}

-		if (!w->sname || !strstr(w->sname, dai_w->name))
+		if (!w->sname || !strstr(w->sname, dai_w->sname))
			continue;

		if (dai_w->id == snd_soc_dapm_dai_in) {
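The debugfs and sysfs hunks in soc-dapm.c above both wrap the widget walk in the card's dapm_mutex so the graph cannot change while it is being printed. A rough sketch of the locking pattern; the helper name and the fields printed are illustrative, not the kernel's own code:

#include <linux/mutex.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Illustrative only: walk the card's widget list under dapm_mutex. */
static ssize_t example_dump_widgets(struct snd_soc_card *card, char *buf)
{
	struct snd_soc_dapm_widget *w;
	ssize_t count = 0;

	mutex_lock(&card->dapm_mutex);
	list_for_each_entry(w, &card->widgets, list)
		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%s: %s\n", w->name, w->power ? "On" : "Off");
	mutex_unlock(&card->dapm_mutex);

	return count;
}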
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d0960683c409..59ac211f8fe7 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -144,7 +144,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
	{SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
		snd_soc_put_strobe, NULL},
	{SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
-		snd_soc_dapm_put_volsw, NULL},
+		snd_soc_dapm_put_volsw, snd_soc_info_volsw},
	{SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
		snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
	{SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -580,27 +580,26 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
}

static int soc_tplg_create_tlv(struct soc_tplg *tplg,
-	struct snd_kcontrol_new *kc, u32 tlv_size)
+	struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_tlv *tplg_tlv)
{
-	struct snd_soc_tplg_ctl_tlv *tplg_tlv;
	struct snd_ctl_tlv *tlv;
+	int size;

-	if (tlv_size == 0)
+	if (tplg_tlv->count == 0)
		return 0;

-	tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos;
-	tplg->pos += tlv_size;
-	tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL);
+	size = ((tplg_tlv->count + (sizeof(unsigned int) - 1)) &
+		~(sizeof(unsigned int) - 1));
+	tlv = kzalloc(sizeof(*tlv) + size, GFP_KERNEL);
	if (tlv == NULL)
		return -ENOMEM;

	dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n",
-		tplg_tlv->numid, tplg_tlv->size);
+		tplg_tlv->numid, size);

	tlv->numid = tplg_tlv->numid;
-	tlv->length = tplg_tlv->size;
-	memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size);
+	tlv->length = size;
+	memcpy(&tlv->tlv[0], tplg_tlv->data, size);
	kc->tlv.p = (void *)tlv;

	return 0;
@@ -773,7 +772,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
		}

		/* create any TLV data */
-		soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size);
+		soc_tplg_create_tlv(tplg, &kc, &mc->tlv);

		/* register control here */
		err = soc_tplg_add_kcontrol(tplg, &kc,
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 98d96e1b17e0..1930c42e1f55 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	zx_i2s->mapbase = res->start;
	zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (!zx_i2s->reg_base) {
+	if (IS_ERR(zx_i2s->reg_base)) {
		dev_err(&pdev->dev, "ioremap failed!\n");
-		return -EIO;
+		return PTR_ERR(zx_i2s->reg_base);
	}

	writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx296702-spdif.c
index 11a0e46a1156..26265ce4caca 100644
--- a/sound/soc/zte/zx296702-spdif.c
+++ b/sound/soc/zte/zx296702-spdif.c
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	zx_spdif->mapbase = res->start;
	zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (!zx_spdif->reg_base) {
+	if (IS_ERR(zx_spdif->reg_base)) {
		dev_err(&pdev->dev, "ioremap failed!\n");
-		return -EIO;
+		return PTR_ERR(zx_spdif->reg_base);
	}

	zx_spdif_dev_init(zx_spdif->reg_base);
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 1b1a89e80d13..784ceb85b2d9 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
	if (!amd->regs) {
		snd_printk(KERN_ERR
			   "amd7930-%d: Unable to map chip registers.\n", dev);
+		kfree(amd);
		return -EIO;
	}
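The two zx296702 hunks above correct a frequent mistake: devm_ioremap_resource() never returns NULL, it returns an ERR_PTR() value on failure, so the result has to be checked with IS_ERR() and propagated with PTR_ERR(). The canonical probe pattern, sketched with a hypothetical foo_priv structure:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct foo_priv {
	void __iomem *reg_base;
};

static int foo_probe_mmio(struct platform_device *pdev, struct foo_priv *priv)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);	/* e.g. -EBUSY or -ENOMEM */

	return 0;
}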
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index e5000da9e9d7..6a803eff87f7 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
	{ 0 }
};

+/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
+static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
+static struct usbmix_name_map bose_companion5_map[] = {
+	{ 3, NULL, .dB = &bose_companion5_dB },
+	{ 0 }	/* terminator */
+};
+
+/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
+static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
+static struct usbmix_name_map dragonfly_1_2_map[] = {
+	{ 7, NULL, .dB = &dragonfly_1_2_dB },
+	{ 0 }	/* terminator */
+};
+
/*
 * Control map entries
 */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
		.id = USB_ID(0x25c4, 0x0003),
		.map = scms_usb3318_map,
	},
+	{
+		/* Bose Companion 5 */
+		.id = USB_ID(0x05a7, 0x1020),
+		.map = bose_companion5_map,
+	},
+	{
+		/* Dragonfly DAC 1.2 */
+		.id = USB_ID(0x21b4, 0x0081),
+		.map = dragonfly_1_2_map,
+	},
	{ 0 } /* terminator */
};
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 7f0c756993af..3d7dc6afc3f8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
		if (res > 0) {
			atomic_set(&requeued, 1);
			break;
-		} else if (res > 0) {
+		} else if (res < 0) {
			error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
			ret = RET_ERROR;
			break;
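In the futex selftest hunk above, the original code tested res > 0 in both branches, which made the error leg unreachable; the fix distinguishes a positive return (waiters were requeued) from a negative one (failure). A stand-alone sketch of the corrected pattern, with a hypothetical handle_result() helper that is not part of the selftest:

#include <stdio.h>

/* Hypothetical helper: >0 means progress, <0 means error, 0 means retry. */
static int handle_result(long res)
{
	if (res > 0) {
		printf("requeued %ld waiter(s)\n", res);
		return 0;
	} else if (res < 0) {
		fprintf(stderr, "FUTEX_CMP_REQUEUE_PI failed\n");
		return -1;
	}
	return 0;	/* res == 0: nothing woken yet, caller may retry */
}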