author     Chris Mason <clm@fb.com>	2014-12-03 03:42:03 +0100
committer  Chris Mason <clm@fb.com>	2014-12-03 03:42:03 +0100
commit     9627aeee3e203e30679549e4962633698a6bf87f (patch)
tree       30ee313a7049bf3fcc17e346df5737e967fd9a95
parent     Btrfs: make get_caching_control unconditionally return the ctl (diff)
parent     Btrfs, replace: enable dev-replace for raid56 (diff)
download   linux-9627aeee3e203e30679549e4962633698a6bf87f.tar.xz
           linux-9627aeee3e203e30679549e4962633698a6bf87f.zip
Merge branch 'raid56-scrub-replace' of git://github.com/miaoxie/linux-btrfs into for-linus
212 files changed, 2916 insertions, 910 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
index ce6a1a072028..8a3c40829899 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
@@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt parents.
 Example:
 	interrupts-extended = <&intc1 5 1>, <&intc2 1 0>;
 
-A device node may contain either "interrupts" or "interrupts-extended", but not
-both. If both properties are present, then the operating system should log an
-error and use only the data in "interrupts".
-
 2) Interrupt controller nodes
 -----------------------------
 
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt
index 41aeed38926d..f8fbe9af7b2f 100644
--- a/Documentation/devicetree/bindings/pci/pci.txt
+++ b/Documentation/devicetree/bindings/pci/pci.txt
@@ -7,3 +7,14 @@ And for the interrupt mapping part:
 
 Open Firmware Recommended Practice: Interrupt Mapping
 http://www.openfirmware.org/1275/practice/imap/imap0_9d.pdf
+
+Additionally to the properties specified in the above standards a host bridge
+driver implementation may support the following properties:
+
+- linux,pci-domain:
+   If present this property assigns a fixed PCI domain number to a host bridge,
+   otherwise an unstable (across boots) unique number will be assigned.
+   It is required to either not set this property at all or set it for all
+   host bridges in the system, otherwise potentially conflicting domain numbers
+   may be assigned to root buses behind different host bridges.  The domain
+   number for each host bridge in the system must be unique.
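[Editor's sketch] A hedged illustration of how a host bridge driver might consume the new "linux,pci-domain" property. It assumes the of_get_pci_domain_nr() helper that was introduced alongside this binding; the fallback counter is purely illustrative and is exactly the "unstable across boots" numbering the binding text warns about.

	#include <linux/of.h>
	#include <linux/of_pci.h>

	/* Illustrative only: use the fixed domain from "linux,pci-domain"
	 * when firmware provides one, otherwise fall back to a running
	 * counter (hypothetical allocator, not kernel code). */
	static int example_assign_domain(struct device_node *np)
	{
		static int next_domain;	/* hypothetical fallback */
		int domain = of_get_pci_domain_nr(np);

		if (domain < 0)		/* property absent or malformed */
			domain = next_domain++;

		return domain;
	}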
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
index a186181c402b..51b943cc9770 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-TZ1090-PDC's pin configuration nodes act as a container for an abitrary number
+TZ1090-PDC's pin configuration nodes act as a container for an arbitrary number
 of subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
index 4b27c99f7f9d..49d0e6050940 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-TZ1090's pin configuration nodes act as a container for an abitrary number of
+TZ1090's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
index daa768956069..ac4da9fe07bd 100644
--- a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Lantiq's pin configuration nodes act as a container for an abitrary number of
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those group(s), and two pin configuration parameters:
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
index b5469db1d7ad..e89b4677567d 100644
--- a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Lantiq's pin configuration nodes act as a container for an abitrary number of
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those group(s), and two pin configuration parameters:
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
index 61e73cde9ae9..3c8ce28baad6 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Tegra's pin configuration nodes act as a container for an abitrary number of
+Tegra's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
index c596a6ad3285..5f55be59d914 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
@@ -13,7 +13,7 @@ Optional properties:
 Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices.
 
-SiRFprimaII's pinmux nodes act as a container for an abitrary number of subnodes.
+SiRFprimaII's pinmux nodes act as a container for an arbitrary number of subnodes.
 Each of these subnodes represents some desired configuration for a group of pins.
 
 Required subnode-properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
index b4480d5c3aca..458615596946 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
@@ -32,7 +32,7 @@ Required properties:
 Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices.
 
-SPEAr's pinmux nodes act as a container for an abitrary number of subnodes. Each
+SPEAr's pinmux nodes act as a container for an arbitrary number of subnodes. Each
 of these subnodes represents muxing for a pin, a group, or a list of pins or
 groups.
 
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
index 2fb90b37aa09..a7bde64798c7 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
index ffafa1990a30..c4ea61ac56f2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
@@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-The pin configuration nodes act as a container for an abitrary number of
+The pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
index e33e4dcdce79..6e88e91feb11 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
index 93b7de91b9f6..eb8d8aa41f20 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
@@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-The pin configuration nodes act as a container for an abitrary number of
+The pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
index d2ea80dc43eb..e4d6a9d20f7d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 723999d73744..a344ec2713a5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -34,6 +34,7 @@ chipidea	Chipidea, Inc
 chrp	Common Hardware Reference Platform
 chunghwa	Chunghwa Picture Tubes Ltd.
 cirrus	Cirrus Logic, Inc.
+cnm	Chips&Media, Inc.
 cortina	Cortina Systems, Inc.
 crystalfontz	Crystalfontz America, Inc.
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
@@ -92,6 +93,7 @@ maxim	Maxim Integrated Products
 mediatek	MediaTek Inc.
 micrel	Micrel Inc.
 microchip	Microchip Technology Inc.
+micron	Micron Technology Inc.
 mitsubishi	Mitsubishi Electric Corporation
 mosaixtech	Mosaix Technologies, Inc.
 moxa	Moxa
@@ -127,6 +129,7 @@ renesas	Renesas Electronics Corporation
 ricoh	Ricoh Co. Ltd.
 rockchip	Fuzhou Rockchip Electronics Co., Ltd
 samsung	Samsung Semiconductor
+sandisk	Sandisk Corporation
 sbs	Smart Battery System
 schindler	Schindler
 seagate	Seagate Technology PLC
@@ -138,7 +141,7 @@ silergy	Silergy Corp.
 sirf	SiRF Technology, Inc.
 sitronix	Sitronix Technology Corporation
 smsc	Standard Microsystems Corporation
-snps 	Synopsys, Inc.
+snps	Synopsys, Inc.
 solidrun	SolidRun
 sony	Sony Corporation
 spansion	Spansion Inc.
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 530850a72735..a27c950ece61 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -64,7 +64,7 @@ is formed.
 At mount time, the two directories given as mount options "lowerdir" and
 "upperdir" are combined into a merged directory:
 
-  mount -t overlayfs overlayfs -olowerdir=/lower,upperdir=/upper,\
+  mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\
 workdir=/work /merged
 
 The "workdir" needs to be an empty directory on the same filesystem
diff --git a/MAINTAINERS b/MAINTAINERS
index c444907ccd69..0ff630de8a6d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6888,11 +6888,12 @@ F:	drivers/scsi/osd/
 F:	include/scsi/osd_*
 F:	fs/exofs/
 
-OVERLAYFS FILESYSTEM
+OVERLAY FILESYSTEM
 M:	Miklos Szeredi <miklos@szeredi.hu>
-L:	linux-fsdevel@vger.kernel.org
+L:	linux-unionfs@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
 S:	Supported
-F:	fs/overlayfs/*
+F:	fs/overlayfs/
 F:	Documentation/filesystems/overlayfs.txt
 
 P54 WIRELESS DRIVER
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index d46c213a17ad..eed697a6bd6b 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -433,7 +433,7 @@
 		clocks = <&cpg_clocks R8A7740_CLK_S>,
 			 <&cpg_clocks R8A7740_CLK_S>, <&sub_clk>,
 			 <&cpg_clocks R8A7740_CLK_B>,
-			 <&sub_clk>, <&sub_clk>,
+			 <&cpg_clocks R8A7740_CLK_HPP>, <&sub_clk>,
 			 <&cpg_clocks R8A7740_CLK_B>;
 		#clock-cells = <1>;
 		renesas,clock-indices = <
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index d0e17733dc1a..e20affe156c1 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -666,9 +666,9 @@
 			#clock-cells = <0>;
 			clock-output-names = "sd2";
 		};
-		sd3_clk: sd3_clk@e615007c {
+		sd3_clk: sd3_clk@e615026c {
 			compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
-			reg = <0 0xe615007c 0 4>;
+			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-output-names = "sd3";
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 543f895d18d3..2e652e2339e9 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -361,6 +361,10 @@
 			clocks = <&ahb1_gates 6>;
 			resets = <&ahb1_rst 6>;
 			#dma-cells = <1>;
+
+			/* DMA controller requires AHB1 clocked from PLL6 */
+			assigned-clocks = <&ahb1_mux>;
+			assigned-clock-parents = <&pll6>;
 		};
 
 		mmc0: mmc@01c0f000 {
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 5c21d216515a..8b7aa0dcdc6e 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -15,6 +15,7 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps65913@58";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uartd;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts
index c7c6825f11fb..38acf78d7815 100644
--- a/arch/arm/boot/dts/tegra114-roth.dts
+++ b/arch/arm/boot/dts/tegra114-roth.dts
@@ -15,6 +15,10 @@
 		linux,initrd-end = <0x82800000>;
 	};
 
+	aliases {
+		serial0 = &uartd;
+	};
+
 	firmware {
 		trusted-foundations {
 			compatible = "tlm,trusted-foundations";
@@ -916,8 +920,6 @@
 			regulator-name = "vddio-sdmmc3";
 			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <3300000>;
-			regulator-always-on;
-			regulator-boot-on;
 		};
 
 		ldousb {
@@ -962,7 +964,7 @@
 	sdhci@78000400 {
 		status = "okay";
 		bus-width = <4>;
-		vmmc-supply = <&vddio_sdmmc3>;
+		vqmmc-supply = <&vddio_sdmmc3>;
 		cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
 		power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>;
 	};
@@ -971,7 +973,6 @@
 	sdhci@78000600 {
 		status = "okay";
 		bus-width = <8>;
-		vmmc-supply = <&vdd_1v8>;
 		non-removable;
 	};
 
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts
index 963662145635..f91c2c9b2f94 100644
--- a/arch/arm/boot/dts/tegra114-tn7.dts
+++ b/arch/arm/boot/dts/tegra114-tn7.dts
@@ -15,6 +15,10 @@
 		linux,initrd-end = <0x82800000>;
 	};
 
+	aliases {
+		serial0 = &uartd;
+	};
+
 	firmware {
 		trusted-foundations {
 			compatible = "tlm,trusted-foundations";
@@ -240,7 +244,6 @@
 	sdhci@78000600 {
 		status = "okay";
 		bus-width = <8>;
-		vmmc-supply = <&vdd_1v8>;
 		non-removable;
 	};
 
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 2ca9c1807f72..222f3b3f4dd5 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -9,13 +9,6 @@
 	compatible = "nvidia,tegra114";
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uarta;
-		serial1 = &uartb;
-		serial2 = &uartc;
-		serial3 = &uartd;
-	};
-
 	host1x@50000000 {
 		compatible = "nvidia,tegra114-host1x", "simple-bus";
 		reg = <0x50000000 0x00028000>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 029c9a021541..51b373ff1065 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@0,7000d000/pmic@40";
 		rtc1 = "/rtc@0,7000e000";
+		serial0 = &uartd;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts
index 7d0784ce4c74..53181d310247 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-big.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@0,7000d000/pmic@40";
 		rtc1 = "/rtc@0,7000e000";
+		serial0 = &uarta;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 13008858e967..5c3f7813360d 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@0,7000d000/pmic@40";
 		rtc1 = "/rtc@0,7000e000";
+		serial0 = &uarta;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 478c555ebd96..df2b06b29985 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -286,7 +286,7 @@
 	 * the APB DMA based serial driver, the comptible is
 	 * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
 	 */
-	serial@0,70006000 {
+	uarta: serial@0,70006000 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006000 0x0 0x40>;
 		reg-shift = <2>;
@@ -299,7 +299,7 @@
 		status = "disabled";
 	};
 
-	serial@0,70006040 {
+	uartb: serial@0,70006040 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006040 0x0 0x40>;
 		reg-shift = <2>;
@@ -312,7 +312,7 @@
 		status = "disabled";
 	};
 
-	serial@0,70006200 {
+	uartc: serial@0,70006200 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006200 0x0 0x40>;
 		reg-shift = <2>;
@@ -325,7 +325,7 @@
 		status = "disabled";
 	};
 
-	serial@0,70006300 {
+	uartd: serial@0,70006300 {
 		compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
 		reg = <0x0 0x70006300 0x0 0x40>;
 		reg-shift = <2>;
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index a37279af687c..b926a07b9443 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps6586x@34";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uartd;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra20-iris-512.dts b/arch/arm/boot/dts/tegra20-iris-512.dts
index 8cfb83f42e1f..1dd7d7bfdfcc 100644
--- a/arch/arm/boot/dts/tegra20-iris-512.dts
+++ b/arch/arm/boot/dts/tegra20-iris-512.dts
@@ -6,6 +6,11 @@
 	model = "Toradex Colibri T20 512MB on Iris";
 	compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20";
 
+	aliases {
+		serial0 = &uarta;
+		serial1 = &uartd;
+	};
+
 	host1x@50000000 {
 		hdmi@54280000 {
 			status = "okay";
diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts
index 1b7c56b33aca..9b87526ab0b7 100644
--- a/arch/arm/boot/dts/tegra20-medcom-wide.dts
+++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts
@@ -6,6 +6,10 @@
 	model = "Avionic Design Medcom-Wide board";
 	compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20";
 
+	aliases {
+		serial0 = &uartd;
+	};
+
 	pwm@7000a000 {
 		status = "okay";
 	};
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index d4438e30de45..ed7e1009326c 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -10,6 +10,8 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps6586x@34";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uarta;
+		serial1 = &uartc;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index a1d4bf9895d7..ea282c7c0ca5 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps6586x@34";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uartd;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 80e7d386ce34..13d4e6185275 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -7,6 +7,7 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps6586x@34";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uartd;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 5ad87979ab13..d99af4ef9c64 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@7000c500/rtc@56";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uarta;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index ca8484cccddc..04c58e9ca490 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps6586x@34";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uartd;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index 1843725785c9..340d81108df1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -10,6 +10,7 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/max8907@3c";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uarta;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 3b374c49d04d..8acf5d85c99d 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -9,14 +9,6 @@
 	compatible = "nvidia,tegra20";
 	interrupt-parent = <&intc>;
 
-	aliases {
-		serial0 = &uarta;
-		serial1 = &uartb;
-		serial2 = &uartc;
-		serial3 = &uartd;
-		serial4 = &uarte;
-	};
-
 	host1x@50000000 {
 		compatible = "nvidia,tegra20-host1x", "simple-bus";
 		reg = <0x50000000 0x00024000>;
diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts
index 45d40f024585..6236bdecb48b 100644
--- a/arch/arm/boot/dts/tegra30-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts
@@ -11,6 +11,10 @@
 		rtc0 = "/i2c@7000c000/rtc@68";
 		rtc1 = "/i2c@7000d000/tps65911@2d";
 		rtc2 = "/rtc@7000e000";
+		serial0 = &uarta;
+		serial1 = &uartb;
+		serial2 = &uartc;
+		serial3 = &uartd;
 	};
 
 	pcie-controller@00003000 {
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index cee8f2246fdb..6b157eeabcc5 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -9,6 +9,7 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps65911@2d";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uarta;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 206379546244..a1b682ea01bd 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -30,6 +30,8 @@
 	aliases {
 		rtc0 = "/i2c@7000d000/tps65911@2d";
 		rtc1 = "/rtc@7000e000";
+		serial0 = &uarta;
+		serial1 = &uartc;
 	};
 
 	memory {
diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
index 7793abd5bef1..4d3ddc585641 100644
--- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
@@ -10,6 +10,9 @@
 		rtc0 = "/i2c@7000c000/rtc@68";
 		rtc1 = "/i2c@7000d000/tps65911@2d";
 		rtc2 = "/rtc@7000e000";
+		serial0 = &uarta;
+		serial1 = &uartb;
+		serial2 = &uartd;
 	};
 
 	host1x@50000000 {
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index aa6ccea13d30..b270b9e3d455 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -9,14 +9,6 @@
 	compatible = "nvidia,tegra30";
 	interrupt-parent = <&intc>;
 
-	aliases {
-		serial0 = &uarta;
-		serial1 = &uartb;
-		serial2 = &uartc;
-		serial3 = &uartd;
-		serial4 = &uarte;
-	};
-
 	pcie-controller@00003000 {
 		compatible = "nvidia,tegra30-pcie";
 		device_type = "pci";
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 3487046d8a78..9d7a32f93fcf 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -217,6 +217,7 @@ CONFIG_I2C_CADENCE=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_S3C2410=y
 CONFIG_I2C_SIRF=y
 CONFIG_I2C_TEGRA=y
 CONFIG_I2C_ST=y
diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c
index 0794f0426e70..19df9cb30495 100644
--- a/arch/arm/mach-shmobile/clock-r8a7740.c
+++ b/arch/arm/mach-shmobile/clock-r8a7740.c
@@ -455,7 +455,7 @@ enum {
 	MSTP128, MSTP127, MSTP125,
 	MSTP116, MSTP111, MSTP100, MSTP117,
 
-	MSTP230,
+	MSTP230, MSTP229,
 	MSTP222,
 	MSTP218, MSTP217, MSTP216, MSTP214,
 	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
@@ -474,11 +474,12 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */
 	[MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
 	[MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
-	[MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */
+	[MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */
 	[MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */
 	[MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
 	[MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */
+	[MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 29, 0), /* INTCA */
 	[MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */
 	[MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
 	[MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
@@ -575,6 +576,10 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]),
 	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]),
 	CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP222]),
+	CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP229]),
+	CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP229]),
+	CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP229]),
+	CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP229]),
 	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]),
 	CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP230]),
 
diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c
index 126ddafad526..f62265200592 100644
--- a/arch/arm/mach-shmobile/clock-r8a7790.c
+++ b/arch/arm/mach-shmobile/clock-r8a7790.c
@@ -68,7 +68,7 @@
 
 #define SDCKCR		0xE6150074
 #define SD2CKCR		0xE6150078
-#define SD3CKCR		0xE615007C
+#define SD3CKCR		0xE615026C
 #define MMC0CKCR	0xE6150240
 #define MMC1CKCR	0xE6150244
 #define SSPCKCR		0xE6150248
 
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index b7bd8e509668..328657d011d5 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -26,6 +26,7 @@
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include <linux/input.h>
+#include <linux/i2c/i2c-sh_mobile.h>
 #include <linux/io.h>
 #include <linux/serial_sci.h>
 #include <linux/sh_dma.h>
@@ -192,11 +193,18 @@ static struct resource i2c4_resources[] = {
 	},
 };
 
+static struct i2c_sh_mobile_platform_data i2c_platform_data = {
+	.clks_per_count	= 2,
+};
+
 static struct platform_device i2c0_device = {
 	.name		= "i2c-sh_mobile",
 	.id		= 0,
 	.resource	= i2c0_resources,
 	.num_resources	= ARRAY_SIZE(i2c0_resources),
+	.dev		= {
+		.platform_data	= &i2c_platform_data,
+	},
 };
 
 static struct platform_device i2c1_device = {
@@ -204,6 +212,9 @@ static struct platform_device i2c1_device = {
 	.id		= 1,
 	.resource	= i2c1_resources,
 	.num_resources	= ARRAY_SIZE(i2c1_resources),
+	.dev		= {
+		.platform_data	= &i2c_platform_data,
+	},
 };
 
static struct platform_device i2c2_device = {
@@ -211,6 +222,9 @@ static struct platform_device i2c2_device = {
 	.id		= 2,
 	.resource	= i2c2_resources,
 	.num_resources	= ARRAY_SIZE(i2c2_resources),
+	.dev		= {
+		.platform_data	= &i2c_platform_data,
+	},
 };
 
 static struct platform_device i2c3_device = {
@@ -218,6 +232,9 @@ static struct platform_device i2c3_device = {
 	.id		= 3,
 	.resource	= i2c3_resources,
 	.num_resources	= ARRAY_SIZE(i2c3_resources),
+	.dev		= {
+		.platform_data	= &i2c_platform_data,
+	},
 };
 
 static struct platform_device i2c4_device = {
@@ -225,6 +242,9 @@ static struct platform_device i2c4_device = {
 	.id		= 4,
 	.resource	= i2c4_resources,
 	.num_resources	= ARRAY_SIZE(i2c4_resources),
+	.dev		= {
+		.platform_data	= &i2c_platform_data,
+	},
 };
 
 static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e194f957ca8c..fdbff44e5482 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,9 +20,15 @@
 #define WORD_INSN ".word"
 #endif
 
+#ifdef CONFIG_CPU_MICROMIPS
+#define NOP_INSN "nop32"
+#else
+#define NOP_INSN "nop"
+#endif
+
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm_volatile_goto("1:\tnop\n\t"
+	asm_volatile_goto("1:\t" NOP_INSN "\n\t"
 		"nop\n\t"
 		".pushsection __jump_table,  \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 7d28f95b0512..6d69332f21ec 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -41,10 +41,8 @@
 #define cpu_has_mcheck		0
 #define cpu_has_mdmx		0
 #define cpu_has_mips16		0
-#define cpu_has_mips32r1	0
 #define cpu_has_mips32r2	0
 #define cpu_has_mips3d		0
-#define cpu_has_mips64r1	0
 #define cpu_has_mips64r2	0
 #define cpu_has_mipsmt		0
 #define cpu_has_prefetch	0
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index a10951090234..133678ab4eb8 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -301,7 +301,8 @@ do {									\
 			__get_kernel_common((x), size, __gu_ptr);	\
 		else							\
 			__get_user_common((x), size, __gu_ptr);	\
-	}								\
+	} else								\
+		(x) = 0;						\
 									\
 	__gu_err;							\
 })
@@ -316,6 +317,7 @@ do {									\
 	"	.insn						\n"	\
 	"	.section .fixup,\"ax\"				\n"	\
 	"3:	li	%0, %4					\n"	\
+	"	move	%1, $0					\n"	\
 	"	j	2b					\n"	\
 	"	.previous					\n"	\
 	"	.section __ex_table,\"a\"			\n"	\
@@ -630,6 +632,7 @@ do {									\
 	"	.insn						\n"	\
 	"	.section .fixup,\"ax\"				\n"	\
 	"3:	li	%0, %4					\n"	\
+	"	move	%1, $0					\n"	\
 	"	j	2b					\n"	\
 	"	.previous					\n"	\
 	"	.section __ex_table,\"a\"			\n"	\
@@ -773,10 +776,11 @@ extern void __put_user_unaligned_unknown(void);
 	"jal\t" #destination "\n\t"
 #endif
 
-#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
-#define DADDI_SCRATCH "$0"
-#else
+#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
+					      defined(CONFIG_CPU_HAS_PREFETCH))
 #define DADDI_SCRATCH "$3"
+#else
+#define DADDI_SCRATCH "$0"
 #endif
 
 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
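[Editor's sketch] The uaccess.h hunks above make a faulting get_user() zero its destination instead of leaving it uninitialized. A minimal illustration of why that matters, assuming an ordinary kernel caller; the helper name is hypothetical:

	#include <linux/uaccess.h>

	/* Illustrative only: "val" lives on the kernel stack.  Before this
	 * fix, a faulting get_user() on MIPS left it holding stale stack
	 * contents, so code that mishandled the error could leak kernel
	 * data to user space.  With the fix, val == 0 on the fault path. */
	static long example_read_flag(const int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))
			return -EFAULT;	/* val is also zeroed on this path */

		return val & 0x1;
	}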
__cpu_name[cpu] = "ICT Loongson-2"; set_elf_platform(cpu, "loongson2f"); + set_isa(c, MIPS_CPU_ISA_III); break; case PRID_REV_LOONGSON3A: c->cputype = CPU_LOONGSON3; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3a"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; case PRID_REV_LOONGSON3B_R1: case PRID_REV_LOONGSON3B_R2: c->cputype = CPU_LOONGSON3; __cpu_name[cpu] = "ICT Loongson-3"; set_elf_platform(cpu, "loongson3b"); + set_isa(c, MIPS_CPU_ISA_M64R1); break; } - set_isa(c, MIPS_CPU_ISA_III); c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC | MIPS_CPU_32FPR; c->tlbsize = 64; + c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; case PRID_IMP_LOONGSON_32: /* Loongson-1 */ decode_configs(c); diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c index 6001610cfe55..dda800e9e731 100644 --- a/arch/mips/kernel/jump_label.c +++ b/arch/mips/kernel/jump_label.c @@ -18,31 +18,53 @@ #ifdef HAVE_JUMP_LABEL -#define J_RANGE_MASK ((1ul << 28) - 1) +/* + * Define parameters for the standard MIPS and the microMIPS jump + * instruction encoding respectively: + * + * - the ISA bit of the target, either 0 or 1 respectively, + * + * - the amount the jump target address is shifted right to fit in the + * immediate field of the machine instruction, either 2 or 1, + * + * - the mask determining the size of the jump region relative to the + * delay-slot instruction, either 256MB or 128MB, + * + * - the jump target alignment, either 4 or 2 bytes. + */ +#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS) +#define J_RANGE_SHIFT (2 - J_ISA_BIT) +#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1) +#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1) void arch_jump_label_transform(struct jump_entry *e, enum jump_label_type type) { + union mips_instruction *insn_p; union mips_instruction insn; - union mips_instruction *insn_p = - (union mips_instruction *)(unsigned long)e->code; - /* Jump only works within a 256MB aligned region. */ - BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK)); + insn_p = (union mips_instruction *)msk_isa16_mode(e->code); + + /* Jump only works within an aligned region its delay slot is in. */ + BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK)); - /* Target must have 4 byte alignment. */ - BUG_ON((e->target & 3) != 0); + /* Target must have the right alignment and ISA must be preserved. */ + BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); if (type == JUMP_LABEL_ENABLE) { - insn.j_format.opcode = j_op; - insn.j_format.target = (e->target & J_RANGE_MASK) >> 2; + insn.j_format.opcode = J_ISA_BIT ? 
+		insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+		insn.j_format.target = e->target >> J_RANGE_SHIFT;
 	} else {
 		insn.word = 0; /* nop */
 	}
 
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	*insn_p = insn;
+	if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
+		insn_p->halfword[0] = insn.word >> 16;
+		insn_p->halfword[1] = insn.word;
+	} else
+		*insn_p = insn;
 
 	flush_icache_range((unsigned long)insn_p,
 			   (unsigned long)insn_p + sizeof(*insn_p));
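[Editor's sketch] The comment block in jump_label.c above explains that a MIPS j instruction can only reach targets inside the region containing its delay slot. A self-contained restatement of that range check for the standard encoding (J_ISA_BIT == 0), under those stated assumptions; names are illustrative, not kernel code:

	#include <stdbool.h>
	#include <stdint.h>

	/* 256MB region mask for the standard 32-bit MIPS j encoding */
	#define EX_J_RANGE_MASK ((1ul << 28) - 1)

	/* code is the address of the j instruction; code + 4 is its delay
	 * slot, which is what the region is taken relative to. */
	static bool ex_j_reachable(uint32_t code, uint32_t target)
	{
		return (target & ~EX_J_RANGE_MASK) ==
		       ((code + 4) & ~EX_J_RANGE_MASK);
	}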
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c17ef80cf65a..5d3238af9b5c 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -503,6 +503,7 @@
 	STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
 .Ldone\@:
 	jr	ra
+	 nop
 	.if __memcpy == 1
 	END(memcpy)
 	.set __memcpy, 0
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 37ed184398c6..42323bcc5d28 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -33,6 +33,7 @@
 
 static struct node_data prealloc__node_data[MAX_NUMNODES];
 unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
+EXPORT_SYMBOL(__node_distances);
 struct node_data *__node_data[MAX_NUMNODES];
 EXPORT_SYMBOL(__node_data);
 
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index fa6ebd4bc9e9..c3917e251f59 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 
 	local_irq_save(flags);
 
+	htw_stop();
 	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
 	write_c0_entryhi(address | pid);
@@ -346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		tlb_write_indexed();
 	}
 	tlbw_use_hazard();
+	htw_start();
 	flush_itlb_vm(vma);
 	local_irq_restore(flags);
 }
@@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 	local_irq_save(flags);
 	/* Save old context and create impossible VPN2 value */
+	htw_stop();
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
 	wired = read_c0_wired();
@@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 
 	write_c0_entryhi(old_ctx);
 	write_c0_pagemask(old_pagemask);
+	htw_start();
 out:
 	local_irq_restore(flags);
 	return ret;
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 6854ed5097d2..83a1dfd8f0e3 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
 			/* This marks the end of the previous function,
 			   which means we overran. */
 			break;
-		stack_size = (unsigned) stack_adjustment;
+		stack_size = (unsigned long) stack_adjustment;
 	} else if (is_ra_save_ins(&ip)) {
 		int ra_slot = ip.i_format.simmediate;
 		if (ra_slot < 0)
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index a95c00f5fb96..a304bcc37e4f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
 }
 
 unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
+EXPORT_SYMBOL(__node_distances);
 
 static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
 {
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index de40b48b460e..da08ed088157 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -361,7 +361,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
 	cascade_data->virq = virt_msir;
 	msi->cascade_array[irq_index] = cascade_data;
 
-	ret = request_irq(virt_msir, fsl_msi_cascade, 0,
+	ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
 			  "fsl-msi-cascade", cascade_data);
 	if (ret) {
 		dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ded8a6774ac9..41a503c15862 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -144,7 +144,7 @@ config INSTRUCTION_DECODER
 
 config PERF_EVENTS_INTEL_UNCORE
 	def_bool y
-	depends on PERF_EVENTS && SUP_SUP_INTEL && PCI
+	depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
 
 config OUTPUT_FORMAT
 	string
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index f48b17df4224..3a52ee0e726d 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -20,7 +20,6 @@
 #define THREAD_SIZE_ORDER	1
 #define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
 
-#define STACKFAULT_STACK 0
 #define DOUBLEFAULT_STACK 1
 #define NMI_STACK 0
 #define DEBUG_STACK 0
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 678205195ae1..75450b2c7be4 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -14,12 +14,11 @@
 #define IRQ_STACK_ORDER 2
 #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
 
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 2
+#define DEBUG_STACK 3
+#define MCE_STACK 4
+#define N_EXCEPTION_STACKS 4  /* hw limit: 7 */
 
 #define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
 #define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 854053889d4d..547e344a6dc6 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -141,7 +141,7 @@ struct thread_info {
 /* Only used for 64 bit */
 #define _TIF_DO_NOTIFY_MASK						\
 	(_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME |	\
-	 _TIF_USER_RETURN_NOTIFY)
+	 _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW							\
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index bc8352e7010a..707adc6549d8 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void);
 
 #ifdef CONFIG_TRACING
 asmlinkage void trace_page_fault(void);
+#define trace_stack_segment stack_segment
 #define trace_divide_error divide_error
 #define trace_bounds bounds
 #define trace_invalid_op invalid_op
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4b4f78c9ba19..cfa9b5b2c27a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 static int __init x86_xsave_setup(char *s)
 {
+	if (strlen(s))
+		return 0;
 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
 	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
 	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index dd9d6190b08d..2ce9051174e6 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -465,6 +465,14 @@ static void mc_bp_resume(void)
 
 	if (uci->valid && uci->mc)
 		microcode_ops->apply_microcode(cpu);
+	else if (!uci->mc)
+		/*
+		 * We might resume and not have applied late microcode but still
+		 * have a newer patch stashed from the early loader. We don't
+		 * have it in uci->mc so we have to load it the same way we're
+		 * applying patches early on the APs.
+		 */
+		load_ucode_ap();
 }
 
 static struct syscore_ops mc_syscore_ops = {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index adf138eac85c..f9ed429d6e4f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
 	.attrs = snbep_uncore_qpi_formats_attr,
 };
 
-#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
-	.init_box	= snbep_uncore_msr_init_box,		\
+#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
 	.disable_box	= snbep_uncore_msr_disable_box,		\
 	.enable_box	= snbep_uncore_msr_enable_box,		\
 	.disable_event	= snbep_uncore_msr_disable_event,	\
 	.enable_event	= snbep_uncore_msr_enable_event,	\
 	.read_counter	= uncore_msr_read_counter
 
+#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT()			\
+	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),			\
+	.init_box	= snbep_uncore_msr_init_box		\
+
 static struct intel_uncore_ops snbep_uncore_msr_ops = {
 	SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
 };
@@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = {
 	.format_group		= &hswep_uncore_cbox_format_group,
 };
 
+/*
+ * Write SBOX Initialization register bit by bit to avoid spurious #GPs
+ */
+static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
+{
+	unsigned msr = uncore_msr_box_ctl(box);
+
+	if (msr) {
+		u64 init = SNBEP_PMON_BOX_CTL_INT;
+		u64 flags = 0;
+		int i;
+
+		for_each_set_bit(i, (unsigned long *)&init, 64) {
+			flags |= (1ULL << i);
+			wrmsrl(msr, flags);
+		}
+	}
+}
+
+static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
+	__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
+	.init_box		= hswep_uncore_sbox_msr_init_box
+};
+
 static struct attribute *hswep_uncore_sbox_formats_attr[] = {
 	&format_attr_event.attr,
 	&format_attr_umask.attr,
@@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = {
 	.event_mask		= HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
 	.box_ctl		= HSWEP_S0_MSR_PMON_BOX_CTL,
 	.msr_offset		= HSWEP_SBOX_MSR_OFFSET,
-	.ops			= &snbep_uncore_msr_ops,
+	.ops			= &hswep_uncore_sbox_msr_ops,
 	.format_group		= &hswep_uncore_sbox_format_group,
 };
 
@@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = {
 	SNBEP_UNCORE_PCI_COMMON_INIT(),
 };
 
+static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
+
+static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
+{
+	struct pci_dev *pdev = box->pci_dev;
+	struct hw_perf_event *hwc = &event->hw;
+	u64 count = 0;
+
+	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
+	pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
+
+	return count;
+}
+
 static struct intel_uncore_ops hswep_uncore_irp_ops = {
 	.init_box	= snbep_uncore_pci_init_box,
 	.disable_box	= snbep_uncore_pci_disable_box,
 	.enable_box	= snbep_uncore_pci_enable_box,
 	.disable_event	= ivbep_uncore_irp_disable_event,
 	.enable_event	= ivbep_uncore_irp_enable_event,
-	.read_counter	= ivbep_uncore_irp_read_counter,
+	.read_counter	= hswep_uncore_irp_read_counter,
 };
 
 static struct intel_uncore_type hswep_uncore_irp = {
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 1abcb50b48ae..ff86f19b5758 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
 		[ DEBUG_STACK-1			]	= "#DB",
 		[ NMI_STACK-1			]	= "NMI",
 		[ DOUBLEFAULT_STACK-1		]	= "#DF",
-		[ STACKFAULT_STACK-1		]	= "#SS",
 		[ MCE_STACK-1			]	= "#MC",
 #if DEBUG_STKSZ > EXCEPTION_STKSZ
 		[ N_EXCEPTION_STACKS ...
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index df088bb03fb3..c0226ab54106 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -828,9 +828,15 @@ ENTRY(native_iret)
 	jnz native_irq_return_ldt
 #endif
 
+.global native_irq_return_iret
 native_irq_return_iret:
+	/*
+	 * This may fault.  Non-paranoid faults on return to userspace are
+	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
+	 * Double-faults due to espfix64 are handled in do_double_fault.
+	 * Other faults here are fatal.
+	 */
 	iretq
-	_ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
 native_irq_return_ldt:
@@ -858,25 +864,6 @@ native_irq_return_ldt:
 	jmp native_irq_return_iret
 #endif
 
-	.section .fixup,"ax"
-bad_iret:
-	/*
-	 * The iret traps when the %cs or %ss being restored is bogus.
-	 * We've lost the original trap vector and error code.
-	 * #GPF is the most likely one to get for an invalid selector.
-	 * So pretend we completed the iret and took the #GPF in user mode.
-	 *
-	 * We are now running with the kernel GS after exception recovery.
-	 * But error_entry expects us to have user GS to match the user %cs,
-	 * so swap back.
-	 */
-	pushq $0
-
-	SWAPGS
-	jmp general_protection
-
-	.previous
-
 /* edi: workmask, edx: work */
 retint_careful:
 	CFI_RESTORE_STATE
@@ -922,37 +909,6 @@ ENTRY(retint_kernel)
 	CFI_ENDPROC
 END(common_interrupt)
 
-	/*
-	 * If IRET takes a fault on the espfix stack, then we
-	 * end up promoting it to a doublefault.  In that case,
-	 * modify the stack to make it look like we just entered
-	 * the #GP handler from user space, similar to bad_iret.
-	 */
-#ifdef CONFIG_X86_ESPFIX64
-	ALIGN
-__do_double_fault:
-	XCPT_FRAME 1 RDI+8
-	movq RSP(%rdi),%rax		/* Trap on the espfix stack? */
-	sarq $PGDIR_SHIFT,%rax
-	cmpl $ESPFIX_PGD_ENTRY,%eax
-	jne do_double_fault		/* No, just deliver the fault */
-	cmpl $__KERNEL_CS,CS(%rdi)
-	jne do_double_fault
-	movq RIP(%rdi),%rax
-	cmpq $native_irq_return_iret,%rax
-	jne do_double_fault		/* This shouldn't happen... */
-	movq PER_CPU_VAR(kernel_stack),%rax
-	subq $(6*8-KERNEL_STACK_OFFSET),%rax	/* Reset to original stack */
-	movq %rax,RSP(%rdi)
-	movq $0,(%rax)			/* Missing (lost) #GP error code */
-	movq $general_protection,RIP(%rdi)
-	retq
-	CFI_ENDPROC
-END(__do_double_fault)
-#else
-# define __do_double_fault do_double_fault
-#endif
-
 /*
  * APIC interrupts.
  */
@@ -1124,7 +1080,7 @@ idtentry overflow do_overflow has_error_code=0
 idtentry bounds do_bounds has_error_code=0
 idtentry invalid_op do_invalid_op has_error_code=0
 idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault __do_double_fault has_error_code=1 paranoid=1
+idtentry double_fault do_double_fault has_error_code=1 paranoid=1
 idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
 idtentry invalid_TSS do_invalid_TSS has_error_code=1
 idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,7 +1245,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
 
 idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
 idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1
+idtentry stack_segment do_stack_segment has_error_code=1
 #ifdef CONFIG_XEN
 idtentry xen_debug do_debug has_error_code=0
 idtentry xen_int3 do_int3 has_error_code=0
@@ -1399,17 +1355,16 @@ error_sti:
 
 	/*
 	 * There are two places in the kernel that can potentially fault with
-	 * usergs. Handle them here. The exception handlers after iret run with
-	 * kernel gs again, so don't set the user space flag. B stepping K8s
-	 * sometimes report an truncated RIP for IRET exceptions returning to
-	 * compat mode. Check for these here too.
+	 * usergs. Handle them here.  B stepping K8s sometimes report a
+	 * truncated RIP for IRET exceptions returning to compat mode.  Check
+	 * for these here too.
 	 */
error_kernelspace:
 	CFI_REL_OFFSET rcx, RCX+8
 	incl %ebx
 	leaq native_irq_return_iret(%rip),%rcx
 	cmpq %rcx,RIP+8(%rsp)
-	je error_swapgs
+	je error_bad_iret
 	movl %ecx,%eax	/* zero extend */
 	cmpq %rax,RIP+8(%rsp)
 	je bstep_iret
@@ -1420,7 +1375,15 @@ error_kernelspace:
 bstep_iret:
 	/* Fix truncated RIP */
 	movq %rcx,RIP+8(%rsp)
-	jmp error_swapgs
+	/* fall through */
+
+error_bad_iret:
+	SWAPGS
+	mov %rsp,%rdi
+	call fixup_bad_iret
+	mov %rax,%rsp
+	decl %ebx	/* Return to usergs */
+	jmp error_sti
 	CFI_ENDPROC
END(error_entry)
 
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 749b0e423419..e510618b2e91 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1484,7 +1484,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
 	 */
 	if (work & _TIF_NOHZ) {
 		user_exit();
-		work &= ~TIF_NOHZ;
+		work &= ~_TIF_NOHZ;
 	}
 
 #ifdef CONFIG_SECCOMP
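[Editor's sketch] The ptrace.c one-liner above fixes a classic flag mixup: TIF_* constants are bit numbers, _TIF_* constants are the corresponding masks. A self-contained illustration with made-up values (not the kernel's actual constants):

	/* Illustrative only: bit index vs. bit mask. */
	#define EX_TIF_NOHZ	19			/* bit index */
	#define EX__TIF_NOHZ	(1UL << EX_TIF_NOHZ)	/* bit mask  */

	unsigned long ex_clear_nohz(unsigned long work)
	{
		/* Buggy form: "work &= ~EX_TIF_NOHZ;" clears the bits of
		 * the *value* 19 (bits 0, 1 and 4) and leaves bit 19 set.
		 * Fixed form, as in the patch above: */
		work &= ~EX__TIF_NOHZ;
		return work;
	}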
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 0d0e922fafc1..de801f22128a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
 DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
 DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
 DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
-#ifdef CONFIG_X86_32
 DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
-#endif
 DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)
 
 #ifdef CONFIG_X86_64
 /* Runs on IST stack */
-dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
-{
-	enum ctx_state prev_state;
-
-	prev_state = exception_enter();
-	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-		       X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
-		preempt_conditional_sti(regs);
-		do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
-		preempt_conditional_cli(regs);
-	}
-	exception_exit(prev_state);
-}
-
 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 {
 	static const char str[] = "double fault";
 	struct task_struct *tsk = current;
+#ifdef CONFIG_X86_ESPFIX64
+	extern unsigned char native_irq_return_iret[];
+
+	/*
+	 * If IRET takes a non-IST fault on the espfix64 stack, then we
+	 * end up promoting it to a doublefault.  In that case, modify
+	 * the stack to make it look like we just entered the #GP
+	 * handler from user space, similar to bad_iret.
+	 */
+	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
+		regs->cs == __KERNEL_CS &&
+		regs->ip == (unsigned long)native_irq_return_iret)
+	{
+		struct pt_regs *normal_regs = task_pt_regs(current);
+
+		/* Fake a #GP(0) from userspace. */
+		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
+		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
+		regs->ip = (unsigned long)general_protection;
+		regs->sp = (unsigned long)&normal_regs->orig_ax;
+		return;
+	}
+#endif
+
 	exception_enter();
 	/* Return not checked because double check cannot be ignored */
 	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
@@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
 	return regs;
 }
 NOKPROBE_SYMBOL(sync_regs);
+
+struct bad_iret_stack {
+	void *error_entry_ret;
+	struct pt_regs regs;
+};
+
+asmlinkage __visible
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
+{
+	/*
+	 * This is called from entry_64.S early in handling a fault
+	 * caused by a bad iret to user mode.  To handle the fault
+	 * correctly, we want move our stack frame to task_pt_regs
+	 * and we want to pretend that the exception came from the
+	 * iret target.
+	 */
+	struct bad_iret_stack *new_stack =
+		container_of(task_pt_regs(current),
+			     struct bad_iret_stack, regs);
+
+	/* Copy the IRET target to the new stack. */
+	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
+
+	/* Copy the remainder of the stack from the current stack. */
+	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
+
+	BUG_ON(!user_mode_vm(&new_stack->regs));
+	return new_stack;
+}
 #endif
 
 /*
@@ -778,7 +815,7 @@ void __init trap_init(void)
 	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
 	set_intr_gate(X86_TRAP_TS, invalid_TSS);
 	set_intr_gate(X86_TRAP_NP, segment_not_present);
-	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
+	set_intr_gate(X86_TRAP_SS, stack_segment);
 	set_intr_gate(X86_TRAP_GP, general_protection);
 	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
 	set_intr_gate(X86_TRAP_MF, coprocessor_error);
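[Editor's sketch] Both memmove(..., 5*8) calls in the traps.c change above relocate the five 64-bit words the CPU pushes for an exception return. A sketch of that frame layout, under the assumption of the standard x86-64 interrupt stack frame described in the SDM; the struct itself is illustrative, not a kernel definition:

	#include <stdint.h>

	/* The hardware-pushed 64-bit return frame that the 5*8-byte
	 * memmove() calls copy to the task's normal pt_regs area. */
	struct ex_iret_frame {
		uint64_t ip;	/* RIP    - where IRET will resume     */
		uint64_t cs;	/* CS     - selector IRET validates    */
		uint64_t flags;	/* RFLAGS                              */
		uint64_t sp;	/* RSP    - user stack pointer         */
		uint64_t ss;	/* SS     - selector a #SS faults on   */
	};	/* sizeof == 5*8, matching memmove(..., 5*8) above */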
+ next if ($file_offset + $mem_size == $offset + $size); + + printf STDERR "file_offset: 0x%lx\n", $file_offset; + printf STDERR "mem_size: 0x%lx\n", $mem_size; + printf STDERR "offset: 0x%lx\n", $offset; + printf STDERR "size: 0x%lx\n", $size; + + die ".bss and .brk are non-contiguous\n"; } } } diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 143ec6ea1468..7db193160766 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -878,7 +878,7 @@ int acpi_dev_suspend_late(struct device *dev) return 0; target_state = acpi_target_system_state(); - wakeup = device_may_wakeup(dev); + wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev); error = acpi_device_wakeup(adev, target_state, wakeup); if (wakeup && error) return error; diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c index efb17c3ee120..f4a9c0058b4d 100644 --- a/drivers/clocksource/sun4i_timer.c +++ b/drivers/clocksource/sun4i_timer.c @@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node) /* Make sure timer is stopped before playing with interrupts */ sun4i_clkevt_time_stop(0); + sun4i_clockevent.cpumask = cpu_possible_mask; + sun4i_clockevent.irq = irq; + + clockevents_config_and_register(&sun4i_clockevent, rate, + TIMER_SYNC_TICKS, 0xffffffff); + ret = setup_irq(irq, &sun4i_timer_irq); if (ret) pr_warn("failed to setup irq %d\n", irq); @@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node) /* Enable timer0 interrupt */ val = readl(timer_base + TIMER_IRQ_EN_REG); writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); - - sun4i_clockevent.cpumask = cpu_possible_mask; - sun4i_clockevent.irq = irq; - - clockevents_config_and_register(&sun4i_clockevent, rate, - TIMER_SYNC_TICKS, 0xffffffff); } CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", sun4i_timer_init); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 4839bfa74a10..19a99743cf52 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -271,7 +271,7 @@ struct pl330_config { #define DMAC_MODE_NS (1 << 0) unsigned int mode; unsigned int data_bus_width:10; /* In number of bits */ - unsigned int data_buf_dep:10; + unsigned int data_buf_dep:11; unsigned int num_chan:4; unsigned int num_peri:6; u32 peri_ns; @@ -2336,7 +2336,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) int burst_len; burst_len = pl330->pcfg.data_bus_width / 8; - burst_len *= pl330->pcfg.data_buf_dep; + burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan; burst_len >>= desc->rqcfg.brst_size; /* src/dst_burst_len can't be more than 16 */ @@ -2459,16 +2459,25 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, /* Select max possible burst size */ burst = pl330->pcfg.data_bus_width / 8; - while (burst > 1) { - if (!(len % burst)) - break; + /* + * Make sure we use a burst size that aligns with all the memcpy + * parameters because our DMA programming algorithm doesn't cope with + * transfers which straddle an entry in the DMA device's MFIFO. + */ + while ((src | dst | len) & (burst - 1)) burst /= 2; - } desc->rqcfg.brst_size = 0; while (burst != (1 << desc->rqcfg.brst_size)) desc->rqcfg.brst_size++; + /* + * If burst size is smaller than bus width then make sure we only + * transfer one at a time to avoid a burst straddling an MFIFO entry.
+ */ + if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) + desc->rqcfg.brst_len = 1; + desc->rqcfg.brst_len = get_burst_len(desc, len); desc->txd.flags = flags; @@ -2732,7 +2741,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) dev_info(&adev->dev, - "Loaded driver for PL330 DMAC-%d\n", adev->periphid); + "Loaded driver for PL330 DMAC-%x\n", adev->periphid); dev_info(&adev->dev, "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 3aa10b328254..91292f5513ff 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -230,30 +230,25 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, readl(pchan->base + DMA_CHAN_CUR_PARA)); } -static inline int convert_burst(u32 maxburst, u8 *burst) +static inline s8 convert_burst(u32 maxburst) { switch (maxburst) { case 1: - *burst = 0; - break; + return 0; case 8: - *burst = 2; - break; + return 2; default: return -EINVAL; } - - return 0; } -static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) +static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) { if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) return -EINVAL; - *width = addr_width >> 1; - return 0; + return addr_width >> 1; } static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, @@ -284,26 +279,25 @@ static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, struct dma_slave_config *config) { u8 src_width, dst_width, src_burst, dst_burst; - int ret; if (!config) return -EINVAL; - ret = convert_burst(config->src_maxburst, &src_burst); - if (ret) - return ret; + src_burst = convert_burst(config->src_maxburst); + if (src_burst) + return src_burst; - ret = convert_burst(config->dst_maxburst, &dst_burst); - if (ret) - return ret; + dst_burst = convert_burst(config->dst_maxburst); + if (dst_burst) + return dst_burst; - ret = convert_buswidth(config->src_addr_width, &src_width); - if (ret) - return ret; + src_width = convert_buswidth(config->src_addr_width); + if (src_width) + return src_width; - ret = convert_buswidth(config->dst_addr_width, &dst_width); - if (ret) - return ret; + dst_width = convert_buswidth(config->dst_addr_width); + if (dst_width) + return dst_width; lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | DMA_CHAN_CFG_SRC_WIDTH(src_width) | @@ -542,11 +536,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( { struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); struct sun6i_vchan *vchan = to_sun6i_vchan(chan); - struct dma_slave_config *sconfig = &vchan->cfg; struct sun6i_dma_lli *v_lli; struct sun6i_desc *txd; dma_addr_t p_lli; - int ret; + s8 burst, width; dev_dbg(chan2dev(chan), "%s; chan: %d, dest: %pad, src: %pad, len: %zu. 
flags: 0x%08lx\n", @@ -565,14 +558,21 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( goto err_txd_free; } - ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); - if (ret) - goto err_dma_free; + v_lli->src = src; + v_lli->dst = dest; + v_lli->len = len; + v_lli->para = NORMAL_WAIT; + burst = convert_burst(8); + width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_LINEAR_MODE | - DMA_CHAN_CFG_SRC_LINEAR_MODE; + DMA_CHAN_CFG_SRC_LINEAR_MODE | + DMA_CHAN_CFG_SRC_BURST(burst) | + DMA_CHAN_CFG_SRC_WIDTH(width) | + DMA_CHAN_CFG_DST_BURST(burst) | + DMA_CHAN_CFG_DST_WIDTH(width); sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); @@ -580,8 +580,6 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( return vchan_tx_prep(&vchan->vc, &txd->vd, flags); -err_dma_free: - dma_pool_free(sdev->pool, v_lli, p_lli); err_txd_free: kfree(txd); return NULL; @@ -915,6 +913,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; sdc->slave.device_control = sun6i_dma_control; sdc->slave.chancnt = NR_MAX_VCHANS; + sdc->slave.copy_align = 4; sdc->slave.dev = &pdev->dev; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1403b01e8216..318ade9bb5af 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1670,15 +1670,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto out_regs; if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_kick_out_vgacon(dev_priv); + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. */ + ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); goto out_gtt; } - ret = i915_kick_out_firmware_fb(dev_priv); + ret = i915_kick_out_vgacon(dev_priv); if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + DRM_ERROR("failed to remove conflicting VGA console\n"); goto out_gtt; } } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index c27b6140bfd1..ad2fd605f76b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -5469,11 +5469,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(_3D_CHICKEN, _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); - /* WaSetupGtModeTdRowDispatch:snb */ - if (IS_SNB_GT1(dev)) - I915_WRITE(GEN6_GT_MODE, - _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); - /* WaDisable_RenderCache_OperationalFlush:snb */ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index f6309bd23e01..b5c73df8e202 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -1256,7 +1256,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) (mode_info->atom_context->bios + data_offset + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; + le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); pt = &ppt->power_tune_table; } else { ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c 
b/drivers/gpu/drm/radeon/radeon_encoders.c index 9a19e52cc655..6b670b0bc47b 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -179,6 +179,9 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, (rdev->pdev->subsystem_vendor == 0x1734) && (rdev->pdev->subsystem_device == 0x1107)) use_bl = false; + /* disable native backlight control on older asics */ + else if (rdev->family < CHIP_R600) + use_bl = false; else use_bl = true; } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 3effa931fce2..10641b7816f4 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; /* * FIXME: Use devattr.max_sge - 2 for max_send_sge as - * work-around for RDMA_READ.. + * work-around for RDMA_READs with ConnectX-2. + * + * Also, still make sure to have at least two SGEs for + * outgoing control PDU responses. */ - attr.cap.max_send_sge = device->dev_attr.max_sge - 2; + attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2); isert_conn->max_sge = attr.cap.max_send_sge; attr.cap.max_recv_sge = 1; @@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device) struct isert_cq_desc *cq_desc; struct ib_device_attr *dev_attr; int ret = 0, i, j; + int max_rx_cqe, max_tx_cqe; dev_attr = &device->dev_attr; ret = isert_query_device(ib_dev, dev_attr); if (ret) return ret; + max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); + max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe); + /* assign function handlers */ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { @@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device) isert_cq_rx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_RX_CQ_LEN, i); + max_rx_cqe, i); if (IS_ERR(device->dev_rx_cq[i])) { ret = PTR_ERR(device->dev_rx_cq[i]); device->dev_rx_cq[i] = NULL; @@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device) isert_cq_tx_callback, isert_cq_event_callback, (void *)&cq_desc[i], - ISER_MAX_TX_CQ_LEN, i); + max_tx_cqe, i); if (IS_ERR(device->dev_tx_cq[i])) { ret = PTR_ERR(device->dev_tx_cq[i]); device->dev_tx_cq[i] = NULL; @@ -803,14 +810,25 @@ wake_up: complete(&isert_conn->conn_wait); } -static void +static int isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) { - struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; + struct isert_conn *isert_conn; + + if (!cma_id->qp) { + struct isert_np *isert_np = cma_id->context; + + isert_np->np_cm_id = NULL; + return -1; + } + + isert_conn = (struct isert_conn *)cma_id->context; isert_conn->disconnect = disconnect; INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); schedule_work(&isert_conn->conn_logout_work); + + return 0; } static int @@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) switch (event->event) { case RDMA_CM_EVENT_CONNECT_REQUEST: ret = isert_connect_request(cma_id, event); + if (ret) + pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", + event->event, ret); break; case RDMA_CM_EVENT_ESTABLISHED: isert_connected_handler(cma_id); @@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) case
RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ disconnect = true; case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ - isert_disconnected_handler(cma_id, disconnect); + ret = isert_disconnected_handler(cma_id, disconnect); break; case RDMA_CM_EVENT_CONNECT_ERROR: default: @@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) break; } - if (ret != 0) { - pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", - event->event, ret); - dump_stack(); - } - return ret; } @@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np) { struct isert_np *isert_np = (struct isert_np *)np->np_context; - rdma_destroy_id(isert_np->np_cm_id); + if (isert_np->np_cm_id) + rdma_destroy_id(isert_np->np_cm_id); np->np_context = NULL; kfree(isert_np); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 7206547c13ce..dc829682701a 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) if (!qp_init) goto out; +retry: ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, ch->rq_size + srp_sq_size, 0); if (IS_ERR(ch->cq)) { @@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) ch->qp = ib_create_qp(sdev->pd, qp_init); if (IS_ERR(ch->qp)) { ret = PTR_ERR(ch->qp); + if (ret == -ENOMEM) { + srp_sq_size /= 2; + if (srp_sq_size >= MIN_SRPT_SQ_SIZE) { + ib_destroy_cq(ch->cq); + goto retry; + } + } printk(KERN_ERR "failed to create_qp ret= %d\n", ret); goto err_destroy_cq; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c9ac06cfe6b7..a5115fb7cf33 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2471,7 +2471,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) bond_slave_state_change(bond); if (BOND_MODE(bond) == BOND_MODE_XOR) bond_update_slave_arr(bond, NULL); - } else if (do_failover) { + } + if (do_failover) { block_netpoll_tx(); bond_select_active_slave(bond); unblock_netpoll_tx(); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 02492d241e4c..2cfe5012e4e5 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -110,7 +110,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, long rate; u64 v64; - /* Use CIA recommended sample points */ + /* Use CiA recommended sample points */ if (bt->sample_point) { sampl_pt = bt->sample_point; } else { @@ -382,7 +382,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) BUG_ON(idx >= priv->echo_skb_max); if (priv->echo_skb[idx]) { - kfree_skb(priv->echo_skb[idx]); + dev_kfree_skb_any(priv->echo_skb[idx]); priv->echo_skb[idx] = NULL; } } diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig index fca5482c09ac..04f20dd39007 100644 --- a/drivers/net/can/m_can/Kconfig +++ b/drivers/net/can/m_can/Kconfig @@ -1,4 +1,5 @@ config CAN_M_CAN + depends on HAS_IOMEM tristate "Bosch M_CAN devices" ---help--- Say Y here if you want support for the Bosch M_CAN controller.
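The can_free_echo_skb() hunk above switches kfree_skb() to dev_kfree_skb_any() because echo skbs may be released from hard-IRQ as well as process context. For orientation, a minimal sketch of the dispatch dev_kfree_skb_any() performs (simplified, not the verbatim net/core/dev.c implementation; the name sketch_kfree_skb_any is hypothetical):

#include <linux/netdevice.h>

/* Simplified sketch: free an skb safely from any calling context. */
static inline void sketch_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);	/* defer the actual free to softirq time */
	else
		dev_kfree_skb(skb);	/* process context: free immediately */
}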
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 10d571eaed85..d7bc462aafdc 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -105,14 +105,36 @@ enum m_can_mram_cfg { MRAM_CFG_NUM, }; +/* Fast Bit Timing & Prescaler Register (FBTP) */ +#define FBTR_FBRP_MASK 0x1f +#define FBTR_FBRP_SHIFT 16 +#define FBTR_FTSEG1_SHIFT 8 +#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT) +#define FBTR_FTSEG2_SHIFT 4 +#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT) +#define FBTR_FSJW_SHIFT 0 +#define FBTR_FSJW_MASK 0x3 + /* Test Register (TEST) */ #define TEST_LBCK BIT(4) /* CC Control Register(CCCR) */ -#define CCCR_TEST BIT(7) -#define CCCR_MON BIT(5) -#define CCCR_CCE BIT(1) -#define CCCR_INIT BIT(0) +#define CCCR_TEST BIT(7) +#define CCCR_CMR_MASK 0x3 +#define CCCR_CMR_SHIFT 10 +#define CCCR_CMR_CANFD 0x1 +#define CCCR_CMR_CANFD_BRS 0x2 +#define CCCR_CMR_CAN 0x3 +#define CCCR_CME_MASK 0x3 +#define CCCR_CME_SHIFT 8 +#define CCCR_CME_CAN 0 +#define CCCR_CME_CANFD 0x1 +#define CCCR_CME_CANFD_BRS 0x2 +#define CCCR_TEST BIT(7) +#define CCCR_MON BIT(5) +#define CCCR_CCE BIT(1) +#define CCCR_INIT BIT(0) +#define CCCR_CANFD 0x10 /* Bit Timing & Prescaler Register (BTP) */ #define BTR_BRP_MASK 0x3ff @@ -204,6 +226,7 @@ enum m_can_mram_cfg { /* Rx Buffer / FIFO Element Size Configuration (RXESC) */ #define M_CAN_RXESC_8BYTES 0x0 +#define M_CAN_RXESC_64BYTES 0x777 /* Tx Buffer Configuration(TXBC) */ #define TXBC_NDTB_OFF 16 @@ -211,6 +234,7 @@ enum m_can_mram_cfg { /* Tx Buffer Element Size Configuration(TXESC) */ #define TXESC_TBDS_8BYTES 0x0 +#define TXESC_TBDS_64BYTES 0x7 /* Tx Event FIFO Configuration (TXEFC) */ #define TXEFC_EFS_OFF 16 @@ -219,11 +243,11 @@ enum m_can_mram_cfg { /* Message RAM Configuration (in bytes) */ #define SIDF_ELEMENT_SIZE 4 #define XIDF_ELEMENT_SIZE 8 -#define RXF0_ELEMENT_SIZE 16 -#define RXF1_ELEMENT_SIZE 16 +#define RXF0_ELEMENT_SIZE 72 +#define RXF1_ELEMENT_SIZE 72 #define RXB_ELEMENT_SIZE 16 #define TXE_ELEMENT_SIZE 8 -#define TXB_ELEMENT_SIZE 16 +#define TXB_ELEMENT_SIZE 72 /* Message RAM Elements */ #define M_CAN_FIFO_ID 0x0 @@ -231,11 +255,17 @@ enum m_can_mram_cfg { #define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) /* Rx Buffer Element */ +/* R0 */ #define RX_BUF_ESI BIT(31) #define RX_BUF_XTD BIT(30) #define RX_BUF_RTR BIT(29) +/* R1 */ +#define RX_BUF_ANMF BIT(31) +#define RX_BUF_EDL BIT(21) +#define RX_BUF_BRS BIT(20) /* Tx Buffer Element */ +/* R0 */ #define TX_BUF_XTD BIT(30) #define TX_BUF_RTR BIT(29) @@ -296,6 +326,7 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv, if (enable) { /* enable m_can configuration */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); + udelay(5); /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); } else { @@ -326,41 +357,67 @@ static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) m_can_write(priv, M_CAN_ILE, 0x0); } -static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, - u32 rxfs) +static void m_can_read_fifo(struct net_device *dev, u32 rxfs) { + struct net_device_stats *stats = &dev->stats; struct m_can_priv *priv = netdev_priv(dev); - u32 id, fgi; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 id, fgi, dlc; + int i; /* calculate the fifo get index for where to read data */ fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; + dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); + if (dlc & RX_BUF_EDL) + skb = alloc_canfd_skb(dev, &cf);
+ else + skb = alloc_can_skb(dev, (struct can_frame **)&cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + if (dlc & RX_BUF_EDL) + cf->len = can_dlc2len((dlc >> 16) & 0x0F); + else + cf->len = get_can_dlc((dlc >> 16) & 0x0F); + id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); if (id & RX_BUF_XTD) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (id >> 18) & CAN_SFF_MASK; - if (id & RX_BUF_RTR) { + if (id & RX_BUF_ESI) { + cf->flags |= CANFD_ESI; + netdev_dbg(dev, "ESI Error\n"); + } + + if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) { cf->can_id |= CAN_RTR_FLAG; } else { - id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); - cf->can_dlc = get_can_dlc((id >> 16) & 0x0F); - *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(0)); - *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, - M_CAN_FIFO_DATA(1)); + if (dlc & RX_BUF_BRS) + cf->flags |= CANFD_BRS; + + for (i = 0; i < cf->len; i += 4) + *(u32 *)(cf->data + i) = + m_can_fifo_read(priv, fgi, + M_CAN_FIFO_DATA(i / 4)); } /* acknowledge rx fifo 0 */ m_can_write(priv, M_CAN_RXF0A, fgi); + + stats->rx_packets++; + stats->rx_bytes += cf->len; + + netif_receive_skb(skb); } static int m_can_do_rx_poll(struct net_device *dev, int quota) { struct m_can_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; - struct can_frame *frame; u32 pkts = 0; u32 rxfs; @@ -374,18 +431,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota) if (rxfs & RXFS_RFL) netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); - skb = alloc_can_skb(dev, &frame); - if (!skb) { - stats->rx_dropped++; - return pkts; - } - - m_can_read_fifo(dev, frame, rxfs); - - stats->rx_packets++; - stats->rx_bytes += frame->can_dlc; - - netif_receive_skb(skb); + m_can_read_fifo(dev, rxfs); quota--; pkts++; @@ -481,11 +527,23 @@ static int m_can_handle_lec_err(struct net_device *dev, return 1; } +static int __m_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct m_can_priv *priv = netdev_priv(dev); + unsigned int ecr; + + ecr = m_can_read(priv, M_CAN_ECR); + bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; + bec->txerr = ecr & ECR_TEC_MASK; + + return 0; +} + static int m_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct m_can_priv *priv = netdev_priv(dev); - unsigned int ecr; int err; err = clk_prepare_enable(priv->hclk); @@ -498,9 +556,7 @@ static int m_can_get_berr_counter(const struct net_device *dev, return err; } - ecr = m_can_read(priv, M_CAN_ECR); - bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; - bec->txerr = ecr & ECR_TEC_MASK; + __m_can_get_berr_counter(dev, bec); clk_disable_unprepare(priv->cclk); clk_disable_unprepare(priv->hclk); @@ -544,7 +600,7 @@ static int m_can_handle_state_change(struct net_device *dev, if (unlikely(!skb)) return 0; - m_can_get_berr_counter(dev, &bec); + __m_can_get_berr_counter(dev, &bec); switch (new_state) { case CAN_STATE_ERROR_ACTIVE: @@ -596,14 +652,14 @@ static int m_can_handle_state_errors(struct net_device *dev, u32 psr) if ((psr & PSR_EP) && (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error passive state\n"); work_done += m_can_handle_state_change(dev, CAN_STATE_ERROR_PASSIVE); } if ((psr & PSR_BO) && (priv->can.state != CAN_STATE_BUS_OFF)) { - netdev_dbg(dev, "entered error warning state\n"); + netdev_dbg(dev, "entered error bus off state\n"); work_done += 
m_can_handle_state_change(dev, CAN_STATE_BUS_OFF); } @@ -615,7 +671,7 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) { if (irqstatus & IR_WDI) netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); - if (irqstatus & IR_BEU) + if (irqstatus & IR_ELO) netdev_err(dev, "Error Logging Overflow\n"); if (irqstatus & IR_BEU) netdev_err(dev, "Bit Error Uncorrected\n"); @@ -733,10 +789,23 @@ static const struct can_bittiming_const m_can_bittiming_const = { .brp_inc = 1, }; +static const struct can_bittiming_const m_can_data_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + static int m_can_set_bittiming(struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 reg_btp; @@ -747,7 +816,17 @@ static int m_can_set_bittiming(struct net_device *dev) reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); m_can_write(priv, M_CAN_BTP, reg_btp); - netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) | + (tseg1 << FBTR_FTSEG1_SHIFT) | + (tseg2 << FBTR_FTSEG2_SHIFT); + m_can_write(priv, M_CAN_FBTP, reg_btp); + } return 0; } @@ -767,8 +846,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_config_endisable(priv, true); - /* RX Buffer/FIFO Element Size 8 bytes data field */ - m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); + /* RX Buffer/FIFO Element Size 64 bytes data field */ + m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES); /* Accept Non-matching Frames Into FIFO 0 */ m_can_write(priv, M_CAN_GFC, 0x0); @@ -777,8 +856,8 @@ static void m_can_chip_config(struct net_device *dev) m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | priv->mcfg[MRAM_TXB].off); - /* only support 8 bytes firstly */ - m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); + /* support 64 bytes payload */ + m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES); m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | priv->mcfg[MRAM_TXE].off); @@ -793,7 +872,8 @@ static void m_can_chip_config(struct net_device *dev) RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); cccr = m_can_read(priv, M_CAN_CCCR); - cccr &= ~(CCCR_TEST | CCCR_MON); + cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) | + (CCCR_CME_MASK << CCCR_CME_SHIFT)); test = m_can_read(priv, M_CAN_TEST); test &= ~TEST_LBCK; @@ -805,6 +885,9 @@ static void m_can_chip_config(struct net_device *dev) test |= TEST_LBCK; } + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT; + m_can_write(priv, M_CAN_CCCR, cccr); m_can_write(priv, M_CAN_TEST, test); @@ -869,11 +952,13 @@ static struct net_device *alloc_m_can_dev(void) priv->dev = dev; priv->can.bittiming_const = &m_can_bittiming_const; + priv->can.data_bittiming_const = &m_can_data_bittiming_const; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | - 
CAN_CTRLMODE_BERR_REPORTING; + CAN_CTRLMODE_BERR_REPORTING | + CAN_CTRLMODE_FD; return dev; } @@ -956,8 +1041,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct m_can_priv *priv = netdev_priv(dev); - struct can_frame *cf = (struct can_frame *)skb->data; - u32 id; + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 id, cccr; + int i; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; @@ -976,11 +1062,28 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, /* message ram configuration */ m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); - m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); + m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16); + + for (i = 0; i < cf->len; i += 4) + m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4), + *(u32 *)(cf->data + i)); + can_put_echo_skb(skb, dev, 0); + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + cccr = m_can_read(priv, M_CAN_CCCR); + cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT); + if (can_is_canfd_skb(skb)) { + if (cf->flags & CANFD_BRS) + cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT; + else + cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT; + } else { + cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT; + } + m_can_write(priv, M_CAN_CCCR, cccr); + } + /* enable first TX buffer to start transfer */ m_can_write(priv, M_CAN_TXBTIE, 0x1); m_can_write(priv, M_CAN_TXBAR, 0x1); @@ -992,6 +1095,7 @@ static const struct net_device_ops m_can_netdev_ops = { .ndo_open = m_can_open, .ndo_stop = m_can_close, .ndo_start_xmit = m_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static int register_m_can_dev(struct net_device *dev) @@ -1009,7 +1113,7 @@ static int m_can_of_parse_mram(struct platform_device *pdev, struct resource *res; void __iomem *addr; u32 out_val[MRAM_CFG_LEN]; - int ret; + int i, start, end, ret; /* message ram could be shared */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); @@ -1060,6 +1164,15 @@ static int m_can_of_parse_mram(struct platform_device *pdev, priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); + /* initialize the entire Message RAM in use to avoid possible + * ECC/parity checksum errors when reading an uninitialized buffer + */ + start = priv->mcfg[MRAM_SIDF].off; + end = priv->mcfg[MRAM_TXB].off + + priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; + for (i = start; i < end; i += 4) + writel(0x0, priv->mram_base + i); + return 0; } diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 1abe133d1594..9718248e55f1 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -628,6 +628,7 @@ static const struct net_device_ops rcar_can_netdev_ops = { .ndo_open = rcar_can_open, .ndo_stop = rcar_can_close, .ndo_start_xmit = rcar_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static void rcar_can_rx_pkt(struct rcar_can_priv *priv) diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c index 8ff3424d5147..15c00faeec61 100644 --- a/drivers/net/can/sja1000/kvaser_pci.c +++ b/drivers/net/can/sja1000/kvaser_pci.c @@ -214,7 +214,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, struct net_device *dev; struct sja1000_priv *priv; struct kvaser_pci *board; - int err, init_step; + int err; dev = alloc_sja1000dev(sizeof(struct kvaser_pci)); if (dev == 
NULL) @@ -235,7 +235,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, if (channel == 0) { board->xilinx_ver = ioread8(board->res_addr + XILINX_VERINT) >> 4; - init_step = 2; /* Assert PTADR# - we're in passive mode so the other bits are not important */ @@ -264,8 +263,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; - init_step = 4; - dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n", priv->reg_base, board->conf_addr, dev->irq); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 00f2534dde73..29d3f0938eb8 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -434,10 +434,9 @@ static void ems_usb_read_bulk_callback(struct urb *urb) if (urb->actual_length > CPC_HEADER_SIZE) { struct ems_cpc_msg *msg; u8 *ibuf = urb->transfer_buffer; - u8 msg_count, again, start; + u8 msg_count, start; msg_count = ibuf[0] & ~0x80; - again = ibuf[0] & 0x80; start = CPC_HEADER_SIZE; diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index b7c9e8b11460..c063a54ab8dd 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -464,7 +464,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) { struct esd_tx_urb_context *context = urb->context; struct esd_usb2_net_priv *priv; - struct esd_usb2 *dev; struct net_device *netdev; size_t size = sizeof(struct esd_usb2_msg); @@ -472,7 +471,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb) priv = context->priv; netdev = priv->netdev; - dev = priv->usb2; /* free up our allocated buffer */ usb_free_coherent(urb->dev, size, @@ -1143,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf) } } unlink_all_urbs(dev); + kfree(dev); } } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 04b0f84612f0..009acc8641fc 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -718,6 +718,7 @@ static const struct net_device_ops gs_usb_netdev_ops = { .ndo_open = gs_can_open, .ndo_stop = gs_can_close, .ndo_start_xmit = gs_can_start_xmit, + .ndo_change_mtu = can_change_mtu, }; static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf) diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 5e8b5609c067..8a998e3884ce 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -300,7 +300,8 @@ static int xcan_set_bittiming(struct net_device *ndev) static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); - u32 err, reg_msr, reg_sr_mask; + u32 reg_msr, reg_sr_mask; + int err; unsigned long timeout; /* Check if it is in reset mode */ @@ -961,6 +962,7 @@ static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, + .ndo_change_mtu = can_change_mtu, }; /** diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index cca604994003..4fe33606f372 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -1082,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg) pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); for (i = 0; i < CXGB4_MAX_PRIORITY; i++) - pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF; + pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF; INIT_PORT_DCB_READ_PEER_CMD(pcmd, 
pi->port_id); pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9a18e7930b31..3e8475cae4f9 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4421,6 +4421,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, "Disabled VxLAN offloads for UDP port %d\n", be16_to_cpu(port)); } + +static bool be_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops be_netdev_ops = { @@ -4450,6 +4455,7 @@ static const struct net_device_ops be_netdev_ops = { #ifdef CONFIG_BE2NET_VXLAN .ndo_add_vxlan_port = be_add_vxlan_port, .ndo_del_vxlan_port = be_del_vxlan_port, + .ndo_gso_check = be_gso_check, #endif }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 02266e3de514..4d69e382b4e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); #ifdef CONFIG_MLX4_EN_VXLAN - if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) vxlan_get_rx_port(dev); #endif priv->port_up = true; @@ -2355,6 +2355,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev, queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); } + +static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops mlx4_netdev_ops = { @@ -2386,6 +2391,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #ifdef CONFIG_MLX4_EN_VXLAN .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, #endif }; @@ -2416,6 +2422,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, +#ifdef CONFIG_MLX4_EN_VXLAN + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_gso_check = mlx4_en_gso_check, +#endif }; int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index f5e29f7bdae3..a913b3ad2f89 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -503,6 +503,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev, adapter->flags |= QLCNIC_DEL_VXLAN_PORT; } + +static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev) +{ + return vxlan_gso_check(skb); +} #endif static const struct net_device_ops qlcnic_netdev_ops = { @@ -526,6 +531,7 @@ static const struct net_device_ops qlcnic_netdev_ops = { #ifdef CONFIG_QLCNIC_VXLAN .ndo_add_vxlan_port = qlcnic_add_vxlan_port, .ndo_del_vxlan_port = qlcnic_del_vxlan_port, + .ndo_gso_check = qlcnic_gso_check, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d8794488f80a..c560f9aeb55d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ 
-129,9 +129,9 @@ do { \ #define CPSW_VLAN_AWARE BIT(1) #define CPSW_ALE_VLAN_AWARE 1 -#define CPSW_FIFO_NORMAL_MODE (0 << 15) -#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) -#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) +#define CPSW_FIFO_NORMAL_MODE (0 << 16) +#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16) +#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16) #define CPSW_INTPACEEN (0x3f << 16) #define CPSW_INTPRESCALE_MASK (0x7FF << 0) diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c index 9ce854f43917..6cbc56ad9ff4 100644 --- a/drivers/net/ieee802154/fakehard.c +++ b/drivers/net/ieee802154/fakehard.c @@ -377,17 +377,20 @@ static int ieee802154fake_probe(struct platform_device *pdev) err = wpan_phy_register(phy); if (err) - goto out; + goto err_phy_reg; err = register_netdev(dev); - if (err < 0) - goto out; + if (err) + goto err_netdev_reg; dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); return 0; -out: - unregister_netdev(dev); +err_netdev_reg: + wpan_phy_unregister(phy); +err_phy_reg: + free_netdev(dev); + wpan_phy_free(phy); return err; } diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 1aff970be33e..1dc628ffce2b 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, int len = sizeof(struct sockaddr_pppox); struct sockaddr_pppox sp; - sp.sa_family = AF_PPPOX; + memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); + + sp.sa_family = AF_PPPOX; sp.sa_protocol = PX_PROTO_PPTP; sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 22756db53dca..b8a82b86f909 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -780,6 +780,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ec2a8b41ed41..b0bc8ead47de 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1673,6 +1673,40 @@ static const struct attribute_group virtio_net_mrg_rx_group = { }; #endif +static bool virtnet_fail_on_feature(struct virtio_device *vdev, + unsigned int fbit, + const char *fname, const char *dname) +{ + if (!virtio_has_feature(vdev, fbit)) + return false; + + dev_err(&vdev->dev, "device advertises feature %s but not %s", + fname, dname); + + return true; +} + +#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ + virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) + +static bool virtnet_validate_features(struct virtio_device *vdev) +{ + if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && + (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, + "VIRTIO_NET_F_CTRL_VQ"))) { + return false; + } + + return true; +} + static int virtnet_probe(struct virtio_device *vdev) { int i, err; @@ -1680,6 +1714,9 @@ static int virtnet_probe(struct virtio_device *vdev) struct virtnet_info *vi; u16 max_queue_pairs; + if (!virtnet_validate_features(vdev)) + return -EINVAL; + /* Find if host supports multiqueue virtio_net device */ err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, struct virtio_net_config, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index fa9dc45b75a6..e1e335c339e3 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -67,12 +67,6 @@ #define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ -/* VXLAN protocol header */ -struct vxlanhdr { - __be32 vx_flags; - __be32 vx_vni; -}; - /* UDP port for VXLAN traffic. * The IANA assigned port is 4789, but the Linux default is 8472 * for compatibility with early adopters. 
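The vxlan.c hunk above drops the driver-local vxlanhdr definition in favor of a copy in a shared header. For orientation, a minimal sketch of the 8-byte on-wire header and VNI extraction; the field layout follows the removed lines, while the struct and helper names used here (vxlanhdr_sketch, sketch_vxlan_vni) are illustrative only:

#include <linux/types.h>
#include <asm/byteorder.h>

struct vxlanhdr_sketch {	/* layout as in the removed struct vxlanhdr */
	__be32 vx_flags;	/* VXLAN_FLAGS (0x08000000) marks a valid VNI */
	__be32 vx_vni;		/* 24-bit VNI in the upper three bytes */
};

/* Hypothetical helper: recover the host-order VNI from the header. */
static inline u32 sketch_vxlan_vni(const struct vxlanhdr_sketch *vxh)
{
	return be32_to_cpu(vxh->vx_vni) >> 8;	/* low byte is reserved */
}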
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 697c4ae90af0..1e8ea5e4d4ca 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -664,6 +664,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah) ah->enabled_cals |= TX_CL_CAL; else ah->enabled_cals &= ~TX_CL_CAL; + + if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) { + if (ah->is_clk_25mhz) { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); + } else { + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); + REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); + } + udelay(100); + } } static void ar9003_hw_prog_ini(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8be4b1453394..2ad605760e21 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -861,19 +861,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, udelay(RTC_PLL_SETTLE_DELAY); REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); - - if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { - if (ah->is_clk_25mhz) { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); - } else { - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); - REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); - } - udelay(100); - } } static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 30c66dfcd7a0..4f18a6be0c7d 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -974,9 +974,8 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, struct ath_vif *avp; /* - * Pick the MAC address of the first interface as the new hardware - * MAC address. The hardware will use it together with the BSSID mask - * when matching addresses. + * The hardware will use primary station addr together with the + * BSSID mask when matching addresses. 
*/ memset(iter_data, 0, sizeof(*iter_data)); memset(&iter_data->mask, 0xff, ETH_ALEN); @@ -1205,6 +1204,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, list_add_tail(&avp->list, &avp->chanctx->vifs); } + ath9k_calculate_summary_state(sc, avp->chanctx); + ath9k_assign_hw_queues(hw, vif); an->sc = sc; @@ -1274,6 +1275,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, ath_tx_node_cleanup(sc, &avp->mcast_node); + ath9k_calculate_summary_state(sc, avp->chanctx); + mutex_unlock(&sc->mutex); } diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index 1dfc682a8055..ee27b06074e1 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -300,9 +300,7 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value) void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) { - assert_mac_suspended(dev); - dev->phy.ops->phy_write(dev, destreg, - dev->phy.ops->phy_read(dev, srcreg)); + b43_phy_write(dev, destreg, b43_phy_read(dev, srcreg)); } void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c index f05f5270fec1..927bffd5be64 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c @@ -40,8 +40,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) return; irq = irq_of_parse_and_map(np, 0); - if (irq < 0) { - brcmf_err("interrupt could not be mapped: err=%d\n", irq); + if (!irq) { + brcmf_err("interrupt could not be mapped\n"); devm_kfree(dev, sdiodev->pdata); return; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c index 8c0632ec9f7a..16fef3382019 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c @@ -19,10 +19,10 @@ #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/delay.h> -#include <linux/unaligned/access_ok.h> #include <linux/interrupt.h> #include <linux/bcma/bcma.h> #include <linux/sched.h> +#include <asm/unaligned.h> #include <soc.h> #include <chipcommon.h> diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c index dc135915470d..875d1142c8b0 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -669,10 +669,12 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd, goto finalize; } - if (!brcmf_usb_ioctl_resp_wait(devinfo)) + if (!brcmf_usb_ioctl_resp_wait(devinfo)) { + usb_kill_urb(devinfo->ctl_urb); ret = -ETIMEDOUT; - else + } else { memcpy(buffer, tmpbuf, buflen); + } finalize: kfree(tmpbuf); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 28fa25b509db..39b45c038a93 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -299,6 +299,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, primary_offset = ch->center_freq1 - ch->chan->center_freq; switch (ch->width) { case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: ch_inf.bw = BRCMU_CHAN_BW_20; WARN_ON(primary_offset != 0); break; @@ -323,6 +324,10 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, ch_inf.sb = BRCMU_CHAN_SB_LU; } break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case 
NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: default: WARN_ON_ONCE(1); } @@ -333,6 +338,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, case IEEE80211_BAND_5GHZ: ch_inf.band = BRCMU_CHAN_BAND_5G; break; + case IEEE80211_BAND_60GHZ: default: WARN_ON_ONCE(1); } diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index b280d5d87127..7554f7053830 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -602,16 +602,6 @@ static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm) SCAN_COMPLETE_NOTIFICATION }; int ret; - if (mvm->scan_status == IWL_MVM_SCAN_NONE) - return 0; - - if (iwl_mvm_is_radio_killed(mvm)) { - ieee80211_scan_completed(mvm->hw, true); - iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - mvm->scan_status = IWL_MVM_SCAN_NONE; - return 0; - } - iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, scan_abort_notif, ARRAY_SIZE(scan_abort_notif), @@ -1400,6 +1390,16 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) { + if (mvm->scan_status == IWL_MVM_SCAN_NONE) + return 0; + + if (iwl_mvm_is_radio_killed(mvm)) { + ieee80211_scan_completed(mvm->hw, true); + iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); + mvm->scan_status = IWL_MVM_SCAN_NONE; + return 0; + } + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) return iwl_mvm_scan_offload_stop(mvm, true); return iwl_mvm_cancel_regular_scan(mvm); diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 160c3ebc48d0..dd2f3f8baa9d 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1894,8 +1894,7 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, int reg; __le32 *val; - prph_len += sizeof(*data) + sizeof(*prph) + - num_bytes_in_chunk; + prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); (*data)->len = cpu_to_le32(sizeof(*prph) + diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 8e68f87ab13c..66ff36447b94 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb) skb_trim(skb, frame_length); } -void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) +/* + * H/W needs L2 padding between the header and the payload if header size + * is not 4 bytes aligned. + */ +void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - unsigned int payload_length = skb->len - header_length; - unsigned int header_align = ALIGN_SIZE(skb, 0); - unsigned int payload_align = ALIGN_SIZE(skb, header_length); - unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0; + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; - /* - * Adjust the header alignment if the payload needs to be moved more - * than the header. - */ - if (payload_align > header_align) - header_align += 4; - - /* There is nothing to do if no alignment is needed */ - if (!header_align) + if (!l2pad) return; - /* Reserve the amount of space needed in front of the frame */ - skb_push(skb, header_align); - - /* - * Move the header.
- */ - memmove(skb->data, skb->data + header_align, header_length); - - /* Move the payload, if present and if required */ - if (payload_length && payload_align) - memmove(skb->data + header_length + l2pad, - skb->data + header_length + l2pad + payload_align, - payload_length); - - /* Trim the skb to the correct size */ - skb_trim(skb, header_length + l2pad + payload_length); + skb_push(skb, l2pad); + memmove(skb->data, skb->data + l2pad, hdr_len); } -void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) +void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) { - /* - * L2 padding is only present if the skb contains more than just the - * IEEE 802.11 header. - */ - unsigned int l2pad = (skb->len > header_length) ? - L2PAD_SIZE(header_length) : 0; + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; - memmove(skb->data + l2pad, skb->data, header_length); + memmove(skb->data + l2pad, skb->data, hdr_len); skb_pull(skb, l2pad); } diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 25daa8715219..61f5d36eca6a 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -842,7 +842,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) break; } /* handle command packet here */ - if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { + if (rtlpriv->cfg->ops->rx_command_packet && + rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { dev_kfree_skb_any(skb); goto end; } @@ -1127,9 +1128,14 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw) __skb_queue_tail(&ring->queue, pskb); - rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, - &temp_one); - + if (rtlpriv->use_new_trx_flow) { + temp_one = 4; + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true, + HW_DESC_OWN, (u8 *)&temp_one); + } else { + rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, + &temp_one); + } return; } @@ -1370,9 +1376,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, ring->desc = NULL; if (rtlpriv->use_new_trx_flow) { pci_free_consistent(rtlpci->pdev, - sizeof(*ring->desc) * ring->entries, + sizeof(*ring->buffer_desc) * ring->entries, ring->buffer_desc, ring->buffer_desc_dma); - ring->desc = NULL; + ring->buffer_desc = NULL; } } @@ -1543,7 +1549,6 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) true, HW_DESC_TXBUFF_ADDR), skb->len, PCI_DMA_TODEVICE); - ring->idx = (ring->idx + 1) % ring->entries; kfree_skb(skb); ring->idx = (ring->idx + 1) % ring->entries; } diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c index 00e067044c08..5761d5b49e39 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c @@ -1201,6 +1201,9 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw, } + if (type != NL80211_IFTYPE_AP && + rtlpriv->mac80211.link_state < MAC80211_LINKED) + bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK; rtl_write_byte(rtlpriv, (MSR), bt_msr); temp = rtl_read_dword(rtlpriv, TCR); @@ -1262,6 +1265,7 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw) rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]); /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */ rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F); + rtlpci->irq_enabled = true; } void rtl92se_disable_interrupt(struct ieee80211_hw *hw) @@ -1276,8 +1280,7 @@ void 
rtl92se_disable_interrupt(struct ieee80211_hw *hw) rtlpci = rtl_pcidev(rtl_pcipriv(hw)); rtl_write_dword(rtlpriv, INTA_MASK, 0); rtl_write_dword(rtlpriv, INTA_MASK + 4, 0); - - synchronize_irq(rtlpci->pdev->irq); + rtlpci->irq_enabled = false; } static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data) diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c index 77c5b5f35244..4b4612fe2fdb 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c @@ -399,6 +399,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw, case 2: currentcmd = &postcommoncmd[*step]; break; + default: + return true; } if (currentcmd->cmdid == CMDID_END) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c index aadba29c167a..fb003868bdef 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c @@ -236,6 +236,19 @@ static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw) } } +static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue, + u16 index) +{ + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); + struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; + u8 *entry = (u8 *)(&ring->desc[ring->idx]); + u8 own = (u8)rtl92se_get_desc(entry, true, HW_DESC_OWN); + + if (own) + return false; + return true; +} + static struct rtl_hal_ops rtl8192se_hal_ops = { .init_sw_vars = rtl92s_init_sw_vars, .deinit_sw_vars = rtl92s_deinit_sw_vars, @@ -269,6 +282,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = { .led_control = rtl92se_led_control, .set_desc = rtl92se_set_desc, .get_desc = rtl92se_get_desc, + .is_tx_desc_closed = rtl92se_is_tx_desc_closed, .tx_polling = rtl92se_tx_polling, .enable_hw_sec = rtl92se_enable_hw_security_config, .set_key = rtl92se_set_key, @@ -306,6 +320,8 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = { .maps[MAC_RCR_ACRC32] = RCR_ACRC32, .maps[MAC_RCR_ACF] = RCR_ACF, .maps[MAC_RCR_AAP] = RCR_AAP, + .maps[MAC_HIMR] = INTA_MASK, + .maps[MAC_HIMRE] = INTA_MASK + 4, .maps[EFUSE_TEST] = REG_EFUSE_TEST, .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, diff --git a/drivers/of/address.c b/drivers/of/address.c index afdb78299f61..06af494184d6 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -450,6 +450,21 @@ static struct of_bus *of_match_bus(struct device_node *np) return NULL; } +static int of_empty_ranges_quirk(void) +{ + if (IS_ENABLED(CONFIG_PPC)) { + /* To save cycles, we cache the result */ + static int quirk_state = -1; + + if (quirk_state < 0) + quirk_state = + of_machine_is_compatible("Power Macintosh") || + of_machine_is_compatible("MacRISC"); + return quirk_state; + } + return false; +} + static int of_translate_one(struct device_node *parent, struct of_bus *bus, struct of_bus *pbus, __be32 *addr, int na, int ns, int pna, const char *rprop) @@ -475,12 +490,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, * This code is only enabled on powerpc. 
--gcl */ ranges = of_get_property(parent, rprop, &rlen); -#if !defined(CONFIG_PPC) - if (ranges == NULL) { + if (ranges == NULL && !of_empty_ranges_quirk()) { pr_err("OF: no ranges; cannot translate\n"); return 1; } -#endif /* !defined(CONFIG_PPC) */ if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); memset(addr, 0, pna * 4); diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index f297891d8529..d4994177dec2 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -247,7 +247,7 @@ void of_node_release(struct kobject *kobj) * @allocflags: Allocation flags (typically pass GFP_KERNEL) * * Copy a property by dynamically allocating the memory of both the - * property stucture and the property name & contents. The property's + * property structure and the property name & contents. The property's * flags have the OF_DYNAMIC bit set so that we can differentiate between * dynamically allocated properties and not. * Returns the newly allocated property or NULL on out of memory error. diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index d1ffca8b34ea..30e97bcc4f88 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -773,7 +773,7 @@ int __init early_init_dt_scan_chosen_serial(void) if (offset < 0) return -ENODEV; - while (match->compatible) { + while (match->compatible[0]) { unsigned long addr; if (fdt_node_check_compatible(fdt, offset, match->compatible)) { match++; diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c index 11b873c54a77..e2d79afa9dc6 100644 --- a/drivers/of/selftest.c +++ b/drivers/of/selftest.c @@ -896,10 +896,14 @@ static void selftest_data_remove(void) return; } - while (last_node_index >= 0) { + while (last_node_index-- > 0) { if (nodes[last_node_index]) { np = of_find_node_by_path(nodes[last_node_index]->full_name); - if (strcmp(np->full_name, "/aliases") != 0) { + if (np == nodes[last_node_index]) { + if (of_aliases == np) { + of_node_put(of_aliases); + of_aliases = NULL; + } detach_node_and_children(np); } else { for_each_property_of_node(np, prop) { @@ -908,7 +912,6 @@ static void selftest_data_remove(void) } } } - last_node_index--; } } @@ -921,6 +924,8 @@ static int __init of_selftest(void) res = selftest_data_add(); if (res) return res; + if (!of_aliases) + of_aliases = of_find_node_by_path("/aliases"); np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); if (!np) { diff --git a/drivers/pci/access.c b/drivers/pci/access.c index d292d7cb3417..49dd766852ba 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -444,7 +444,7 @@ static inline int pcie_cap_version(const struct pci_dev *dev) return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } -static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) +bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 9ecabfa8c634..2988fe136c1e 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -631,10 +631,15 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) if (ret) return ret; - bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res); + bus = pci_create_root_bus(&pdev->dev, 0, + &xgene_pcie_ops, port, &res); if (!bus) return -ENOMEM; + pci_scan_child_bus(bus); + pci_assign_unassigned_bus_resources(bus); + pci_bus_add_devices(bus); + platform_set_drvdata(pdev, port); return 0; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 0601890db22d..4a3902d8e6fe 100644 --- a/drivers/pci/pci.h +++ 
b/drivers/pci/pci.h @@ -6,6 +6,8 @@ extern const unsigned char pcie_link_speed[]; +bool pcie_cap_has_lnkctl(const struct pci_dev *dev); + /* Functions internal to the PCI core code */ int pci_create_sysfs_dev_files(struct pci_dev *pdev); diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 5ed99309c758..c8ca98c2b480 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -407,15 +407,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) { struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; - unsigned long base, limit; + u64 base64, limit64; + dma_addr_t base, limit; struct pci_bus_region region; struct resource *res; res = child->resource[2]; pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); - base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16; - limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; + base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; + limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { u32 mem_base_hi, mem_limit_hi; @@ -429,17 +430,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) * this, just assume they are not being used. */ if (mem_base_hi <= mem_limit_hi) { -#if BITS_PER_LONG == 64 - base |= ((unsigned long) mem_base_hi) << 32; - limit |= ((unsigned long) mem_limit_hi) << 32; -#else - if (mem_base_hi || mem_limit_hi) { - dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n"); - return; - } -#endif + base64 |= (u64) mem_base_hi << 32; + limit64 |= (u64) mem_limit_hi << 32; } } + + base = (dma_addr_t) base64; + limit = (dma_addr_t) limit64; + + if (base != base64) { + dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", + (unsigned long long) base64); + return; + } + if (base <= limit) { res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; @@ -1323,7 +1327,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); /* Initialize Link Control Register */ - if (dev->subordinate) + if (pcie_cap_has_lnkctl(dev)) pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 81bb3bd7909d..15081257cfc8 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -828,6 +828,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) if (status == CPL_ERR_RTX_NEG_ADVICE) goto rel_skb; + module_put(THIS_MODULE); + if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && status != CPL_ERR_ARP_MISS) diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 13d869a92248..7da59c38a69e 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -816,7 +816,7 @@ static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk) read_lock_bh(&csk->callback_lock); if (csk->user_data) iscsi_conn_failure(csk->user_data, - ISCSI_ERR_CONN_FAILED); + ISCSI_ERR_TCP_CONN_CLOSE); read_unlock_bh(&csk->callback_lock); } } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index b19e4329ba00..73e58d22e325 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3491,7 +3491,7 @@ 
iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, len = sprintf(buf, "TargetAddress=" "%s:%hu,%hu", inaddr_any ? conn->local_ip : np->np_ip, - inaddr_any ? conn->local_port : np->np_port, + np->np_port, tpg->tpgt); len += 1; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 8c60a1a1ae8d..9f93b8234095 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -2738,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; struct t10_reservation *pr_tmpl = &dev->t10_pr; u32 pr_res_mapped_lun = 0; - int all_reg = 0, calling_it_nexus = 0, released_regs = 0; + int all_reg = 0, calling_it_nexus = 0; + bool sa_res_key_unmatched = sa_res_key != 0; int prh_type = 0, prh_scope = 0; if (!se_sess) @@ -2813,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if (!all_reg) { if (pr_reg->pr_res_key != sa_res_key) continue; + sa_res_key_unmatched = false; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; pr_reg_nacl = pr_reg->pr_reg_nacl; @@ -2820,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, __core_scsi3_free_registration(dev, pr_reg, (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, calling_it_nexus); - released_regs++; } else { /* * Case for any existing all registrants type @@ -2838,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if ((sa_res_key) && (pr_reg->pr_res_key != sa_res_key)) continue; + sa_res_key_unmatched = false; calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; if (calling_it_nexus) @@ -2848,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, __core_scsi3_free_registration(dev, pr_reg, (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, 0); - released_regs++; } if (!calling_it_nexus) core_scsi3_ua_allocate(pr_reg_nacl, @@ -2863,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, * registered reservation key, then the device server shall * complete the command with RESERVATION CONFLICT status. */ - if (!released_regs) { + if (sa_res_key_unmatched) { spin_unlock(&dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return TCM_RESERVATION_CONFLICT; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9ea0d5f03f7a..be877bf6f730 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -2292,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd) * and let it call back once the write buffers are ready. 
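The core_scsi3_pro_preempt() hunk above is a subtle semantic fix: SPC-3 requires RESERVATION CONFLICT when the service action reservation key matches no registration, but the old code keyed the check to released_regs, i.e. to whether anything was freed, and a nexus preempting itself matches without freeing anything. A minimal userspace sketch of the corrected predicate, with illustrative names rather than the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct reg {
	uint64_t res_key;
};

/*
 * Conflict means: a non-zero SA key matched no registration at all.
 * Whether a matching registration was actually freed is irrelevant.
 */
static bool preempt_conflicts(const struct reg *regs, int n,
			      uint64_t sa_res_key)
{
	bool unmatched = sa_res_key != 0;
	int i;

	for (i = 0; i < n; i++) {
		if (sa_res_key && regs[i].res_key != sa_res_key)
			continue;
		unmatched = false;	/* matched, even if nothing is freed */
	}
	return unmatched;
}

int main(void)
{
	struct reg regs[] = { { 0x1111 }, { 0x2222 } };

	printf("%d\n", preempt_conflicts(regs, 2, 0x3333));	/* 1: conflict */
	printf("%d\n", preempt_conflicts(regs, 2, 0x2222));	/* 0: matched */
	return 0;
}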
*/ target_add_to_state_list(cmd); - if (cmd->data_direction != DMA_TO_DEVICE) { + if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { target_execute_cmd(cmd); return 0; } diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 69906cacd04f..a17f11850669 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1312,6 +1312,7 @@ static int vhost_scsi_set_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct tcm_vhost_tpg **vs_tpg; @@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, ret = -EEXIST; goto out; } + /* + * In order to ensure individual vhost-scsi configfs + * groups cannot be removed while in use by vhost ioctl, + * go ahead and take an explicit se_tpg->tpg_group.cg_item + * dependency now. + */ + se_tpg = &tpg->se_tpg; + ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); + if (ret) { + pr_warn("configfs_depend_item() failed: %d\n", ret); + kfree(vs_tpg); + mutex_unlock(&tpg->tv_tpg_mutex); + goto out; + } tpg->tv_tpg_vhost_count++; tpg->vhost_scsi = vs; vs_tpg[tpg->tport_tpgt] = tpg; @@ -1401,6 +1417,7 @@ static int vhost_scsi_clear_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { + struct se_portal_group *se_tpg; struct tcm_vhost_tport *tv_tport; struct tcm_vhost_tpg *tpg; struct vhost_virtqueue *vq; @@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vs->vs_tpg[target] = NULL; match = true; mutex_unlock(&tpg->tv_tpg_mutex); + /* + * Release se_tpg->tpg_group.cg_item configfs dependency now + * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. + */ + se_tpg = &tpg->se_tpg; + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, + &se_tpg->tpg_group.cg_item); } if (match) { for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { diff --git a/fs/Makefile b/fs/Makefile index 34a1b9dea6dd..da0bbb456d3f 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -104,7 +104,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/ obj-$(CONFIG_AUTOFS4_FS) += autofs4/ obj-$(CONFIG_ADFS_FS) += adfs/ obj-$(CONFIG_FUSE_FS) += fuse/ -obj-$(CONFIG_OVERLAYFS_FS) += overlayfs/ +obj-$(CONFIG_OVERLAY_FS) += overlayfs/ obj-$(CONFIG_UDF_FS) += udf/ obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ obj-$(CONFIG_OMFS_FS) += omfs/ diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 817234168a7f..14a72ed14ef7 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, { int i; -#ifdef CONFIG_DEBUG_LOCK_ALLOC - /* lockdep really cares that we take all of these spinlocks - * in the right order. If any of the locks in the path are not - * currently blocking, it is going to complain. So, make really - * really sure by forcing the path to blocking before we clear - * the path blocking. 
- */ if (held) { btrfs_set_lock_blocking_rw(held, held_rw); if (held_rw == BTRFS_WRITE_LOCK) @@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, held_rw = BTRFS_READ_LOCK_BLOCKING; } btrfs_set_path_blocking(p); -#endif for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { if (p->nodes[i] && p->locks[i]) { @@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, } } -#ifdef CONFIG_DEBUG_LOCK_ALLOC if (held) btrfs_clear_lock_blocking_rw(held, held_rw); -#endif } /* this also releases the path */ @@ -2893,7 +2883,7 @@ cow_done: } p->locks[level] = BTRFS_WRITE_LOCK; } else { - err = btrfs_try_tree_read_lock(b); + err = btrfs_tree_read_lock_atomic(b); if (!err) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); @@ -3025,7 +3015,7 @@ again: } level = btrfs_header_level(b); - err = btrfs_try_tree_read_lock(b); + err = btrfs_tree_read_lock_atomic(b); if (!err) { btrfs_set_path_blocking(p); btrfs_tree_read_lock(b); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d71915e04e92..e6fbbd74b716 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -4167,7 +4167,12 @@ int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, /* dev-replace.c */ void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info); void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info); -void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info); +void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount); + +static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info) +{ + btrfs_bio_counter_sub(fs_info, 1); +} /* reada.c */ struct reada_control { diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 3fbd0628620b..ca6a3a3b6b6c 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -316,11 +316,6 @@ int btrfs_dev_replace_start(struct btrfs_root *root, struct btrfs_device *tgt_device = NULL; struct btrfs_device *src_device = NULL; - if (btrfs_fs_incompat(fs_info, RAID56)) { - btrfs_warn(fs_info, "dev_replace cannot yet handle RAID5/RAID6"); - return -EOPNOTSUPP; - } - switch (args->start.cont_reading_from_srcdev_mode) { case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS: case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID: @@ -927,9 +922,9 @@ void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info) percpu_counter_inc(&fs_info->bio_counter); } -void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info) +void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount) { - percpu_counter_dec(&fs_info->bio_counter); + percpu_counter_sub(&fs_info->bio_counter, amount); if (waitqueue_active(&fs_info->replace_wait)) wake_up(&fs_info->replace_wait); diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 5665d2149249..f8229ef1b46d 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c @@ -128,6 +128,26 @@ again: } /* + * take a spinning read lock. 
+ * returns 1 if we get the read lock and 0 if we don't + * this won't wait for blocking writers + */ +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) +{ + if (atomic_read(&eb->blocking_writers)) + return 0; + + read_lock(&eb->lock); + if (atomic_read(&eb->blocking_writers)) { + read_unlock(&eb->lock); + return 0; + } + atomic_inc(&eb->read_locks); + atomic_inc(&eb->spinning_readers); + return 1; +} + +/* * returns 1 if we get the read lock and 0 if we don't * this won't wait for blocking writers */ @@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb) atomic_read(&eb->blocking_readers)) return 0; - if (!write_trylock(&eb->lock)) - return 0; - + write_lock(&eb->lock); if (atomic_read(&eb->blocking_writers) || atomic_read(&eb->blocking_readers)) { write_unlock(&eb->lock); diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index b81e0e9a4894..c44a9d5f5362 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h @@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw); void btrfs_assert_tree_locked(struct extent_buffer *eb); int btrfs_try_tree_read_lock(struct extent_buffer *eb); int btrfs_try_tree_write_lock(struct extent_buffer *eb); +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); + static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) { diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 6a41631cb959..8ab2a17bbba8 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -58,9 +58,23 @@ */ #define RBIO_CACHE_READY_BIT 3 +/* + * bbio and raid_map is managed by the caller, so we shouldn't free + * them here. And besides that, all rbios with this flag should not + * be cached, because we need raid_map to check the rbios' stripe + * is the same or not, but it is very likely that the caller has + * free raid_map, so don't cache those rbios. + */ +#define RBIO_HOLD_BBIO_MAP_BIT 4 #define RBIO_CACHE_SIZE 1024 +enum btrfs_rbio_ops { + BTRFS_RBIO_WRITE = 0, + BTRFS_RBIO_READ_REBUILD = 1, + BTRFS_RBIO_PARITY_SCRUB = 2, +}; + struct btrfs_raid_bio { struct btrfs_fs_info *fs_info; struct btrfs_bio *bbio; @@ -117,13 +131,16 @@ struct btrfs_raid_bio { /* number of data stripes (no p/q) */ int nr_data; + int real_stripes; + + int stripe_npages; /* * set if we're doing a parity rebuild * for a read from higher up, which is handled * differently from a parity rebuild as part of * rmw */ - int read_rebuild; + enum btrfs_rbio_ops operation; /* first bad stripe */ int faila; @@ -131,6 +148,7 @@ struct btrfs_raid_bio { /* second bad stripe (for raid6 use) */ int failb; + int scrubp; /* * number of pages needed to represent the full * stripe @@ -144,8 +162,13 @@ struct btrfs_raid_bio { */ int bio_list_bytes; + int generic_bio_cnt; + atomic_t refs; + atomic_t stripes_pending; + + atomic_t error; /* * these are two arrays of pointers. 
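Returning to the locking.c hunk above: btrfs_tree_read_lock_atomic() sits between btrfs_try_tree_read_lock() and a full blocking lock. It will spin on ordinary lock holders but refuses to wait for blocking writers, checking blocking_writers both before and after acquiring the rwlock because a writer can go blocking in between. The same reasoning lets btrfs_try_tree_write_lock() use write_lock() instead of write_trylock(): briefly spinning on the raw lock is fine, only waiting on blocking holders must be avoided. A userspace model with C11 atomics and a pthread rwlock (which sleeps where the kernel spinlock spins; illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct eb_lock {
	pthread_rwlock_t lock;
	atomic_int blocking_writers;
};

/* returns 1 on success, 0 if a blocking writer is queued */
static int tree_read_lock_atomic(struct eb_lock *eb)
{
	if (atomic_load(&eb->blocking_writers))
		return 0;	/* cheap check before touching the lock */

	pthread_rwlock_rdlock(&eb->lock);
	if (atomic_load(&eb->blocking_writers)) {
		/* a writer went blocking while we acquired; back off */
		pthread_rwlock_unlock(&eb->lock);
		return 0;
	}
	return 1;
}

int main(void)
{
	struct eb_lock eb = { PTHREAD_RWLOCK_INITIALIZER, 0 };

	if (tree_read_lock_atomic(&eb)) {
		printf("got the read lock without blocking\n");
		pthread_rwlock_unlock(&eb.lock);
	}
	return 0;
}

When this returns 0, the ctree.c callers in the earlier hunks fall back to setting the path blocking and taking the full btrfs_tree_read_lock().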
We allocate the * rbio big enough to hold them both and setup their @@ -162,6 +185,11 @@ struct btrfs_raid_bio { * here for faster lookup */ struct page **bio_pages; + + /* + * bitmap to record which horizontal stripe has data + */ + unsigned long *dbitmap; }; static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); @@ -176,6 +204,10 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio); static void index_rbio_pages(struct btrfs_raid_bio *rbio); static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); +static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + int need_check); +static void async_scrub_parity(struct btrfs_raid_bio *rbio); + /* * the stripe hash table is used for locking, and to collect * bios in hopes of making a full stripe @@ -324,6 +356,7 @@ static void merge_rbio(struct btrfs_raid_bio *dest, { bio_list_merge(&dest->bio_list, &victim->bio_list); dest->bio_list_bytes += victim->bio_list_bytes; + dest->generic_bio_cnt += victim->generic_bio_cnt; bio_list_init(&victim->bio_list); } @@ -577,11 +610,20 @@ static int rbio_can_merge(struct btrfs_raid_bio *last, cur->raid_map[0]) return 0; - /* reads can't merge with writes */ - if (last->read_rebuild != - cur->read_rebuild) { + /* we can't merge with different operations */ + if (last->operation != cur->operation) + return 0; + /* + * We've need read the full stripe from the drive. + * check and repair the parity and write the new results. + * + * We're not allowed to add any new bios to the + * bio list here, anyone else that wants to + * change this stripe needs to do their own rmw. + */ + if (last->operation == BTRFS_RBIO_PARITY_SCRUB || + cur->operation == BTRFS_RBIO_PARITY_SCRUB) return 0; - } return 1; } @@ -601,7 +643,7 @@ static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index) */ static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) { - if (rbio->nr_data + 1 == rbio->bbio->num_stripes) + if (rbio->nr_data + 1 == rbio->real_stripes) return NULL; index += ((rbio->nr_data + 1) * rbio->stripe_len) >> @@ -772,11 +814,14 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) spin_unlock(&rbio->bio_list_lock); spin_unlock_irqrestore(&h->lock, flags); - if (next->read_rebuild) + if (next->operation == BTRFS_RBIO_READ_REBUILD) async_read_rebuild(next); - else { + else if (next->operation == BTRFS_RBIO_WRITE) { steal_rbio(rbio, next); async_rmw_stripe(next); + } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) { + steal_rbio(rbio, next); + async_scrub_parity(next); } goto done_nolock; @@ -796,6 +841,21 @@ done_nolock: remove_rbio_from_cache(rbio); } +static inline void +__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need) +{ + if (need) { + kfree(raid_map); + kfree(bbio); + } +} + +static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio) +{ + __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map, + !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); +} + static void __free_raid_bio(struct btrfs_raid_bio *rbio) { int i; @@ -814,8 +874,9 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio) rbio->stripe_pages[i] = NULL; } } - kfree(rbio->raid_map); - kfree(rbio->bbio); + + free_bbio_and_raid_map(rbio); + kfree(rbio); } @@ -833,6 +894,10 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) { struct bio *cur = bio_list_get(&rbio->bio_list); struct bio *next; + + if (rbio->generic_bio_cnt) + btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt); + free_raid_bio(rbio); while 
(cur) { @@ -858,13 +923,13 @@ static void raid_write_end_io(struct bio *bio, int err) bio_put(bio); - if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) + if (!atomic_dec_and_test(&rbio->stripes_pending)) return; err = 0; /* OK, we have read all the stripes we need to. */ - if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) + if (atomic_read(&rbio->error) > rbio->bbio->max_errors) err = -EIO; rbio_orig_end_io(rbio, err, 0); @@ -925,16 +990,16 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, { struct btrfs_raid_bio *rbio; int nr_data = 0; - int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes); + int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; + int num_pages = rbio_nr_pages(stripe_len, real_stripes); + int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE); void *p; - rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2, + rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 + + DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8), GFP_NOFS); - if (!rbio) { - kfree(raid_map); - kfree(bbio); + if (!rbio) return ERR_PTR(-ENOMEM); - } bio_list_init(&rbio->bio_list); INIT_LIST_HEAD(&rbio->plug_list); @@ -946,9 +1011,13 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, rbio->fs_info = root->fs_info; rbio->stripe_len = stripe_len; rbio->nr_pages = num_pages; + rbio->real_stripes = real_stripes; + rbio->stripe_npages = stripe_npages; rbio->faila = -1; rbio->failb = -1; atomic_set(&rbio->refs, 1); + atomic_set(&rbio->error, 0); + atomic_set(&rbio->stripes_pending, 0); /* * the stripe_pages and bio_pages array point to the extra @@ -957,11 +1026,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, p = rbio + 1; rbio->stripe_pages = p; rbio->bio_pages = p + sizeof(struct page *) * num_pages; + rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; - if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE) - nr_data = bbio->num_stripes - 2; + if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) + nr_data = real_stripes - 2; else - nr_data = bbio->num_stripes - 1; + nr_data = real_stripes - 1; rbio->nr_data = nr_data; return rbio; @@ -1073,7 +1143,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) { if (rbio->faila >= 0 || rbio->failb >= 0) { - BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1); + BUG_ON(rbio->faila == rbio->real_stripes - 1); __raid56_parity_recover(rbio); } else { finish_rmw(rbio); @@ -1134,7 +1204,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) static noinline void finish_rmw(struct btrfs_raid_bio *rbio) { struct btrfs_bio *bbio = rbio->bbio; - void *pointers[bbio->num_stripes]; + void *pointers[rbio->real_stripes]; int stripe_len = rbio->stripe_len; int nr_data = rbio->nr_data; int stripe; @@ -1148,11 +1218,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) bio_list_init(&bio_list); - if (bbio->num_stripes - rbio->nr_data == 1) { - p_stripe = bbio->num_stripes - 1; - } else if (bbio->num_stripes - rbio->nr_data == 2) { - p_stripe = bbio->num_stripes - 2; - q_stripe = bbio->num_stripes - 1; + if (rbio->real_stripes - rbio->nr_data == 1) { + p_stripe = rbio->real_stripes - 1; + } else if (rbio->real_stripes - rbio->nr_data == 2) { + p_stripe = rbio->real_stripes - 2; + q_stripe = rbio->real_stripes - 1; } else { BUG(); } @@ -1169,7 +1239,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 
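One detail of the alloc_rbio() hunk above worth spelling out: the struct, both page-pointer arrays, and the new dbitmap all come from a single kzalloc(), so one kfree() tears everything down and the bitmap needs no separate lifetime management. A userspace model of the carving, with names of my own rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

struct page;				/* opaque in this sketch */

struct rbio_demo {
	int nr_pages;
	int stripe_npages;
	struct page **stripe_pages;	/* all three point into the */
	struct page **bio_pages;	/* tail of the same allocation */
	unsigned long *dbitmap;
};

static struct rbio_demo *rbio_alloc_demo(int num_pages, int stripe_npages)
{
	size_t ptrs = 2 * (size_t)num_pages * sizeof(struct page *);
	size_t bitmap_bytes = ((size_t)stripe_npages + 7) / 8;
	struct rbio_demo *rbio;
	char *p;

	rbio = calloc(1, sizeof(*rbio) + ptrs + bitmap_bytes);
	if (!rbio)
		return NULL;

	p = (char *)(rbio + 1);
	rbio->stripe_pages = (struct page **)p;
	rbio->bio_pages = (struct page **)(p + ptrs / 2);
	rbio->dbitmap = (unsigned long *)(p + ptrs);
	rbio->nr_pages = num_pages;
	rbio->stripe_npages = stripe_npages;
	return rbio;
}

int main(void)
{
	struct rbio_demo *rbio = rbio_alloc_demo(64, 16);

	if (!rbio)
		return 1;
	printf("one allocation, one free\n");
	free(rbio);			/* frees arrays and bitmap too */
	return 0;
}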
spin_unlock_irq(&rbio->bio_list_lock); - atomic_set(&rbio->bbio->error, 0); + atomic_set(&rbio->error, 0); /* * now that we've set rmw_locked, run through the @@ -1209,7 +1279,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) SetPageUptodate(p); pointers[stripe++] = kmap(p); - raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE, + raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, pointers); } else { /* raid5 */ @@ -1218,7 +1288,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) } - for (stripe = 0; stripe < bbio->num_stripes; stripe++) + for (stripe = 0; stripe < rbio->real_stripes; stripe++) kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); } @@ -1227,7 +1297,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) * higher layers (the bio_list in our rbio) and our p/q. Ignore * everything else. */ - for (stripe = 0; stripe < bbio->num_stripes; stripe++) { + for (stripe = 0; stripe < rbio->real_stripes; stripe++) { for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { struct page *page; if (stripe < rbio->nr_data) { @@ -1245,8 +1315,34 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) } } - atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list)); - BUG_ON(atomic_read(&bbio->stripes_pending) == 0); + if (likely(!bbio->num_tgtdevs)) + goto write_data; + + for (stripe = 0; stripe < rbio->real_stripes; stripe++) { + if (!bbio->tgtdev_map[stripe]) + continue; + + for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { + struct page *page; + if (stripe < rbio->nr_data) { + page = page_in_rbio(rbio, stripe, pagenr, 1); + if (!page) + continue; + } else { + page = rbio_stripe_page(rbio, stripe, pagenr); + } + + ret = rbio_add_io_page(rbio, &bio_list, page, + rbio->bbio->tgtdev_map[stripe], + pagenr, rbio->stripe_len); + if (ret) + goto cleanup; + } + } + +write_data: + atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); + BUG_ON(atomic_read(&rbio->stripes_pending) == 0); while (1) { bio = bio_list_pop(&bio_list); @@ -1283,7 +1379,8 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio, stripe = &rbio->bbio->stripes[i]; stripe_start = stripe->physical; if (physical >= stripe_start && - physical < stripe_start + rbio->stripe_len) { + physical < stripe_start + rbio->stripe_len && + bio->bi_bdev == stripe->dev->bdev) { return i; } } @@ -1331,11 +1428,11 @@ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) if (rbio->faila == -1) { /* first failure on this rbio */ rbio->faila = failed; - atomic_inc(&rbio->bbio->error); + atomic_inc(&rbio->error); } else if (rbio->failb == -1) { /* second failure on this rbio */ rbio->failb = failed; - atomic_inc(&rbio->bbio->error); + atomic_inc(&rbio->error); } else { ret = -EIO; } @@ -1394,11 +1491,11 @@ static void raid_rmw_end_io(struct bio *bio, int err) bio_put(bio); - if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) + if (!atomic_dec_and_test(&rbio->stripes_pending)) return; err = 0; - if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) + if (atomic_read(&rbio->error) > rbio->bbio->max_errors) goto cleanup; /* @@ -1439,7 +1536,6 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio) static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) { int bios_to_read = 0; - struct btrfs_bio *bbio = rbio->bbio; struct bio_list bio_list; int ret; int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); @@ -1455,7 +1551,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) index_rbio_pages(rbio); - atomic_set(&rbio->bbio->error, 0); + 
atomic_set(&rbio->error, 0); /* * build a list of bios to read all the missing parts of this * stripe @@ -1503,7 +1599,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) * the bbio may be freed once we submit the last bio. Make sure * not to touch it after that */ - atomic_set(&bbio->stripes_pending, bios_to_read); + atomic_set(&rbio->stripes_pending, bios_to_read); while (1) { bio = bio_list_pop(&bio_list); if (!bio) @@ -1686,19 +1782,30 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, struct btrfs_raid_bio *rbio; struct btrfs_plug_cb *plug = NULL; struct blk_plug_cb *cb; + int ret; rbio = alloc_rbio(root, bbio, raid_map, stripe_len); - if (IS_ERR(rbio)) + if (IS_ERR(rbio)) { + __free_bbio_and_raid_map(bbio, raid_map, 1); return PTR_ERR(rbio); + } bio_list_add(&rbio->bio_list, bio); rbio->bio_list_bytes = bio->bi_iter.bi_size; + rbio->operation = BTRFS_RBIO_WRITE; + + btrfs_bio_counter_inc_noblocked(root->fs_info); + rbio->generic_bio_cnt = 1; /* * don't plug on full rbios, just get them out the door * as quickly as we can */ - if (rbio_is_full(rbio)) - return full_stripe_write(rbio); + if (rbio_is_full(rbio)) { + ret = full_stripe_write(rbio); + if (ret) + btrfs_bio_counter_dec(root->fs_info); + return ret; + } cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info, sizeof(*plug)); @@ -1709,10 +1816,13 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, INIT_LIST_HEAD(&plug->rbio_list); } list_add_tail(&rbio->plug_list, &plug->rbio_list); + ret = 0; } else { - return __raid56_parity_write(rbio); + ret = __raid56_parity_write(rbio); + if (ret) + btrfs_bio_counter_dec(root->fs_info); } - return 0; + return ret; } /* @@ -1730,7 +1840,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) int err; int i; - pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *), + pointers = kzalloc(rbio->real_stripes * sizeof(void *), GFP_NOFS); if (!pointers) { err = -ENOMEM; @@ -1740,7 +1850,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) faila = rbio->faila; failb = rbio->failb; - if (rbio->read_rebuild) { + if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { spin_lock_irq(&rbio->bio_list_lock); set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); spin_unlock_irq(&rbio->bio_list_lock); @@ -1749,15 +1859,23 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) index_rbio_pages(rbio); for (pagenr = 0; pagenr < nr_pages; pagenr++) { + /* + * Now we just use bitmap to mark the horizontal stripes in + * which we have data when doing parity scrub. 
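The raid56_parity_write() changes above pair every generic (externally submitted) bio with a hold on fs_info->bio_counter: taken before the stripe is queued, dropped in bulk by rbio_orig_end_io() via the new btrfs_bio_counter_sub(), and dropped by hand on the error paths where the end-io handler will never run. Dev-replace waits for this counter to drain before switching devices. A small userspace model of just the accounting, all names illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long in_flight;

static void bio_counter_inc(void)
{
	atomic_fetch_add(&in_flight, 1);
}

static void bio_counter_sub(long amount)
{
	/* last one out wakes the dev-replace waiter */
	if (atomic_fetch_sub(&in_flight, amount) == amount)
		printf("counter drained; waking waiter\n");
}

static int submit_stripe(int fail)	/* stand-in for the real submit path */
{
	return fail ? -1 : 0;
}

/* the end-io path normally drops the reference; on error we must */
static int parity_write_demo(int fail)
{
	int ret;

	bio_counter_inc();		/* rbio->generic_bio_cnt = 1 */
	ret = submit_stripe(fail);
	if (ret)
		bio_counter_sub(1);	/* end-io never runs: undo by hand */
	return ret;
}

int main(void)
{
	parity_write_demo(1);	/* error path: prints the wake message */
	return 0;
}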
+ */ + if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && + !test_bit(pagenr, rbio->dbitmap)) + continue; + /* setup our array of pointers with pages * from each stripe */ - for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) { + for (stripe = 0; stripe < rbio->real_stripes; stripe++) { /* * if we're rebuilding a read, we have to use * pages from the bio list */ - if (rbio->read_rebuild && + if (rbio->operation == BTRFS_RBIO_READ_REBUILD && (stripe == faila || stripe == failb)) { page = page_in_rbio(rbio, stripe, pagenr, 0); } else { @@ -1767,7 +1885,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) } /* all raid6 handling here */ - if (rbio->raid_map[rbio->bbio->num_stripes - 1] == + if (rbio->raid_map[rbio->real_stripes - 1] == RAID6_Q_STRIPE) { /* @@ -1817,10 +1935,10 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) } if (rbio->raid_map[failb] == RAID5_P_STRIPE) { - raid6_datap_recov(rbio->bbio->num_stripes, + raid6_datap_recov(rbio->real_stripes, PAGE_SIZE, faila, pointers); } else { - raid6_2data_recov(rbio->bbio->num_stripes, + raid6_2data_recov(rbio->real_stripes, PAGE_SIZE, faila, failb, pointers); } @@ -1850,7 +1968,7 @@ pstripe: * know they can be trusted. If this was a read reconstruction, * other endio functions will fiddle the uptodate bits */ - if (!rbio->read_rebuild) { + if (rbio->operation == BTRFS_RBIO_WRITE) { for (i = 0; i < nr_pages; i++) { if (faila != -1) { page = rbio_stripe_page(rbio, faila, i); @@ -1862,12 +1980,12 @@ pstripe: } } } - for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) { + for (stripe = 0; stripe < rbio->real_stripes; stripe++) { /* * if we're rebuilding a read, we have to use * pages from the bio list */ - if (rbio->read_rebuild && + if (rbio->operation == BTRFS_RBIO_READ_REBUILD && (stripe == faila || stripe == failb)) { page = page_in_rbio(rbio, stripe, pagenr, 0); } else { @@ -1882,9 +2000,9 @@ cleanup: kfree(pointers); cleanup_io: - - if (rbio->read_rebuild) { - if (err == 0) + if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { + if (err == 0 && + !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)) cache_rbio_pages(rbio); else clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); @@ -1893,7 +2011,13 @@ cleanup_io: } else if (err == 0) { rbio->faila = -1; rbio->failb = -1; - finish_rmw(rbio); + + if (rbio->operation == BTRFS_RBIO_WRITE) + finish_rmw(rbio); + else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) + finish_parity_scrub(rbio, 0); + else + BUG(); } else { rbio_orig_end_io(rbio, err, 0); } @@ -1917,10 +2041,10 @@ static void raid_recover_end_io(struct bio *bio, int err) set_bio_pages_uptodate(bio); bio_put(bio); - if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) + if (!atomic_dec_and_test(&rbio->stripes_pending)) return; - if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) + if (atomic_read(&rbio->error) > rbio->bbio->max_errors) rbio_orig_end_io(rbio, -EIO, 0); else __raid_recover_end_io(rbio); @@ -1937,7 +2061,6 @@ static void raid_recover_end_io(struct bio *bio, int err) static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) { int bios_to_read = 0; - struct btrfs_bio *bbio = rbio->bbio; struct bio_list bio_list; int ret; int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); @@ -1951,16 +2074,16 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) if (ret) goto cleanup; - atomic_set(&rbio->bbio->error, 0); + atomic_set(&rbio->error, 0); /* * read everything that hasn't failed. 
Thanks to the * stripe cache, it is possible that some or all of these * pages are going to be uptodate. */ - for (stripe = 0; stripe < bbio->num_stripes; stripe++) { + for (stripe = 0; stripe < rbio->real_stripes; stripe++) { if (rbio->faila == stripe || rbio->failb == stripe) { - atomic_inc(&rbio->bbio->error); + atomic_inc(&rbio->error); continue; } @@ -1990,7 +2113,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) * were up to date, or we might have no bios to read because * the devices were gone. */ - if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) { + if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) { __raid_recover_end_io(rbio); goto out; } else { @@ -2002,7 +2125,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) * the bbio may be freed once we submit the last bio. Make sure * not to touch it after that */ - atomic_set(&bbio->stripes_pending, bios_to_read); + atomic_set(&rbio->stripes_pending, bios_to_read); while (1) { bio = bio_list_pop(&bio_list); if (!bio) @@ -2021,7 +2144,7 @@ out: return 0; cleanup: - if (rbio->read_rebuild) + if (rbio->operation == BTRFS_RBIO_READ_REBUILD) rbio_orig_end_io(rbio, -EIO, 0); return -EIO; } @@ -2034,34 +2157,42 @@ cleanup: */ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, int mirror_num) + u64 stripe_len, int mirror_num, int generic_io) { struct btrfs_raid_bio *rbio; int ret; rbio = alloc_rbio(root, bbio, raid_map, stripe_len); - if (IS_ERR(rbio)) + if (IS_ERR(rbio)) { + __free_bbio_and_raid_map(bbio, raid_map, generic_io); return PTR_ERR(rbio); + } - rbio->read_rebuild = 1; + rbio->operation = BTRFS_RBIO_READ_REBUILD; bio_list_add(&rbio->bio_list, bio); rbio->bio_list_bytes = bio->bi_iter.bi_size; rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { BUG(); - kfree(raid_map); - kfree(bbio); + __free_bbio_and_raid_map(bbio, raid_map, generic_io); kfree(rbio); return -EIO; } + if (generic_io) { + btrfs_bio_counter_inc_noblocked(root->fs_info); + rbio->generic_bio_cnt = 1; + } else { + set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags); + } + /* * reconstruct from the q stripe if they are * asking for mirror 3 */ if (mirror_num == 3) - rbio->failb = bbio->num_stripes - 2; + rbio->failb = rbio->real_stripes - 2; ret = lock_stripe_add(rbio); @@ -2098,3 +2229,483 @@ static void read_rebuild_work(struct btrfs_work *work) rbio = container_of(work, struct btrfs_raid_bio, work); __raid56_parity_recover(rbio); } + +/* + * The following code is used to scrub/replace the parity stripe + * + * Note: We need make sure all the pages that add into the scrub/replace + * raid bio are correct and not be changed during the scrub/replace. That + * is those pages just hold metadata or file data with checksum. 
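The three entry points implemented below (and declared in the raid56.h hunk further down) are meant to be driven from scrub.c roughly as follows. This is a sketch under assumptions, not the series' actual caller: the bio, the bbio/raid_map pair, the dbitmap, and the page list are presumed to have been prepared by the scrub code, and error handling is elided.

static void scrub_parity_sketch(struct btrfs_root *root, struct bio *bio,
		struct btrfs_bio *bbio, u64 *raid_map, u64 stripe_len,
		struct btrfs_device *scrub_dev, unsigned long *dbitmap,
		int stripe_nsectors, struct scrub_page **pages, int npages)
{
	struct btrfs_raid_bio *rbio;
	int i;

	/* bio carries no payload; it only holds the completion handler */
	rbio = raid56_parity_alloc_scrub_rbio(root, bio, bbio, raid_map,
					      stripe_len, scrub_dev,
					      dbitmap, stripe_nsectors);
	if (!rbio)
		return;

	/* hand over the checksummed data pages we already hold */
	for (i = 0; i < npages; i++)
		raid56_parity_add_scrub_pages(rbio, pages[i]->page,
					      pages[i]->logical);

	/* reads what is missing, checks parity, rewrites bad sectors */
	raid56_parity_submit_scrub_rbio(rbio);
}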
+ */ + +struct btrfs_raid_bio * +raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, + struct btrfs_bio *bbio, u64 *raid_map, + u64 stripe_len, struct btrfs_device *scrub_dev, + unsigned long *dbitmap, int stripe_nsectors) +{ + struct btrfs_raid_bio *rbio; + int i; + + rbio = alloc_rbio(root, bbio, raid_map, stripe_len); + if (IS_ERR(rbio)) + return NULL; + bio_list_add(&rbio->bio_list, bio); + /* + * This is a special bio which is used to hold the completion handler + * and make the scrub rbio is similar to the other types + */ + ASSERT(!bio->bi_iter.bi_size); + rbio->operation = BTRFS_RBIO_PARITY_SCRUB; + + for (i = 0; i < rbio->real_stripes; i++) { + if (bbio->stripes[i].dev == scrub_dev) { + rbio->scrubp = i; + break; + } + } + + /* Now we just support the sectorsize equals to page size */ + ASSERT(root->sectorsize == PAGE_SIZE); + ASSERT(rbio->stripe_npages == stripe_nsectors); + bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors); + + return rbio; +} + +void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, + struct page *page, u64 logical) +{ + int stripe_offset; + int index; + + ASSERT(logical >= rbio->raid_map[0]); + ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] + + rbio->stripe_len * rbio->nr_data); + stripe_offset = (int)(logical - rbio->raid_map[0]); + index = stripe_offset >> PAGE_CACHE_SHIFT; + rbio->bio_pages[index] = page; +} + +/* + * We just scrub the parity that we have correct data on the same horizontal, + * so we needn't allocate all pages for all the stripes. + */ +static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) +{ + int i; + int bit; + int index; + struct page *page; + + for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) { + for (i = 0; i < rbio->real_stripes; i++) { + index = i * rbio->stripe_npages + bit; + if (rbio->stripe_pages[index]) + continue; + + page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!page) + return -ENOMEM; + rbio->stripe_pages[index] = page; + ClearPageUptodate(page); + } + } + return 0; +} + +/* + * end io function used by finish_rmw. When we finally + * get here, we've written a full stripe + */ +static void raid_write_parity_end_io(struct bio *bio, int err) +{ + struct btrfs_raid_bio *rbio = bio->bi_private; + + if (err) + fail_bio_stripe(rbio, bio); + + bio_put(bio); + + if (!atomic_dec_and_test(&rbio->stripes_pending)) + return; + + err = 0; + + if (atomic_read(&rbio->error)) + err = -EIO; + + rbio_orig_end_io(rbio, err, 0); +} + +static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, + int need_check) +{ + struct btrfs_bio *bbio = rbio->bbio; + void *pointers[rbio->real_stripes]; + DECLARE_BITMAP(pbitmap, rbio->stripe_npages); + int nr_data = rbio->nr_data; + int stripe; + int pagenr; + int p_stripe = -1; + int q_stripe = -1; + struct page *p_page = NULL; + struct page *q_page = NULL; + struct bio_list bio_list; + struct bio *bio; + int is_replace = 0; + int ret; + + bio_list_init(&bio_list); + + if (rbio->real_stripes - rbio->nr_data == 1) { + p_stripe = rbio->real_stripes - 1; + } else if (rbio->real_stripes - rbio->nr_data == 2) { + p_stripe = rbio->real_stripes - 2; + q_stripe = rbio->real_stripes - 1; + } else { + BUG(); + } + + if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) { + is_replace = 1; + bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages); + } + + /* + * Because the higher layers(scrubber) are unlikely to + * use this area of the disk again soon, so don't cache + * it. 
+ */ + clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); + + if (!need_check) + goto writeback; + + p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!p_page) + goto cleanup; + SetPageUptodate(p_page); + + if (q_stripe != -1) { + q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!q_page) { + __free_page(p_page); + goto cleanup; + } + SetPageUptodate(q_page); + } + + atomic_set(&rbio->error, 0); + + for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { + struct page *p; + void *parity; + /* first collect one page from each data stripe */ + for (stripe = 0; stripe < nr_data; stripe++) { + p = page_in_rbio(rbio, stripe, pagenr, 0); + pointers[stripe] = kmap(p); + } + + /* then add the parity stripe */ + pointers[stripe++] = kmap(p_page); + + if (q_stripe != -1) { + + /* + * raid6, add the qstripe and call the + * library function to fill in our p/q + */ + pointers[stripe++] = kmap(q_page); + + raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE, + pointers); + } else { + /* raid5 */ + memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); + run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); + } + + /* Check the scrubbing parity and repair it */ + p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); + parity = kmap(p); + if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) + memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); + else + /* Parity is right, no need to write it back */ + bitmap_clear(rbio->dbitmap, pagenr, 1); + kunmap(p); + + for (stripe = 0; stripe < rbio->real_stripes; stripe++) + kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); + } + + __free_page(p_page); + if (q_page) + __free_page(q_page); + +writeback: + /* + * time to start writing. Make bios for the changed pages of the + * scrubbed parity stripe and ignore everything else. + */ + for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { + struct page *page; + + page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); + ret = rbio_add_io_page(rbio, &bio_list, + page, rbio->scrubp, pagenr, rbio->stripe_len); + if (ret) + goto cleanup; + } + + if (!is_replace) + goto submit_write; + + for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) { + struct page *page; + + page = rbio_stripe_page(rbio, rbio->scrubp, pagenr); + ret = rbio_add_io_page(rbio, &bio_list, page, + bbio->tgtdev_map[rbio->scrubp], + pagenr, rbio->stripe_len); + if (ret) + goto cleanup; + } + +submit_write: + nr_data = bio_list_size(&bio_list); + if (!nr_data) { + /* All the parity is right */ + rbio_orig_end_io(rbio, 0, 0); + return; + } + + atomic_set(&rbio->stripes_pending, nr_data); + + while (1) { + bio = bio_list_pop(&bio_list); + if (!bio) + break; + + bio->bi_private = rbio; + bio->bi_end_io = raid_write_parity_end_io; + BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); + submit_bio(WRITE, bio); + } + return; + +cleanup: + rbio_orig_end_io(rbio, -EIO, 0); +} + +static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) +{ + if (stripe >= 0 && stripe < rbio->nr_data) + return 1; + return 0; +} + +/* + * While we're doing the parity check and repair, we could have errors + * in reading pages off the disk. This checks for errors and, if we're + * not able to read a page, it'll trigger parity reconstruction.
The + * parity scrub will be finished after we've reconstructed the failed + * stripes + */ +static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) +{ + if (atomic_read(&rbio->error) > rbio->bbio->max_errors) + goto cleanup; + + if (rbio->faila >= 0 || rbio->failb >= 0) { + int dfail = 0, failp = -1; + + if (is_data_stripe(rbio, rbio->faila)) + dfail++; + else if (is_parity_stripe(rbio->faila)) + failp = rbio->faila; + + if (is_data_stripe(rbio, rbio->failb)) + dfail++; + else if (is_parity_stripe(rbio->failb)) + failp = rbio->failb; + + /* + * Because we cannot use the parity being scrubbed to repair + * the data, our repair capability is reduced. (In the case + * of RAID5, we cannot repair anything.) + */ + if (dfail > rbio->bbio->max_errors - 1) + goto cleanup; + + /* + * If all the data is good, then only the parity is bad, so + * just repair the parity. + */ + if (dfail == 0) { + finish_parity_scrub(rbio, 0); + return; + } + + /* + * Here we have one corrupted data stripe and one corrupted + * parity on RAID6. If the corrupted parity is the one being + * scrubbed, we can luckily use the other parity to repair the + * data; otherwise we cannot repair the data stripe. + */ + if (failp != rbio->scrubp) + goto cleanup; + + __raid_recover_end_io(rbio); + } else { + finish_parity_scrub(rbio, 1); + } + return; + +cleanup: + rbio_orig_end_io(rbio, -EIO, 0); +} + +/* + * end io for the read phase of the parity scrub cycle. All the bios here are + * physical stripe bios we've read from the disk so we can recalculate the + * parity of the stripe. + * + * This will usually kick off finish_parity_scrub once all the bios are read + * in, but it may trigger parity reconstruction if we had any errors along + * the way + */ +static void raid56_parity_scrub_end_io(struct bio *bio, int err) +{ + struct btrfs_raid_bio *rbio = bio->bi_private; + + if (err) + fail_bio_stripe(rbio, bio); + else + set_bio_pages_uptodate(bio); + + bio_put(bio); + + if (!atomic_dec_and_test(&rbio->stripes_pending)) + return; + + /* + * this will normally call finish_parity_scrub to start our write + * but if there are any failed stripes we'll reconstruct + * from parity first + */ + validate_rbio_for_parity_scrub(rbio); +} + +static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) +{ + int bios_to_read = 0; + struct bio_list bio_list; + int ret; + int pagenr; + int stripe; + struct bio *bio; + + ret = alloc_rbio_essential_pages(rbio); + if (ret) + goto cleanup; + + bio_list_init(&bio_list); + + atomic_set(&rbio->error, 0); + /* + * build a list of bios to read all the missing parts of this + * stripe + */ + for (stripe = 0; stripe < rbio->real_stripes; stripe++) { + for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) { + struct page *page; + /* + * we want to find all the pages missing from + * the rbio and read them from the disk. If + * page_in_rbio finds a page in the bio list + * we don't need to read it off the stripe. + */ + page = page_in_rbio(rbio, stripe, pagenr, 1); + if (page) + continue; + + page = rbio_stripe_page(rbio, stripe, pagenr); + /* + * the bio cache may have handed us an uptodate + * page. If so, be happy and use it + */ + if (PageUptodate(page)) + continue; + + ret = rbio_add_io_page(rbio, &bio_list, page, + stripe, pagenr, rbio->stripe_len); + if (ret) + goto cleanup; + } + } + + bios_to_read = bio_list_size(&bio_list); + if (!bios_to_read) { + /* + * this can happen if others have merged with + * us; it means there is nothing left to read.
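To make the check-and-repair loop in finish_parity_scrub() above concrete: for RAID5 the expected parity is simply the XOR of the data stripes, and a page whose on-disk parity already matches gets its dbitmap bit cleared so it is never rewritten. A self-contained userspace rerun of that logic:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STRIPE 4096

/* returns 1 if the parity page must be written back, 0 if it was right */
static int parity_needs_rewrite(uint8_t *const data[], int nr_data,
				uint8_t *parity_on_disk)
{
	uint8_t expect[STRIPE];
	int i, j;

	memcpy(expect, data[0], STRIPE);
	for (i = 1; i < nr_data; i++)
		for (j = 0; j < STRIPE; j++)
			expect[j] ^= data[i][j];

	if (memcmp(parity_on_disk, expect, STRIPE) == 0)
		return 0;	/* parity is right: clear the dbitmap bit */
	memcpy(parity_on_disk, expect, STRIPE);
	return 1;		/* repaired in memory: keep bit set, write back */
}

int main(void)
{
	static uint8_t d0[STRIPE], d1[STRIPE], p[STRIPE];
	uint8_t *const data[] = { d0, d1 };

	d0[0] = 0xab;
	d1[0] = 0xcd;		/* expected parity byte: 0xab ^ 0xcd = 0x66 */
	p[0] = 0x00;		/* stale parity on disk */
	printf("rewrite needed: %d\n", parity_needs_rewrite(data, 2, p));
	return 0;
}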
+ * But if there are missing devices it may not be + * safe to do the full stripe write yet. + */ + goto finish; + } + + /* + * the bbio may be freed once we submit the last bio. Make sure + * not to touch it after that + */ + atomic_set(&rbio->stripes_pending, bios_to_read); + while (1) { + bio = bio_list_pop(&bio_list); + if (!bio) + break; + + bio->bi_private = rbio; + bio->bi_end_io = raid56_parity_scrub_end_io; + + btrfs_bio_wq_end_io(rbio->fs_info, bio, + BTRFS_WQ_ENDIO_RAID56); + + BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); + submit_bio(READ, bio); + } + /* the actual write will happen once the reads are done */ + return; + +cleanup: + rbio_orig_end_io(rbio, -EIO, 0); + return; + +finish: + validate_rbio_for_parity_scrub(rbio); +} + +static void scrub_parity_work(struct btrfs_work *work) +{ + struct btrfs_raid_bio *rbio; + + rbio = container_of(work, struct btrfs_raid_bio, work); + raid56_parity_scrub_stripe(rbio); +} + +static void async_scrub_parity(struct btrfs_raid_bio *rbio) +{ + btrfs_init_work(&rbio->work, btrfs_rmw_helper, + scrub_parity_work, NULL, NULL); + + btrfs_queue_work(rbio->fs_info->rmw_workers, + &rbio->work); +} + +void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) +{ + if (!lock_stripe_add(rbio)) + async_scrub_parity(rbio); +} diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h index ea5d73bfdfbe..31d4a157b5e3 100644 --- a/fs/btrfs/raid56.h +++ b/fs/btrfs/raid56.h @@ -39,13 +39,25 @@ static inline int nr_data_stripes(struct map_lookup *map) #define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \ ((x) == RAID6_Q_STRIPE)) +struct btrfs_raid_bio; +struct btrfs_device; + int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, int mirror_num); + struct btrfs_bio *bbio, u64 *raid_map, + u64 stripe_len, int mirror_num, int generic_io); int raid56_parity_write(struct btrfs_root *root, struct bio *bio, struct btrfs_bio *bbio, u64 *raid_map, u64 stripe_len); +struct btrfs_raid_bio * +raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, + struct btrfs_bio *bbio, u64 *raid_map, + u64 stripe_len, struct btrfs_device *scrub_dev, + unsigned long *dbitmap, int stripe_nsectors); +void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, + struct page *page, u64 logical); +void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio); + int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info); void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info); #endif diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 4325bb0111d9..f2bb13a23f86 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -63,10 +63,18 @@ struct scrub_ctx; */ #define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */ +struct scrub_recover { + atomic_t refs; + struct btrfs_bio *bbio; + u64 *raid_map; + u64 map_length; +}; + struct scrub_page { struct scrub_block *sblock; struct page *page; struct btrfs_device *dev; + struct list_head list; u64 flags; /* extent flags */ u64 generation; u64 logical; @@ -79,6 +87,8 @@ struct scrub_page { unsigned int io_error:1; }; u8 csum[BTRFS_CSUM_SIZE]; + + struct scrub_recover *recover; }; struct scrub_bio { @@ -105,14 +115,52 @@ struct scrub_block { atomic_t outstanding_pages; atomic_t ref_count; /* free mem on transition to zero */ struct scrub_ctx *sctx; + struct scrub_parity *sparity; struct { unsigned int header_error:1; unsigned int checksum_error:1; unsigned int no_io_error_seen:1; unsigned int generation_error:1; /* also sets 
header_error */ + + /* The following is for the data used to check parity */ + /* It is for the data with checksum */ + unsigned int data_corrected:1; }; }; +/* Used for the chunks with parity stripe such RAID5/6 */ +struct scrub_parity { + struct scrub_ctx *sctx; + + struct btrfs_device *scrub_dev; + + u64 logic_start; + + u64 logic_end; + + int nsectors; + + int stripe_len; + + atomic_t ref_count; + + struct list_head spages; + + /* Work of parity check and repair */ + struct btrfs_work work; + + /* Mark the parity blocks which have data */ + unsigned long *dbitmap; + + /* + * Mark the parity blocks which have data, but errors happen when + * read data or check data + */ + unsigned long *ebitmap; + + unsigned long bitmap[0]; +}; + struct scrub_wr_ctx { struct scrub_bio *wr_curr_bio; struct btrfs_device *tgtdev; @@ -196,7 +244,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int is_metadata, int have_csum, u8 *csum, u64 generation, - u16 csum_size); + u16 csum_size, int retry_failed_mirror); static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int is_metadata, int have_csum, @@ -218,6 +266,8 @@ static void scrub_block_get(struct scrub_block *sblock); static void scrub_block_put(struct scrub_block *sblock); static void scrub_page_get(struct scrub_page *spage); static void scrub_page_put(struct scrub_page *spage); +static void scrub_parity_get(struct scrub_parity *sparity); +static void scrub_parity_put(struct scrub_parity *sparity); static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, @@ -790,6 +840,20 @@ out: scrub_pending_trans_workers_dec(sctx); } +static inline void scrub_get_recover(struct scrub_recover *recover) +{ + atomic_inc(&recover->refs); +} + +static inline void scrub_put_recover(struct scrub_recover *recover) +{ + if (atomic_dec_and_test(&recover->refs)) { + kfree(recover->bbio); + kfree(recover->raid_map); + kfree(recover); + } +} + /* * scrub_handle_errored_block gets called when either verification of the * pages failed or the bio failed to read, e.g. with EIO. 
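struct scrub_parity above ends in a zero-length bitmap[] so that dbitmap and ebitmap can live back to back in one allocation. The setup code is outside this section, so the following is only a guess at how the carving works, modeled in userspace:

#include <stdio.h>
#include <stdlib.h>

struct sparity_demo {
	int nsectors;
	unsigned long *dbitmap;		/* sectors that carry data */
	unsigned long *ebitmap;		/* sectors that hit read errors */
	unsigned long bitmap[];		/* storage for both, back to back */
};

static struct sparity_demo *sparity_alloc(int nsectors)
{
	size_t bits_per_long = 8 * sizeof(unsigned long);
	size_t longs = (nsectors + bits_per_long - 1) / bits_per_long;
	struct sparity_demo *sp;

	sp = calloc(1, sizeof(*sp) + 2 * longs * sizeof(unsigned long));
	if (!sp)
		return NULL;
	sp->nsectors = nsectors;
	sp->dbitmap = sp->bitmap;
	sp->ebitmap = sp->bitmap + longs;
	return sp;
}

int main(void)
{
	struct sparity_demo *sp = sparity_alloc(16);

	if (!sp)
		return 1;
	printf("dbitmap and ebitmap share one allocation\n");
	free(sp);
	return 0;
}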
In the latter @@ -906,7 +970,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) /* build and submit the bios for the failed mirror, check checksums */ scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum, - csum, generation, sctx->csum_size); + csum, generation, sctx->csum_size, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) { @@ -920,6 +984,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) */ spin_lock(&sctx->stat_lock); sctx->stat.unverified_errors++; + sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); if (sctx->is_dev_replace) @@ -1019,7 +1084,7 @@ nodatasum_case: /* build and submit the bios, check checksums */ scrub_recheck_block(fs_info, sblock_other, is_metadata, have_csum, csum, generation, - sctx->csum_size); + sctx->csum_size, 0); if (!sblock_other->header_error && !sblock_other->checksum_error && @@ -1169,7 +1234,7 @@ nodatasum_case: */ scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum, csum, - generation, sctx->csum_size); + generation, sctx->csum_size, 1); if (!sblock_bad->header_error && !sblock_bad->checksum_error && sblock_bad->no_io_error_seen) @@ -1180,6 +1245,7 @@ nodatasum_case: corrected_error: spin_lock(&sctx->stat_lock); sctx->stat.corrected_errors++; + sblock_to_check->data_corrected = 1; spin_unlock(&sctx->stat_lock); printk_ratelimited_in_rcu(KERN_ERR "BTRFS: fixed up error at logical %llu on dev %s\n", @@ -1201,11 +1267,18 @@ out: mirror_index++) { struct scrub_block *sblock = sblocks_for_recheck + mirror_index; + struct scrub_recover *recover; int page_index; for (page_index = 0; page_index < sblock->page_count; page_index++) { sblock->pagev[page_index]->sblock = NULL; + recover = sblock->pagev[page_index]->recover; + if (recover) { + scrub_put_recover(recover); + sblock->pagev[page_index]->recover = + NULL; + } scrub_page_put(sblock->pagev[page_index]); } } @@ -1215,14 +1288,63 @@ out: return 0; } +static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) +{ + if (raid_map) { + if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE) + return 3; + else + return 2; + } else { + return (int)bbio->num_stripes; + } +} + +static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, + u64 mapped_length, + int nstripes, int mirror, + int *stripe_index, + u64 *stripe_offset) +{ + int i; + + if (raid_map) { + /* RAID5/6 */ + for (i = 0; i < nstripes; i++) { + if (raid_map[i] == RAID6_Q_STRIPE || + raid_map[i] == RAID5_P_STRIPE) + continue; + + if (logical >= raid_map[i] && + logical < raid_map[i] + mapped_length) + break; + } + + *stripe_index = i; + *stripe_offset = logical - raid_map[i]; + } else { + /* The other RAID type */ + *stripe_index = mirror; + *stripe_offset = 0; + } +} + static int scrub_setup_recheck_block(struct scrub_ctx *sctx, struct btrfs_fs_info *fs_info, struct scrub_block *original_sblock, u64 length, u64 logical, struct scrub_block *sblocks_for_recheck) { + struct scrub_recover *recover; + struct btrfs_bio *bbio; + u64 *raid_map; + u64 sublen; + u64 mapped_length; + u64 stripe_offset; + int stripe_index; int page_index; int mirror_index; + int nmirrors; int ret; /* @@ -1233,23 +1355,39 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, page_index = 0; while (length > 0) { - u64 sublen = min_t(u64, length, PAGE_SIZE); - u64 mapped_length = sublen; - struct btrfs_bio *bbio = NULL; + sublen = min_t(u64, length, PAGE_SIZE); + mapped_length = sublen; + 
bbio = NULL; + raid_map = NULL; /* * with a length of PAGE_SIZE, each returned stripe * represents one mirror */ - ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, - &mapped_length, &bbio, 0); + ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, + &mapped_length, &bbio, 0, &raid_map); if (ret || !bbio || mapped_length < sublen) { kfree(bbio); + kfree(raid_map); return -EIO; } + recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); + if (!recover) { + kfree(bbio); + kfree(raid_map); + return -ENOMEM; + } + + atomic_set(&recover->refs, 1); + recover->bbio = bbio; + recover->raid_map = raid_map; + recover->map_length = mapped_length; + BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); - for (mirror_index = 0; mirror_index < (int)bbio->num_stripes; + + nmirrors = scrub_nr_raid_mirrors(bbio, raid_map); + for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; struct scrub_page *page; @@ -1265,26 +1403,38 @@ leave_nomem: spin_lock(&sctx->stat_lock); sctx->stat.malloc_errors++; spin_unlock(&sctx->stat_lock); - kfree(bbio); + scrub_put_recover(recover); return -ENOMEM; } scrub_page_get(page); sblock->pagev[page_index] = page; page->logical = logical; - page->physical = bbio->stripes[mirror_index].physical; + + scrub_stripe_index_and_offset(logical, raid_map, + mapped_length, + bbio->num_stripes, + mirror_index, + &stripe_index, + &stripe_offset); + page->physical = bbio->stripes[stripe_index].physical + + stripe_offset; + page->dev = bbio->stripes[stripe_index].dev; + BUG_ON(page_index >= original_sblock->page_count); page->physical_for_dev_replace = original_sblock->pagev[page_index]-> physical_for_dev_replace; /* for missing devices, dev->bdev is NULL */ - page->dev = bbio->stripes[mirror_index].dev; page->mirror_num = mirror_index + 1; sblock->page_count++; page->page = alloc_page(GFP_NOFS); if (!page->page) goto leave_nomem; + + scrub_get_recover(recover); + page->recover = recover; } - kfree(bbio); + scrub_put_recover(recover); length -= sublen; logical += sublen; page_index++; @@ -1293,6 +1443,51 @@ leave_nomem: return 0; } +struct scrub_bio_ret { + struct completion event; + int error; +}; + +static void scrub_bio_wait_endio(struct bio *bio, int error) +{ + struct scrub_bio_ret *ret = bio->bi_private; + + ret->error = error; + complete(&ret->event); +} + +static inline int scrub_is_page_on_raid56(struct scrub_page *page) +{ + return page->recover && page->recover->raid_map; +} + +static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, + struct bio *bio, + struct scrub_page *page) +{ + struct scrub_bio_ret done; + int ret; + + init_completion(&done.event); + done.error = 0; + bio->bi_iter.bi_sector = page->logical >> 9; + bio->bi_private = &done; + bio->bi_end_io = scrub_bio_wait_endio; + + ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, + page->recover->raid_map, + page->recover->map_length, + page->mirror_num, 0); + if (ret) + return ret; + + wait_for_completion(&done.event); + if (done.error) + return -EIO; + + return 0; +} + /* * this function will check the on disk data for checksum errors, header * errors and read I/O errors. 
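* Pages that sit on a raid56 stripe are read through * scrub_submit_raid56_bio_wait() unless we are retrying the previously * failed mirror.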
If any I/O errors happen, the exact pages @@ -1303,7 +1498,7 @@ leave_nomem: static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int is_metadata, int have_csum, u8 *csum, u64 generation, - u16 csum_size) + u16 csum_size, int retry_failed_mirror) { int page_num; @@ -1329,11 +1524,17 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, continue; } bio->bi_bdev = page->dev->bdev; - bio->bi_iter.bi_sector = page->physical >> 9; bio_add_page(bio, page->page, PAGE_SIZE, 0); - if (btrfsic_submit_bio_wait(READ, bio)) - sblock->no_io_error_seen = 0; + if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) { + if (scrub_submit_raid56_bio_wait(fs_info, bio, page)) + sblock->no_io_error_seen = 0; + } else { + bio->bi_iter.bi_sector = page->physical >> 9; + + if (btrfsic_submit_bio_wait(READ, bio)) + sblock->no_io_error_seen = 0; + } bio_put(bio); } @@ -1486,6 +1687,13 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock) { int page_num; + /* + * This block is used for the check of the parity on the source device, + * so the data needn't be written into the destination device. + */ + if (sblock->sparity) + return; + for (page_num = 0; page_num < sblock->page_count; page_num++) { int ret; @@ -1867,6 +2075,9 @@ static void scrub_block_put(struct scrub_block *sblock) if (atomic_dec_and_test(&sblock->ref_count)) { int i; + if (sblock->sparity) + scrub_parity_put(sblock->sparity); + for (i = 0; i < sblock->page_count; i++) scrub_page_put(sblock->pagev[i]); kfree(sblock); @@ -2124,9 +2335,51 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work) scrub_pending_bio_dec(sctx); } +static inline void __scrub_mark_bitmap(struct scrub_parity *sparity, + unsigned long *bitmap, + u64 start, u64 len) +{ + int offset; + int nsectors; + int sectorsize = sparity->sctx->dev_root->sectorsize; + + if (len >= sparity->stripe_len) { + bitmap_set(bitmap, 0, sparity->nsectors); + return; + } + + start -= sparity->logic_start; + offset = (int)do_div(start, sparity->stripe_len); + offset /= sectorsize; + nsectors = (int)len / sectorsize; + + if (offset + nsectors <= sparity->nsectors) { + bitmap_set(bitmap, offset, nsectors); + return; + } + + bitmap_set(bitmap, offset, sparity->nsectors - offset); + bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset)); +} + +static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity, + u64 start, u64 len) +{ + __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len); +} + +static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity, + u64 start, u64 len) +{ + __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len); +} + static void scrub_block_complete(struct scrub_block *sblock) { + int corrupted = 0; + if (!sblock->no_io_error_seen) { + corrupted = 1; scrub_handle_errored_block(sblock); } else { /* @@ -2134,9 +2387,19 @@ static void scrub_block_complete(struct scrub_block *sblock) * dev replace case, otherwise write here in dev replace * case. 
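* For a parity scrub, sectors that remain bad after this step are * recorded in the parity error bitmap below.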
*/ - if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace) + corrupted = scrub_checksum(sblock); + if (!corrupted && sblock->sctx->is_dev_replace) scrub_write_block_to_dev_replace(sblock); } + + if (sblock->sparity && corrupted && !sblock->data_corrected) { + u64 start = sblock->pagev[0]->logical; + u64 end = sblock->pagev[sblock->page_count - 1]->logical + + PAGE_SIZE; + + scrub_parity_mark_sectors_error(sblock->sparity, + start, end - start); + } } static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len, @@ -2228,6 +2491,132 @@ behind_scrub_pages: return 0; } +static int scrub_pages_for_parity(struct scrub_parity *sparity, + u64 logical, u64 len, + u64 physical, struct btrfs_device *dev, + u64 flags, u64 gen, int mirror_num, u8 *csum) +{ + struct scrub_ctx *sctx = sparity->sctx; + struct scrub_block *sblock; + int index; + + sblock = kzalloc(sizeof(*sblock), GFP_NOFS); + if (!sblock) { + spin_lock(&sctx->stat_lock); + sctx->stat.malloc_errors++; + spin_unlock(&sctx->stat_lock); + return -ENOMEM; + } + + /* one ref inside this function, plus one for each page added to + * a bio later on */ + atomic_set(&sblock->ref_count, 1); + sblock->sctx = sctx; + sblock->no_io_error_seen = 1; + sblock->sparity = sparity; + scrub_parity_get(sparity); + + for (index = 0; len > 0; index++) { + struct scrub_page *spage; + u64 l = min_t(u64, len, PAGE_SIZE); + + spage = kzalloc(sizeof(*spage), GFP_NOFS); + if (!spage) { +leave_nomem: + spin_lock(&sctx->stat_lock); + sctx->stat.malloc_errors++; + spin_unlock(&sctx->stat_lock); + scrub_block_put(sblock); + return -ENOMEM; + } + BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK); + /* For scrub block */ + scrub_page_get(spage); + sblock->pagev[index] = spage; + /* For scrub parity */ + scrub_page_get(spage); + list_add_tail(&spage->list, &sparity->spages); + spage->sblock = sblock; + spage->dev = dev; + spage->flags = flags; + spage->generation = gen; + spage->logical = logical; + spage->physical = physical; + spage->mirror_num = mirror_num; + if (csum) { + spage->have_csum = 1; + memcpy(spage->csum, csum, sctx->csum_size); + } else { + spage->have_csum = 0; + } + sblock->page_count++; + spage->page = alloc_page(GFP_NOFS); + if (!spage->page) + goto leave_nomem; + len -= l; + logical += l; + physical += l; + } + + WARN_ON(sblock->page_count == 0); + for (index = 0; index < sblock->page_count; index++) { + struct scrub_page *spage = sblock->pagev[index]; + int ret; + + ret = scrub_add_page_to_rd_bio(sctx, spage); + if (ret) { + scrub_block_put(sblock); + return ret; + } + } + + /* last one frees, either here or in bio completion for last page */ + scrub_block_put(sblock); + return 0; +} + +static int scrub_extent_for_parity(struct scrub_parity *sparity, + u64 logical, u64 len, + u64 physical, struct btrfs_device *dev, + u64 flags, u64 gen, int mirror_num) +{ + struct scrub_ctx *sctx = sparity->sctx; + int ret; + u8 csum[BTRFS_CSUM_SIZE]; + u32 blocksize; + + if (flags & BTRFS_EXTENT_FLAG_DATA) { + blocksize = sctx->sectorsize; + } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { + blocksize = sctx->nodesize; + } else { + blocksize = sctx->sectorsize; + WARN_ON(1); + } + + while (len) { + u64 l = min_t(u64, len, blocksize); + int have_csum = 0; + + if (flags & BTRFS_EXTENT_FLAG_DATA) { + /* push csums to sbio */ + have_csum = scrub_find_csum(sctx, logical, l, csum); + if (have_csum == 0) + goto skip; + } + ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, + flags, gen, mirror_num, + have_csum ? 
csum : NULL); +skip: + if (ret) + return ret; + len -= l; + logical += l; + physical += l; + } + return 0; +} + /* * Given a physical address, this will calculate its * logical offset. If this is a parity stripe, it will return @@ -2236,7 +2625,8 @@ behind_scrub_pages: * return 0 if it is a data stripe, 1 if it is a parity stripe. */ static int get_raid56_logic_offset(u64 physical, int num, - struct map_lookup *map, u64 *offset) + struct map_lookup *map, u64 *offset, + u64 *stripe_start) { int i; int j = 0; @@ -2247,6 +2637,9 @@ static int get_raid56_logic_offset(u64 physical, int num, last_offset = (physical - map->stripes[num].physical) * nr_data_stripes(map); + if (stripe_start) + *stripe_start = last_offset; + *offset = last_offset; for (i = 0; i < nr_data_stripes(map); i++) { *offset = last_offset + i * map->stripe_len; @@ -2269,13 +2662,330 @@ static int get_raid56_logic_offset(u64 physical, int num, return 1; } +static void scrub_free_parity(struct scrub_parity *sparity) +{ + struct scrub_ctx *sctx = sparity->sctx; + struct scrub_page *curr, *next; + int nbits; + + nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors); + if (nbits) { + spin_lock(&sctx->stat_lock); + sctx->stat.read_errors += nbits; + sctx->stat.uncorrectable_errors += nbits; + spin_unlock(&sctx->stat_lock); + } + + list_for_each_entry_safe(curr, next, &sparity->spages, list) { + list_del_init(&curr->list); + scrub_page_put(curr); + } + + kfree(sparity); +} + +static void scrub_parity_bio_endio(struct bio *bio, int error) +{ + struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; + struct scrub_ctx *sctx = sparity->sctx; + + if (error) + bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, + sparity->nsectors); + + scrub_free_parity(sparity); + scrub_pending_bio_dec(sctx); + bio_put(bio); +} + +static void scrub_parity_check_and_repair(struct scrub_parity *sparity) +{ + struct scrub_ctx *sctx = sparity->sctx; + struct bio *bio; + struct btrfs_raid_bio *rbio; + struct scrub_page *spage; + struct btrfs_bio *bbio = NULL; + u64 *raid_map = NULL; + u64 length; + int ret; + + if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap, + sparity->nsectors)) + goto out; + + length = sparity->logic_end - sparity->logic_start + 1; + ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, + sparity->logic_start, + &length, &bbio, 0, &raid_map); + if (ret || !bbio || !raid_map) + goto bbio_out; + + bio = btrfs_io_bio_alloc(GFP_NOFS, 0); + if (!bio) + goto bbio_out; + + bio->bi_iter.bi_sector = sparity->logic_start >> 9; + bio->bi_private = sparity; + bio->bi_end_io = scrub_parity_bio_endio; + + rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, + raid_map, length, + sparity->scrub_dev, + sparity->dbitmap, + sparity->nsectors); + if (!rbio) + goto rbio_out; + + list_for_each_entry(spage, &sparity->spages, list) + raid56_parity_add_scrub_pages(rbio, spage->page, + spage->logical); + + scrub_pending_bio_inc(sctx); + raid56_parity_submit_scrub_rbio(rbio); + return; + +rbio_out: + bio_put(bio); +bbio_out: + kfree(bbio); + kfree(raid_map); + bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, + sparity->nsectors); + spin_lock(&sctx->stat_lock); + sctx->stat.malloc_errors++; + spin_unlock(&sctx->stat_lock); +out: + scrub_free_parity(sparity); +} + +static inline int scrub_calc_parity_bitmap_len(int nsectors) +{ + return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8); +} + +static void scrub_parity_get(struct scrub_parity *sparity) +{ +
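/* + * One reference is held by scrub_raid56_parity() and one by each + * sblock attached in scrub_pages_for_parity(); the final + * scrub_parity_put() kicks off scrub_parity_check_and_repair(). + */ +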
atomic_inc(&sparity->ref_count); +} + +static void scrub_parity_put(struct scrub_parity *sparity) +{ + if (!atomic_dec_and_test(&sparity->ref_count)) + return; + + scrub_parity_check_and_repair(sparity); +} + +static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, + struct map_lookup *map, + struct btrfs_device *sdev, + struct btrfs_path *path, + u64 logic_start, + u64 logic_end) +{ + struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; + struct btrfs_root *root = fs_info->extent_root; + struct btrfs_root *csum_root = fs_info->csum_root; + struct btrfs_extent_item *extent; + u64 flags; + int ret; + int slot; + struct extent_buffer *l; + struct btrfs_key key; + u64 generation; + u64 extent_logical; + u64 extent_physical; + u64 extent_len; + struct btrfs_device *extent_dev; + struct scrub_parity *sparity; + int nsectors; + int bitmap_len; + int extent_mirror_num; + int stop_loop = 0; + + nsectors = map->stripe_len / root->sectorsize; + bitmap_len = scrub_calc_parity_bitmap_len(nsectors); + sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, + GFP_NOFS); + if (!sparity) { + spin_lock(&sctx->stat_lock); + sctx->stat.malloc_errors++; + spin_unlock(&sctx->stat_lock); + return -ENOMEM; + } + + sparity->stripe_len = map->stripe_len; + sparity->nsectors = nsectors; + sparity->sctx = sctx; + sparity->scrub_dev = sdev; + sparity->logic_start = logic_start; + sparity->logic_end = logic_end; + atomic_set(&sparity->ref_count, 1); + INIT_LIST_HEAD(&sparity->spages); + sparity->dbitmap = sparity->bitmap; + sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; + + ret = 0; + while (logic_start < logic_end) { + if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) + key.type = BTRFS_METADATA_ITEM_KEY; + else + key.type = BTRFS_EXTENT_ITEM_KEY; + key.objectid = logic_start; + key.offset = (u64)-1; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + + if (ret > 0) { + ret = btrfs_previous_extent_item(root, path, 0); + if (ret < 0) + goto out; + if (ret > 0) { + btrfs_release_path(path); + ret = btrfs_search_slot(NULL, root, &key, + path, 0, 0); + if (ret < 0) + goto out; + } + } + + stop_loop = 0; + while (1) { + u64 bytes; + + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + stop_loop = 1; + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.type == BTRFS_METADATA_ITEM_KEY) + bytes = root->nodesize; + else + bytes = key.offset; + + if (key.objectid + bytes <= logic_start) + goto next; + + if (key.type != BTRFS_EXTENT_ITEM_KEY && + key.type != BTRFS_METADATA_ITEM_KEY) + goto next; + + if (key.objectid > logic_end) { + stop_loop = 1; + break; + } + + while (key.objectid >= logic_start + map->stripe_len) + logic_start += map->stripe_len; + + extent = btrfs_item_ptr(l, slot, + struct btrfs_extent_item); + flags = btrfs_extent_flags(l, extent); + generation = btrfs_extent_generation(l, extent); + + if (key.objectid < logic_start && + (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) { + btrfs_err(fs_info, + "scrub: tree block %llu spanning stripes, ignored. 
logical=%llu", + key.objectid, logic_start); + goto next; + } +again: + extent_logical = key.objectid; + extent_len = bytes; + + if (extent_logical < logic_start) { + extent_len -= logic_start - extent_logical; + extent_logical = logic_start; + } + + if (extent_logical + extent_len > + logic_start + map->stripe_len) + extent_len = logic_start + map->stripe_len - + extent_logical; + + scrub_parity_mark_sectors_data(sparity, extent_logical, + extent_len); + + scrub_remap_extent(fs_info, extent_logical, + extent_len, &extent_physical, + &extent_dev, + &extent_mirror_num); + + ret = btrfs_lookup_csums_range(csum_root, + extent_logical, + extent_logical + extent_len - 1, + &sctx->csum_list, 1); + if (ret) + goto out; + + ret = scrub_extent_for_parity(sparity, extent_logical, + extent_len, + extent_physical, + extent_dev, flags, + generation, + extent_mirror_num); + if (ret) + goto out; + + scrub_free_csums(sctx); + if (extent_logical + extent_len < + key.objectid + bytes) { + logic_start += map->stripe_len; + + if (logic_start >= logic_end) { + stop_loop = 1; + break; + } + + if (logic_start < key.objectid + bytes) { + cond_resched(); + goto again; + } + } +next: + path->slots[0]++; + } + + btrfs_release_path(path); + + if (stop_loop) + break; + + logic_start += map->stripe_len; + } +out: + if (ret < 0) + scrub_parity_mark_sectors_error(sparity, logic_start, + logic_end - logic_start + 1); + scrub_parity_put(sparity); + scrub_submit(sctx); + mutex_lock(&sctx->wr_ctx.wr_lock); + scrub_wr_submit(sctx); + mutex_unlock(&sctx->wr_ctx.wr_lock); + + btrfs_release_path(path); + return ret < 0 ? ret : 0; +} + static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, struct map_lookup *map, struct btrfs_device *scrub_dev, int num, u64 base, u64 length, int is_dev_replace) { - struct btrfs_path *path; + struct btrfs_path *path, *ppath; struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; struct btrfs_root *root = fs_info->extent_root; struct btrfs_root *csum_root = fs_info->csum_root; @@ -2302,6 +3012,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, u64 extent_logical; u64 extent_physical; u64 extent_len; + u64 stripe_logical; + u64 stripe_end; struct btrfs_device *extent_dev; int extent_mirror_num; int stop_loop = 0; @@ -2327,7 +3039,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, mirror_num = num % map->num_stripes + 1; } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { - get_raid56_logic_offset(physical, num, map, &offset); + get_raid56_logic_offset(physical, num, map, &offset, NULL); increment = map->stripe_len * nr_data_stripes(map); mirror_num = 1; } else { @@ -2339,6 +3051,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, if (!path) return -ENOMEM; + ppath = btrfs_alloc_path(); + if (!ppath) { + btrfs_free_path(ppath); + return -ENOMEM; + } + /* * work on commit root. The related disk blocks are static as * long as COW is applied. 
This means it is safe to rewrite @@ -2357,7 +3075,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { get_raid56_logic_offset(physical_end, num, - map, &logic_end); + map, &logic_end, NULL); logic_end += base; } else { logic_end = logical + increment * nstripes; @@ -2404,10 +3122,18 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { ret = get_raid56_logic_offset(physical, num, - map, &logical); + map, &logical, &stripe_logical); logical += base; - if (ret) + if (ret) { + stripe_logical += base; + stripe_end = stripe_logical + increment - 1; + ret = scrub_raid56_parity(sctx, map, scrub_dev, + ppath, stripe_logical, + stripe_end); + if (ret) + goto out; goto skip; + } } /* * canceled? @@ -2558,13 +3284,25 @@ again: * loop until we find next data stripe * or we have finished all stripes. */ - do { - physical += map->stripe_len; - ret = get_raid56_logic_offset( - physical, num, - map, &logical); - logical += base; - } while (physical < physical_end && ret); +loop: + physical += map->stripe_len; + ret = get_raid56_logic_offset(physical, + num, map, &logical, + &stripe_logical); + logical += base; + + if (ret && physical < physical_end) { + stripe_logical += base; + stripe_end = stripe_logical + + increment - 1; + ret = scrub_raid56_parity(sctx, + map, scrub_dev, ppath, + stripe_logical, + stripe_end); + if (ret) + goto out; + goto loop; + } } else { physical += map->stripe_len; logical += increment; @@ -2605,6 +3343,7 @@ out: blk_finish_plug(&plug); btrfs_free_path(path); + btrfs_free_path(ppath); return ret < 0 ? ret : 0; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index ff2b35114972..0144790e296e 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4879,13 +4879,15 @@ static inline int parity_smaller(u64 a, u64 b) static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) { struct btrfs_bio_stripe s; + int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; int i; u64 l; int again = 1; + int m; while (again) { again = 0; - for (i = 0; i < bbio->num_stripes - 1; i++) { + for (i = 0; i < real_stripes - 1; i++) { if (parity_smaller(raid_map[i], raid_map[i+1])) { s = bbio->stripes[i]; l = raid_map[i]; @@ -4893,6 +4895,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) raid_map[i] = raid_map[i+1]; bbio->stripes[i+1] = s; raid_map[i+1] = l; + + if (bbio->tgtdev_map) { + m = bbio->tgtdev_map[i]; + bbio->tgtdev_map[i] = + bbio->tgtdev_map[i + 1]; + bbio->tgtdev_map[i + 1] = m; + } + again = 1; } } @@ -4921,6 +4931,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, int ret = 0; int num_stripes; int max_errors = 0; + int tgtdev_indexes = 0; struct btrfs_bio *bbio = NULL; struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; int dev_replace_is_ongoing = 0; @@ -5159,15 +5170,14 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, BTRFS_BLOCK_GROUP_RAID6)) { u64 tmp; - if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1) - && raid_map_ret) { + if (raid_map_ret && + ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || + mirror_num > 1)) { int i, rot; /* push stripe_nr back to the start of the full stripe */ stripe_nr = raid56_full_stripe_start; - do_div(stripe_nr, stripe_len); - - stripe_index = do_div(stripe_nr, nr_data_stripes(map)); + do_div(stripe_nr, stripe_len * nr_data_stripes(map)); /* RAID[56] write or recovery.
Return all stripes */ num_stripes = map->num_stripes; @@ -5233,14 +5243,19 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, num_alloc_stripes <<= 1; if (rw & REQ_GET_READ_MIRRORS) num_alloc_stripes++; + tgtdev_indexes = num_stripes; } - bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS); + + bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), + GFP_NOFS); if (!bbio) { kfree(raid_map); ret = -ENOMEM; goto out; } atomic_set(&bbio->error, 0); + if (dev_replace_is_ongoing) + bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); if (rw & REQ_DISCARD) { int factor = 0; @@ -5325,6 +5340,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) max_errors = btrfs_chunk_max_errors(map); + tgtdev_indexes = 0; if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && dev_replace->tgtdev != NULL) { int index_where_to_add; @@ -5353,8 +5369,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, new->physical = old->physical; new->length = old->length; new->dev = dev_replace->tgtdev; + bbio->tgtdev_map[i] = index_where_to_add; index_where_to_add++; max_errors++; + tgtdev_indexes++; } } num_stripes = index_where_to_add; @@ -5400,7 +5418,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, tgtdev_stripe->length = bbio->stripes[index_srcdev].length; tgtdev_stripe->dev = dev_replace->tgtdev; + bbio->tgtdev_map[index_srcdev] = num_stripes; + tgtdev_indexes++; num_stripes++; } } @@ -5410,6 +5430,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio->num_stripes = num_stripes; bbio->max_errors = max_errors; bbio->mirror_num = mirror_num; + bbio->num_tgtdevs = tgtdev_indexes; /* * this is the case that REQ_READ && dev_replace_is_ongoing && @@ -5441,6 +5462,16 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, mirror_num, NULL); } +/* For Scrub/replace */ +int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, + u64 logical, u64 *length, + struct btrfs_bio **bbio_ret, int mirror_num, + u64 **raid_map_ret) +{ + return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, + mirror_num, raid_map_ret); +} + int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, u64 chunk_start, u64 physical, u64 devid, u64 **logical, int *naddrs, int *stripe_len) @@ -5810,12 +5841,9 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, } else { ret = raid56_parity_recover(root, bio, bbio, raid_map, map_length, - mirror_num); + mirror_num, 1); } - /* - * FIXME, replace dosen't support raid56 yet, please fix - * it in the future. 
- */ + btrfs_bio_counter_dec(root->fs_info); return ret; } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 637bcfadadb2..d6fe73c0f4a2 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -292,7 +292,7 @@ struct btrfs_bio_stripe { struct btrfs_bio; typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err); -#define BTRFS_BIO_ORIG_BIO_SUBMITTED 0x1 +#define BTRFS_BIO_ORIG_BIO_SUBMITTED (1 << 0) struct btrfs_bio { atomic_t stripes_pending; @@ -305,6 +305,8 @@ struct btrfs_bio { int max_errors; int num_stripes; int mirror_num; + int num_tgtdevs; + int *tgtdev_map; struct btrfs_bio_stripe stripes[]; }; @@ -387,12 +389,18 @@ struct btrfs_balance_control { int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, u64 end, u64 *length); -#define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \ - (sizeof(struct btrfs_bio_stripe) * (n))) +#define btrfs_bio_size(total_stripes, real_stripes) \ + (sizeof(struct btrfs_bio) + \ + (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ + (sizeof(int) * (real_stripes))) int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num); +int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, + u64 logical, u64 *length, + struct btrfs_bio **bbio_ret, int mirror_num, + u64 **raid_map_ret); int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, u64 chunk_start, u64 physical, u64 devid, u64 **logical, int *naddrs, int *stripe_len); diff --git a/fs/dcache.c b/fs/dcache.c index 3ffef7f4e5cd..5bc72b07fde2 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -778,6 +778,7 @@ restart: struct dentry *parent = lock_parent(dentry); if (likely(!dentry->d_lockref.count)) { __dentry_kill(dentry); + dput(parent); goto restart; } if (parent) diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index fe839b915116..d67a16f2a45d 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -174,27 +174,6 @@ struct iso9660_options{ * Compute the hash for the isofs name corresponding to the dentry. */ static int -isofs_hash_common(struct qstr *qstr, int ms) -{ - const char *name; - int len; - - len = qstr->len; - name = qstr->name; - if (ms) { - while (len && name[len-1] == '.') - len--; - } - - qstr->hash = full_name_hash(name, len); - - return 0; -} - -/* - * Compute the hash for the isofs name corresponding to the dentry. - */ -static int isofs_hashi_common(struct qstr *qstr, int ms) { const char *name; @@ -263,6 +242,27 @@ isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, } #ifdef CONFIG_JOLIET +/* + * Compute the hash for the isofs name corresponding to the dentry. + */ +static int +isofs_hash_common(struct qstr *qstr, int ms) +{ + const char *name; + int len; + + len = qstr->len; + name = qstr->name; + if (ms) { + while (len && name[len-1] == '.') + len--; + } + + qstr->hash = full_name_hash(name, len); + + return 0; +} + static int isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr) { diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig index e60125976873..34355818a2e0 100644 --- a/fs/overlayfs/Kconfig +++ b/fs/overlayfs/Kconfig @@ -1,4 +1,4 @@ -config OVERLAYFS_FS +config OVERLAY_FS tristate "Overlay filesystem support" help An overlay filesystem combines two filesystems - an 'upper' filesystem diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile index 8f91889480d0..900daed3e91d 100644 --- a/fs/overlayfs/Makefile +++ b/fs/overlayfs/Makefile @@ -2,6 +2,6 @@ # Makefile for the overlay filesystem. 
# -obj-$(CONFIG_OVERLAYFS_FS) += overlayfs.o +obj-$(CONFIG_OVERLAY_FS) += overlay.o -overlayfs-objs := super.o inode.o dir.o readdir.o copy_up.o +overlay-objs := super.o inode.o dir.o readdir.o copy_up.o diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 15cd91ad9940..8ffc4b980f1b 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -284,8 +284,7 @@ out: return ERR_PTR(err); } -static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry, - enum ovl_path_type type) +static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry) { int err; struct dentry *ret = NULL; @@ -294,8 +293,17 @@ static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry, err = ovl_check_empty_dir(dentry, &list); if (err) ret = ERR_PTR(err); - else if (type == OVL_PATH_MERGE) - ret = ovl_clear_empty(dentry, &list); + else { + /* + * If no upperdentry then skip clearing whiteouts. + * + * Can race with copy-up, since we don't hold the upperdir + * mutex. Doesn't matter, since copy-up can't create a + * non-empty directory from an empty one. + */ + if (ovl_dentry_upper(dentry)) + ret = ovl_clear_empty(dentry, &list); + } ovl_cache_free(&list); @@ -487,8 +495,7 @@ out: return err; } -static int ovl_remove_and_whiteout(struct dentry *dentry, - enum ovl_path_type type, bool is_dir) +static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) { struct dentry *workdir = ovl_workdir(dentry); struct inode *wdir = workdir->d_inode; @@ -500,7 +507,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, int err; if (is_dir) { - opaquedir = ovl_check_empty_and_clear(dentry, type); + opaquedir = ovl_check_empty_and_clear(dentry); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) goto out; @@ -515,9 +522,10 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, if (IS_ERR(whiteout)) goto out_unlock; - if (type == OVL_PATH_LOWER) { + upper = ovl_dentry_upper(dentry); + if (!upper) { upper = lookup_one_len(dentry->d_name.name, upperdir, - dentry->d_name.len); + dentry->d_name.len); err = PTR_ERR(upper); if (IS_ERR(upper)) goto kill_whiteout; @@ -529,7 +537,6 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, } else { int flags = 0; - upper = ovl_dentry_upper(dentry); if (opaquedir) upper = opaquedir; err = -ESTALE; @@ -648,7 +655,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) cap_raise(override_cred->cap_effective, CAP_CHOWN); old_cred = override_creds(override_cred); - err = ovl_remove_and_whiteout(dentry, type, is_dir); + err = ovl_remove_and_whiteout(dentry, is_dir); revert_creds(old_cred); put_cred(override_cred); @@ -781,7 +788,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, } if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) { - opaquedir = ovl_check_empty_and_clear(new, new_type); + opaquedir = ovl_check_empty_and_clear(new); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { opaquedir = NULL; diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index af2d18c9fcee..07d74b24913b 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -235,26 +235,36 @@ out: return err; } +static bool ovl_need_xattr_filter(struct dentry *dentry, + enum ovl_path_type type) +{ + return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode); +} + ssize_t ovl_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) { - if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && - ovl_is_private_xattr(name)) + struct path realpath; + enum ovl_path_type type = 
ovl_path_real(dentry, &realpath); + + if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) return -ENODATA; - return vfs_getxattr(ovl_dentry_real(dentry), name, value, size); + return vfs_getxattr(realpath.dentry, name, value, size); } ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) { + struct path realpath; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); ssize_t res; int off; - res = vfs_listxattr(ovl_dentry_real(dentry), list, size); + res = vfs_listxattr(realpath.dentry, list, size); if (res <= 0 || size == 0) return res; - if (ovl_path_type(dentry->d_parent) != OVL_PATH_MERGE) + if (!ovl_need_xattr_filter(dentry, type)) return res; /* filter out private xattrs */ @@ -279,17 +289,16 @@ int ovl_removexattr(struct dentry *dentry, const char *name) { int err; struct path realpath; - enum ovl_path_type type; + enum ovl_path_type type = ovl_path_real(dentry, &realpath); err = ovl_want_write(dentry); if (err) goto out; - if (ovl_path_type(dentry->d_parent) == OVL_PATH_MERGE && - ovl_is_private_xattr(name)) + err = -ENODATA; + if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) goto out_drop_write; - type = ovl_path_real(dentry, &realpath); if (type == OVL_PATH_LOWER) { err = vfs_getxattr(realpath.dentry, name, NULL, 0); if (err < 0) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 2a7ef4f8e2a6..ab1e3dcbed95 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -274,11 +274,11 @@ static int ovl_dir_mark_whiteouts(struct dentry *dir, return 0; } -static inline int ovl_dir_read_merged(struct path *upperpath, - struct path *lowerpath, - struct list_head *list) +static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) { int err; + struct path lowerpath; + struct path upperpath; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_merge, .list = list, @@ -286,25 +286,28 @@ static inline int ovl_dir_read_merged(struct path *upperpath, .is_merge = false, }; - if (upperpath->dentry) { - err = ovl_dir_read(upperpath, &rdd); + ovl_path_lower(dentry, &lowerpath); + ovl_path_upper(dentry, &upperpath); + + if (upperpath.dentry) { + err = ovl_dir_read(&upperpath, &rdd); if (err) goto out; - if (lowerpath->dentry) { - err = ovl_dir_mark_whiteouts(upperpath->dentry, &rdd); + if (lowerpath.dentry) { + err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd); if (err) goto out; } } - if (lowerpath->dentry) { + if (lowerpath.dentry) { /* * Insert lowerpath entries before upperpath ones, this allows * offsets to be reasonably constant */ list_add(&rdd.middle, rdd.list); rdd.is_merge = true; - err = ovl_dir_read(lowerpath, &rdd); + err = ovl_dir_read(&lowerpath, &rdd); list_del(&rdd.middle); } out: @@ -329,8 +332,6 @@ static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) { int res; - struct path lowerpath; - struct path upperpath; struct ovl_dir_cache *cache; cache = ovl_dir_cache(dentry); @@ -347,10 +348,7 @@ static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) cache->refcount = 1; INIT_LIST_HEAD(&cache->entries); - ovl_path_lower(dentry, &lowerpath); - ovl_path_upper(dentry, &upperpath); - - res = ovl_dir_read_merged(&upperpath, &lowerpath, &cache->entries); + res = ovl_dir_read_merged(dentry, &cache->entries); if (res) { ovl_cache_free(&cache->entries); kfree(cache); @@ -452,10 +450,10 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, /* * Need to check if we started out being a 
lower dir, but got copied up */ - if (!od->is_upper && ovl_path_type(dentry) == OVL_PATH_MERGE) { + if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) { struct inode *inode = file_inode(file); - realfile =lockless_dereference(od->upperfile); + realfile = lockless_dereference(od->upperfile); if (!realfile) { struct path upperpath; @@ -538,14 +536,9 @@ const struct file_operations ovl_dir_operations = { int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list) { int err; - struct path lowerpath; - struct path upperpath; struct ovl_cache_entry *p; - ovl_path_upper(dentry, &upperpath); - ovl_path_lower(dentry, &lowerpath); - - err = ovl_dir_read_merged(&upperpath, &lowerpath, list); + err = ovl_dir_read_merged(dentry, list); if (err) return err; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 08b704cebfc4..f16d318b71f8 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -24,7 +24,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>"); MODULE_DESCRIPTION("Overlay filesystem"); MODULE_LICENSE("GPL"); -#define OVERLAYFS_SUPER_MAGIC 0x794c764f +#define OVERLAYFS_SUPER_MAGIC 0x794c7630 struct ovl_config { char *lowerdir; @@ -84,12 +84,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry) static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) { - struct dentry *upperdentry = ACCESS_ONCE(oe->__upperdentry); - /* - * Make sure to order reads to upperdentry wrt ovl_dentry_update() - */ - smp_read_barrier_depends(); - return upperdentry; + return lockless_dereference(oe->__upperdentry); } void ovl_path_upper(struct dentry *dentry, struct path *path) @@ -462,11 +457,34 @@ static const match_table_t ovl_tokens = { {OPT_ERR, NULL} }; +static char *ovl_next_opt(char **s) +{ + char *sbegin = *s; + char *p; + + if (sbegin == NULL) + return NULL; + + for (p = sbegin; *p; p++) { + if (*p == '\\') { + p++; + if (!*p) + break; + } else if (*p == ',') { + *p = '\0'; + *s = p + 1; + return sbegin; + } + } + *s = NULL; + return sbegin; +} + static int ovl_parse_opt(char *opt, struct ovl_config *config) { char *p; - while ((p = strsep(&opt, ",")) != NULL) { + while ((p = ovl_next_opt(&opt)) != NULL) { int token; substring_t args[MAX_OPT_ARGS]; @@ -554,15 +572,34 @@ out_dput: goto out_unlock; } +static void ovl_unescape(char *s) +{ + char *d = s; + + for (;; s++, d++) { + if (*s == '\\') + s++; + *d = *s; + if (!*s) + break; + } +} + static int ovl_mount_dir(const char *name, struct path *path) { int err; + char *tmp = kstrdup(name, GFP_KERNEL); + + if (!tmp) + return -ENOMEM; - err = kern_path(name, LOOKUP_FOLLOW, path); + ovl_unescape(tmp); + err = kern_path(tmp, LOOKUP_FOLLOW, path); if (err) { - pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); + pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err); err = -EINVAL; } + kfree(tmp); return err; } @@ -776,11 +813,11 @@ static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags, static struct file_system_type ovl_fs_type = { .owner = THIS_MODULE, - .name = "overlayfs", + .name = "overlay", .mount = ovl_mount, .kill_sb = kill_anon_super, }; -MODULE_ALIAS_FS("overlayfs"); +MODULE_ALIAS_FS("overlay"); static int __init ovl_init(void) { diff --git a/include/linux/bitops.h b/include/linux/bitops.h index be5fd38bd5a0..5d858e02997f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -18,8 +18,11 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 
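* The replacement definitions below avoid shifting by the full width of * the type (undefined behaviour), so GENMASK(BITS_PER_LONG - 1, 0) and * GENMASK_ULL(63, 0) expand to all-ones masks.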
*/ -#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) -#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) + +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 6992afc6ba7f..b37ea95bc348 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -99,6 +99,12 @@ inval_skb: return 1; } +static inline bool can_is_canfd_skb(const struct sk_buff *skb) +{ + /* the CAN specific type of skb is identified by its data length */ + return skb->len == CANFD_MTU; +} + /* get data length from can_dlc with sanitized can_dlc */ u8 can_dlc2len(u8 can_dlc); diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 0068708161ff..0a21fbefdfbe 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev) static __inline__ __be32 inet_make_mask(int logmask) { if (logmask) - return htonl(~((1<<(32-logmask))-1)); + return htonl(~((1U<<(32-logmask))-1)); return 0; } diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 8422b4ed6882..b9376cd5a187 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) return kstat_cpu(cpu).irqs_sum; } -/* - * Lock/unlock the current runqueue - to extract task statistics: - */ -extern unsigned long long task_delta_exec(struct task_struct *); - extern void account_user_time(struct task_struct *, cputime_t, cputime_t); extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); extern void account_steal_time(cputime_t); diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index d5c89e0dd0e6..51ce60c35f4c 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, /* paired with smp_store_release() in percpu_ref_reinit() */ smp_read_barrier_depends(); - if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) + /* + * Theoretically, the following could test just ATOMIC; however, + * then we'd have to mask off DEAD separately as DEAD may be + * visible without ATOMIC if we race with percpu_ref_kill(). DEAD + * implies ATOMIC anyway. Test them together. 
+ */ + if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD)) return false; *percpu_countp = (unsigned long __percpu *)percpu_ptr; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 845c596bf594..3ae969e3acf0 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -396,14 +396,12 @@ struct nft_rule { /** * struct nft_trans - nf_tables object update in transaction * - * @rcu_head: rcu head to defer release of transaction data * @list: used internally * @msg_type: message type * @ctx: transaction context * @data: internal information related to the transaction */ struct nft_trans { - struct rcu_head rcu_head; struct list_head list; int msg_type; struct nft_ctx ctx; diff --git a/include/net/vxlan.h b/include/net/vxlan.h index d5f59f3fc35d..57cccd0052e5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -8,6 +8,12 @@ #define VNI_HASH_BITS 10 #define VNI_HASH_SIZE (1<<VNI_HASH_BITS) +/* VXLAN protocol header */ +struct vxlanhdr { + __be32 vx_flags; + __be32 vx_vni; +}; + struct vxlan_sock; typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); @@ -45,6 +51,18 @@ int vxlan_xmit_skb(struct vxlan_sock *vs, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); +static inline bool vxlan_gso_check(struct sk_buff *skb) +{ + if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) && + (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + (skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)))) + return false; + + return true; +} + /* IP header + UDP + VXLAN + Ethernet header */ #define VXLAN_HEADROOM (20 + 8 + 8 + 14) /* IPv6 header + UDP + VXLAN + Ethernet header */ diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h index 2883a7a6f9f3..98f2ade0266e 100644 --- a/include/sound/soc-dpcm.h +++ b/include/sound/soc-dpcm.h @@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime { /* state and update */ enum snd_soc_dpcm_update runtime_update; enum snd_soc_dpcm_state state; + + int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */ }; /* can this BE stop and free */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 2b02c9fda790..1cd5eef1fcdd 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group if (!task) { /* - * Per cpu events are removed via an smp call and - * the removal is always successful. + * Per cpu events are removed via an smp call. The removal can + * fail if the CPU is currently offline, but in that case we + * already called __perf_remove_from_context from + * perf_event_exit_cpu. 
*/ cpu_function_call(event->cpu, __perf_remove_from_context, &re); return; @@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu) static void __perf_event_exit_context(void *__info) { - struct remove_event re = { .detach_group = false }; + struct remove_event re = { .detach_group = true }; struct perf_event_context *ctx = __info; perf_pmu_rotate_stop(ctx->pmu); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 1d0af8a2c646..ed8f2cde34c5 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void) if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { utask->state = UTASK_SSTEP_TRAPPED; set_tsk_thread_flag(t, TIF_UPROBE); - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); } } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 240157c13ddc..24beb9bb4c3e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat); EXPORT_PER_CPU_SYMBOL(kernel_cpustat); /* - * Return any ns on the sched_clock that have not yet been accounted in - * @p in case that task is currently running. - * - * Called with task_rq_lock() held on @rq. - */ -static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) -{ - u64 ns = 0; - - /* - * Must be ->curr _and_ ->on_rq. If dequeued, we would - * project cycles that may never be accounted to this - * thread, breaking clock_gettime(). - */ - if (task_current(rq, p) && task_on_rq_queued(p)) { - update_rq_clock(rq); - ns = rq_clock_task(rq) - p->se.exec_start; - if ((s64)ns < 0) - ns = 0; - } - - return ns; -} - -unsigned long long task_delta_exec(struct task_struct *p) -{ - unsigned long flags; - struct rq *rq; - u64 ns = 0; - - rq = task_rq_lock(p, &flags); - ns = do_task_delta_exec(p, rq); - task_rq_unlock(rq, p, &flags); - - return ns; -} - -/* * Return accounted runtime for the task. * In case the task is currently running, return the runtime plus current's * pending runtime that have not been accounted yet. @@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) { unsigned long flags; struct rq *rq; - u64 ns = 0; + u64 ns; #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) /* @@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p) #endif rq = task_rq_lock(p, &flags); - ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); + /* + * Must be ->curr _and_ ->on_rq. If dequeued, we would + * project cycles that may never be accounted to this + * thread, breaking clock_gettime(). + */ + if (task_current(rq, p) && task_on_rq_queued(p)) { + update_rq_clock(rq); + p->sched_class->update_curr(rq); + } + ns = p->se.sum_exec_runtime; task_rq_unlock(rq, p, &flags); return ns; @@ -6368,6 +6339,10 @@ static void sched_init_numa(void) if (!sched_debug()) break; } + + if (!level) + return; + /* * 'level' contains the number of unique distances, excluding the * identity distance node_distance(i,i). @@ -7444,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) put_prev_task(rq, tsk); - tg = container_of(task_css_check(tsk, cpu_cgrp_id, - lockdep_is_held(&tsk->sighand->siglock)), + /* + * All callers are synchronized by task_rq_lock(); we do not use RCU + * which is pointless here. Thus, we pass "true" to task_css_check() + * to prevent lockdep warnings. 
+ tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), struct task_group, css); tg = autogroup_task_group(tsk, tg); tsk->sched_task_group = tg; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 5285332392d5..28fa9d9e9201 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1701,4 +1701,6 @@ const struct sched_class dl_sched_class = { .prio_changed = prio_changed_dl, .switched_from = switched_from_dl, .switched_to = switched_to_dl, + + .update_curr = update_curr_dl, }; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 34baa60f8a7b..ef2b104b254c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq) account_cfs_rq_runtime(cfs_rq, delta_exec); } +static void update_curr_fair(struct rq *rq) +{ + update_curr(cfs_rq_of(&rq->curr->se)); +} + static inline void update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) { @@ -1180,6 +1185,13 @@ static void task_numa_compare(struct task_numa_env *env, raw_spin_unlock_irq(&dst_rq->lock); /* + * Because we have preemption enabled we can get migrated around and + * end up trying to select ourselves (current == env->p) as a swap candidate. + */ + if (cur == env->p) + goto unlock; + + /* * "imp" is the fault differential for the source task between the * source and destination node. Calculate the total differential for * the source task and potential destination task. The more negative @@ -7949,6 +7961,8 @@ const struct sched_class fair_sched_class = { .get_rr_interval = get_rr_interval_fair, + .update_curr = update_curr_fair, + #ifdef CONFIG_FAIR_GROUP_SCHED .task_move_group = task_move_group_fair, #endif diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c index 67ad4e7f506a..c65dac8c97cd 100644 --- a/kernel/sched/idle_task.c +++ b/kernel/sched/idle_task.c @@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task return 0; } +static void update_curr_idle(struct rq *rq) +{ +} + /* * Simple, special scheduling class for the per-CPU idle tasks: */ @@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = { .prio_changed = prio_changed_idle, .switched_to = switched_to_idle, + .update_curr = update_curr_idle, }; diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index d024e6ce30ba..20bca398084a 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = { .prio_changed = prio_changed_rt, .switched_to = switched_to_rt, + + .update_curr = update_curr_rt, }; #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 24156c8434d1..2df8ef067cc5 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1135,6 +1135,8 @@ struct sched_class { unsigned int (*get_rr_interval) (struct rq *rq, struct task_struct *task); + void (*update_curr) (struct rq *rq); + #ifdef CONFIG_FAIR_GROUP_SCHED void (*task_move_group) (struct task_struct *p, int on_rq); #endif diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 67426e529f59..79ffec45a6ac 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task) return 0; } +static void update_curr_stop(struct rq *rq) +{ +} + /* * Simple, special scheduling class for the per-CPU stop tasks: */ @@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = { .prio_changed = prio_changed_stop, .switched_to = switched_to_stop, + .update_curr =
update_curr_stop, }; diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 492b986195d5..a16b67859e2a 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock, *sample = cputime_to_expires(cputime.utime); break; case CPUCLOCK_SCHED: - *sample = cputime.sum_exec_runtime + task_delta_exec(p); + *sample = cputime.sum_exec_runtime; break; } return 0; diff --git a/lib/Makefile b/lib/Makefile index 7512dc978f18..0211d2bd5e17 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -10,7 +10,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ idr.o int_sqrt.o extable.o \ - sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ + sha1.o md5.o irq_regs.o argv_split.o \ proportions.o flex_proportions.o ratelimit.o show_mem.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o @@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ - percpu-refcount.o percpu_ida.o hash.o rhashtable.o + percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o obj-y += string_helpers.o obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o obj-y += kstrtox.o diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 648d79ccf462..c465876c7861 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -813,10 +813,9 @@ static void __br_multicast_send_query(struct net_bridge *br, return; if (port) { - __skb_push(skb, sizeof(struct ethhdr)); skb->dev = port->dev; NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, - dev_queue_xmit); + br_dev_queue_push_xmit); } else { br_multicast_select_own_querier(br, ip, skb); netif_rx(skb); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c16615bfb61e..32e31c299631 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb) case SKB_FCLONE_CLONE: fclones = container_of(skb, struct sk_buff_fclones, skb2); - /* Warning : We must perform the atomic_dec_and_test() before - * setting skb->fclone back to SKB_FCLONE_FREE, otherwise - * skb_clone() could set clone_ref to 2 before our decrement. - * Anyway, if we are going to free the structure, no need to - * rewrite skb->fclone. + /* The clone portion is available for + * fast-cloning again. */ - if (atomic_dec_and_test(&fclones->fclone_ref)) { + skb->fclone = SKB_FCLONE_FREE; + + if (atomic_dec_and_test(&fclones->fclone_ref)) kmem_cache_free(skbuff_fclone_cache, fclones); - } else { - /* The clone portion is available for - * fast-cloning again. - */ - skb->fclone = SKB_FCLONE_FREE; - } break; } } @@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) if (skb->fclone == SKB_FCLONE_ORIG && n->fclone == SKB_FCLONE_FREE) { n->fclone = SKB_FCLONE_CLONE; - /* As our fastclone was free, clone_ref must be 1 at this point. - * We could use atomic_inc() here, but it is faster - * to set the final value. 
- */ - atomic_set(&fclones->fclone_ref, 2); + atomic_inc(&fclones->fclone_ref); } else { if (skb_pfmemalloc(skb)) gfp_mask |= __GFP_MEMALLOC; diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index ca11d283bbeb..93ea80196f0e 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1080,13 +1080,13 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) if (!app) return -EMSGSIZE; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), &itr->app); if (err) { - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return -EMSGSIZE; } } @@ -1097,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) else dcbx = -EOPNOTSUPP; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); nla_nest_end(skb, app); /* get peer info if available */ @@ -1234,7 +1234,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) } /* local app */ - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; @@ -1271,7 +1271,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) else dcbx = -EOPNOTSUPP; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { @@ -1326,7 +1326,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) return 0; dcb_unlock: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); nla_put_failure: return err; } @@ -1762,10 +1762,10 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app) struct dcb_app_type *itr; u8 prio = 0; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio = itr->app.priority; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return prio; } @@ -1789,7 +1789,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and replace */ if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { if (new->priority) @@ -1804,7 +1804,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new) if (new->priority) err = dcb_app_add(new, dev->ifindex); out: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1823,10 +1823,10 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app) struct dcb_app_type *itr; u8 prio = 0; - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) prio |= 1 << itr->app.priority; - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); return prio; } @@ -1850,7 +1850,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) if (dev->dcbnl_ops->getdcbx) event.dcbx = dev->dcbnl_ops->getdcbx(dev); - spin_lock(&dcb_lock); + spin_lock_bh(&dcb_lock); /* Search for existing match and abort if found */ if (dcb_app_lookup(new, dev->ifindex, new->priority)) { err = -EEXIST; @@ -1859,7 +1859,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new) err = dcb_app_add(new, dev->ifindex); out: - spin_unlock(&dcb_lock); + spin_unlock_bh(&dcb_lock); if (!err) call_dcbevent_notifiers(DCB_APP_EVENT, &event); return err; @@ -1882,7 +1882,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del) if (dev->dcbnl_ops->getdcbx) event.dcbx = 
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f2e15738534d..8f7bd56955b0 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
 	else
 		res->tclassid = 0;
 #endif
+
+	if (err == -ESRCH)
+		err = -ENETUNREACH;
+
 	return err;
 }
 EXPORT_SYMBOL_GPL(__fib_lookup);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index fb70e3ecc3e4..bb15d0e03d4f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
 	return scount;
 }

-#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
-
-static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
+static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
 {
 	struct sk_buff *skb;
 	struct rtable *rt;
@@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct flowi4 fl4;
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
+	unsigned int size = mtu;

 	while (1) {
 		skb = alloc_skb(size + hlen + tlen,
@@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 			return NULL;
 	}
 	skb->priority = TC_PRIO_CONTROL;
-	igmp_skb_size(skb) = size;

 	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
 				   0, 0,
@@ -354,6 +352,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)

 	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
+	skb->reserved_tailroom = skb_end_offset(skb) -
+				 min(mtu, skb_end_offset(skb));

 	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
@@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
 	return skb;
 }

-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
-	skb_tailroom(skb)) : 0)
+#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)

 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 	int type, int gdeleted, int sdeleted)
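This igmp.c change and the matching mcast.c change further down replace a per-skb size cookie with skb->reserved_tailroom: the allocation may round the buffer up past the MTU, so the excess tail space is reserved and skb_availroom() then reports only what may still be packed into the datagram. A rough, runnable userspace model of the arithmetic; the names and the fixed numbers are invented for the example:

#include <stdio.h>

/* Model of the skb tail-space bookkeeping: end_offset is what the
 * allocator actually handed back (it may exceed the MTU);
 * reserved_tailroom hides the excess so the packing loop never
 * builds an over-MTU datagram. */
static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int end_offset = 2048;  /* rounded-up allocation */
	unsigned int used = 1200;        /* bytes already consumed */

	unsigned int reserved_tailroom = end_offset - min_u(mtu, end_offset);
	unsigned int availroom = end_offset - used - reserved_tailroom;

	printf("reserved %u bytes, %u still usable\n",
	       reserved_tailroom, availroom);
	return 0;
}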
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index c1023c445920..665de06561cd 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -24,6 +24,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
 	struct nf_nat_range range;
 	unsigned int verdict;

+	memset(&range, 0, sizeof(range));
 	range.flags = priv->flags;

 	verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 88fa2d160685..d107ee246a1d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5231,7 +5231,7 @@ slow_path:
 	if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
 		goto csum_error;

-	if (!th->ack && !th->rst)
+	if (!th->ack && !th->rst && !th->syn)
 		goto discard;

 	/*
@@ -5650,7 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			goto discard;
 	}

-	if (!th->ack && !th->rst)
+	if (!th->ack && !th->rst && !th->syn)
 		goto discard;

 	if (!tcp_validate_incoming(sk, skb, th, 0))
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0171f08325c3..1a01d79b8698 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1439,6 +1439,10 @@ reg_pernet_fail:

 void ip6_mr_cleanup(void)
 {
+	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
+#ifdef CONFIG_IPV6_PIMSM_V2
+	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
+#endif
 	unregister_netdevice_notifier(&ip6_mr_notifier);
 	unregister_pernet_subsys(&ip6mr_net_ops);
 	kmem_cache_destroy(mrt_cachep);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9648de2b6745..ed2c4e400b46 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
 	hdr->daddr = *daddr;
 }

-static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
+static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
 {
 	struct net_device *dev = idev->dev;
 	struct net *net = dev_net(dev);
@@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
 	const struct in6_addr *saddr;
 	int hlen = LL_RESERVED_SPACE(dev);
 	int tlen = dev->needed_tailroom;
+	unsigned int size = mtu + hlen + tlen;
 	int err;
 	u8 ra[8] = { IPPROTO_ICMPV6, 0,
 		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
 		     IPV6_TLV_PADN, 0 };

 	/* we assume size > sizeof(ra) here */
-	size += hlen + tlen;
 	/* limit our allocations to order-0 page */
 	size = min_t(int, size, SKB_MAX_ORDER(0, 0));
 	skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
 		return NULL;

 	skb->priority = TC_PRIO_CONTROL;
+	skb->reserved_tailroom = skb_end_offset(skb) -
+				 min(mtu, skb_end_offset(skb));
 	skb_reserve(skb, hlen);

 	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 	return skb;
 }

-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
-	skb_tailroom(skb)) : 0)
+#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)

 static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 	int type, int gdeleted, int sdeleted, int crsend)
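The two nft_masq hunks (IPv4 above, IPv6 below) fix the same bug: nf_nat_range lives on the stack, only .flags was assigned, and the NAT core then read the remaining fields as stack garbage. Zeroing the whole struct first is the standard cure. A minimal runnable illustration of why partial initialization of a stack struct is dangerous; all names here are invented for the example:

#include <stdio.h>
#include <string.h>

struct fake_range {
	unsigned int flags;
	unsigned int min_proto;   /* consumer reads these too */
	unsigned int max_proto;
};

static void use_range(const struct fake_range *r)
{
	/* A consumer like the NAT core trusts every field. */
	printf("flags=%u proto=[%u,%u]\n",
	       r->flags, r->min_proto, r->max_proto);
}

int main(void)
{
	struct fake_range range;

	/* Without this memset, min_proto/max_proto hold whatever
	 * happened to be on the stack - exactly the nft_masq bug. */
	memset(&range, 0, sizeof(range));
	range.flags = 1;
	use_range(&range);
	return 0;
}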
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 8a7ac685076d..529c119cbb14 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -25,6 +25,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
 	struct nf_nat_range range;
 	unsigned int verdict;

+	memset(&range, 0, sizeof(range));
 	range.flags = priv->flags;

 	verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 91729b807c7d..1b095ca37aa4 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct ipxhdr *ipx = NULL;
 	struct sk_buff *skb;
 	int copied, rc;
+	bool locked = true;

 	lock_sock(sk);
 	/* put the autobinding in */
@@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (sock_flag(sk, SOCK_ZAPPED))
 		goto out;

+	release_sock(sk);
+	locked = false;
 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
 				flags & MSG_DONTWAIT, &rc);
 	if (!skb) {
@@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
 out_free:
 	skb_free_datagram(sk, skb);
 out:
-	release_sock(sk);
+	if (locked)
+		release_sock(sk);
 	return rc;
 }
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index ec24378caaaf..09d9caaec591 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
 		__aligned(__alignof__(struct aead_request));
 	struct aead_request *aead_req = (void *) aead_req_data;

+	if (data_len == 0)
+		return -EINVAL;
+
 	memset(aead_req, 0, sizeof(aead_req_data));

 	sg_init_one(&pt, data, data_len);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index df90ce2db00c..408fd8ab4eef 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -252,19 +252,16 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
 	cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
 	cur_prob = mi->groups[cur_group].rates[cur_idx].probability;

-	tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
-	tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
-	tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
-	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
-
-	while (j > 0 && (cur_thr > tmp_thr ||
-	      (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
-		j--;
+	do {
 		tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
 		tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
 		tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
 		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
-	}
+		if (cur_thr < tmp_thr ||
+		    (cur_thr == tmp_thr && cur_prob <= tmp_prob))
+			break;
+		j--;
+	} while (j > 0);

 	if (j < MAX_THR_RATES - 1) {
 		memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 86f9d76b1464..d259da3ce67a 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1863,6 +1863,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
 	if (*op < IP_SET_OP_VERSION) {
 		/* Check the version at the beginning of operations */
 		struct ip_set_req_version *req_version = data;
+
+		if (*len < sizeof(struct ip_set_req_version)) {
+			ret = -EINVAL;
+			goto done;
+		}
+
 		if (req_version->version != IPSET_PROTOCOL) {
 			ret = -EPROTO;
 			goto done;
 		}
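The ip_set hunk is a classic hardening fix for data copied in from userspace: the handler previously dereferenced req_version->version before checking that the caller actually supplied enough bytes for the structure, so a short buffer meant reading past valid data. Checking the length against sizeof() first is the usual pattern. A small runnable model of the same check; the struct, the protocol constant, and the error values are stand-ins:

#include <stdio.h>

struct req_version {
	unsigned int op;
	unsigned int version;
};

#define DEMO_PROTOCOL 6   /* invented stand-in for IPSET_PROTOCOL */

/* Model of ip_set_sockfn_get(): 'data' holds 'len' bytes copied
 * from the user; reject the request before touching fields that
 * may lie beyond what the caller actually provided. */
static int handle_request(const void *data, unsigned long len)
{
	const struct req_version *req = data;

	if (len < sizeof(struct req_version))
		return -1;                 /* -EINVAL in the kernel */
	if (req->version != DEMO_PROTOCOL)
		return -2;                 /* -EPROTO */
	return 0;
}

int main(void)
{
	unsigned char short_buf[4] = { 0 };  /* too small on purpose */
	struct req_version ok = { .op = 0, .version = DEMO_PROTOCOL };

	printf("short buffer -> %d\n", handle_request(short_buf, sizeof(short_buf)));
	printf("full request -> %d\n", handle_request(&ok, sizeof(ok)));
	return 0;
}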
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 437a3663ad03..bd90bf8107da 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -846,6 +846,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
 		new_skb = skb_realloc_headroom(skb, max_headroom);
 		if (!new_skb)
 			goto error;
+		if (skb->sk)
+			skb_set_owner_w(new_skb, skb->sk);
 		consume_skb(skb);
 		skb = new_skb;
 	}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5016a6929085..2c699757bccf 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -611,12 +611,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 	pr_debug("Confirming conntrack %p\n", ct);
-	/* We have to check the DYING flag inside the lock to prevent
-	   a race against nf_ct_get_next_corpse() possibly called from
-	   user context, else we insert an already 'dead' hash, blocking
-	   further use of that particular connection -JM */
+
+	/* We have to check the DYING flag after unlink to prevent
+	 * a race against nf_ct_get_next_corpse() possibly called from
+	 * user context, else we insert an already 'dead' hash, blocking
+	 * further use of that particular connection -JM.
+	 */
+	nf_ct_del_from_dying_or_unconfirmed_list(ct);

 	if (unlikely(nf_ct_is_dying(ct))) {
+		nf_ct_add_to_dying_list(ct);
 		nf_conntrack_double_unlock(hash, reply_hash);
 		local_bh_enable();
 		return NF_ACCEPT;
@@ -636,8 +640,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 		goto out;

-	nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 11ab4b078f3b..66e8425dbfe7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3484,13 +3484,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
 	}
 }

-/* Schedule objects for release via rcu to make sure no packets are accesing
- * removed rules.
- */
-static void nf_tables_commit_release_rcu(struct rcu_head *rt)
+static void nf_tables_commit_release(struct nft_trans *trans)
 {
-	struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
-
 	switch (trans->msg_type) {
 	case NFT_MSG_DELTABLE:
 		nf_tables_table_destroy(&trans->ctx);
@@ -3612,10 +3607,11 @@ static int nf_tables_commit(struct sk_buff *skb)
 		}
 	}

+	synchronize_rcu();
+
 	list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
 		list_del(&trans->list);
-		trans->ctx.nla = NULL;
-		call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
+		nf_tables_commit_release(trans);
 	}

 	nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
@@ -3623,13 +3619,8 @@
 	return 0;
 }

-/* Schedule objects for release via rcu to make sure no packets are accesing
- * aborted rules.
- */
-static void nf_tables_abort_release_rcu(struct rcu_head *rt)
+static void nf_tables_abort_release(struct nft_trans *trans)
 {
-	struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
-
 	switch (trans->msg_type) {
 	case NFT_MSG_NEWTABLE:
 		nf_tables_table_destroy(&trans->ctx);
@@ -3725,11 +3716,12 @@ static int nf_tables_abort(struct sk_buff *skb)
 		}
 	}

+	synchronize_rcu();
+
 	list_for_each_entry_safe_reverse(trans, next,
 					 &net->nft.commit_list, list) {
 		list_del(&trans->list);
-		trans->ctx.nla = NULL;
-		call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu);
+		nf_tables_abort_release(trans);
 	}

 	return 0;
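The nf_tables change trades per-object call_rcu() callbacks for a single synchronize_rcu() barrier followed by direct teardown, which keeps destruction in process context where it is allowed to sleep. A schematic kernel-style sketch of the two idioms; the demo_* names are invented and this is not tied to the nf_tables data structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_obj {
	struct rcu_head rcu;
	/* ... payload read by RCU readers ... */
};

/* Idiom 1: asynchronous - the free runs later, from softirq
 * context, so the callback must not sleep. */
static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_obj, rcu));
}

static void demo_release_async(struct demo_obj *obj)
{
	call_rcu(&obj->rcu, demo_free_rcu);
}

/* Idiom 2: synchronous - wait once for all readers, then tear
 * down a whole batch in process context (may sleep, simpler). */
static void demo_release_batch(struct demo_obj **objs, int n)
{
	int i;

	synchronize_rcu();
	for (i = 0; i < n; i++)
		kfree(objs[i]);
}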
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 6c5a915cfa75..13c2e17bbe27 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -47,6 +47,8 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = {
 	[NFNLGRP_CONNTRACK_EXP_NEW]	= NFNL_SUBSYS_CTNETLINK_EXP,
 	[NFNLGRP_CONNTRACK_EXP_UPDATE]	= NFNL_SUBSYS_CTNETLINK_EXP,
 	[NFNLGRP_CONNTRACK_EXP_DESTROY]	= NFNL_SUBSYS_CTNETLINK_EXP,
+	[NFNLGRP_NFTABLES]		= NFNL_SUBSYS_NFTABLES,
+	[NFNLGRP_ACCT_QUOTA]		= NFNL_SUBSYS_ACCT,
 };

 void nfnl_lock(__u8 subsys_id)
@@ -464,7 +466,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 static int nfnetlink_bind(int group)
 {
 	const struct nfnetlink_subsystem *ss;
-	int type = nfnl_group2type[group];
+	int type;
+
+	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
+		return -EINVAL;
+
+	type = nfnl_group2type[group];

 	rcu_read_lock();
 	ss = nfnetlink_get_subsys(type);
@@ -514,6 +521,9 @@ static int __init nfnetlink_init(void)
 {
 	int i;

+	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
+		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);
+
 	for (i=0; i<NFNL_SUBSYS_COUNT; i++)
 		mutex_init(&table[i].mutex);
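nfnetlink_bind() takes a group number influenced by userspace and used it directly as an index into nfnl_group2type[]; the fix range-checks it first, and the new init-time loop additionally catches any hole left in the table at build integration time. A runnable model of the same validate-then-index pattern; the enum values and table contents are invented:

#include <stdio.h>

enum { GRP_NONE, GRP_A, GRP_B, GRP_MAX = GRP_B };

static const int group2type[GRP_MAX + 1] = {
	[GRP_A] = 1,
	[GRP_B] = 2,
};

/* Model of nfnetlink_bind(): never index the table with an
 * unchecked, caller-supplied group number. */
static int bind_group(int group)
{
	if (group <= GRP_NONE || group > GRP_MAX)
		return -22;              /* -EINVAL */
	return group2type[group];
}

int main(void)
{
	printf("bind(5) -> %d (rejected)\n", bind_group(5));
	printf("bind(1) -> %d (type)\n", bind_group(GRP_A));
	return 0;
}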
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 9d6d6f60a80f..265e190f2218 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -21,45 +21,17 @@
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/nf_tables.h>

-static const struct {
-	const char *name;
-	u8 type;
-} table_to_chaintype[] = {
-	{ "filter",	NFT_CHAIN_T_DEFAULT },
-	{ "raw",	NFT_CHAIN_T_DEFAULT },
-	{ "security",	NFT_CHAIN_T_DEFAULT },
-	{ "mangle",	NFT_CHAIN_T_ROUTE },
-	{ "nat",	NFT_CHAIN_T_NAT },
-	{ },
-};
-
-static int nft_compat_table_to_chaintype(const char *table)
-{
-	int i;
-
-	for (i = 0; table_to_chaintype[i].name != NULL; i++) {
-		if (strcmp(table_to_chaintype[i].name, table) == 0)
-			return table_to_chaintype[i].type;
-	}
-
-	return -1;
-}
-
 static int nft_compat_chain_validate_dependency(const char *tablename,
 						const struct nft_chain *chain)
 {
-	enum nft_chain_type type;
 	const struct nft_base_chain *basechain;

 	if (!tablename || !(chain->flags & NFT_BASE_CHAIN))
 		return 0;

-	type = nft_compat_table_to_chaintype(tablename);
-	if (type < 0)
-		return -EINVAL;
-
 	basechain = nft_base_chain(chain);
-	if (basechain->type->type != type)
+	if (strcmp(tablename, "nat") == 0 &&
+	    basechain->type->type != NFT_CHAIN_T_NAT)
 		return -EINVAL;

 	return 0;
@@ -117,7 +89,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
 			   struct xt_target *target, void *info,
 			   union nft_entry *entry, u8 proto, bool inv)
 {
-	par->net	= &init_net;
+	par->net	= ctx->net;
 	par->table	= ctx->table->name;
 	switch (ctx->afi->family) {
 	case AF_INET:
@@ -324,7 +296,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
 			  struct xt_match *match, void *info,
 			  union nft_entry *entry, u8 proto, bool inv)
 {
-	par->net	= &init_net;
+	par->net	= ctx->net;
 	par->table	= ctx->table->name;
 	switch (ctx->afi->family) {
 	case AF_INET:
@@ -374,7 +346,7 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	union nft_entry e = {};
 	int ret;

-	ret = nft_compat_chain_validate_dependency(match->name, ctx->chain);
+	ret = nft_compat_chain_validate_dependency(match->table, ctx->chain);
 	if (ret < 0)
 		goto err;
@@ -448,7 +420,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
 		if (!(hook_mask & match->hooks))
 			return -EINVAL;

-		ret = nft_compat_chain_validate_dependency(match->name,
+		ret = nft_compat_chain_validate_dependency(match->table,
 							   ctx->chain);
 		if (ret < 0)
 			return ret;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 006886dbee36..8c4229b11c34 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -246,11 +246,11 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 {
 	int transport_len = skb->len - skb_transport_offset(skb);

-	if (l4_proto == IPPROTO_TCP) {
+	if (l4_proto == NEXTHDR_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
 						  addr, new_addr, 1);
-	} else if (l4_proto == IPPROTO_UDP) {
+	} else if (l4_proto == NEXTHDR_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr))) {
 			struct udphdr *uh = udp_hdr(skb);
@@ -261,6 +261,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 				uh->check = CSUM_MANGLED_0;
 			}
 		}
+	} else if (l4_proto == NEXTHDR_ICMP) {
+		if (likely(transport_len >= sizeof(struct icmp6hdr)))
+			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
+						  skb, addr, new_addr, 1);
 	}
 }
@@ -722,8 +726,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,

 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, key, a);
-			if (unlikely(err)) /* skb already freed. */
-				return err;
 			break;
 		}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e6d7255183eb..f9e556b56086 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1265,7 +1265,7 @@ static size_t ovs_dp_cmd_msg_size(void)
 	return msgsize;
 }

-/* Called with ovs_mutex or RCU read lock. */
+/* Called with ovs_mutex. */
 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 				u32 portid, u32 seq, u32 flags, u8 cmd)
 {
@@ -1555,7 +1555,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	if (!reply)
 		return -ENOMEM;

-	rcu_read_lock();
+	ovs_lock();
 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
 	if (IS_ERR(dp)) {
 		err = PTR_ERR(dp);
@@ -1564,12 +1564,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
 	err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
 				   info->snd_seq, 0, OVS_DP_CMD_NEW);
 	BUG_ON(err < 0);
-	rcu_read_unlock();
+	ovs_unlock();

 	return genlmsg_reply(reply, info);

 err_unlock_free:
-	rcu_read_unlock();
+	ovs_unlock();
 	kfree_skb(reply);
 	return err;
 }
@@ -1581,8 +1581,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 	int skip = cb->args[0];
 	int i = 0;

-	rcu_read_lock();
-	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
+	ovs_lock();
+	list_for_each_entry(dp, &ovs_net->dps, list_node) {
 		if (i >= skip &&
 		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
 					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1590,7 +1590,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			break;
 		i++;
 	}
-	rcu_read_unlock();
+	ovs_unlock();

 	cb->args[0] = i;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 939bcb32100f..089b195c064a 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -145,7 +145,7 @@ static bool match_validate(const struct sw_flow_match *match,
 	if (match->key->eth.type == htons(ETH_P_ARP)
 			|| match->key->eth.type == htons(ETH_P_RARP)) {
 		key_expected |= 1 << OVS_KEY_ATTR_ARP;
-		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+		if (match->mask && (match->mask->key.tp.src == htons(0xff)))
 			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
 	}
@@ -689,6 +689,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
 				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
 			return -EINVAL;
 		}
+
+		if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
+			OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n",
+				  ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
+			return -EINVAL;
+		}
+
 		SW_FLOW_KEY_PUT(match, ipv6.label, ipv6_key->ipv6_label,
 				is_mask);
 		SW_FLOW_KEY_PUT(match, ip.proto,
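The flow_netlink hunk rejects IPv6 flow labels with bits set above the low 20: the label occupies the low 20 bits of a big-endian 32-bit word, so masking with htonl(0xFFF00000) isolates exactly the illegal bits. A runnable demonstration of the mask arithmetic, done in host order here for simplicity (so the mask is applied directly, without htonl):

#include <stdio.h>
#include <stdint.h>

/* An IPv6 flow label occupies the low 20 bits of a 32-bit field;
 * anything above bit 19 is out of range. */
static int label_ok(uint32_t label)
{
	return (label & 0xFFF00000u) == 0;
}

int main(void)
{
	printf("0x000FFFFF ok? %d (max legal label)\n", label_ok(0x000FFFFFu));
	printf("0x00100000 ok? %d (bit 20 set)\n", label_ok(0x00100000u));
	return 0;
}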
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 172395465e8a..8fea1b86df25 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4520,6 +4520,8 @@ static const struct hda_fixup alc269_fixups[] = {
 	[ALC269_FIXUP_HEADSET_MODE] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_headset_mode,
+		.chained = true,
+		.chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
 	},
 	[ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
 		.type = HDA_FIXUP_FUNC,
@@ -4709,6 +4711,8 @@ static const struct hda_fixup alc269_fixups[] = {
 	[ALC255_FIXUP_HEADSET_MODE] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_headset_mode_alc255,
+		.chained = true,
+		.chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
 	},
 	[ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
 		.type = HDA_FIXUP_FUNC,
@@ -4744,8 +4748,6 @@ static const struct hda_fixup alc269_fixups[] = {
 	[ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_dell_wmi,
-		.chained_before = true,
-		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
 	},
 	[ALC282_FIXUP_ASPIRE_V5_PINS] = {
 		.type = HDA_FIXUP_PINS,
@@ -4783,10 +4785,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
-	SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
 	SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
-	SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
 	SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index cee51ae177c1..c40428f25ba5 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -46,6 +46,7 @@ static struct i2c_driver cs42l51_i2c_driver = {
 	.driver = {
 		.name = "cs42l51",
 		.owner = THIS_MODULE,
+		.of_match_table = cs42l51_of_match,
 	},
 	.probe = cs42l51_i2c_probe,
 	.remove = cs42l51_i2c_remove,
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 09488d97de60..669c38fc3034 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -558,11 +558,13 @@ error:
 }
 EXPORT_SYMBOL_GPL(cs42l51_probe);

-static const struct of_device_id cs42l51_of_match[] = {
+const struct of_device_id cs42l51_of_match[] = {
 	{ .compatible = "cirrus,cs42l51", },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, cs42l51_of_match);
+EXPORT_SYMBOL_GPL(cs42l51_of_match);
+
 MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
 MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs42l51.h b/sound/soc/codecs/cs42l51.h
index 8c55bf384bc6..0ca805492ac4 100644
--- a/sound/soc/codecs/cs42l51.h
+++ b/sound/soc/codecs/cs42l51.h
@@ -22,6 +22,7 @@ struct device;

 extern const struct regmap_config cs42l51_regmap;
 int cs42l51_probe(struct device *dev, struct regmap *regmap);
+extern const struct of_device_id cs42l51_of_match[];

 #define CS42L51_CHIP_ID			0x1B
 #define CS42L51_CHIP_REV_A		0x00
diff --git a/sound/soc/codecs/es8328-i2c.c b/sound/soc/codecs/es8328-i2c.c
index aae410d122ee..2d05b5d3a6ce 100644
--- a/sound/soc/codecs/es8328-i2c.c
+++ b/sound/soc/codecs/es8328-i2c.c
@@ -19,7 +19,7 @@
 #include "es8328.h"

 static const struct i2c_device_id es8328_id[] = {
-	{ "everest,es8328", 0 },
+	{ "es8328", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, es8328_id);
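The cs42l51 hunks show the usual recipe for sharing an OF match table between a codec core module and its I2C glue module: drop static, export the symbol, and declare it extern in the shared header so the glue driver can point .of_match_table at it. A condensed kernel-style sketch of the three pieces under invented demo_* names (the file split is indicated by comments; this is not meant to build on its own):

/* core.c - owns and exports the table */
#include <linux/mod_devicetable.h>
#include <linux/module.h>

const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-codec", },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);
EXPORT_SYMBOL_GPL(demo_of_match);

/* core.h - shared declaration */
extern const struct of_device_id demo_of_match[];

/* glue-i2c.c - reuses the table for DT probing */
static struct i2c_driver demo_i2c_driver = {
	.driver = {
		.name = "demo-codec",
		.of_match_table = demo_of_match,
	},
	/* .probe / .remove omitted in this sketch */
};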
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index d519294f57c7..1229554f1464 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1941,13 +1941,13 @@ static int max98090_dai_set_sysclk(struct snd_soc_dai *dai,
	 *		 0x02 (when master clk is 20MHz to 40MHz)..
	 *		 0x03 (when master clk is 40MHz to 60MHz)..
	 */
-	if ((freq >= 10000000) && (freq < 20000000)) {
+	if ((freq >= 10000000) && (freq <= 20000000)) {
 		snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
 			M98090_PSCLK_DIV1);
-	} else if ((freq >= 20000000) && (freq < 40000000)) {
+	} else if ((freq > 20000000) && (freq <= 40000000)) {
 		snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
 			M98090_PSCLK_DIV2);
-	} else if ((freq >= 40000000) && (freq < 60000000)) {
+	} else if ((freq > 40000000) && (freq <= 60000000)) {
 		snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
 			M98090_PSCLK_DIV4);
 	} else {
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 3fb83bf09768..d16331e0b64d 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -139,6 +139,7 @@ static const struct reg_default rt5645_reg[] = {
 	{ 0x76, 0x000a },
 	{ 0x77, 0x0c00 },
 	{ 0x78, 0x0000 },
+	{ 0x79, 0x0123 },
 	{ 0x80, 0x0000 },
 	{ 0x81, 0x0000 },
 	{ 0x82, 0x0000 },
@@ -334,6 +335,7 @@ static bool rt5645_readable_register(struct device *dev, unsigned int reg)
 	case RT5645_DMIC_CTRL2:
 	case RT5645_TDM_CTRL_1:
 	case RT5645_TDM_CTRL_2:
+	case RT5645_TDM_CTRL_3:
 	case RT5645_GLB_CLK:
 	case RT5645_PLL_CTRL1:
 	case RT5645_PLL_CTRL2:
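The max98090 change makes the three PSCLK divider windows inclusive at the documented edges, so a master clock of exactly 20 MHz or 40 MHz now selects a divider instead of falling through to the error branch, and the windows no longer overlap. A runnable model of the corrected boundary logic; the returned divider values are illustrative:

#include <stdio.h>

/* Model of the fixed max98090 prescaler selection: each window
 * includes its upper edge and excludes the next window's lower
 * edge. Returns the divider, or -1 for an unsupported clock. */
static int psclk_div(unsigned int freq)
{
	if (freq >= 10000000 && freq <= 20000000)
		return 1;
	else if (freq > 20000000 && freq <= 40000000)
		return 2;
	else if (freq > 40000000 && freq <= 60000000)
		return 4;
	return -1;
}

int main(void)
{
	printf("20 MHz -> div %d\n", psclk_div(20000000)); /* 1, not -1 */
	printf("40 MHz -> div %d\n", psclk_div(40000000)); /* 2 */
	printf("70 MHz -> div %d\n", psclk_div(70000000)); /* -1 */
	return 0;
}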
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index ba9d9b4d4857..9bd8b4f63303 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -100,18 +100,18 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0x4c, 0x5380 },
 	{ 0x4f, 0x0073 },
 	{ 0x52, 0x00d3 },
-	{ 0x53, 0xf0f0 },
+	{ 0x53, 0xf000 },
 	{ 0x61, 0x0000 },
 	{ 0x62, 0x0001 },
 	{ 0x63, 0x00c3 },
 	{ 0x64, 0x0000 },
-	{ 0x65, 0x0000 },
+	{ 0x65, 0x0001 },
 	{ 0x66, 0x0000 },
 	{ 0x6f, 0x8000 },
 	{ 0x70, 0x8000 },
 	{ 0x71, 0x8000 },
 	{ 0x72, 0x8000 },
-	{ 0x73, 0x1110 },
+	{ 0x73, 0x7770 },
 	{ 0x74, 0x0e00 },
 	{ 0x75, 0x1505 },
 	{ 0x76, 0x0015 },
@@ -125,21 +125,21 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0x83, 0x0000 },
 	{ 0x84, 0x0000 },
 	{ 0x85, 0x0000 },
-	{ 0x86, 0x0008 },
+	{ 0x86, 0x0004 },
 	{ 0x87, 0x0000 },
 	{ 0x88, 0x0000 },
 	{ 0x89, 0x0000 },
 	{ 0x8a, 0x0000 },
 	{ 0x8b, 0x0000 },
-	{ 0x8c, 0x0007 },
+	{ 0x8c, 0x0003 },
 	{ 0x8d, 0x0000 },
 	{ 0x8e, 0x0004 },
 	{ 0x8f, 0x1100 },
 	{ 0x90, 0x0646 },
 	{ 0x91, 0x0c06 },
 	{ 0x93, 0x0000 },
-	{ 0x94, 0x0000 },
-	{ 0x95, 0x0000 },
+	{ 0x94, 0x1270 },
+	{ 0x95, 0x1000 },
 	{ 0x97, 0x0000 },
 	{ 0x98, 0x0000 },
 	{ 0x99, 0x0000 },
@@ -150,11 +150,11 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0x9e, 0x0400 },
 	{ 0xae, 0x7000 },
 	{ 0xaf, 0x0000 },
-	{ 0xb0, 0x6000 },
+	{ 0xb0, 0x7000 },
 	{ 0xb1, 0x0000 },
 	{ 0xb2, 0x0000 },
 	{ 0xb3, 0x001f },
-	{ 0xb4, 0x2206 },
+	{ 0xb4, 0x220c },
 	{ 0xb5, 0x1f00 },
 	{ 0xb6, 0x0000 },
 	{ 0xb7, 0x0000 },
@@ -171,25 +171,25 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0xcf, 0x1813 },
 	{ 0xd0, 0x0690 },
 	{ 0xd1, 0x1c17 },
-	{ 0xd3, 0xb320 },
+	{ 0xd3, 0xa220 },
 	{ 0xd4, 0x0000 },
 	{ 0xd6, 0x0400 },
 	{ 0xd9, 0x0809 },
 	{ 0xda, 0x0000 },
 	{ 0xdb, 0x0001 },
 	{ 0xdc, 0x0049 },
-	{ 0xdd, 0x0009 },
+	{ 0xdd, 0x0024 },
 	{ 0xe6, 0x8000 },
 	{ 0xe7, 0x0000 },
-	{ 0xec, 0xb300 },
+	{ 0xec, 0xa200 },
 	{ 0xed, 0x0000 },
-	{ 0xee, 0xb300 },
+	{ 0xee, 0xa200 },
 	{ 0xef, 0x0000 },
 	{ 0xf8, 0x0000 },
 	{ 0xf9, 0x0000 },
 	{ 0xfa, 0x8010 },
 	{ 0xfb, 0x0033 },
-	{ 0xfc, 0x0080 },
+	{ 0xfc, 0x0100 },
 };

 static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
@@ -1877,6 +1877,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
 	{ "DAC1 MIXR", "DAC1 Switch", "DAC1 R Mux" },
 	{ "DAC1 MIXR", NULL, "DAC Stereo1 Filter" },

+	{ "DAC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll },
+	{ "DAC Mono Left Filter", NULL, "PLL1", is_sys_clk_from_pll },
+	{ "DAC Mono Right Filter", NULL, "PLL1", is_sys_clk_from_pll },
+
 	{ "DAC MIX", NULL, "DAC1 MIXL" },
 	{ "DAC MIX", NULL, "DAC1 MIXR" },
@@ -1926,14 +1930,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {

 	{ "DAC L1", NULL, "DAC L1 Power" },
 	{ "DAC L1", NULL, "Stereo DAC MIXL" },
-	{ "DAC L1", NULL, "PLL1", is_sys_clk_from_pll },

 	{ "DAC R1", NULL, "DAC R1 Power" },
 	{ "DAC R1", NULL, "Stereo DAC MIXR" },
-	{ "DAC R1", NULL, "PLL1", is_sys_clk_from_pll },

 	{ "DAC L2", NULL, "Mono DAC MIXL" },
-	{ "DAC L2", NULL, "PLL1", is_sys_clk_from_pll },

 	{ "DAC R2", NULL, "Mono DAC MIXR" },
-	{ "DAC R2", NULL, "PLL1", is_sys_clk_from_pll },

 	{ "OUT MIXL", "BST1 Switch", "BST1" },
 	{ "OUT MIXL", "INL Switch", "INL VOL" },
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 6bb77d76561b..dab9b15304af 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1299,8 +1299,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)

 	/* enable small pop, introduce 400ms delay in turning off */
 	snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
-			    SGTL5000_SMALL_POP,
-			    SGTL5000_SMALL_POP);
+			    SGTL5000_SMALL_POP, 1);

 	/* disable short cut detector */
 	snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index 2f8c88931f69..bd7a344bf8c5 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
 #define SGTL5000_BIAS_CTRL_MASK			0x000e
 #define SGTL5000_BIAS_CTRL_SHIFT		1
 #define SGTL5000_BIAS_CTRL_WIDTH		3
-#define SGTL5000_SMALL_POP			0x0001
+#define SGTL5000_SMALL_POP			0

 /*
 * SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f412a9911a75..67124783558a 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1355,6 +1355,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
 			file, blocks, pos - firmware->size);

 out_fw:
+	regmap_async_complete(regmap);
 	release_firmware(firmware);
 	wm_adsp_buf_free(&buf_list);
 out:
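The wm_adsp fix drains outstanding asynchronous regmap writes before the firmware buffers backing them are freed. A hedged kernel-style sketch of the general idiom; demo_download, the register address, and the error handling are invented, and the fragment is not meant to build standalone:

#include <linux/regmap.h>
#include <linux/firmware.h>

/* Sketch: firmware data is streamed to the device with
 * regmap_raw_write_async(); the buffers must stay alive until
 * regmap_async_complete() confirms the writes have finished. */
static int demo_download(struct regmap *map, const struct firmware *fw)
{
	int ret;

	ret = regmap_raw_write_async(map, 0x1000, fw->data, fw->size);

	/* Barrier: wait for all queued async writes before anyone
	 * releases fw - mirroring the wm_adsp out_fw: ordering. */
	regmap_async_complete(map);
	return ret;
}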
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index ed866e9a2928..9deabdd2b1a2 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -684,12 +684,38 @@ static bool fsl_asrc_writeable_reg(struct device *dev, unsigned int reg)
 	}
 }

+static struct reg_default fsl_asrc_reg[] = {
+	{ REG_ASRCTR, 0x0000 }, { REG_ASRIER, 0x0000 },
+	{ REG_ASRCNCR, 0x0000 }, { REG_ASRCFG, 0x0000 },
+	{ REG_ASRCSR, 0x0000 }, { REG_ASRCDR1, 0x0000 },
+	{ REG_ASRCDR2, 0x0000 }, { REG_ASRSTR, 0x0000 },
+	{ REG_ASRRA, 0x0000 }, { REG_ASRRB, 0x0000 },
+	{ REG_ASRRC, 0x0000 }, { REG_ASRPM1, 0x0000 },
+	{ REG_ASRPM2, 0x0000 }, { REG_ASRPM3, 0x0000 },
+	{ REG_ASRPM4, 0x0000 }, { REG_ASRPM5, 0x0000 },
+	{ REG_ASRTFR1, 0x0000 }, { REG_ASRCCR, 0x0000 },
+	{ REG_ASRDIA, 0x0000 }, { REG_ASRDOA, 0x0000 },
+	{ REG_ASRDIB, 0x0000 }, { REG_ASRDOB, 0x0000 },
+	{ REG_ASRDIC, 0x0000 }, { REG_ASRDOC, 0x0000 },
+	{ REG_ASRIDRHA, 0x0000 }, { REG_ASRIDRLA, 0x0000 },
+	{ REG_ASRIDRHB, 0x0000 }, { REG_ASRIDRLB, 0x0000 },
+	{ REG_ASRIDRHC, 0x0000 }, { REG_ASRIDRLC, 0x0000 },
+	{ REG_ASR76K, 0x0A47 }, { REG_ASR56K, 0x0DF3 },
+	{ REG_ASRMCRA, 0x0000 }, { REG_ASRFSTA, 0x0000 },
+	{ REG_ASRMCRB, 0x0000 }, { REG_ASRFSTB, 0x0000 },
+	{ REG_ASRMCRC, 0x0000 }, { REG_ASRFSTC, 0x0000 },
+	{ REG_ASRMCR1A, 0x0000 }, { REG_ASRMCR1B, 0x0000 },
+	{ REG_ASRMCR1C, 0x0000 },
+};
+
 static const struct regmap_config fsl_asrc_regmap_config = {
 	.reg_bits = 32,
 	.reg_stride = 4,
 	.val_bits = 32,

 	.max_register = REG_ASRMCR1C,

+	.reg_defaults = fsl_asrc_reg,
+	.num_reg_defaults = ARRAY_SIZE(fsl_asrc_reg),
 	.readable_reg = fsl_asrc_readable_reg,
 	.volatile_reg = fsl_asrc_volatile_reg,
 	.writeable_reg = fsl_asrc_writeable_reg,
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index f373e37f8305..c74ba37f862c 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -154,8 +154,10 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
 		while (val) {
 			regmap_read(i2s->regmap, I2S_CLR, &val);
 			retry--;
-			if (!retry)
+			if (!retry) {
 				dev_warn(i2s->dev, "fail to clear\n");
+				break;
+			}
 		}
 	}
 }
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 0acf5d0eed53..72118a77dd5b 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -110,6 +110,7 @@ static const struct of_device_id snow_of_match[] = {
 	{ .compatible = "google,snow-audio-max98095", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, snow_of_match);

 static struct platform_driver snow_driver = {
 	.driver = {
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 66fddec9543d..88e5df474ccf 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1711,8 +1711,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
 static struct snd_pcm_hardware fsi_pcm_hardware = {
 	.info =		SNDRV_PCM_INFO_INTERLEAVED	|
 			SNDRV_PCM_INFO_MMAP		|
-			SNDRV_PCM_INFO_MMAP_VALID	|
-			SNDRV_PCM_INFO_PAUSE,
+			SNDRV_PCM_INFO_MMAP_VALID,
 	.buffer_bytes_max	= 64 * 1024,
 	.period_bytes_min	= 32,
 	.period_bytes_max	= 8192,
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1922ec57d10a..70042197f9e2 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -886,8 +886,7 @@ static int rsnd_dai_probe(struct platform_device *pdev,
 static struct snd_pcm_hardware rsnd_pcm_hardware = {
 	.info =		SNDRV_PCM_INFO_INTERLEAVED	|
 			SNDRV_PCM_INFO_MMAP		|
-			SNDRV_PCM_INFO_MMAP_VALID	|
-			SNDRV_PCM_INFO_PAUSE,
+			SNDRV_PCM_INFO_MMAP_VALID,
 	.buffer_bytes_max	= 64 * 1024,
 	.period_bytes_min	= 32,
 	.period_bytes_max	= 8192,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4c8f8a23a0e9..b60ff56ebc0f 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -884,7 +884,7 @@ static struct snd_soc_dai *snd_soc_find_dai(
 	list_for_each_entry(component, &component_list, list) {
 		if (dlc->of_node && component->dev->of_node != dlc->of_node)
 			continue;
-		if (dlc->name && strcmp(dev_name(component->dev), dlc->name))
+		if (dlc->name && strcmp(component->name, dlc->name))
 			continue;
 		list_for_each_entry(dai, &component->dai_list, list) {
 			if (dlc->dai_name && strcmp(dai->name, dlc->dai_name))
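The rockchip_i2s hunk fixes a retry loop that warned when the retry budget ran out but then kept spinning, because nothing ever exited the loop; adding break makes the budget real. A runnable model of a correctly bounded poll loop, where the fake "register" deliberately never clears so the bail-out path is exercised:

#include <stdio.h>

/* Stand-in for regmap_read() on a status register that, in this
 * demo, stubbornly stays non-zero. */
static unsigned int read_status(void)
{
	return 1;
}

int main(void)
{
	int retry = 10;
	unsigned int val = read_status();

	while (val) {
		val = read_status();
		retry--;
		if (!retry) {
			fprintf(stderr, "fail to clear\n");
			break;  /* without this, the loop never ends */
		}
	}
	printf("gave up with retry=%d\n", retry);
	return 0;
}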
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 002311afdeaa..57277dd79e11 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1522,13 +1522,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
 		dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
 }

+static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
+
+/* Set FE's runtime_update state; the state is protected via PCM stream lock
+ * for avoiding the race with trigger callback.
+ * If the state is unset and a trigger is pending from the previous operation,
+ * process the pending trigger action here.
+ */
+static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
+				     int stream, enum snd_soc_dpcm_update state)
+{
+	struct snd_pcm_substream *substream =
+		snd_soc_dpcm_get_substream(fe, stream);
+
+	snd_pcm_stream_lock_irq(substream);
+	if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
+		dpcm_fe_dai_do_trigger(substream,
+				       fe->dpcm[stream].trigger_pending - 1);
+		fe->dpcm[stream].trigger_pending = 0;
+	}
+	fe->dpcm[stream].runtime_update = state;
+	snd_pcm_stream_unlock_irq(substream);
+}
+
 static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
 {
 	struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
 	struct snd_pcm_runtime *runtime = fe_substream->runtime;
 	int stream = fe_substream->stream, ret = 0;

-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

 	ret = dpcm_be_dai_startup(fe, fe_substream->stream);
 	if (ret < 0) {
@@ -1550,13 +1573,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
 	dpcm_set_fe_runtime(fe_substream);
 	snd_pcm_limit_hw_rates(runtime);

-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 	return 0;

 unwind:
 	dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
 be_err:
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 	return ret;
 }
@@ -1603,7 +1626,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
 	struct snd_soc_pcm_runtime *fe = substream->private_data;
 	int stream = substream->stream;

-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

 	/* shutdown the BEs */
 	dpcm_be_dai_shutdown(fe, substream->stream);
@@ -1617,7 +1640,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
 	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);

 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 	return 0;
 }
@@ -1665,7 +1688,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
 	int err, stream = substream->stream;

 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

 	dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
@@ -1680,7 +1703,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
 	err = dpcm_be_dai_hw_free(fe, stream);

 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);

 	mutex_unlock(&fe->card->mutex);
 	return 0;
@@ -1773,7 +1796,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
 	int ret, stream = substream->stream;

 	mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

 	memcpy(&fe->dpcm[substream->stream].hw_params, params,
 	       sizeof(struct snd_pcm_hw_params));
@@ -1796,7 +1819,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;

 out:
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 	mutex_unlock(&fe->card->mutex);
 	return ret;
 }
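The dpcm_set_fe_update_state()/trigger_pending pair implements a deferred-event pattern: a trigger arriving while an update is in flight is parked as cmd + 1 (so 0 can mean "nothing pending" even though a real command value may be 0) and replayed when the state drops back to UPDATE_NO. A runnable sketch of just that encoding, with invented names and no locking:

#include <stdio.h>

enum { UPDATE_NO, UPDATE_FE };

static int state = UPDATE_NO;
static int trigger_pending;   /* 0 = none, otherwise cmd + 1 */

static void do_trigger(int cmd)
{
	printf("trigger cmd %d executed\n", cmd);
}

static void trigger(int cmd)
{
	if (state != UPDATE_NO) {
		/* Busy: park the command, biased by one so that a
		 * legitimate cmd of 0 is distinguishable from "none". */
		trigger_pending = cmd + 1;
		return;
	}
	do_trigger(cmd);
}

static void set_update_state(int new_state)
{
	if (new_state == UPDATE_NO && trigger_pending) {
		do_trigger(trigger_pending - 1);  /* replay deferred cmd */
		trigger_pending = 0;
	}
	state = new_state;
}

int main(void)
{
	set_update_state(UPDATE_FE);
	trigger(0);                  /* deferred */
	set_update_state(UPDATE_NO); /* replayed here */
	return 0;
}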
@@ -1910,7 +1933,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
 }
 EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);

-static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	struct snd_soc_pcm_runtime *fe = substream->private_data;
 	int stream = substream->stream, ret;
@@ -1984,6 +2007,23 @@ out:
 	return ret;
 }

+static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	struct snd_soc_pcm_runtime *fe = substream->private_data;
+	int stream = substream->stream;
+
+	/* if FE's runtime_update is already set, we're in a race;
+	 * process this trigger later at exit
+	 */
+	if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
+		fe->dpcm[stream].trigger_pending = cmd + 1;
+		return 0; /* delayed, assuming it's successful */
+	}
+
+	/* we're alone, let's trigger */
+	return dpcm_fe_dai_do_trigger(substream, cmd);
+}
+
 int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	struct snd_soc_dpcm *dpcm;
@@ -2027,7 +2067,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)

 	dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);

-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

 	/* there is no point preparing this FE if there are no BEs */
 	if (list_empty(&fe->dpcm[stream].be_clients)) {
@@ -2054,7 +2094,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
 	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;

 out:
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
 	mutex_unlock(&fe->card->mutex);

 	return ret;
@@ -2201,11 +2241,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	int ret;

-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
 	ret = dpcm_run_update_startup(fe, stream);
 	if (ret < 0)
 		dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);

 	return ret;
 }
@@ -2214,11 +2254,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
 {
 	int ret;

-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
 	ret = dpcm_run_update_shutdown(fe, stream);
 	if (ret < 0)
 		dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
-	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
+	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);

 	return ret;
 }
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 2e4a9dbc51fa..6e354d326858 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2033,10 +2033,11 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
 	cval->res = 1;
 	cval->initialized = 1;

-	if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
-		cval->control = UAC2_CX_CLOCK_SELECTOR;
-	else
+	if (state->mixer->protocol == UAC_VERSION_1)
 		cval->control = 0;
+	else /* UAC_VERSION_2 */
+		cval->control = (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) ?
+			UAC2_CX_CLOCK_SELECTOR : UAC2_SU_SELECTOR;

 	namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL);
 	if (!namelist) {
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index d2aa45a8d895..a5941f80fc5b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1146,6 +1146,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
 	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) &&
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		mdelay(20);
+
+	/* Marantz/Denon devices with USB DAC functionality need a delay
+	 * after each class compliant request
+	 */
+	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) &&
+	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+
+		switch (le16_to_cpu(dev->descriptor.idProduct)) {
+		case 0x3005: /* Marantz HD-DAC1 */
+		case 0x3006: /* Marantz SA-14S1 */
+			mdelay(20);
+			break;
+		}
+	}
 }

 /*